--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/netwerk/sctp/src/netinet/sctp_indata.c	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,5639 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 258228 2013-11-16 16:09:09Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_input.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_timer.h>
+
+
+/*
+ * NOTES: On the outbound side of things I need to check the sack timer to
+ * see if I should generate a sack into the chunk queue (if I have data to
+ * send, that is, and will be sending it) .. for bundling.
+ *
+ * The callback in sctp_usrreq.c will get called when the socket is read from.
+ * This will cause sctp_service_queues() to get called on the top entry in
+ * the list.
+ */
+
+void
+sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
+}
+
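+/*
+ * A sketch of the accounting done below: start from the free socket-buffer
+ * space, subtract everything this association still holds on its reassembly
+ * and in-stream queues (each queued chunk is also charged MSIZE of mbuf
+ * overhead), then subtract the control overhead in my_rwnd_control_len.
+ * Once control overhead dominates, exactly 1 byte is advertised instead of
+ * a misleading small window: receiver-side silly window syndrome avoidance.
+ */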
+/* Calculate what the rwnd would be */
+uint32_t
+sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+	uint32_t calc = 0;
+
+	/*
+	 * This is really set wrong with respect to a 1-2-m socket, since
+	 * the sb_cc is the count that everyone has put up. When we re-write
+	 * sctp_soreceive then we will fix this so that ONLY this
+	 * association's data is taken into account.
+	 */
+	if (stcb->sctp_socket == NULL)
+		return (calc);
+
+	if (stcb->asoc.sb_cc == 0 &&
+	    asoc->size_on_reasm_queue == 0 &&
+	    asoc->size_on_all_streams == 0) {
+		/* Full rwnd granted */
+		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
+		return (calc);
+	}
+	/* get actual space */
+	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
+
+	/*
+	 * Take out what has NOT been put on the socket queue and that we
+	 * still hold for putting up.
+	 */
+	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
+	    asoc->cnt_on_reasm_queue * MSIZE));
+	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
+	    asoc->cnt_on_all_streams * MSIZE));
+
+	if (calc == 0) {
+		/* out of space */
+		return (calc);
+	}
+
+	/* what is the overhead of all these rwnd's */
+	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
+	/*
+	 * If the window gets too small due to ctrl-stuff, reduce it
+	 * to 1, even if it is 0. SWS engaged
+	 */
+	if (calc < stcb->asoc.my_rwnd_control_len) {
+		calc = 1;
+	}
+	return (calc);
+}
+
+
+
+/*
+ * Build out our readq entry based on the incoming packet.
+ */
+struct sctp_queued_to_read *
+sctp_build_readq_entry(struct sctp_tcb *stcb,
+    struct sctp_nets *net,
+    uint32_t tsn, uint32_t ppid,
+    uint32_t context, uint16_t stream_no,
+    uint16_t stream_seq, uint8_t flags,
+    struct mbuf *dm)
+{
+	struct sctp_queued_to_read *read_queue_e = NULL;
+
+	sctp_alloc_a_readq(stcb, read_queue_e);
+	if (read_queue_e == NULL) {
+		goto failed_build;
+	}
+	read_queue_e->sinfo_stream = stream_no;
+	read_queue_e->sinfo_ssn = stream_seq;
+	read_queue_e->sinfo_flags = (flags << 8);
+	read_queue_e->sinfo_ppid = ppid;
+	read_queue_e->sinfo_context = context;
+	read_queue_e->sinfo_timetolive = 0;
+	read_queue_e->sinfo_tsn = tsn;
+	read_queue_e->sinfo_cumtsn = tsn;
+	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
+	read_queue_e->whoFrom = net;
+	read_queue_e->length = 0;
+	atomic_add_int(&net->ref_count, 1);
+	read_queue_e->data = dm;
+	read_queue_e->spec_flags = 0;
+	read_queue_e->tail_mbuf = NULL;
+	read_queue_e->aux_data = NULL;
+	read_queue_e->stcb = stcb;
+	read_queue_e->port_from = stcb->rport;
+	read_queue_e->do_not_ref_stcb = 0;
+	read_queue_e->end_added = 0;
+	read_queue_e->some_taken = 0;
+	read_queue_e->pdapi_aborted = 0;
+failed_build:
+	return (read_queue_e);
+}
+
+
+/*
+ * Build out our readq entry based on the incoming chunk.
+ */
+static struct sctp_queued_to_read *
+sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
+    struct sctp_tmit_chunk *chk)
+{
+	struct sctp_queued_to_read *read_queue_e = NULL;
+
+	sctp_alloc_a_readq(stcb, read_queue_e);
+	if (read_queue_e == NULL) {
+		goto failed_build;
+	}
+	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
+	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
+	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
+	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
+	read_queue_e->sinfo_context = stcb->asoc.context;
+	read_queue_e->sinfo_timetolive = 0;
+	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
+	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
+	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
+	read_queue_e->whoFrom = chk->whoTo;
+	read_queue_e->aux_data = NULL;
+	read_queue_e->length = 0;
+	atomic_add_int(&chk->whoTo->ref_count, 1);
+	read_queue_e->data = chk->data;
+	read_queue_e->tail_mbuf = NULL;
+	read_queue_e->stcb = stcb;
+	read_queue_e->port_from = stcb->rport;
+	read_queue_e->spec_flags = 0;
+	read_queue_e->do_not_ref_stcb = 0;
+	read_queue_e->end_added = 0;
+	read_queue_e->some_taken = 0;
+	read_queue_e->pdapi_aborted = 0;
+failed_build:
+	return (read_queue_e);
+}
+
+
+struct mbuf *
+sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
+{
+	struct sctp_extrcvinfo *seinfo;
+	struct sctp_sndrcvinfo *outinfo;
+	struct sctp_rcvinfo *rcvinfo;
+	struct sctp_nxtinfo *nxtinfo;
+#if defined(__Userspace_os_Windows)
+	WSACMSGHDR *cmh;
+#else
+	struct cmsghdr *cmh;
+#endif
+	struct mbuf *ret;
+	int len;
+	int use_extended;
+	int provide_nxt;
+
+	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
+	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
+	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
+		/* user does not want any ancillary data */
+		return (NULL);
+	}
+
+	len = 0;
+	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
+		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
+	}
+	seinfo = (struct sctp_extrcvinfo *)sinfo;
+	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
+	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
+		provide_nxt = 1;
+		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
+	} else {
+		provide_nxt = 0;
+	}
+	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
+			use_extended = 1;
+			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
+		} else {
+			use_extended = 0;
+			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
+		}
+	} else {
+		use_extended = 0;
+	}
+
+	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
+	if (ret == NULL) {
+		/* No space */
+		return (ret);
+	}
+	SCTP_BUF_LEN(ret) = 0;
+
+	/* We need a CMSG header followed by the struct */
+#if defined(__Userspace_os_Windows)
+	cmh = mtod(ret, WSACMSGHDR *);
+#else
+	cmh = mtod(ret, struct cmsghdr *);
+#endif
+	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
+		cmh->cmsg_level = IPPROTO_SCTP;
+		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
+		cmh->cmsg_type = SCTP_RCVINFO;
+		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
+		rcvinfo->rcv_sid = sinfo->sinfo_stream;
+		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
+		rcvinfo->rcv_flags = sinfo->sinfo_flags;
+		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
+		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
+		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
+		rcvinfo->rcv_context = sinfo->sinfo_context;
+		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
+#if defined(__Userspace_os_Windows)
+		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
+#else
+		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
+#endif
+		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
+	}
+	if (provide_nxt) {
+		cmh->cmsg_level = IPPROTO_SCTP;
+		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
+		cmh->cmsg_type = SCTP_NXTINFO;
+		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
+		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
+		nxtinfo->nxt_flags = 0;
+		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
+			nxtinfo->nxt_flags |= SCTP_UNORDERED;
+		}
+		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
+			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
+		}
+		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
+			nxtinfo->nxt_flags |= SCTP_COMPLETE;
+		}
+		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
+		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
+		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
+#if defined(__Userspace_os_Windows)
+		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
+#else
+		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
+#endif
+		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
+	}
+	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
+		cmh->cmsg_level = IPPROTO_SCTP;
+		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
+		if (use_extended) {
+			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
+			cmh->cmsg_type = SCTP_EXTRCV;
+			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
+			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
+		} else {
+			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
+			cmh->cmsg_type = SCTP_SNDRCV;
+			*outinfo = *sinfo;
+			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
+		}
+	}
+	return (ret);
+}
+
+
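+/*
+ * Mark a delivered TSN as non-revokable: move its bit from the renegable
+ * mapping array to the nr_mapping_array so a later SACK cannot revoke it,
+ * and pull highest_tsn_inside_map back down if this was the top of the
+ * renegable range.  Only needed when sctp_do_drain is enabled; TSNs at or
+ * below the cumulative ack are already safe and are skipped.
+ */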
+static void
+sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
+{
+	uint32_t gap, i, cumackp1;
+	int fnd = 0;
+
+	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
+		return;
+	}
+	cumackp1 = asoc->cumulative_tsn + 1;
+	if (SCTP_TSN_GT(cumackp1, tsn)) {
+		/*
+		 * this tsn is behind the cum ack and thus we don't
+		 * need to worry about it being moved from one to the other.
+		 */
+		return;
+	}
+	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
+	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
+		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
+		sctp_print_mapping_array(asoc);
+#ifdef INVARIANTS
+		panic("Things are really messed up now!!");
+#endif
+	}
+	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
+	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
+		asoc->highest_tsn_inside_nr_map = tsn;
+	}
+	if (tsn == asoc->highest_tsn_inside_map) {
+		/* We must back down to see what the new highest is */
+		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
+			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
+			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
+				asoc->highest_tsn_inside_map = i;
+				fnd = 1;
+				break;
+			}
+		}
+		if (!fnd) {
+			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
+		}
+	}
+}
+
+
+/*
+ * We are delivering currently from the reassembly queue. We must continue to
+ * deliver until we either: 1) run out of space. 2) run out of sequential
+ * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
+ */
+static void
+sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+	struct sctp_tmit_chunk *chk, *nchk;
+	uint16_t nxt_todel;
+	uint16_t stream_no;
+	int end = 0;
+	int cntDel;
+	struct sctp_queued_to_read *control, *ctl, *nctl;
+
+	if (stcb == NULL)
+		return;
+
+	cntDel = stream_no = 0;
+	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
+	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
+		/* socket above is long gone or going.. */
+	abandon:
+		asoc->fragmented_delivery_inprogress = 0;
+		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
+			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
+			asoc->size_on_reasm_queue -= chk->send_size;
+			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+			/*
+			 * Lose the data pointer, since it's in the socket
+			 * buffer
+			 */
+			if (chk->data) {
+				sctp_m_freem(chk->data);
+				chk->data = NULL;
+			}
+			/* Now free the address and data */
+			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+			/*sa_ignore FREED_MEMORY*/
+		}
+		return;
+	}
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
+		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
+			/* Can't deliver more :< */
+			return;
+		}
+		stream_no = chk->rec.data.stream_number;
+		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
+		if (nxt_todel != chk->rec.data.stream_seq &&
+		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
+			/*
+			 * Not the next sequence to deliver in its stream OR
+			 * unordered
+			 */
+			return;
+		}
+		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+
+			control = sctp_build_readq_entry_chk(stcb, chk);
+			if (control == NULL) {
+				/* out of memory? */
+				return;
+			}
+			/* save it off for our future deliveries */
+			stcb->asoc.control_pdapi = control;
+			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
+				end = 1;
+			else
+				end = 0;
+			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
+			sctp_add_to_readq(stcb->sctp_ep,
+			    stcb, control, &stcb->sctp_socket->so_rcv, end,
+			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+			cntDel++;
+		} else {
+			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
+				end = 1;
+			else
+				end = 0;
+			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
+			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
+			    stcb->asoc.control_pdapi,
+			    chk->data, end, chk->rec.data.TSN_seq,
+			    &stcb->sctp_socket->so_rcv)) {
+				/*
+				 * something is very wrong, either
+				 * control_pdapi is NULL, or the tail_mbuf
+				 * is corrupt, or there is an EOM already on
+				 * the mbuf chain.
+				 */
+				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+					goto abandon;
+				} else {
+#ifdef INVARIANTS
+					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
+						panic("This should not happen control_pdapi NULL?");
+					}
+					/* if we did not panic, it was an EOM */
+					panic("Bad chunking ??");
+#else
+					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
+						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
+					}
+					SCTP_PRINTF("Bad chunking ??\n");
+					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
+
+#endif
+					goto abandon;
+				}
+			}
+			cntDel++;
+		}
+		/* pull it we did it */
+		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
+		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+			asoc->fragmented_delivery_inprogress = 0;
+			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
+				asoc->strmin[stream_no].last_sequence_delivered++;
+			}
+			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
+				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
+			}
+		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+			/*
+			 * turn the flag back on since we just delivered
+			 * yet another one.
+			 */
+			asoc->fragmented_delivery_inprogress = 1;
+		}
+		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
+		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
+		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
+		asoc->last_strm_no_delivered = chk->rec.data.stream_number;
+
+		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
+		asoc->size_on_reasm_queue -= chk->send_size;
+		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+		/* free up the chk */
+		chk->data = NULL;
+		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+
+		if (asoc->fragmented_delivery_inprogress == 0) {
+			/*
+			 * Now lets see if we can deliver the next one on
+			 * the stream
+			 */
+			struct sctp_stream_in *strm;
+
+			strm = &asoc->strmin[stream_no];
+			nxt_todel = strm->last_sequence_delivered + 1;
+			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
+				/* Deliver more if we can. */
+				if (nxt_todel == ctl->sinfo_ssn) {
+					TAILQ_REMOVE(&strm->inqueue, ctl, next);
+					asoc->size_on_all_streams -= ctl->length;
+					sctp_ucount_decr(asoc->cnt_on_all_streams);
+					strm->last_sequence_delivered++;
+					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
+					sctp_add_to_readq(stcb->sctp_ep, stcb,
+					    ctl,
+					    &stcb->sctp_socket->so_rcv, 1,
+					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+				} else {
+					break;
+				}
+				nxt_todel = strm->last_sequence_delivered + 1;
+			}
+			break;
+		}
+	}
+}
+
+/*
+ * Queue the chunk either right into the socket buffer if it is the next one
+ * to go OR put it in the correct place in the delivery queue. If we do
+ * append to the so_buf, keep doing so until we are out of order. One big
+ * question still remains, what to do when the socket buffer is FULL??
+ */
+static void
+sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
+    struct sctp_queued_to_read *control, int *abort_flag)
+{
+	/*
+	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
+	 * all the data in one stream this could happen quite rapidly. One
+	 * could use the TSN to keep track of things, but this scheme breaks
+	 * down in the other type of stream usage that could occur. Send a
+	 * single msg to stream 0, send 4Billion messages to stream 1, now
+	 * send a message to stream 0. You have a situation where the TSN
+	 * has wrapped but not in the stream. Is this worth worrying about
+	 * or should we just change our queue sort at the bottom to be by
+	 * TSN?
+	 *
+	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
+	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
+	 * assignment this could happen... and I don't see how this would be
+	 * a violation. So for now I am undecided and will leave the sort by
+	 * SSN alone. Maybe a hybrid approach is the answer.
+	 */
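+	/*
+	 * For reference: the SSN comparisons used below are 16-bit serial
+	 * arithmetic, so e.g. SCTP_SSN_GT(2, 65535) holds after a wrap;
+	 * the same circular rules apply to the 32-bit TSN macros used
+	 * throughout this file.
+	 */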
+	struct sctp_stream_in *strm;
+	struct sctp_queued_to_read *at;
+	int queue_needed;
+	uint16_t nxt_todel;
+	struct mbuf *oper;
+
+	queue_needed = 1;
+	asoc->size_on_all_streams += control->length;
+	sctp_ucount_incr(asoc->cnt_on_all_streams);
+	strm = &asoc->strmin[control->sinfo_stream];
+	nxt_todel = strm->last_sequence_delivered + 1;
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
+	}
+	SCTPDBG(SCTP_DEBUG_INDATA1,
+	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
+	    (uint32_t) control->sinfo_stream,
+	    (uint32_t) strm->last_sequence_delivered,
+	    (uint32_t) nxt_todel);
+	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
+		/* The incoming sseq is behind where we last delivered? */
+		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
+		    control->sinfo_ssn, strm->last_sequence_delivered);
+	protocol_error:
+		/*
+		 * throw it in the stream so it gets cleaned up in
+		 * association destruction
+		 */
+		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
+		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+		    0, M_NOWAIT, 1, MT_DATA);
+		if (oper) {
+			struct sctp_paramhdr *ph;
+			uint32_t *ippp;
+
+			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
+			    (sizeof(uint32_t) * 3);
+			ph = mtod(oper, struct sctp_paramhdr *);
+			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+			ph->param_length = htons(SCTP_BUF_LEN(oper));
+			ippp = (uint32_t *) (ph + 1);
+			*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_1);
+			ippp++;
+			*ippp = control->sinfo_tsn;
+			ippp++;
+			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
+		}
+		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_1;
+		sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
+		*abort_flag = 1;
+		return;
+
+	}
+	if (nxt_todel == control->sinfo_ssn) {
+		/* can be delivered right away? */
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
+		}
+		/* EY it won't be queued if it could be delivered directly */
+		queue_needed = 0;
+		asoc->size_on_all_streams -= control->length;
+		sctp_ucount_decr(asoc->cnt_on_all_streams);
+		strm->last_sequence_delivered++;
+
+		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
+		sctp_add_to_readq(stcb->sctp_ep, stcb,
+		    control,
+		    &stcb->sctp_socket->so_rcv, 1,
+		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
+			/* all delivered */
+			nxt_todel = strm->last_sequence_delivered + 1;
+			if (nxt_todel == control->sinfo_ssn) {
+				TAILQ_REMOVE(&strm->inqueue, control, next);
+				asoc->size_on_all_streams -= control->length;
+				sctp_ucount_decr(asoc->cnt_on_all_streams);
+				strm->last_sequence_delivered++;
+				/*
+				 * We ignore the return of deliver_data here
+				 * since we always can hold the chunk on the
+				 * d-queue. And we have a finite number that
+				 * can be delivered from the strq.
+				 */
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+					sctp_log_strm_del(control, NULL,
+					    SCTP_STR_LOG_FROM_IMMED_DEL);
+				}
+				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
+				sctp_add_to_readq(stcb->sctp_ep, stcb,
+				    control,
+				    &stcb->sctp_socket->so_rcv, 1,
+				    SCTP_READ_LOCK_NOT_HELD,
+				    SCTP_SO_NOT_LOCKED);
+				continue;
+			}
+			break;
+		}
+	}
+	if (queue_needed) {
+		/*
+		 * Ok, we did not deliver this guy, find the correct place
+		 * to put it on the queue.
+		 */
+		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
+			goto protocol_error;
+		}
+		if (TAILQ_EMPTY(&strm->inqueue)) {
+			/* Empty queue */
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
+			}
+			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
+		} else {
+			TAILQ_FOREACH(at, &strm->inqueue, next) {
+				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
+					/*
+					 * one in queue is bigger than the
+					 * new one, insert before this one
+					 */
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+						sctp_log_strm_del(control, at,
+						    SCTP_STR_LOG_FROM_INSERT_MD);
+					}
+					TAILQ_INSERT_BEFORE(at, control, next);
+					break;
+				} else if (at->sinfo_ssn == control->sinfo_ssn) {
+					/*
+					 * Gak, He sent me a duplicate str
+					 * seq number
+					 */
+					/*
+					 * foo bar, I guess I will just free
+					 * this new guy, should we abort
+					 * too? FIX ME MAYBE? Or it COULD be
+					 * that the SSN's have wrapped.
+					 * Maybe I should compare to TSN
+					 * somehow... sigh for now just blow
+					 * away the chunk!
+					 */
+
+					if (control->data)
+						sctp_m_freem(control->data);
+					control->data = NULL;
+					asoc->size_on_all_streams -= control->length;
+					sctp_ucount_decr(asoc->cnt_on_all_streams);
+					if (control->whoFrom) {
+						sctp_free_remote_addr(control->whoFrom);
+						control->whoFrom = NULL;
+					}
+					sctp_free_a_readq(stcb, control);
+					return;
+				} else {
+					if (TAILQ_NEXT(at, next) == NULL) {
+						/*
+						 * We are at the end, insert
+						 * it after this one
+						 */
+						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+							sctp_log_strm_del(control, at,
+							    SCTP_STR_LOG_FROM_INSERT_TL);
+						}
+						TAILQ_INSERT_AFTER(&strm->inqueue,
+						    at, control, next);
+						break;
+					}
+				}
+			}
+		}
+	}
+}
+
+/*
+ * Returns two things: You get the total size of the deliverable parts of the
+ * first fragmented message on the reassembly queue. And you get a 1 back if
+ * all of the message is ready, or a 0 back if the message is still incomplete.
+ */
+static int
+sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t *t_size)
+{
+	struct sctp_tmit_chunk *chk;
+	uint32_t tsn;
+
+	*t_size = 0;
+	chk = TAILQ_FIRST(&asoc->reasmqueue);
+	if (chk == NULL) {
+		/* nothing on the queue */
+		return (0);
+	}
+	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
+		/* Not a first on the queue */
+		return (0);
+	}
+	tsn = chk->rec.data.TSN_seq;
+	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
+		if (tsn != chk->rec.data.TSN_seq) {
+			return (0);
+		}
+		*t_size += chk->send_size;
+		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+			return (1);
+		}
+		tsn++;
+	}
+	return (0);
+}
+
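+/*
+ * See whether delivery from the reassembly queue can start or continue:
+ * kick off partial delivery once the first message is complete or at
+ * least pd_point bytes of its head are in sequence, or keep servicing a
+ * fragmented delivery that is already in progress.
+ */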
+static void
+sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+	struct sctp_tmit_chunk *chk;
+	uint16_t nxt_todel;
+	uint32_t tsize, pd_point;
+
+ doit_again:
+	chk = TAILQ_FIRST(&asoc->reasmqueue);
+	if (chk == NULL) {
+		/* Huh? */
+		asoc->size_on_reasm_queue = 0;
+		asoc->cnt_on_reasm_queue = 0;
+		return;
+	}
+	if (asoc->fragmented_delivery_inprogress == 0) {
+		nxt_todel =
+		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
+		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
+		    (nxt_todel == chk->rec.data.stream_seq ||
+		     (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
+			/*
+			 * Yep the first one is here and it's ok to deliver
+			 * but should we?
+			 */
+			if (stcb->sctp_socket) {
+				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
+				    stcb->sctp_ep->partial_delivery_point);
+			} else {
+				pd_point = stcb->sctp_ep->partial_delivery_point;
+			}
+			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
+				/*
+				 * Yes, we setup to start reception, by
+				 * backing down the TSN just in case we
+				 * can't deliver. If we
+				 */
+				asoc->fragmented_delivery_inprogress = 1;
+				asoc->tsn_last_delivered =
+				    chk->rec.data.TSN_seq - 1;
+				asoc->str_of_pdapi =
+				    chk->rec.data.stream_number;
+				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
+				asoc->pdapi_ppid = chk->rec.data.payloadtype;
+				asoc->fragment_flags = chk->rec.data.rcv_flags;
+				sctp_service_reassembly(stcb, asoc);
+			}
+		}
+	} else {
+		/*
+		 * Service re-assembly will deliver stream data queued
+		 * at the end of fragmented delivery.. but it won't know
+		 * to go back and call itself again... we do that here
+		 * with the goto doit_again
+		 */
+		sctp_service_reassembly(stcb, asoc);
+		if (asoc->fragmented_delivery_inprogress == 0) {
+			/*
+			 * finished our Fragmented delivery, could be
+			 * more waiting?
+			 */
+			goto doit_again;
+		}
+	}
+}
+
+/*
+ * Dump onto the re-assembly queue, in its proper place. After dumping on the
+ * queue, see if anything can be delivered. If so pull it off (or as much as
+ * we can). If we run out of space then we must dump what we can and set the
+ * appropriate flag to say we queued what we could.
+ */
+static void
+sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
+    struct sctp_tmit_chunk *chk, int *abort_flag)
+{
+	struct mbuf *oper;
+	uint32_t cum_ackp1, prev_tsn, post_tsn;
+	struct sctp_tmit_chunk *at, *prev, *next;
+
+	prev = next = NULL;
+	cum_ackp1 = asoc->tsn_last_delivered + 1;
+	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
+		/* This is the first one on the queue */
+		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
+		/*
+		 * we do not check for delivery of anything when only one
+		 * fragment is here
+		 */
+		asoc->size_on_reasm_queue = chk->send_size;
+		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+		if (chk->rec.data.TSN_seq == cum_ackp1) {
+			if (asoc->fragmented_delivery_inprogress == 0 &&
+			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
+			    SCTP_DATA_FIRST_FRAG) {
+				/*
+				 * An empty queue, no delivery inprogress,
+				 * we hit the next one and it does NOT have
+				 * a FIRST fragment mark.
+				 */
+				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
+				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+				    0, M_NOWAIT, 1, MT_DATA);
+
+				if (oper) {
+					struct sctp_paramhdr *ph;
+					uint32_t *ippp;
+
+					SCTP_BUF_LEN(oper) =
+					    sizeof(struct sctp_paramhdr) +
+					    (sizeof(uint32_t) * 3);
+					ph = mtod(oper, struct sctp_paramhdr *);
+					ph->param_type =
+					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+					ph->param_length = htons(SCTP_BUF_LEN(oper));
+					ippp = (uint32_t *) (ph + 1);
+					*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_2);
+					ippp++;
+					*ippp = chk->rec.data.TSN_seq;
+					ippp++;
+					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+
+				}
+				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_2;
+				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
+				*abort_flag = 1;
+			} else if (asoc->fragmented_delivery_inprogress &&
+			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
+				/*
+				 * We are doing a partial delivery and the
+				 * NEXT chunk MUST be either the LAST or
+				 * MIDDLE fragment NOT a FIRST
+				 */
+				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
+				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+				    0, M_NOWAIT, 1, MT_DATA);
+				if (oper) {
+					struct sctp_paramhdr *ph;
+					uint32_t *ippp;
+
+					SCTP_BUF_LEN(oper) =
+					    sizeof(struct sctp_paramhdr) +
+					    (3 * sizeof(uint32_t));
+					ph = mtod(oper, struct sctp_paramhdr *);
+					ph->param_type =
+					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+					ph->param_length = htons(SCTP_BUF_LEN(oper));
+					ippp = (uint32_t *) (ph + 1);
+					*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_3);
+					ippp++;
+					*ippp = chk->rec.data.TSN_seq;
+					ippp++;
+					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+				}
+				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_3;
+				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
+				*abort_flag = 1;
+			} else if (asoc->fragmented_delivery_inprogress) {
+				/*
+				 * Here we are ok with a MIDDLE or LAST
+				 * piece
+				 */
+				if (chk->rec.data.stream_number !=
+				    asoc->str_of_pdapi) {
+					/* Got to be the right STR No */
+					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
+					    chk->rec.data.stream_number,
+					    asoc->str_of_pdapi);
+					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+					    0, M_NOWAIT, 1, MT_DATA);
+					if (oper) {
+						struct sctp_paramhdr *ph;
+						uint32_t *ippp;
+
+						SCTP_BUF_LEN(oper) =
+						    sizeof(struct sctp_paramhdr) +
+						    (sizeof(uint32_t) * 3);
+						ph = mtod(oper,
+						    struct sctp_paramhdr *);
+						ph->param_type =
+						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+						ph->param_length =
+						    htons(SCTP_BUF_LEN(oper));
+						ippp = (uint32_t *) (ph + 1);
+						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_4);
+						ippp++;
+						*ippp = chk->rec.data.TSN_seq;
+						ippp++;
+						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+					}
+					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_4;
+					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
+					*abort_flag = 1;
+				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
+				    SCTP_DATA_UNORDERED &&
+				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
+					/* Got to be the right STR Seq */
+					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
+					    chk->rec.data.stream_seq,
+					    asoc->ssn_of_pdapi);
+					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+					    0, M_NOWAIT, 1, MT_DATA);
+					if (oper) {
+						struct sctp_paramhdr *ph;
+						uint32_t *ippp;
+
+						SCTP_BUF_LEN(oper) =
+						    sizeof(struct sctp_paramhdr) +
+						    (3 * sizeof(uint32_t));
+						ph = mtod(oper,
+						    struct sctp_paramhdr *);
+						ph->param_type =
+						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+						ph->param_length =
+						    htons(SCTP_BUF_LEN(oper));
+						ippp = (uint32_t *) (ph + 1);
+						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_5);
+						ippp++;
+						*ippp = chk->rec.data.TSN_seq;
+						ippp++;
+						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+
+					}
+					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_5;
+					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
+					*abort_flag = 1;
+				}
+			}
+		}
+		return;
+	}
+	/* Find its place */
+	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
+		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
+			/*
+			 * one in queue is bigger than the new one, insert
+			 * before this one
+			 */
+			/* A check */
+			asoc->size_on_reasm_queue += chk->send_size;
+			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+			next = at;
+			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
+			break;
+		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
+			/* Gak, He sent me a duplicate str seq number */
+			/*
+			 * foo bar, I guess I will just free this new guy,
+			 * should we abort too? FIX ME MAYBE? Or it COULD be
+			 * that the SSN's have wrapped. Maybe I should
+			 * compare to TSN somehow... sigh for now just blow
+			 * away the chunk!
+			 */
+			if (chk->data) {
+				sctp_m_freem(chk->data);
+				chk->data = NULL;
+			}
+			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+			return;
+		} else {
+			prev = at;
+			if (TAILQ_NEXT(at, sctp_next) == NULL) {
+				/*
+				 * We are at the end, insert it after this
+				 * one
+				 */
+				/* check it first */
+				asoc->size_on_reasm_queue += chk->send_size;
+				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
+				break;
+			}
+		}
+	}
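+	/*
+	 * The audits below share one pattern: if a neighbouring fragment
+	 * proves the peer broke the fragmenting rules, build an operational
+	 * error carrying the PROTOCOL_VIOLATION cause plus three words (our
+	 * location code, the TSN, and (stream << 16) | seq), abort the
+	 * association, and set *abort_flag so the caller stops processing.
+	 */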
+	/* Now the audits */
+	if (prev) {
+		prev_tsn = chk->rec.data.TSN_seq - 1;
+		if (prev_tsn == prev->rec.data.TSN_seq) {
+			/*
+			 * Ok the one I am dropping onto the end is the
+			 * NEXT. A bit of validation here.
+			 */
+			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+			    SCTP_DATA_FIRST_FRAG ||
+			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+			    SCTP_DATA_MIDDLE_FRAG) {
+				/*
+				 * Insert chk MUST be a MIDDLE or LAST
+				 * fragment
+				 */
+				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+				    SCTP_DATA_FIRST_FRAG) {
+					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
+					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
+					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+					    0, M_NOWAIT, 1, MT_DATA);
+					if (oper) {
+						struct sctp_paramhdr *ph;
+						uint32_t *ippp;
+
+						SCTP_BUF_LEN(oper) =
+						    sizeof(struct sctp_paramhdr) +
+						    (3 * sizeof(uint32_t));
+						ph = mtod(oper,
+						    struct sctp_paramhdr *);
+						ph->param_type =
+						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+						ph->param_length =
+						    htons(SCTP_BUF_LEN(oper));
+						ippp = (uint32_t *) (ph + 1);
+						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_6);
+						ippp++;
+						*ippp = chk->rec.data.TSN_seq;
+						ippp++;
+						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+
+					}
+					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_6;
+					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
+					*abort_flag = 1;
+					return;
+				}
+				if (chk->rec.data.stream_number !=
+				    prev->rec.data.stream_number) {
+					/*
+					 * Huh, need the correct STR here,
+					 * they must be the same.
+					 */
+					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
+					    chk->rec.data.stream_number,
+					    prev->rec.data.stream_number);
+					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+					    0, M_NOWAIT, 1, MT_DATA);
+					if (oper) {
+						struct sctp_paramhdr *ph;
+						uint32_t *ippp;
+
+						SCTP_BUF_LEN(oper) =
+						    sizeof(struct sctp_paramhdr) +
+						    (3 * sizeof(uint32_t));
+						ph = mtod(oper,
+						    struct sctp_paramhdr *);
+						ph->param_type =
+						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+						ph->param_length =
+						    htons(SCTP_BUF_LEN(oper));
+						ippp = (uint32_t *) (ph + 1);
+						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_7);
+						ippp++;
+						*ippp = chk->rec.data.TSN_seq;
+						ippp++;
+						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+					}
+					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_7;
+					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
+					*abort_flag = 1;
+					return;
+				}
+				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
+				    chk->rec.data.stream_seq !=
+				    prev->rec.data.stream_seq) {
+					/*
+					 * Huh, need the correct SSN here,
+					 * they must be the same.
+					 */
+					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
+					    chk->rec.data.stream_seq,
+					    prev->rec.data.stream_seq);
+					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+					    0, M_NOWAIT, 1, MT_DATA);
+					if (oper) {
+						struct sctp_paramhdr *ph;
+						uint32_t *ippp;
+
+						SCTP_BUF_LEN(oper) =
+						    sizeof(struct sctp_paramhdr) +
+						    (3 * sizeof(uint32_t));
+						ph = mtod(oper,
+						    struct sctp_paramhdr *);
+						ph->param_type =
+						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+						ph->param_length =
+						    htons(SCTP_BUF_LEN(oper));
+						ippp = (uint32_t *) (ph + 1);
+						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_8);
+						ippp++;
+						*ippp = chk->rec.data.TSN_seq;
+						ippp++;
+						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+					}
+					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_8;
+					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
+					*abort_flag = 1;
+					return;
+				}
+			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+			    SCTP_DATA_LAST_FRAG) {
+				/* Insert chk MUST be a FIRST */
+				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
+				    SCTP_DATA_FIRST_FRAG) {
+					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
+					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+					    0, M_NOWAIT, 1, MT_DATA);
+					if (oper) {
+						struct sctp_paramhdr *ph;
+						uint32_t *ippp;
+
+						SCTP_BUF_LEN(oper) =
+						    sizeof(struct sctp_paramhdr) +
+						    (3 * sizeof(uint32_t));
+						ph = mtod(oper,
+						    struct sctp_paramhdr *);
+						ph->param_type =
+						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+						ph->param_length =
+						    htons(SCTP_BUF_LEN(oper));
+						ippp = (uint32_t *) (ph + 1);
+						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_9);
+						ippp++;
+						*ippp = chk->rec.data.TSN_seq;
+						ippp++;
+						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+
+					}
+					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_9;
+					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
+					*abort_flag = 1;
+					return;
+				}
+			}
+		}
+	}
+	if (next) {
+		post_tsn = chk->rec.data.TSN_seq + 1;
+		if (post_tsn == next->rec.data.TSN_seq) {
+			/*
+			 * Ok the one I am inserting ahead of is my NEXT
+			 * one. A bit of validation here.
+			 */
+			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+				/* Insert chk MUST be a last fragment */
+				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
+				    != SCTP_DATA_LAST_FRAG) {
+					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
+					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
+					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+					    0, M_NOWAIT, 1, MT_DATA);
+					if (oper) {
+						struct sctp_paramhdr *ph;
+						uint32_t *ippp;
+
+						SCTP_BUF_LEN(oper) =
+						    sizeof(struct sctp_paramhdr) +
+						    (3 * sizeof(uint32_t));
+						ph = mtod(oper,
+						    struct sctp_paramhdr *);
+						ph->param_type =
+						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+						ph->param_length =
+						    htons(SCTP_BUF_LEN(oper));
+						ippp = (uint32_t *) (ph + 1);
+						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_10);
+						ippp++;
+						*ippp = chk->rec.data.TSN_seq;
+						ippp++;
+						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+					}
+					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_10;
+					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
+					*abort_flag = 1;
+					return;
+				}
+			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+			    SCTP_DATA_MIDDLE_FRAG ||
+			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+			    SCTP_DATA_LAST_FRAG) {
+				/*
+				 * Insert chk CAN be MIDDLE or FIRST NOT
+				 * LAST
+				 */
+				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
+				    SCTP_DATA_LAST_FRAG) {
+					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
+					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
+					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+					    0, M_NOWAIT, 1, MT_DATA);
+					if (oper) {
+						struct sctp_paramhdr *ph;
+						uint32_t *ippp;
+
+						SCTP_BUF_LEN(oper) =
+						    sizeof(struct sctp_paramhdr) +
+						    (3 * sizeof(uint32_t));
+						ph = mtod(oper,
+						    struct sctp_paramhdr *);
+						ph->param_type =
+						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+						ph->param_length =
+						    htons(SCTP_BUF_LEN(oper));
+						ippp = (uint32_t *) (ph + 1);
+						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_11);
+						ippp++;
+						*ippp = chk->rec.data.TSN_seq;
+						ippp++;
+						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+
+					}
+					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_11;
+					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
+					*abort_flag = 1;
+					return;
+				}
+				if (chk->rec.data.stream_number !=
+				    next->rec.data.stream_number) {
+					/*
+					 * Huh, need the correct STR here,
+					 * they must be the same.
+					 */
+					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
+					    chk->rec.data.stream_number,
+					    next->rec.data.stream_number);
+					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+					    0, M_NOWAIT, 1, MT_DATA);
+					if (oper) {
+						struct sctp_paramhdr *ph;
+						uint32_t *ippp;
+
+						SCTP_BUF_LEN(oper) =
+						    sizeof(struct sctp_paramhdr) +
+						    (3 * sizeof(uint32_t));
+						ph = mtod(oper,
+						    struct sctp_paramhdr *);
+						ph->param_type =
+						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+						ph->param_length =
+						    htons(SCTP_BUF_LEN(oper));
+						ippp = (uint32_t *) (ph + 1);
+						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_12);
+						ippp++;
+						*ippp = chk->rec.data.TSN_seq;
+						ippp++;
+						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+
+					}
+					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_12;
+					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
+					*abort_flag = 1;
+					return;
+				}
+				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
+				    chk->rec.data.stream_seq !=
+				    next->rec.data.stream_seq) {
+					/*
+					 * Huh, need the correct SSN here,
+					 * they must be the same.
+					 */
+					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
+					    chk->rec.data.stream_seq,
+					    next->rec.data.stream_seq);
+					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
+					    0, M_NOWAIT, 1, MT_DATA);
+					if (oper) {
+						struct sctp_paramhdr *ph;
+						uint32_t *ippp;
+
+						SCTP_BUF_LEN(oper) =
+						    sizeof(struct sctp_paramhdr) +
+						    (3 * sizeof(uint32_t));
+						ph = mtod(oper,
+						    struct sctp_paramhdr *);
+						ph->param_type =
+						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+						ph->param_length =
+						    htons(SCTP_BUF_LEN(oper));
+						ippp = (uint32_t *) (ph + 1);
+						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_13);
+						ippp++;
+						*ippp = chk->rec.data.TSN_seq;
+						ippp++;
+						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
+					}
+					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_13;
+					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
+					*abort_flag = 1;
+					return;
+				}
+			}
+		}
+	}
+	/* Do we need to do some delivery? check */
+	sctp_deliver_reasm_check(stcb, asoc);
+}
+
+/*
+ * This is an unfortunate routine. It checks to make sure an evil guy is not
+ * stuffing us full of bad packet fragments. A broken peer could also do this,
+ * but that is doubtful. It is too bad I must worry about evil crackers, sigh
+ * :< more cycles.
+ */
+static int
+sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
+    uint32_t TSN_seq)
+{
+	struct sctp_tmit_chunk *at;
+	uint32_t tsn_est;
+
+	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
+		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
+			/* is it one bigger? */
+			tsn_est = at->rec.data.TSN_seq + 1;
+			if (tsn_est == TSN_seq) {
+				/* yep. It better be a last then */
+				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
+				    SCTP_DATA_LAST_FRAG) {
+					/*
+					 * Ok this guy belongs next to a guy
+					 * that is NOT last, it should be a
+					 * middle/last, not a complete
+					 * chunk.
+					 */
+					return (1);
+				} else {
+					/*
+					 * This guy is ok since it's a LAST
+					 * and the new chunk is a fully
+					 * self-contained one.
+					 */
+					return (0);
+				}
+			}
+		} else if (TSN_seq == at->rec.data.TSN_seq) {
+			/* Software error since I have a dup? */
+			return (1);
+		} else {
+			/*
+			 * Ok, 'at' is larger than the new chunk, but does
+			 * it need to be right before it?
+			 */
+			tsn_est = TSN_seq + 1;
+			if (tsn_est == at->rec.data.TSN_seq) {
+				/* Yep, it better be a first */
+				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
+				    SCTP_DATA_FIRST_FRAG) {
+					return (1);
+				} else {
+					return (0);
+				}
+			}
+		}
+	}
+	return (0);
+}
+
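+/*
+ * Validate and accept a single DATA chunk: duplicate and out-of-window
+ * TSNs are dropped (forcing a SACK), an unknown stream id draws an
+ * INVALID_STREAM operational error, and inconsistent fragment or
+ * sequence information aborts the association via *abort_flag.
+ */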
+static int
+sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
+    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
+    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
+    int *break_flag, int last_chunk)
+{
+	/* Process a data chunk */
+	/* struct sctp_tmit_chunk *chk; */
+	struct sctp_tmit_chunk *chk;
+	uint32_t tsn, gap;
+	struct mbuf *dmbuf;
+	int the_len;
+	int need_reasm_check = 0;
+	uint16_t strmno, strmseq;
+	struct mbuf *oper;
+	struct sctp_queued_to_read *control;
+	int ordered;
+	uint32_t protocol_id;
+	uint8_t chunk_flags;
+	struct sctp_stream_reset_list *liste;
+
+	chk = NULL;
+	tsn = ntohl(ch->dp.tsn);
+	chunk_flags = ch->ch.chunk_flags;
+	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
+		asoc->send_sack = 1;
+	}
+	protocol_id = ch->dp.protocol_id;
+	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
+	}
+	if (stcb == NULL) {
+		return (0);
+	}
+	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
+	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
+		/* It is a duplicate */
+		SCTP_STAT_INCR(sctps_recvdupdata);
+		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
+			/* Record a dup for the next outbound sack */
+			asoc->dup_tsns[asoc->numduptsns] = tsn;
+			asoc->numduptsns++;
+		}
+		asoc->send_sack = 1;
+		return (0);
+	}
+	/* Calculate the number of TSN's between the base and this TSN */
+	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
+	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
+		/* Can't hold the bit in the mapping array at max size, toss it */
+		return (0);
+	}
+	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
+		SCTP_TCB_LOCK_ASSERT(stcb);
+		if (sctp_expand_mapping_array(asoc, gap)) {
+			/* Can't expand, drop it */
+			return (0);
+		}
+	}
+	if (SCTP_TSN_GT(tsn, *high_tsn)) {
+		*high_tsn = tsn;
+	}
+	/* See if we have received this one already */
+	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
+	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
+		SCTP_STAT_INCR(sctps_recvdupdata);
+		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
+			/* Record a dup for the next outbound sack */
+			asoc->dup_tsns[asoc->numduptsns] = tsn;
+			asoc->numduptsns++;
+		}
+		asoc->send_sack = 1;
+		return (0);
+	}
+	/*
+	 * Check to see about the GONE flag, duplicates would cause a sack
+	 * to be sent up above
+	 */
+	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+	     (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
+		/*
+		 * wait a minute, this guy is gone, there is no longer a
+		 * receiver. Send peer an ABORT!
+		 */
+		struct mbuf *op_err;
+
+		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
+		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+		*abort_flag = 1;
+		return (0);
+	}
+	/*
+	 * Now before going further we see if there is room. If NOT then we
+	 * MAY let one through only IF this TSN is the one we are waiting
+	 * for on a partial delivery API.
+	 */
+
+	/* now do the tests */
+	if (((asoc->cnt_on_all_streams +
+	      asoc->cnt_on_reasm_queue +
+	      asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
+	    (((int)asoc->my_rwnd) <= 0)) {
+		/*
+		 * When we have NO room in the rwnd we check to make sure
+		 * the reader is doing its job...
+		 */
+		if (stcb->sctp_socket->so_rcv.sb_cc) {
+			/* some to read, wake-up */
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+			struct socket *so;
+
+			so = SCTP_INP_SO(stcb->sctp_ep);
+			atomic_add_int(&stcb->asoc.refcnt, 1);
+			SCTP_TCB_UNLOCK(stcb);
+			SCTP_SOCKET_LOCK(so, 1);
+			SCTP_TCB_LOCK(stcb);
+			atomic_subtract_int(&stcb->asoc.refcnt, 1);
+			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+				/* assoc was freed while we were unlocked */
+				SCTP_SOCKET_UNLOCK(so, 1);
+				return (0);
+			}
+#endif
+			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+			SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+		}
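+		/*
+		 * TSNs at or below something we have already seen fall in
+		 * window space we already accounted for, so they are still
+		 * taken; only a TSN above everything seen so far is dropped
+		 * here, counted against either the chunk limit or the rwnd.
+		 */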
*/ 1.1569 + if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) && 1.1570 + SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1.1571 + /* Nope not in the valid range dump it */ 1.1572 + sctp_set_rwnd(stcb, asoc); 1.1573 + if ((asoc->cnt_on_all_streams + 1.1574 + asoc->cnt_on_reasm_queue + 1.1575 + asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) { 1.1576 + SCTP_STAT_INCR(sctps_datadropchklmt); 1.1577 + } else { 1.1578 + SCTP_STAT_INCR(sctps_datadroprwnd); 1.1579 + } 1.1580 + *break_flag = 1; 1.1581 + return (0); 1.1582 + } 1.1583 + } 1.1584 + strmno = ntohs(ch->dp.stream_id); 1.1585 + if (strmno >= asoc->streamincnt) { 1.1586 + struct sctp_paramhdr *phdr; 1.1587 + struct mbuf *mb; 1.1588 + 1.1589 + mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2), 1.1590 + 0, M_NOWAIT, 1, MT_DATA); 1.1591 + if (mb != NULL) { 1.1592 + /* add some space up front so prepend will work well */ 1.1593 + SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr)); 1.1594 + phdr = mtod(mb, struct sctp_paramhdr *); 1.1595 + /* 1.1596 + * Error causes are just param's and this one has 1.1597 + * two back to back phdr, one with the error type 1.1598 + * and size, the other with the streamid and a rsvd 1.1599 + */ 1.1600 + SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2); 1.1601 + phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM); 1.1602 + phdr->param_length = 1.1603 + htons(sizeof(struct sctp_paramhdr) * 2); 1.1604 + phdr++; 1.1605 + /* We insert the stream in the type field */ 1.1606 + phdr->param_type = ch->dp.stream_id; 1.1607 + /* And set the length to 0 for the rsvd field */ 1.1608 + phdr->param_length = 0; 1.1609 + sctp_queue_op_err(stcb, mb); 1.1610 + } 1.1611 + SCTP_STAT_INCR(sctps_badsid); 1.1612 + SCTP_TCB_LOCK_ASSERT(stcb); 1.1613 + SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1.1614 + if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1.1615 + asoc->highest_tsn_inside_nr_map = tsn; 1.1616 + } 1.1617 + if (tsn == (asoc->cumulative_tsn + 1)) { 1.1618 + /* Update cum-ack */ 1.1619 + asoc->cumulative_tsn = tsn; 1.1620 + } 1.1621 + return (0); 1.1622 + } 1.1623 + /* 1.1624 + * Before we continue lets validate that we are not being fooled by 1.1625 + * an evil attacker. We can only have 4k chunks based on our TSN 1.1626 + * spread allowed by the mapping array 512 * 8 bits, so there is no 1.1627 + * way our stream sequence numbers could have wrapped. We of course 1.1628 + * only validate the FIRST fragment so the bit must be set. 
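For the bad-stream case above, the operation error carries an Invalid Stream Identifier cause built out of two back-to-back parameter headers. The same layout written as a single struct, per RFC 4960 Section 3.3.10.1; a sketch only, with illustrative field and function names:

#include <stdint.h>
#include <arpa/inet.h>

struct invalid_stream_cause {
	uint16_t cause_code;	/* SCTP_CAUSE_INVALID_STREAM (1) */
	uint16_t cause_length;	/* 8: covers this whole struct */
	uint16_t stream_id;	/* offending stream, network order */
	uint16_t reserved;	/* must be 0 (the param_length = 0 above) */
};

void
fill_invalid_stream_cause(struct invalid_stream_cause *c, uint16_t sid_net)
{
	c->cause_code = htons(1);	/* Invalid Stream Identifier */
	c->cause_length = htons(sizeof(*c));
	c->stream_id = sid_net;		/* already in network order */
	c->reserved = 0;
}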
1.1629 + */ 1.1630 + strmseq = ntohs(ch->dp.stream_sequence); 1.1631 +#ifdef SCTP_ASOCLOG_OF_TSNS 1.1632 + SCTP_TCB_LOCK_ASSERT(stcb); 1.1633 + if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { 1.1634 + asoc->tsn_in_at = 0; 1.1635 + asoc->tsn_in_wrapped = 1; 1.1636 + } 1.1637 + asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; 1.1638 + asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno; 1.1639 + asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq; 1.1640 + asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length; 1.1641 + asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags; 1.1642 + asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb; 1.1643 + asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at; 1.1644 + asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1; 1.1645 + asoc->tsn_in_at++; 1.1646 +#endif 1.1647 + if ((chunk_flags & SCTP_DATA_FIRST_FRAG) && 1.1648 + (TAILQ_EMPTY(&asoc->resetHead)) && 1.1649 + (chunk_flags & SCTP_DATA_UNORDERED) == 0 && 1.1650 + SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) { 1.1651 + /* The incoming sseq is behind where we last delivered? */ 1.1652 + SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n", 1.1653 + strmseq, asoc->strmin[strmno].last_sequence_delivered); 1.1654 + oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1.1655 + 0, M_NOWAIT, 1, MT_DATA); 1.1656 + if (oper) { 1.1657 + struct sctp_paramhdr *ph; 1.1658 + uint32_t *ippp; 1.1659 + 1.1660 + SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 1.1661 + (3 * sizeof(uint32_t)); 1.1662 + ph = mtod(oper, struct sctp_paramhdr *); 1.1663 + ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1.1664 + ph->param_length = htons(SCTP_BUF_LEN(oper)); 1.1665 + ippp = (uint32_t *) (ph + 1); 1.1666 + *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_14); 1.1667 + ippp++; 1.1668 + *ippp = tsn; 1.1669 + ippp++; 1.1670 + *ippp = ((strmno << 16) | strmseq); 1.1671 + 1.1672 + } 1.1673 + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_14; 1.1674 + sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); 1.1675 + *abort_flag = 1; 1.1676 + return (0); 1.1677 + } 1.1678 + /************************************ 1.1679 + * From here down we may find ch-> invalid 1.1680 + * so its a good idea NOT to use it. 
1.1681 + *************************************/ 1.1682 + 1.1683 + the_len = (chk_length - sizeof(struct sctp_data_chunk)); 1.1684 + if (last_chunk == 0) { 1.1685 + dmbuf = SCTP_M_COPYM(*m, 1.1686 + (offset + sizeof(struct sctp_data_chunk)), 1.1687 + the_len, M_NOWAIT); 1.1688 +#ifdef SCTP_MBUF_LOGGING 1.1689 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 1.1690 + struct mbuf *mat; 1.1691 + 1.1692 + for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) { 1.1693 + if (SCTP_BUF_IS_EXTENDED(mat)) { 1.1694 + sctp_log_mb(mat, SCTP_MBUF_ICOPY); 1.1695 + } 1.1696 + } 1.1697 + } 1.1698 +#endif 1.1699 + } else { 1.1700 + /* We can steal the last chunk */ 1.1701 + int l_len; 1.1702 + dmbuf = *m; 1.1703 + /* lop off the top part */ 1.1704 + m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 1.1705 + if (SCTP_BUF_NEXT(dmbuf) == NULL) { 1.1706 + l_len = SCTP_BUF_LEN(dmbuf); 1.1707 + } else { 1.1708 + /* need to count up the size hopefully 1.1709 + * does not hit this to often :-0 1.1710 + */ 1.1711 + struct mbuf *lat; 1.1712 + 1.1713 + l_len = 0; 1.1714 + for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) { 1.1715 + l_len += SCTP_BUF_LEN(lat); 1.1716 + } 1.1717 + } 1.1718 + if (l_len > the_len) { 1.1719 + /* Trim the end round bytes off too */ 1.1720 + m_adj(dmbuf, -(l_len - the_len)); 1.1721 + } 1.1722 + } 1.1723 + if (dmbuf == NULL) { 1.1724 + SCTP_STAT_INCR(sctps_nomem); 1.1725 + return (0); 1.1726 + } 1.1727 + if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 1.1728 + asoc->fragmented_delivery_inprogress == 0 && 1.1729 + TAILQ_EMPTY(&asoc->resetHead) && 1.1730 + ((ordered == 0) || 1.1731 + ((uint16_t)(asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq && 1.1732 + TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) { 1.1733 + /* Candidate for express delivery */ 1.1734 + /* 1.1735 + * Its not fragmented, No PD-API is up, Nothing in the 1.1736 + * delivery queue, Its un-ordered OR ordered and the next to 1.1737 + * deliver AND nothing else is stuck on the stream queue, 1.1738 + * And there is room for it in the socket buffer. Lets just 1.1739 + * stuff it up the buffer.... 1.1740 + */ 1.1741 + 1.1742 + /* It would be nice to avoid this copy if we could :< */ 1.1743 + sctp_alloc_a_readq(stcb, control); 1.1744 + sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1.1745 + protocol_id, 1.1746 + strmno, strmseq, 1.1747 + chunk_flags, 1.1748 + dmbuf); 1.1749 + if (control == NULL) { 1.1750 + goto failed_express_del; 1.1751 + } 1.1752 + SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1.1753 + if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1.1754 + asoc->highest_tsn_inside_nr_map = tsn; 1.1755 + } 1.1756 + sctp_add_to_readq(stcb->sctp_ep, stcb, 1.1757 + control, &stcb->sctp_socket->so_rcv, 1.1758 + 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 1.1759 + 1.1760 + if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1.1761 + /* for ordered, bump what we delivered */ 1.1762 + asoc->strmin[strmno].last_sequence_delivered++; 1.1763 + } 1.1764 + SCTP_STAT_INCR(sctps_recvexpress); 1.1765 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 1.1766 + sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, 1.1767 + SCTP_STR_LOG_FROM_EXPRS_DEL); 1.1768 + } 1.1769 + control = NULL; 1.1770 + 1.1771 + goto finish_express_del; 1.1772 + } 1.1773 +failed_express_del: 1.1774 + /* If we reach here this is a new chunk */ 1.1775 + chk = NULL; 1.1776 + control = NULL; 1.1777 + /* Express for fragmented delivery? 
*/ 1.1778 + if ((asoc->fragmented_delivery_inprogress) && 1.1779 + (stcb->asoc.control_pdapi) && 1.1780 + (asoc->str_of_pdapi == strmno) && 1.1781 + (asoc->ssn_of_pdapi == strmseq) 1.1782 + ) { 1.1783 + control = stcb->asoc.control_pdapi; 1.1784 + if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) { 1.1785 + /* Can't be another first? */ 1.1786 + goto failed_pdapi_express_del; 1.1787 + } 1.1788 + if (tsn == (control->sinfo_tsn + 1)) { 1.1789 + /* Yep, we can add it on */ 1.1790 + int end = 0; 1.1791 + 1.1792 + if (chunk_flags & SCTP_DATA_LAST_FRAG) { 1.1793 + end = 1; 1.1794 + } 1.1795 + if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end, 1.1796 + tsn, 1.1797 + &stcb->sctp_socket->so_rcv)) { 1.1798 + SCTP_PRINTF("Append fails end:%d\n", end); 1.1799 + goto failed_pdapi_express_del; 1.1800 + } 1.1801 + 1.1802 + SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1.1803 + if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1.1804 + asoc->highest_tsn_inside_nr_map = tsn; 1.1805 + } 1.1806 + SCTP_STAT_INCR(sctps_recvexpressm); 1.1807 + asoc->tsn_last_delivered = tsn; 1.1808 + asoc->fragment_flags = chunk_flags; 1.1809 + asoc->tsn_of_pdapi_last_delivered = tsn; 1.1810 + asoc->last_flags_delivered = chunk_flags; 1.1811 + asoc->last_strm_seq_delivered = strmseq; 1.1812 + asoc->last_strm_no_delivered = strmno; 1.1813 + if (end) { 1.1814 + /* clean up the flags and such */ 1.1815 + asoc->fragmented_delivery_inprogress = 0; 1.1816 + if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { 1.1817 + asoc->strmin[strmno].last_sequence_delivered++; 1.1818 + } 1.1819 + stcb->asoc.control_pdapi = NULL; 1.1820 + if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) { 1.1821 + /* There could be another message ready */ 1.1822 + need_reasm_check = 1; 1.1823 + } 1.1824 + } 1.1825 + control = NULL; 1.1826 + goto finish_express_del; 1.1827 + } 1.1828 + } 1.1829 + failed_pdapi_express_del: 1.1830 + control = NULL; 1.1831 + if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 1.1832 + SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1.1833 + if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1.1834 + asoc->highest_tsn_inside_nr_map = tsn; 1.1835 + } 1.1836 + } else { 1.1837 + SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 1.1838 + if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { 1.1839 + asoc->highest_tsn_inside_map = tsn; 1.1840 + } 1.1841 + } 1.1842 + if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 1.1843 + sctp_alloc_a_chunk(stcb, chk); 1.1844 + if (chk == NULL) { 1.1845 + /* No memory so we drop the chunk */ 1.1846 + SCTP_STAT_INCR(sctps_nomem); 1.1847 + if (last_chunk == 0) { 1.1848 + /* we copied it, free the copy */ 1.1849 + sctp_m_freem(dmbuf); 1.1850 + } 1.1851 + return (0); 1.1852 + } 1.1853 + chk->rec.data.TSN_seq = tsn; 1.1854 + chk->no_fr_allowed = 0; 1.1855 + chk->rec.data.stream_seq = strmseq; 1.1856 + chk->rec.data.stream_number = strmno; 1.1857 + chk->rec.data.payloadtype = protocol_id; 1.1858 + chk->rec.data.context = stcb->asoc.context; 1.1859 + chk->rec.data.doing_fast_retransmit = 0; 1.1860 + chk->rec.data.rcv_flags = chunk_flags; 1.1861 + chk->asoc = asoc; 1.1862 + chk->send_size = the_len; 1.1863 + chk->whoTo = net; 1.1864 + atomic_add_int(&net->ref_count, 1); 1.1865 + chk->data = dmbuf; 1.1866 + } else { 1.1867 + sctp_alloc_a_readq(stcb, control); 1.1868 + sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 1.1869 + protocol_id, 1.1870 + strmno, strmseq, 1.1871 + chunk_flags, 1.1872 + dmbuf); 1.1873 + if (control == NULL) { 1.1874 + /* No 
memory so we drop the chunk */ 1.1875 + SCTP_STAT_INCR(sctps_nomem); 1.1876 + if (last_chunk == 0) { 1.1877 + /* we copied it, free the copy */ 1.1878 + sctp_m_freem(dmbuf); 1.1879 + } 1.1880 + return (0); 1.1881 + } 1.1882 + control->length = the_len; 1.1883 + } 1.1884 + 1.1885 + /* Mark it as received */ 1.1886 + /* Now queue it where it belongs */ 1.1887 + if (control != NULL) { 1.1888 + /* First a sanity check */ 1.1889 + if (asoc->fragmented_delivery_inprogress) { 1.1890 + /* 1.1891 + * Ok, we have a fragmented delivery in progress if 1.1892 + * this chunk is next to deliver OR belongs in our 1.1893 + * view to the reassembly, the peer is evil or 1.1894 + * broken. 1.1895 + */ 1.1896 + uint32_t estimate_tsn; 1.1897 + 1.1898 + estimate_tsn = asoc->tsn_last_delivered + 1; 1.1899 + if (TAILQ_EMPTY(&asoc->reasmqueue) && 1.1900 + (estimate_tsn == control->sinfo_tsn)) { 1.1901 + /* Evil/Broke peer */ 1.1902 + sctp_m_freem(control->data); 1.1903 + control->data = NULL; 1.1904 + if (control->whoFrom) { 1.1905 + sctp_free_remote_addr(control->whoFrom); 1.1906 + control->whoFrom = NULL; 1.1907 + } 1.1908 + sctp_free_a_readq(stcb, control); 1.1909 + oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1.1910 + 0, M_NOWAIT, 1, MT_DATA); 1.1911 + if (oper) { 1.1912 + struct sctp_paramhdr *ph; 1.1913 + uint32_t *ippp; 1.1914 + 1.1915 + SCTP_BUF_LEN(oper) = 1.1916 + sizeof(struct sctp_paramhdr) + 1.1917 + (3 * sizeof(uint32_t)); 1.1918 + ph = mtod(oper, struct sctp_paramhdr *); 1.1919 + ph->param_type = 1.1920 + htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1.1921 + ph->param_length = htons(SCTP_BUF_LEN(oper)); 1.1922 + ippp = (uint32_t *) (ph + 1); 1.1923 + *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_15); 1.1924 + ippp++; 1.1925 + *ippp = tsn; 1.1926 + ippp++; 1.1927 + *ippp = ((strmno << 16) | strmseq); 1.1928 + } 1.1929 + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_15; 1.1930 + sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); 1.1931 + *abort_flag = 1; 1.1932 + return (0); 1.1933 + } else { 1.1934 + if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1.1935 + sctp_m_freem(control->data); 1.1936 + control->data = NULL; 1.1937 + if (control->whoFrom) { 1.1938 + sctp_free_remote_addr(control->whoFrom); 1.1939 + control->whoFrom = NULL; 1.1940 + } 1.1941 + sctp_free_a_readq(stcb, control); 1.1942 + 1.1943 + oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1.1944 + 0, M_NOWAIT, 1, MT_DATA); 1.1945 + if (oper) { 1.1946 + struct sctp_paramhdr *ph; 1.1947 + uint32_t *ippp; 1.1948 + 1.1949 + SCTP_BUF_LEN(oper) = 1.1950 + sizeof(struct sctp_paramhdr) + 1.1951 + ( 3 * sizeof(uint32_t)); 1.1952 + ph = mtod(oper, 1.1953 + struct sctp_paramhdr *); 1.1954 + ph->param_type = 1.1955 + htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1.1956 + ph->param_length = 1.1957 + htons(SCTP_BUF_LEN(oper)); 1.1958 + ippp = (uint32_t *) (ph + 1); 1.1959 + *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_16); 1.1960 + ippp++; 1.1961 + *ippp = tsn; 1.1962 + ippp++; 1.1963 + *ippp = ((strmno << 16) | strmseq); 1.1964 + } 1.1965 + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_16; 1.1966 + sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); 1.1967 + *abort_flag = 1; 1.1968 + return (0); 1.1969 + } 1.1970 + } 1.1971 + } else { 1.1972 + /* No PDAPI running */ 1.1973 + if (!TAILQ_EMPTY(&asoc->reasmqueue)) { 1.1974 + /* 1.1975 + * Reassembly queue is NOT empty validate 1.1976 + * that this tsn does 
not need to be in 1.1977 + * reasembly queue. If it does then our peer 1.1978 + * is broken or evil. 1.1979 + */ 1.1980 + if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { 1.1981 + sctp_m_freem(control->data); 1.1982 + control->data = NULL; 1.1983 + if (control->whoFrom) { 1.1984 + sctp_free_remote_addr(control->whoFrom); 1.1985 + control->whoFrom = NULL; 1.1986 + } 1.1987 + sctp_free_a_readq(stcb, control); 1.1988 + oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 1.1989 + 0, M_NOWAIT, 1, MT_DATA); 1.1990 + if (oper) { 1.1991 + struct sctp_paramhdr *ph; 1.1992 + uint32_t *ippp; 1.1993 + 1.1994 + SCTP_BUF_LEN(oper) = 1.1995 + sizeof(struct sctp_paramhdr) + 1.1996 + (3 * sizeof(uint32_t)); 1.1997 + ph = mtod(oper, 1.1998 + struct sctp_paramhdr *); 1.1999 + ph->param_type = 1.2000 + htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1.2001 + ph->param_length = 1.2002 + htons(SCTP_BUF_LEN(oper)); 1.2003 + ippp = (uint32_t *) (ph + 1); 1.2004 + *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_17); 1.2005 + ippp++; 1.2006 + *ippp = tsn; 1.2007 + ippp++; 1.2008 + *ippp = ((strmno << 16) | strmseq); 1.2009 + } 1.2010 + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_17; 1.2011 + sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); 1.2012 + *abort_flag = 1; 1.2013 + return (0); 1.2014 + } 1.2015 + } 1.2016 + } 1.2017 + /* ok, if we reach here we have passed the sanity checks */ 1.2018 + if (chunk_flags & SCTP_DATA_UNORDERED) { 1.2019 + /* queue directly into socket buffer */ 1.2020 + sctp_mark_non_revokable(asoc, control->sinfo_tsn); 1.2021 + sctp_add_to_readq(stcb->sctp_ep, stcb, 1.2022 + control, 1.2023 + &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 1.2024 + } else { 1.2025 + /* 1.2026 + * Special check for when streams are resetting. We 1.2027 + * could be more smart about this and check the 1.2028 + * actual stream to see if it is not being reset.. 1.2029 + * that way we would not create a HOLB when amongst 1.2030 + * streams being reset and those not being reset. 1.2031 + * 1.2032 + * We take complete messages that have a stream reset 1.2033 + * intervening (aka the TSN is after where our 1.2034 + * cum-ack needs to be) off and put them on a 1.2035 + * pending_reply_queue. The reassembly ones we do 1.2036 + * not have to worry about since they are all sorted 1.2037 + * and proceessed by TSN order. It is only the 1.2038 + * singletons I must worry about. 1.2039 + */ 1.2040 + if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 1.2041 + SCTP_TSN_GT(tsn, liste->tsn)) { 1.2042 + /* 1.2043 + * yep its past where we need to reset... go 1.2044 + * ahead and queue it. 1.2045 + */ 1.2046 + if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 1.2047 + /* first one on */ 1.2048 + TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 1.2049 + } else { 1.2050 + struct sctp_queued_to_read *ctlOn, *nctlOn; 1.2051 + unsigned char inserted = 0; 1.2052 + 1.2053 + TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) { 1.2054 + if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) { 1.2055 + continue; 1.2056 + } else { 1.2057 + /* found it */ 1.2058 + TAILQ_INSERT_BEFORE(ctlOn, control, next); 1.2059 + inserted = 1; 1.2060 + break; 1.2061 + } 1.2062 + } 1.2063 + if (inserted == 0) { 1.2064 + /* 1.2065 + * must be put at end, use 1.2066 + * prevP (all setup from 1.2067 + * loop) to setup nextP. 
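The pending_reply_queue insertion above is a plain ordered list insert keyed on TSN: walk the list and place the new entry before the first element with a larger TSN, falling back to the tail. In miniature, with a hypothetical entry type and an ordinary < standing in for the serial-number compare SCTP_TSN_GT used by the real code:

#include <stdint.h>
#include <sys/queue.h>

struct pend_entry {
	uint32_t tsn;
	TAILQ_ENTRY(pend_entry) next;
};
TAILQ_HEAD(pend_head, pend_entry);

void
pend_insert_sorted(struct pend_head *head, struct pend_entry *new_e)
{
	struct pend_entry *cur;

	TAILQ_FOREACH(cur, head, next) {
		if (new_e->tsn < cur->tsn) {	/* SCTP_TSN_GT in the stack */
			TAILQ_INSERT_BEFORE(cur, new_e, next);
			return;
		}
	}
	TAILQ_INSERT_TAIL(head, new_e, next);
}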
1.2068 + */ 1.2069 + TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 1.2070 + } 1.2071 + } 1.2072 + } else { 1.2073 + sctp_queue_data_to_stream(stcb, asoc, control, abort_flag); 1.2074 + if (*abort_flag) { 1.2075 + return (0); 1.2076 + } 1.2077 + } 1.2078 + } 1.2079 + } else { 1.2080 + /* Into the re-assembly queue */ 1.2081 + sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag); 1.2082 + if (*abort_flag) { 1.2083 + /* 1.2084 + * the assoc is now gone and chk was put onto the 1.2085 + * reasm queue, which has all been freed. 1.2086 + */ 1.2087 + *m = NULL; 1.2088 + return (0); 1.2089 + } 1.2090 + } 1.2091 +finish_express_del: 1.2092 + if (tsn == (asoc->cumulative_tsn + 1)) { 1.2093 + /* Update cum-ack */ 1.2094 + asoc->cumulative_tsn = tsn; 1.2095 + } 1.2096 + if (last_chunk) { 1.2097 + *m = NULL; 1.2098 + } 1.2099 + if (ordered) { 1.2100 + SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 1.2101 + } else { 1.2102 + SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 1.2103 + } 1.2104 + SCTP_STAT_INCR(sctps_recvdata); 1.2105 + /* Set it present please */ 1.2106 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 1.2107 + sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN); 1.2108 + } 1.2109 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 1.2110 + sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 1.2111 + asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 1.2112 + } 1.2113 + /* check the special flag for stream resets */ 1.2114 + if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 1.2115 + SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { 1.2116 + /* 1.2117 + * we have finished working through the backlogged TSN's now 1.2118 + * time to reset streams. 1: call reset function. 2: free 1.2119 + * pending_reply space 3: distribute any chunks in 1.2120 + * pending_reply_queue. 1.2121 + */ 1.2122 + struct sctp_queued_to_read *ctl, *nctl; 1.2123 + 1.2124 + sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); 1.2125 + TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 1.2126 + SCTP_FREE(liste, SCTP_M_STRESET); 1.2127 + /*sa_ignore FREED_MEMORY*/ 1.2128 + liste = TAILQ_FIRST(&asoc->resetHead); 1.2129 + if (TAILQ_EMPTY(&asoc->resetHead)) { 1.2130 + /* All can be removed */ 1.2131 + TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) { 1.2132 + TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 1.2133 + sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 1.2134 + if (*abort_flag) { 1.2135 + return (0); 1.2136 + } 1.2137 + } 1.2138 + } else { 1.2139 + TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) { 1.2140 + if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) { 1.2141 + break; 1.2142 + } 1.2143 + /* 1.2144 + * if ctl->sinfo_tsn is <= liste->tsn we can 1.2145 + * process it which is the NOT of 1.2146 + * ctl->sinfo_tsn > liste->tsn 1.2147 + */ 1.2148 + TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); 1.2149 + sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); 1.2150 + if (*abort_flag) { 1.2151 + return (0); 1.2152 + } 1.2153 + } 1.2154 + } 1.2155 + /* 1.2156 + * Now service re-assembly to pick up anything that has been 1.2157 + * held on reassembly queue? 1.2158 + */ 1.2159 + sctp_deliver_reasm_check(stcb, asoc); 1.2160 + need_reasm_check = 0; 1.2161 + } 1.2162 + 1.2163 + if (need_reasm_check) { 1.2164 + /* Another one waits ? 
 */
1.2165 +		sctp_deliver_reasm_check(stcb, asoc);
1.2166 +	}
1.2167 +	return (1);
1.2168 +}
1.2169 +
1.2170 +int8_t sctp_map_lookup_tab[256] = {
1.2171 +	0, 1, 0, 2, 0, 1, 0, 3,
1.2172 +	0, 1, 0, 2, 0, 1, 0, 4,
1.2173 +	0, 1, 0, 2, 0, 1, 0, 3,
1.2174 +	0, 1, 0, 2, 0, 1, 0, 5,
1.2175 +	0, 1, 0, 2, 0, 1, 0, 3,
1.2176 +	0, 1, 0, 2, 0, 1, 0, 4,
1.2177 +	0, 1, 0, 2, 0, 1, 0, 3,
1.2178 +	0, 1, 0, 2, 0, 1, 0, 6,
1.2179 +	0, 1, 0, 2, 0, 1, 0, 3,
1.2180 +	0, 1, 0, 2, 0, 1, 0, 4,
1.2181 +	0, 1, 0, 2, 0, 1, 0, 3,
1.2182 +	0, 1, 0, 2, 0, 1, 0, 5,
1.2183 +	0, 1, 0, 2, 0, 1, 0, 3,
1.2184 +	0, 1, 0, 2, 0, 1, 0, 4,
1.2185 +	0, 1, 0, 2, 0, 1, 0, 3,
1.2186 +	0, 1, 0, 2, 0, 1, 0, 7,
1.2187 +	0, 1, 0, 2, 0, 1, 0, 3,
1.2188 +	0, 1, 0, 2, 0, 1, 0, 4,
1.2189 +	0, 1, 0, 2, 0, 1, 0, 3,
1.2190 +	0, 1, 0, 2, 0, 1, 0, 5,
1.2191 +	0, 1, 0, 2, 0, 1, 0, 3,
1.2192 +	0, 1, 0, 2, 0, 1, 0, 4,
1.2193 +	0, 1, 0, 2, 0, 1, 0, 3,
1.2194 +	0, 1, 0, 2, 0, 1, 0, 6,
1.2195 +	0, 1, 0, 2, 0, 1, 0, 3,
1.2196 +	0, 1, 0, 2, 0, 1, 0, 4,
1.2197 +	0, 1, 0, 2, 0, 1, 0, 3,
1.2198 +	0, 1, 0, 2, 0, 1, 0, 5,
1.2199 +	0, 1, 0, 2, 0, 1, 0, 3,
1.2200 +	0, 1, 0, 2, 0, 1, 0, 4,
1.2201 +	0, 1, 0, 2, 0, 1, 0, 3,
1.2202 +	0, 1, 0, 2, 0, 1, 0, 8
1.2203 +};
1.2204 +
1.2205 +
1.2206 +void
1.2207 +sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
1.2208 +{
1.2209 +	/*
1.2210 +	 * Now we also need to check the mapping array in a couple of ways.
1.2211 +	 * 1) Did we move the cum-ack point?
1.2212 +	 *
1.2213 +	 * When you first glance at this you might think
1.2214 +	 * that all entries that make up the position
1.2215 +	 * of the cum-ack would be in the nr-mapping array
1.2216 +	 * only.. i.e. things up to the cum-ack are always
1.2217 +	 * deliverable. That's true with one exception: when
1.2218 +	 * it's a fragmented message we may not deliver the data
1.2219 +	 * until some threshold (or all of it) is in place. So
1.2220 +	 * we must OR the nr_mapping_array and mapping_array to
1.2221 +	 * get a true picture of the cum-ack.
1.2222 +	 */
1.2223 +	struct sctp_association *asoc;
1.2224 +	int at;
1.2225 +	uint8_t val;
1.2226 +	int slide_from, slide_end, lgap, distance;
1.2227 +	uint32_t old_cumack, old_base, old_highest, highest_tsn;
1.2228 +
1.2229 +	asoc = &stcb->asoc;
1.2230 +
1.2231 +	old_cumack = asoc->cumulative_tsn;
1.2232 +	old_base = asoc->mapping_array_base_tsn;
1.2233 +	old_highest = asoc->highest_tsn_inside_map;
1.2234 +	/*
1.2235 +	 * We could probably improve this a small bit by calculating the
1.2236 +	 * offset of the current cum-ack as the starting point.
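sctp_map_lookup_tab above encodes, for any byte other than 0xff, the number of consecutive 1-bits starting at bit 0, i.e. how far the run of received TSNs extends into that byte before the first hole. The loop it replaces, with a few spot-checks against the table:

#include <stdint.h>
#include <assert.h>

static int
trailing_ones(uint8_t val)
{
	int n = 0;

	while (val & 0x01) {
		n++;
		val >>= 1;
	}
	return (n);
}

int
main(void)
{
	assert(trailing_ones(0x00) == 0);	/* tab[0]   == 0 */
	assert(trailing_ones(0x07) == 3);	/* tab[7]   == 3 */
	assert(trailing_ones(0x7f) == 7);	/* tab[127] == 7 */
	return (0);
}

The 0xff case (all eight TSNs present) is handled separately in the scan loop, which simply adds 8 and keeps going.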
1.2237 + */ 1.2238 + at = 0; 1.2239 + for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { 1.2240 + val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from]; 1.2241 + if (val == 0xff) { 1.2242 + at += 8; 1.2243 + } else { 1.2244 + /* there is a 0 bit */ 1.2245 + at += sctp_map_lookup_tab[val]; 1.2246 + break; 1.2247 + } 1.2248 + } 1.2249 + asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1); 1.2250 + 1.2251 + if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) && 1.2252 + SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) { 1.2253 +#ifdef INVARIANTS 1.2254 + panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", 1.2255 + asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 1.2256 +#else 1.2257 + SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", 1.2258 + asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 1.2259 + sctp_print_mapping_array(asoc); 1.2260 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 1.2261 + sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 1.2262 + } 1.2263 + asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 1.2264 + asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; 1.2265 +#endif 1.2266 + } 1.2267 + if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 1.2268 + highest_tsn = asoc->highest_tsn_inside_nr_map; 1.2269 + } else { 1.2270 + highest_tsn = asoc->highest_tsn_inside_map; 1.2271 + } 1.2272 + if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) { 1.2273 + /* The complete array was completed by a single FR */ 1.2274 + /* highest becomes the cum-ack */ 1.2275 + int clr; 1.2276 +#ifdef INVARIANTS 1.2277 + unsigned int i; 1.2278 +#endif 1.2279 + 1.2280 + /* clear the array */ 1.2281 + clr = ((at+7) >> 3); 1.2282 + if (clr > asoc->mapping_array_size) { 1.2283 + clr = asoc->mapping_array_size; 1.2284 + } 1.2285 + memset(asoc->mapping_array, 0, clr); 1.2286 + memset(asoc->nr_mapping_array, 0, clr); 1.2287 +#ifdef INVARIANTS 1.2288 + for (i = 0; i < asoc->mapping_array_size; i++) { 1.2289 + if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) { 1.2290 + SCTP_PRINTF("Error Mapping array's not clean at clear\n"); 1.2291 + sctp_print_mapping_array(asoc); 1.2292 + } 1.2293 + } 1.2294 +#endif 1.2295 + asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 1.2296 + asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 1.2297 + } else if (at >= 8) { 1.2298 + /* we can slide the mapping array down */ 1.2299 + /* slide_from holds where we hit the first NON 0xff byte */ 1.2300 + 1.2301 + /* 1.2302 + * now calculate the ceiling of the move using our highest 1.2303 + * TSN value 1.2304 + */ 1.2305 + SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn); 1.2306 + slide_end = (lgap >> 3); 1.2307 + if (slide_end < slide_from) { 1.2308 + sctp_print_mapping_array(asoc); 1.2309 +#ifdef INVARIANTS 1.2310 + panic("impossible slide"); 1.2311 +#else 1.2312 + SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? 
at:%d\n", 1.2313 + lgap, slide_end, slide_from, at); 1.2314 + return; 1.2315 +#endif 1.2316 + } 1.2317 + if (slide_end > asoc->mapping_array_size) { 1.2318 +#ifdef INVARIANTS 1.2319 + panic("would overrun buffer"); 1.2320 +#else 1.2321 + SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n", 1.2322 + asoc->mapping_array_size, slide_end); 1.2323 + slide_end = asoc->mapping_array_size; 1.2324 +#endif 1.2325 + } 1.2326 + distance = (slide_end - slide_from) + 1; 1.2327 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 1.2328 + sctp_log_map(old_base, old_cumack, old_highest, 1.2329 + SCTP_MAP_PREPARE_SLIDE); 1.2330 + sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end, 1.2331 + (uint32_t) lgap, SCTP_MAP_SLIDE_FROM); 1.2332 + } 1.2333 + if (distance + slide_from > asoc->mapping_array_size || 1.2334 + distance < 0) { 1.2335 + /* 1.2336 + * Here we do NOT slide forward the array so that 1.2337 + * hopefully when more data comes in to fill it up 1.2338 + * we will be able to slide it forward. Really I 1.2339 + * don't think this should happen :-0 1.2340 + */ 1.2341 + 1.2342 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 1.2343 + sctp_log_map((uint32_t) distance, (uint32_t) slide_from, 1.2344 + (uint32_t) asoc->mapping_array_size, 1.2345 + SCTP_MAP_SLIDE_NONE); 1.2346 + } 1.2347 + } else { 1.2348 + int ii; 1.2349 + 1.2350 + for (ii = 0; ii < distance; ii++) { 1.2351 + asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii]; 1.2352 + asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii]; 1.2353 + 1.2354 + } 1.2355 + for (ii = distance; ii < asoc->mapping_array_size; ii++) { 1.2356 + asoc->mapping_array[ii] = 0; 1.2357 + asoc->nr_mapping_array[ii] = 0; 1.2358 + } 1.2359 + if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) { 1.2360 + asoc->highest_tsn_inside_map += (slide_from << 3); 1.2361 + } 1.2362 + if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) { 1.2363 + asoc->highest_tsn_inside_nr_map += (slide_from << 3); 1.2364 + } 1.2365 + asoc->mapping_array_base_tsn += (slide_from << 3); 1.2366 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 1.2367 + sctp_log_map(asoc->mapping_array_base_tsn, 1.2368 + asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 1.2369 + SCTP_MAP_SLIDE_RESULT); 1.2370 + } 1.2371 + } 1.2372 + } 1.2373 +} 1.2374 + 1.2375 +void 1.2376 +sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap) 1.2377 +{ 1.2378 + struct sctp_association *asoc; 1.2379 + uint32_t highest_tsn; 1.2380 + 1.2381 + asoc = &stcb->asoc; 1.2382 + if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 1.2383 + highest_tsn = asoc->highest_tsn_inside_nr_map; 1.2384 + } else { 1.2385 + highest_tsn = asoc->highest_tsn_inside_map; 1.2386 + } 1.2387 + 1.2388 + /* 1.2389 + * Now we need to see if we need to queue a sack or just start the 1.2390 + * timer (if allowed). 1.2391 + */ 1.2392 + if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 1.2393 + /* 1.2394 + * Ok special case, in SHUTDOWN-SENT case. here we 1.2395 + * maker sure SACK timer is off and instead send a 1.2396 + * SHUTDOWN and a SACK 1.2397 + */ 1.2398 + if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 1.2399 + sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 1.2400 + stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA+SCTP_LOC_18); 1.2401 + } 1.2402 + sctp_send_shutdown(stcb, 1.2403 + ((stcb->asoc.alternate) ? 
stcb->asoc.alternate : stcb->asoc.primary_destination)); 1.2404 + sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1.2405 + } else { 1.2406 + int is_a_gap; 1.2407 + 1.2408 + /* is there a gap now ? */ 1.2409 + is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 1.2410 + 1.2411 + /* 1.2412 + * CMT DAC algorithm: increase number of packets 1.2413 + * received since last ack 1.2414 + */ 1.2415 + stcb->asoc.cmt_dac_pkts_rcvd++; 1.2416 + 1.2417 + if ((stcb->asoc.send_sack == 1) || /* We need to send a SACK */ 1.2418 + ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 1.2419 + * longer is one */ 1.2420 + (stcb->asoc.numduptsns) || /* we have dup's */ 1.2421 + (is_a_gap) || /* is still a gap */ 1.2422 + (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ 1.2423 + (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ 1.2424 + ) { 1.2425 + 1.2426 + if ((stcb->asoc.sctp_cmt_on_off > 0) && 1.2427 + (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) && 1.2428 + (stcb->asoc.send_sack == 0) && 1.2429 + (stcb->asoc.numduptsns == 0) && 1.2430 + (stcb->asoc.delayed_ack) && 1.2431 + (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { 1.2432 + 1.2433 + /* 1.2434 + * CMT DAC algorithm: With CMT, 1.2435 + * delay acks even in the face of 1.2436 + 1.2437 + * reordering. Therefore, if acks 1.2438 + * that do not have to be sent 1.2439 + * because of the above reasons, 1.2440 + * will be delayed. That is, acks 1.2441 + * that would have been sent due to 1.2442 + * gap reports will be delayed with 1.2443 + * DAC. Start the delayed ack timer. 1.2444 + */ 1.2445 + sctp_timer_start(SCTP_TIMER_TYPE_RECV, 1.2446 + stcb->sctp_ep, stcb, NULL); 1.2447 + } else { 1.2448 + /* 1.2449 + * Ok we must build a SACK since the 1.2450 + * timer is pending, we got our 1.2451 + * first packet OR there are gaps or 1.2452 + * duplicates. 1.2453 + */ 1.2454 + (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 1.2455 + sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1.2456 + } 1.2457 + } else { 1.2458 + if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 1.2459 + sctp_timer_start(SCTP_TIMER_TYPE_RECV, 1.2460 + stcb->sctp_ep, stcb, NULL); 1.2461 + } 1.2462 + } 1.2463 + } 1.2464 +} 1.2465 + 1.2466 +void 1.2467 +sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc) 1.2468 +{ 1.2469 + struct sctp_tmit_chunk *chk; 1.2470 + uint32_t tsize, pd_point; 1.2471 + uint16_t nxt_todel; 1.2472 + 1.2473 + if (asoc->fragmented_delivery_inprogress) { 1.2474 + sctp_service_reassembly(stcb, asoc); 1.2475 + } 1.2476 + /* Can we proceed further, i.e. the PD-API is complete */ 1.2477 + if (asoc->fragmented_delivery_inprogress) { 1.2478 + /* no */ 1.2479 + return; 1.2480 + } 1.2481 + /* 1.2482 + * Now is there some other chunk I can deliver from the reassembly 1.2483 + * queue. 1.2484 + */ 1.2485 + doit_again: 1.2486 + chk = TAILQ_FIRST(&asoc->reasmqueue); 1.2487 + if (chk == NULL) { 1.2488 + asoc->size_on_reasm_queue = 0; 1.2489 + asoc->cnt_on_reasm_queue = 0; 1.2490 + return; 1.2491 + } 1.2492 + nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1; 1.2493 + if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) && 1.2494 + ((nxt_todel == chk->rec.data.stream_seq) || 1.2495 + (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) { 1.2496 + /* 1.2497 + * Yep the first one is here. We setup to start reception, 1.2498 + * by backing down the TSN just in case we can't deliver. 
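The trigger list in sctp_sack_check above reduces to one predicate: SACK immediately if any trigger fires, otherwise (re)start the delayed-ack timer. (The CMT DAC branch can still elect to delay a purely gap-triggered ack.) A sketch with parameter names of my own choosing:

#include <stdbool.h>
#include <stdint.h>

bool
must_sack_now(bool sack_requested, bool gap_just_closed, int num_dup_tsns,
    bool gap_exists, bool delayed_ack_disabled, uint32_t pkts_seen,
    uint32_t sack_freq)
{
	return (sack_requested ||
	    gap_just_closed ||		/* was a gap, no longer is one */
	    num_dup_tsns > 0 ||		/* duplicates to report */
	    gap_exists ||		/* still a gap */
	    delayed_ack_disabled ||
	    pkts_seen >= sack_freq);	/* hit the packet limit */
}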
1.2499 + */ 1.2500 + 1.2501 + /* 1.2502 + * Before we start though either all of the message should 1.2503 + * be here or the socket buffer max or nothing on the 1.2504 + * delivery queue and something can be delivered. 1.2505 + */ 1.2506 + if (stcb->sctp_socket) { 1.2507 + pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT, 1.2508 + stcb->sctp_ep->partial_delivery_point); 1.2509 + } else { 1.2510 + pd_point = stcb->sctp_ep->partial_delivery_point; 1.2511 + } 1.2512 + if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) { 1.2513 + asoc->fragmented_delivery_inprogress = 1; 1.2514 + asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1; 1.2515 + asoc->str_of_pdapi = chk->rec.data.stream_number; 1.2516 + asoc->ssn_of_pdapi = chk->rec.data.stream_seq; 1.2517 + asoc->pdapi_ppid = chk->rec.data.payloadtype; 1.2518 + asoc->fragment_flags = chk->rec.data.rcv_flags; 1.2519 + sctp_service_reassembly(stcb, asoc); 1.2520 + if (asoc->fragmented_delivery_inprogress == 0) { 1.2521 + goto doit_again; 1.2522 + } 1.2523 + } 1.2524 + } 1.2525 +} 1.2526 + 1.2527 +int 1.2528 +sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 1.2529 + struct sockaddr *src, struct sockaddr *dst, 1.2530 + struct sctphdr *sh, struct sctp_inpcb *inp, 1.2531 + struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t *high_tsn, 1.2532 +#if defined(__FreeBSD__) 1.2533 + uint8_t use_mflowid, uint32_t mflowid, 1.2534 +#endif 1.2535 + uint32_t vrf_id, uint16_t port) 1.2536 +{ 1.2537 + struct sctp_data_chunk *ch, chunk_buf; 1.2538 + struct sctp_association *asoc; 1.2539 + int num_chunks = 0; /* number of control chunks processed */ 1.2540 + int stop_proc = 0; 1.2541 + int chk_length, break_flag, last_chunk; 1.2542 + int abort_flag = 0, was_a_gap; 1.2543 + struct mbuf *m; 1.2544 + uint32_t highest_tsn; 1.2545 + 1.2546 + /* set the rwnd */ 1.2547 + sctp_set_rwnd(stcb, &stcb->asoc); 1.2548 + 1.2549 + m = *mm; 1.2550 + SCTP_TCB_LOCK_ASSERT(stcb); 1.2551 + asoc = &stcb->asoc; 1.2552 + if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 1.2553 + highest_tsn = asoc->highest_tsn_inside_nr_map; 1.2554 + } else { 1.2555 + highest_tsn = asoc->highest_tsn_inside_map; 1.2556 + } 1.2557 + was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 1.2558 + /* 1.2559 + * setup where we got the last DATA packet from for any SACK that 1.2560 + * may need to go out. Don't bump the net. This is done ONLY when a 1.2561 + * chunk is assigned. 1.2562 + */ 1.2563 + asoc->last_data_chunk_from = net; 1.2564 + 1.2565 +#ifndef __Panda__ 1.2566 + /*- 1.2567 + * Now before we proceed we must figure out if this is a wasted 1.2568 + * cluster... i.e. it is a small packet sent in and yet the driver 1.2569 + * underneath allocated a full cluster for it. If so we must copy it 1.2570 + * to a smaller mbuf and free up the cluster mbuf. This will help 1.2571 + * with cluster starvation. Note for __Panda__ we don't do this 1.2572 + * since it has clusters all the way down to 64 bytes. 1.2573 + */ 1.2574 + if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { 1.2575 + /* we only handle mbufs that are singletons.. 
not chains */
1.2576 +		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
1.2577 +		if (m) {
1.2578 +			/* ok, let's see if we can copy the data up */
1.2579 +			caddr_t *from, *to;
1.2580 +			/* get the pointers and copy */
1.2581 +			to = mtod(m, caddr_t *);
1.2582 +			from = mtod((*mm), caddr_t *);
1.2583 +			memcpy(to, from, SCTP_BUF_LEN((*mm)));
1.2584 +			/* copy the length and free up the old */
1.2585 +			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
1.2586 +			sctp_m_freem(*mm);
1.2587 +			/* success, copy back */
1.2588 +			*mm = m;
1.2589 +		} else {
1.2590 +			/* We are in trouble in the mbuf world .. yikes */
1.2591 +			m = *mm;
1.2592 +		}
1.2593 +	}
1.2594 +#endif
1.2595 +	/* get pointer to the first chunk header */
1.2596 +	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
1.2597 +	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
1.2598 +	if (ch == NULL) {
1.2599 +		return (1);
1.2600 +	}
1.2601 +	/*
1.2602 +	 * process all DATA chunks...
1.2603 +	 */
1.2604 +	*high_tsn = asoc->cumulative_tsn;
1.2605 +	break_flag = 0;
1.2606 +	asoc->data_pkts_seen++;
1.2607 +	while (stop_proc == 0) {
1.2608 +		/* validate chunk length */
1.2609 +		chk_length = ntohs(ch->ch.chunk_length);
1.2610 +		if (length - *offset < chk_length) {
1.2611 +			/* all done, mutilated chunk */
1.2612 +			stop_proc = 1;
1.2613 +			continue;
1.2614 +		}
1.2615 +		if (ch->ch.chunk_type == SCTP_DATA) {
1.2616 +			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
1.2617 +				/*
1.2618 +				 * Need to send an abort since we had an
1.2619 +				 * invalid data chunk.
1.2620 +				 */
1.2621 +				struct mbuf *op_err;
1.2622 +
1.2623 +				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
1.2624 +				    0, M_NOWAIT, 1, MT_DATA);
1.2625 +
1.2626 +				if (op_err) {
1.2627 +					struct sctp_paramhdr *ph;
1.2628 +					uint32_t *ippp;
1.2629 +
1.2630 +					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
1.2631 +					    (2 * sizeof(uint32_t));
1.2632 +					ph = mtod(op_err, struct sctp_paramhdr *);
1.2633 +					ph->param_type =
1.2634 +					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1.2635 +					ph->param_length = htons(SCTP_BUF_LEN(op_err));
1.2636 +					ippp = (uint32_t *) (ph + 1);
1.2637 +					*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_19);
1.2638 +					ippp++;
1.2639 +					*ippp = asoc->cumulative_tsn;
1.2640 +
1.2641 +				}
1.2642 +				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_19;
1.2643 +				sctp_abort_association(inp, stcb, m, iphlen,
1.2644 +				    src, dst, sh, op_err,
1.2645 +#if defined(__FreeBSD__)
1.2646 +				    use_mflowid, mflowid,
1.2647 +#endif
1.2648 +				    vrf_id, port);
1.2649 +				return (2);
1.2650 +			}
1.2651 +#ifdef SCTP_AUDITING_ENABLED
1.2652 +			sctp_audit_log(0xB1, 0);
1.2653 +#endif
1.2654 +			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
1.2655 +				last_chunk = 1;
1.2656 +			} else {
1.2657 +				last_chunk = 0;
1.2658 +			}
1.2659 +			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
1.2660 +			    chk_length, net, high_tsn, &abort_flag, &break_flag,
1.2661 +			    last_chunk)) {
1.2662 +				num_chunks++;
1.2663 +			}
1.2664 +			if (abort_flag)
1.2665 +				return (2);
1.2666 +
1.2667 +			if (break_flag) {
1.2668 +				/*
1.2669 +				 * Set because of out of rwnd space and no
1.2670 +				 * drop rep space left.
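The chunk walk above advances by the advertised chunk length rounded up to a 4-byte boundary (SCTP_SIZE32) and bails out when a chunk cannot fit in the bytes that remain. The same cursor logic in isolation, as a sketch rather than the mbuf-based original:

#include <stddef.h>
#include <stdint.h>

#define PAD4(x) (((x) + 3u) & ~3u)

/* Return the offset of the next chunk, or len to stop: a chunk whose
 * advertised length is shorter than a chunk header (4 bytes) or does
 * not fit in the remaining bytes is treated as mutilated, exactly as
 * the loop above does. */
size_t
next_chunk_offset(size_t len, size_t off, uint16_t chk_length)
{
	if (chk_length < 4 || len - off < chk_length)
		return (len);
	return (off + PAD4((size_t)chk_length));
}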
1.2671 + */ 1.2672 + stop_proc = 1; 1.2673 + continue; 1.2674 + } 1.2675 + } else { 1.2676 + /* not a data chunk in the data region */ 1.2677 + switch (ch->ch.chunk_type) { 1.2678 + case SCTP_INITIATION: 1.2679 + case SCTP_INITIATION_ACK: 1.2680 + case SCTP_SELECTIVE_ACK: 1.2681 + case SCTP_NR_SELECTIVE_ACK: 1.2682 + case SCTP_HEARTBEAT_REQUEST: 1.2683 + case SCTP_HEARTBEAT_ACK: 1.2684 + case SCTP_ABORT_ASSOCIATION: 1.2685 + case SCTP_SHUTDOWN: 1.2686 + case SCTP_SHUTDOWN_ACK: 1.2687 + case SCTP_OPERATION_ERROR: 1.2688 + case SCTP_COOKIE_ECHO: 1.2689 + case SCTP_COOKIE_ACK: 1.2690 + case SCTP_ECN_ECHO: 1.2691 + case SCTP_ECN_CWR: 1.2692 + case SCTP_SHUTDOWN_COMPLETE: 1.2693 + case SCTP_AUTHENTICATION: 1.2694 + case SCTP_ASCONF_ACK: 1.2695 + case SCTP_PACKET_DROPPED: 1.2696 + case SCTP_STREAM_RESET: 1.2697 + case SCTP_FORWARD_CUM_TSN: 1.2698 + case SCTP_ASCONF: 1.2699 + /* 1.2700 + * Now, what do we do with KNOWN chunks that 1.2701 + * are NOT in the right place? 1.2702 + * 1.2703 + * For now, I do nothing but ignore them. We 1.2704 + * may later want to add sysctl stuff to 1.2705 + * switch out and do either an ABORT() or 1.2706 + * possibly process them. 1.2707 + */ 1.2708 + if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) { 1.2709 + struct mbuf *op_err; 1.2710 + 1.2711 + op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION); 1.2712 + sctp_abort_association(inp, stcb, 1.2713 + m, iphlen, 1.2714 + src, dst, 1.2715 + sh, op_err, 1.2716 +#if defined(__FreeBSD__) 1.2717 + use_mflowid, mflowid, 1.2718 +#endif 1.2719 + vrf_id, port); 1.2720 + return (2); 1.2721 + } 1.2722 + break; 1.2723 + default: 1.2724 + /* unknown chunk type, use bit rules */ 1.2725 + if (ch->ch.chunk_type & 0x40) { 1.2726 + /* Add a error report to the queue */ 1.2727 + struct mbuf *merr; 1.2728 + struct sctp_paramhdr *phd; 1.2729 + 1.2730 + merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA); 1.2731 + if (merr) { 1.2732 + phd = mtod(merr, struct sctp_paramhdr *); 1.2733 + /* 1.2734 + * We cheat and use param 1.2735 + * type since we did not 1.2736 + * bother to define a error 1.2737 + * cause struct. They are 1.2738 + * the same basic format 1.2739 + * with different names. 1.2740 + */ 1.2741 + phd->param_type = 1.2742 + htons(SCTP_CAUSE_UNRECOG_CHUNK); 1.2743 + phd->param_length = 1.2744 + htons(chk_length + sizeof(*phd)); 1.2745 + SCTP_BUF_LEN(merr) = sizeof(*phd); 1.2746 + SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT); 1.2747 + if (SCTP_BUF_NEXT(merr)) { 1.2748 + if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) { 1.2749 + sctp_m_freem(merr); 1.2750 + } else { 1.2751 + sctp_queue_op_err(stcb, merr); 1.2752 + } 1.2753 + } else { 1.2754 + sctp_m_freem(merr); 1.2755 + } 1.2756 + } 1.2757 + } 1.2758 + if ((ch->ch.chunk_type & 0x80) == 0) { 1.2759 + /* discard the rest of this packet */ 1.2760 + stop_proc = 1; 1.2761 + } /* else skip this bad chunk and 1.2762 + * continue... 
*/ 1.2763 + break; 1.2764 + } /* switch of chunk type */ 1.2765 + } 1.2766 + *offset += SCTP_SIZE32(chk_length); 1.2767 + if ((*offset >= length) || stop_proc) { 1.2768 + /* no more data left in the mbuf chain */ 1.2769 + stop_proc = 1; 1.2770 + continue; 1.2771 + } 1.2772 + ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, 1.2773 + sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); 1.2774 + if (ch == NULL) { 1.2775 + *offset = length; 1.2776 + stop_proc = 1; 1.2777 + continue; 1.2778 + } 1.2779 + } 1.2780 + if (break_flag) { 1.2781 + /* 1.2782 + * we need to report rwnd overrun drops. 1.2783 + */ 1.2784 + sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0); 1.2785 + } 1.2786 + if (num_chunks) { 1.2787 + /* 1.2788 + * Did we get data, if so update the time for auto-close and 1.2789 + * give peer credit for being alive. 1.2790 + */ 1.2791 + SCTP_STAT_INCR(sctps_recvpktwithdata); 1.2792 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 1.2793 + sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 1.2794 + stcb->asoc.overall_error_count, 1.2795 + 0, 1.2796 + SCTP_FROM_SCTP_INDATA, 1.2797 + __LINE__); 1.2798 + } 1.2799 + stcb->asoc.overall_error_count = 0; 1.2800 + (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 1.2801 + } 1.2802 + /* now service all of the reassm queue if needed */ 1.2803 + if (!(TAILQ_EMPTY(&asoc->reasmqueue))) 1.2804 + sctp_service_queues(stcb, asoc); 1.2805 + 1.2806 + if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 1.2807 + /* Assure that we ack right away */ 1.2808 + stcb->asoc.send_sack = 1; 1.2809 + } 1.2810 + /* Start a sack timer or QUEUE a SACK for sending */ 1.2811 + sctp_sack_check(stcb, was_a_gap); 1.2812 + return (0); 1.2813 +} 1.2814 + 1.2815 +static int 1.2816 +sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn, 1.2817 + uint16_t frag_strt, uint16_t frag_end, int nr_sacking, 1.2818 + int *num_frs, 1.2819 + uint32_t *biggest_newly_acked_tsn, 1.2820 + uint32_t *this_sack_lowest_newack, 1.2821 + int *rto_ok) 1.2822 +{ 1.2823 + struct sctp_tmit_chunk *tp1; 1.2824 + unsigned int theTSN; 1.2825 + int j, wake_him = 0, circled = 0; 1.2826 + 1.2827 + /* Recover the tp1 we last saw */ 1.2828 + tp1 = *p_tp1; 1.2829 + if (tp1 == NULL) { 1.2830 + tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 1.2831 + } 1.2832 + for (j = frag_strt; j <= frag_end; j++) { 1.2833 + theTSN = j + last_tsn; 1.2834 + while (tp1) { 1.2835 + if (tp1->rec.data.doing_fast_retransmit) 1.2836 + (*num_frs) += 1; 1.2837 + 1.2838 + /*- 1.2839 + * CMT: CUCv2 algorithm. For each TSN being 1.2840 + * processed from the sent queue, track the 1.2841 + * next expected pseudo-cumack, or 1.2842 + * rtx_pseudo_cumack, if required. Separate 1.2843 + * cumack trackers for first transmissions, 1.2844 + * and retransmissions. 
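The j loop above expands each gap-ack block into absolute TSNs: a block (start, end) acks last_tsn + start through last_tsn + end inclusive, the offsets being relative to the SACK's cumulative TSN (RFC 4960, Section 3.3.4). A small demonstration:

#include <stdint.h>
#include <stdio.h>

static void
print_gap_block(uint32_t cum_tsn, uint16_t start, uint16_t end)
{
	unsigned int j;

	/* unsigned arithmetic gives TSN wraparound for free */
	for (j = start; j <= end; j++)
		printf("acked TSN %u\n", cum_tsn + j);
}

int
main(void)
{
	/* cum-ack 1000, block (2,4) -> TSNs 1002, 1003, 1004 */
	print_gap_block(1000, 2, 4);
	return (0);
}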
1.2845 + */ 1.2846 + if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 1.2847 + (tp1->snd_count == 1)) { 1.2848 + tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq; 1.2849 + tp1->whoTo->find_pseudo_cumack = 0; 1.2850 + } 1.2851 + if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && 1.2852 + (tp1->snd_count > 1)) { 1.2853 + tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq; 1.2854 + tp1->whoTo->find_rtx_pseudo_cumack = 0; 1.2855 + } 1.2856 + if (tp1->rec.data.TSN_seq == theTSN) { 1.2857 + if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 1.2858 + /*- 1.2859 + * must be held until 1.2860 + * cum-ack passes 1.2861 + */ 1.2862 + if (tp1->sent < SCTP_DATAGRAM_RESEND) { 1.2863 + /*- 1.2864 + * If it is less than RESEND, it is 1.2865 + * now no-longer in flight. 1.2866 + * Higher values may already be set 1.2867 + * via previous Gap Ack Blocks... 1.2868 + * i.e. ACKED or RESEND. 1.2869 + */ 1.2870 + if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, 1.2871 + *biggest_newly_acked_tsn)) { 1.2872 + *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq; 1.2873 + } 1.2874 + /*- 1.2875 + * CMT: SFR algo (and HTNA) - set 1.2876 + * saw_newack to 1 for dest being 1.2877 + * newly acked. update 1.2878 + * this_sack_highest_newack if 1.2879 + * appropriate. 1.2880 + */ 1.2881 + if (tp1->rec.data.chunk_was_revoked == 0) 1.2882 + tp1->whoTo->saw_newack = 1; 1.2883 + 1.2884 + if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, 1.2885 + tp1->whoTo->this_sack_highest_newack)) { 1.2886 + tp1->whoTo->this_sack_highest_newack = 1.2887 + tp1->rec.data.TSN_seq; 1.2888 + } 1.2889 + /*- 1.2890 + * CMT DAC algo: also update 1.2891 + * this_sack_lowest_newack 1.2892 + */ 1.2893 + if (*this_sack_lowest_newack == 0) { 1.2894 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 1.2895 + sctp_log_sack(*this_sack_lowest_newack, 1.2896 + last_tsn, 1.2897 + tp1->rec.data.TSN_seq, 1.2898 + 0, 1.2899 + 0, 1.2900 + SCTP_LOG_TSN_ACKED); 1.2901 + } 1.2902 + *this_sack_lowest_newack = tp1->rec.data.TSN_seq; 1.2903 + } 1.2904 + /*- 1.2905 + * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp 1.2906 + * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set 1.2907 + * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be 1.2908 + * updated. Also trigger search for the next expected (rtx-)pseudo-cumack. 1.2909 + * Separate pseudo_cumack trackers for first transmissions and 1.2910 + * retransmissions. 
1.2911 + */ 1.2912 + if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) { 1.2913 + if (tp1->rec.data.chunk_was_revoked == 0) { 1.2914 + tp1->whoTo->new_pseudo_cumack = 1; 1.2915 + } 1.2916 + tp1->whoTo->find_pseudo_cumack = 1; 1.2917 + } 1.2918 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1.2919 + sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 1.2920 + } 1.2921 + if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) { 1.2922 + if (tp1->rec.data.chunk_was_revoked == 0) { 1.2923 + tp1->whoTo->new_pseudo_cumack = 1; 1.2924 + } 1.2925 + tp1->whoTo->find_rtx_pseudo_cumack = 1; 1.2926 + } 1.2927 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 1.2928 + sctp_log_sack(*biggest_newly_acked_tsn, 1.2929 + last_tsn, 1.2930 + tp1->rec.data.TSN_seq, 1.2931 + frag_strt, 1.2932 + frag_end, 1.2933 + SCTP_LOG_TSN_ACKED); 1.2934 + } 1.2935 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 1.2936 + sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, 1.2937 + tp1->whoTo->flight_size, 1.2938 + tp1->book_size, 1.2939 + (uintptr_t)tp1->whoTo, 1.2940 + tp1->rec.data.TSN_seq); 1.2941 + } 1.2942 + sctp_flight_size_decrease(tp1); 1.2943 + if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 1.2944 + (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, 1.2945 + tp1); 1.2946 + } 1.2947 + sctp_total_flight_decrease(stcb, tp1); 1.2948 + 1.2949 + tp1->whoTo->net_ack += tp1->send_size; 1.2950 + if (tp1->snd_count < 2) { 1.2951 + /*- 1.2952 + * True non-retransmited chunk 1.2953 + */ 1.2954 + tp1->whoTo->net_ack2 += tp1->send_size; 1.2955 + 1.2956 + /*- 1.2957 + * update RTO too ? 1.2958 + */ 1.2959 + if (tp1->do_rtt) { 1.2960 + if (*rto_ok) { 1.2961 + tp1->whoTo->RTO = 1.2962 + sctp_calculate_rto(stcb, 1.2963 + &stcb->asoc, 1.2964 + tp1->whoTo, 1.2965 + &tp1->sent_rcv_time, 1.2966 + sctp_align_safe_nocopy, 1.2967 + SCTP_RTT_FROM_DATA); 1.2968 + *rto_ok = 0; 1.2969 + } 1.2970 + if (tp1->whoTo->rto_needed == 0) { 1.2971 + tp1->whoTo->rto_needed = 1; 1.2972 + } 1.2973 + tp1->do_rtt = 0; 1.2974 + } 1.2975 + } 1.2976 + 1.2977 + } 1.2978 + if (tp1->sent <= SCTP_DATAGRAM_RESEND) { 1.2979 + if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, 1.2980 + stcb->asoc.this_sack_highest_gap)) { 1.2981 + stcb->asoc.this_sack_highest_gap = 1.2982 + tp1->rec.data.TSN_seq; 1.2983 + } 1.2984 + if (tp1->sent == SCTP_DATAGRAM_RESEND) { 1.2985 + sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); 1.2986 +#ifdef SCTP_AUDITING_ENABLED 1.2987 + sctp_audit_log(0xB2, 1.2988 + (stcb->asoc.sent_queue_retran_cnt & 0x000000ff)); 1.2989 +#endif 1.2990 + } 1.2991 + } 1.2992 + /*- 1.2993 + * All chunks NOT UNSENT fall through here and are marked 1.2994 + * (leave PR-SCTP ones that are to skip alone though) 1.2995 + */ 1.2996 + if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) && 1.2997 + (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 1.2998 + tp1->sent = SCTP_DATAGRAM_MARKED; 1.2999 + } 1.3000 + if (tp1->rec.data.chunk_was_revoked) { 1.3001 + /* deflate the cwnd */ 1.3002 + tp1->whoTo->cwnd -= tp1->book_size; 1.3003 + tp1->rec.data.chunk_was_revoked = 0; 1.3004 + } 1.3005 + /* NR Sack code here */ 1.3006 + if (nr_sacking && 1.3007 + (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 1.3008 + if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { 1.3009 + stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--; 1.3010 +#ifdef INVARIANTS 1.3011 + } else { 1.3012 + panic("No chunks on the queues for sid %u.", 
tp1->rec.data.stream_number); 1.3013 +#endif 1.3014 + } 1.3015 + tp1->sent = SCTP_DATAGRAM_NR_ACKED; 1.3016 + if (tp1->data) { 1.3017 + /* sa_ignore NO_NULL_CHK */ 1.3018 + sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 1.3019 + sctp_m_freem(tp1->data); 1.3020 + tp1->data = NULL; 1.3021 + } 1.3022 + wake_him++; 1.3023 + } 1.3024 + } 1.3025 + break; 1.3026 + } /* if (tp1->TSN_seq == theTSN) */ 1.3027 + if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) { 1.3028 + break; 1.3029 + } 1.3030 + tp1 = TAILQ_NEXT(tp1, sctp_next); 1.3031 + if ((tp1 == NULL) && (circled == 0)) { 1.3032 + circled++; 1.3033 + tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 1.3034 + } 1.3035 + } /* end while (tp1) */ 1.3036 + if (tp1 == NULL) { 1.3037 + circled = 0; 1.3038 + tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 1.3039 + } 1.3040 + /* In case the fragments were not in order we must reset */ 1.3041 + } /* end for (j = fragStart */ 1.3042 + *p_tp1 = tp1; 1.3043 + return (wake_him); /* Return value only used for nr-sack */ 1.3044 +} 1.3045 + 1.3046 + 1.3047 +static int 1.3048 +sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, 1.3049 + uint32_t last_tsn, uint32_t *biggest_tsn_acked, 1.3050 + uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack, 1.3051 + int num_seg, int num_nr_seg, int *rto_ok) 1.3052 +{ 1.3053 + struct sctp_gap_ack_block *frag, block; 1.3054 + struct sctp_tmit_chunk *tp1; 1.3055 + int i; 1.3056 + int num_frs = 0; 1.3057 + int chunk_freed; 1.3058 + int non_revocable; 1.3059 + uint16_t frag_strt, frag_end, prev_frag_end; 1.3060 + 1.3061 + tp1 = TAILQ_FIRST(&asoc->sent_queue); 1.3062 + prev_frag_end = 0; 1.3063 + chunk_freed = 0; 1.3064 + 1.3065 + for (i = 0; i < (num_seg + num_nr_seg); i++) { 1.3066 + if (i == num_seg) { 1.3067 + prev_frag_end = 0; 1.3068 + tp1 = TAILQ_FIRST(&asoc->sent_queue); 1.3069 + } 1.3070 + frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 1.3071 + sizeof(struct sctp_gap_ack_block), (uint8_t *) &block); 1.3072 + *offset += sizeof(block); 1.3073 + if (frag == NULL) { 1.3074 + return (chunk_freed); 1.3075 + } 1.3076 + frag_strt = ntohs(frag->start); 1.3077 + frag_end = ntohs(frag->end); 1.3078 + 1.3079 + if (frag_strt > frag_end) { 1.3080 + /* This gap report is malformed, skip it. */ 1.3081 + continue; 1.3082 + } 1.3083 + if (frag_strt <= prev_frag_end) { 1.3084 + /* This gap report is not in order, so restart. 
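The two guards above can be read as a classifier for incoming gap reports: blocks are expected in ascending, non-overlapping order, and anything else is either skipped outright or forces a rescan from the head of the sent queue. A sketch with hypothetical names:

#include <stdint.h>

enum gap_disposition { GAP_SKIP, GAP_RESTART_SCAN, GAP_IN_ORDER };

enum gap_disposition
classify_gap_block(uint16_t start, uint16_t end, uint16_t prev_end)
{
	if (start > end)
		return (GAP_SKIP);		/* malformed block */
	if (start <= prev_end)
		return (GAP_RESTART_SCAN);	/* overlapping or out of order */
	return (GAP_IN_ORDER);
}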
*/ 1.3085 + tp1 = TAILQ_FIRST(&asoc->sent_queue); 1.3086 + } 1.3087 + if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) { 1.3088 + *biggest_tsn_acked = last_tsn + frag_end; 1.3089 + } 1.3090 + if (i < num_seg) { 1.3091 + non_revocable = 0; 1.3092 + } else { 1.3093 + non_revocable = 1; 1.3094 + } 1.3095 + if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end, 1.3096 + non_revocable, &num_frs, biggest_newly_acked_tsn, 1.3097 + this_sack_lowest_newack, rto_ok)) { 1.3098 + chunk_freed = 1; 1.3099 + } 1.3100 + prev_frag_end = frag_end; 1.3101 + } 1.3102 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 1.3103 + if (num_frs) 1.3104 + sctp_log_fr(*biggest_tsn_acked, 1.3105 + *biggest_newly_acked_tsn, 1.3106 + last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 1.3107 + } 1.3108 + return (chunk_freed); 1.3109 +} 1.3110 + 1.3111 +static void 1.3112 +sctp_check_for_revoked(struct sctp_tcb *stcb, 1.3113 + struct sctp_association *asoc, uint32_t cumack, 1.3114 + uint32_t biggest_tsn_acked) 1.3115 +{ 1.3116 + struct sctp_tmit_chunk *tp1; 1.3117 + 1.3118 + TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 1.3119 + if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) { 1.3120 + /* 1.3121 + * ok this guy is either ACK or MARKED. If it is 1.3122 + * ACKED it has been previously acked but not this 1.3123 + * time i.e. revoked. If it is MARKED it was ACK'ed 1.3124 + * again. 1.3125 + */ 1.3126 + if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) { 1.3127 + break; 1.3128 + } 1.3129 + if (tp1->sent == SCTP_DATAGRAM_ACKED) { 1.3130 + /* it has been revoked */ 1.3131 + tp1->sent = SCTP_DATAGRAM_SENT; 1.3132 + tp1->rec.data.chunk_was_revoked = 1; 1.3133 + /* We must add this stuff back in to 1.3134 + * assure timers and such get started. 1.3135 + */ 1.3136 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 1.3137 + sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 1.3138 + tp1->whoTo->flight_size, 1.3139 + tp1->book_size, 1.3140 + (uintptr_t)tp1->whoTo, 1.3141 + tp1->rec.data.TSN_seq); 1.3142 + } 1.3143 + sctp_flight_size_increase(tp1); 1.3144 + sctp_total_flight_increase(stcb, tp1); 1.3145 + /* We inflate the cwnd to compensate for our 1.3146 + * artificial inflation of the flight_size. 1.3147 + */ 1.3148 + tp1->whoTo->cwnd += tp1->book_size; 1.3149 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 1.3150 + sctp_log_sack(asoc->last_acked_seq, 1.3151 + cumack, 1.3152 + tp1->rec.data.TSN_seq, 1.3153 + 0, 1.3154 + 0, 1.3155 + SCTP_LOG_TSN_REVOKED); 1.3156 + } 1.3157 + } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 1.3158 + /* it has been re-acked in this SACK */ 1.3159 + tp1->sent = SCTP_DATAGRAM_ACKED; 1.3160 + } 1.3161 + } 1.3162 + if (tp1->sent == SCTP_DATAGRAM_UNSENT) 1.3163 + break; 1.3164 + } 1.3165 +} 1.3166 + 1.3167 + 1.3168 +static void 1.3169 +sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 1.3170 + uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) 1.3171 +{ 1.3172 + struct sctp_tmit_chunk *tp1; 1.3173 + int strike_flag = 0; 1.3174 + struct timeval now; 1.3175 + int tot_retrans = 0; 1.3176 + uint32_t sending_seq; 1.3177 + struct sctp_nets *net; 1.3178 + int num_dests_sacked = 0; 1.3179 + 1.3180 + /* 1.3181 + * select the sending_seq, this is either the next thing ready to be 1.3182 + * sent but not transmitted, OR, the next seq we assign. 
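The marking loop that follows implements the strike counting behind fast retransmit: each qualifying SACK bumps a missing chunk's sent state by one, and reaching SCTP_DATAGRAM_RESEND schedules it for FR. Reduced to its core; the constants here are illustrative stand-ins, not the kernel's values:

#include <stdbool.h>

#define DATAGRAM_SENT	1	/* stand-ins for the SCTP_DATAGRAM_* */
#define DATAGRAM_RESEND	4	/* sent-state values */

/* Returns true when the chunk has collected enough strikes to be
 * marked for fast retransmission. */
bool
strike_chunk(int *sent_state)
{
	if (*sent_state < DATAGRAM_RESEND)
		(*sent_state)++;
	return (*sent_state == DATAGRAM_RESEND);
}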
1.3183 + */
1.3184 + tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
1.3185 + if (tp1 == NULL) {
1.3186 + sending_seq = asoc->sending_seq;
1.3187 + } else {
1.3188 + sending_seq = tp1->rec.data.TSN_seq;
1.3189 + }
1.3190 +
1.3191 + /* CMT DAC algo: finding out if SACK is a mixed SACK */
1.3192 + if ((asoc->sctp_cmt_on_off > 0) &&
1.3193 + SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
1.3194 + TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
1.3195 + if (net->saw_newack)
1.3196 + num_dests_sacked++;
1.3197 + }
1.3198 + }
1.3199 + if (stcb->asoc.peer_supports_prsctp) {
1.3200 + (void)SCTP_GETTIME_TIMEVAL(&now);
1.3201 + }
1.3202 + TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
1.3203 + strike_flag = 0;
1.3204 + if (tp1->no_fr_allowed) {
1.3205 + /* this one had a timeout or something */
1.3206 + continue;
1.3207 + }
1.3208 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
1.3209 + if (tp1->sent < SCTP_DATAGRAM_RESEND)
1.3210 + sctp_log_fr(biggest_tsn_newly_acked,
1.3211 + tp1->rec.data.TSN_seq,
1.3212 + tp1->sent,
1.3213 + SCTP_FR_LOG_CHECK_STRIKE);
1.3214 + }
1.3215 + if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
1.3216 + tp1->sent == SCTP_DATAGRAM_UNSENT) {
1.3217 + /* done */
1.3218 + break;
1.3219 + }
1.3220 + if (stcb->asoc.peer_supports_prsctp) {
1.3221 + if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
1.3222 + /* Is it expired? */
1.3223 +#ifndef __FreeBSD__
1.3224 + if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
1.3225 +#else
1.3226 + if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
1.3227 +#endif
1.3228 + /* Yes so drop it */
1.3229 + if (tp1->data != NULL) {
1.3230 + (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
1.3231 + SCTP_SO_NOT_LOCKED);
1.3232 + }
1.3233 + continue;
1.3234 + }
1.3235 + }
1.3236 +
1.3237 + }
1.3238 + if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
1.3239 + /* we are beyond the tsn in the sack */
1.3240 + break;
1.3241 + }
1.3242 + if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
1.3243 + /* either a RESEND, ACKED, or MARKED */
1.3244 + /* skip */
1.3245 + if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
1.3246 + /* Continue striking FWD-TSN chunks */
1.3247 + tp1->rec.data.fwd_tsn_cnt++;
1.3248 + }
1.3249 + continue;
1.3250 + }
1.3251 + /*
1.3252 + * CMT : SFR algo (covers part of DAC and HTNA as well)
1.3253 + */
1.3254 + if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
1.3255 + /*
1.3256 + * No new acks were received for data sent to this
1.3257 + * dest. Therefore, according to the SFR algo for
1.3258 + * CMT, no data sent to this dest can be marked for
1.3259 + * FR using this SACK.
1.3260 + */
1.3261 + continue;
1.3262 + } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
1.3263 + tp1->whoTo->this_sack_highest_newack)) {
1.3264 + /*
1.3265 + * CMT: New acks were received for data sent to
1.3266 + * this dest. But no new acks were seen for data
1.3267 + * sent after tp1. Therefore, according to the SFR
1.3268 + * algo for CMT, tp1 cannot be marked for FR using
1.3269 + * this SACK. This step covers part of the DAC algo
1.3270 + * and the HTNA algo as well.
1.3271 + */
1.3272 + continue;
1.3273 + }
1.3274 + /*
1.3275 + * Here we check to see if we have already done a FR
1.3276 + * and if so we see if the biggest TSN we saw in the sack is
1.3277 + * smaller than the recovery point. If so we don't strike
1.3278 + * the tsn... otherwise we CAN strike the TSN.
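 * (The recovery-point check keeps one loss event from triggering
 * repeated fast retransmits of the same chunk: only acks for data sent
 * after the previous FR can strike it again.)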
1.3279 + */ 1.3280 + /* 1.3281 + * @@@ JRI: Check for CMT 1.3282 + * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) { 1.3283 + */ 1.3284 + if (accum_moved && asoc->fast_retran_loss_recovery) { 1.3285 + /* 1.3286 + * Strike the TSN if in fast-recovery and cum-ack 1.3287 + * moved. 1.3288 + */ 1.3289 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 1.3290 + sctp_log_fr(biggest_tsn_newly_acked, 1.3291 + tp1->rec.data.TSN_seq, 1.3292 + tp1->sent, 1.3293 + SCTP_FR_LOG_STRIKE_CHUNK); 1.3294 + } 1.3295 + if (tp1->sent < SCTP_DATAGRAM_RESEND) { 1.3296 + tp1->sent++; 1.3297 + } 1.3298 + if ((asoc->sctp_cmt_on_off > 0) && 1.3299 + SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 1.3300 + /* 1.3301 + * CMT DAC algorithm: If SACK flag is set to 1.3302 + * 0, then lowest_newack test will not pass 1.3303 + * because it would have been set to the 1.3304 + * cumack earlier. If not already to be 1.3305 + * rtx'd, If not a mixed sack and if tp1 is 1.3306 + * not between two sacked TSNs, then mark by 1.3307 + * one more. 1.3308 + * NOTE that we are marking by one additional time since the SACK DAC flag indicates that 1.3309 + * two packets have been received after this missing TSN. 1.3310 + */ 1.3311 + if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 1.3312 + SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) { 1.3313 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 1.3314 + sctp_log_fr(16 + num_dests_sacked, 1.3315 + tp1->rec.data.TSN_seq, 1.3316 + tp1->sent, 1.3317 + SCTP_FR_LOG_STRIKE_CHUNK); 1.3318 + } 1.3319 + tp1->sent++; 1.3320 + } 1.3321 + } 1.3322 + } else if ((tp1->rec.data.doing_fast_retransmit) && 1.3323 + (asoc->sctp_cmt_on_off == 0)) { 1.3324 + /* 1.3325 + * For those that have done a FR we must take 1.3326 + * special consideration if we strike. I.e the 1.3327 + * biggest_newly_acked must be higher than the 1.3328 + * sending_seq at the time we did the FR. 1.3329 + */ 1.3330 + if ( 1.3331 +#ifdef SCTP_FR_TO_ALTERNATE 1.3332 + /* 1.3333 + * If FR's go to new networks, then we must only do 1.3334 + * this for singly homed asoc's. However if the FR's 1.3335 + * go to the same network (Armando's work) then its 1.3336 + * ok to FR multiple times. 1.3337 + */ 1.3338 + (asoc->numnets < 2) 1.3339 +#else 1.3340 + (1) 1.3341 +#endif 1.3342 + ) { 1.3343 + 1.3344 + if (SCTP_TSN_GE(biggest_tsn_newly_acked, 1.3345 + tp1->rec.data.fast_retran_tsn)) { 1.3346 + /* 1.3347 + * Strike the TSN, since this ack is 1.3348 + * beyond where things were when we 1.3349 + * did a FR. 1.3350 + */ 1.3351 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 1.3352 + sctp_log_fr(biggest_tsn_newly_acked, 1.3353 + tp1->rec.data.TSN_seq, 1.3354 + tp1->sent, 1.3355 + SCTP_FR_LOG_STRIKE_CHUNK); 1.3356 + } 1.3357 + if (tp1->sent < SCTP_DATAGRAM_RESEND) { 1.3358 + tp1->sent++; 1.3359 + } 1.3360 + strike_flag = 1; 1.3361 + if ((asoc->sctp_cmt_on_off > 0) && 1.3362 + SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 1.3363 + /* 1.3364 + * CMT DAC algorithm: If 1.3365 + * SACK flag is set to 0, 1.3366 + * then lowest_newack test 1.3367 + * will not pass because it 1.3368 + * would have been set to 1.3369 + * the cumack earlier. If 1.3370 + * not already to be rtx'd, 1.3371 + * If not a mixed sack and 1.3372 + * if tp1 is not between two 1.3373 + * sacked TSNs, then mark by 1.3374 + * one more. 
1.3375 + * NOTE that we are marking by one additional time since the SACK DAC flag indicates that 1.3376 + * two packets have been received after this missing TSN. 1.3377 + */ 1.3378 + if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 1.3379 + (num_dests_sacked == 1) && 1.3380 + SCTP_TSN_GT(this_sack_lowest_newack, 1.3381 + tp1->rec.data.TSN_seq)) { 1.3382 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 1.3383 + sctp_log_fr(32 + num_dests_sacked, 1.3384 + tp1->rec.data.TSN_seq, 1.3385 + tp1->sent, 1.3386 + SCTP_FR_LOG_STRIKE_CHUNK); 1.3387 + } 1.3388 + if (tp1->sent < SCTP_DATAGRAM_RESEND) { 1.3389 + tp1->sent++; 1.3390 + } 1.3391 + } 1.3392 + } 1.3393 + } 1.3394 + } 1.3395 + /* 1.3396 + * JRI: TODO: remove code for HTNA algo. CMT's 1.3397 + * SFR algo covers HTNA. 1.3398 + */ 1.3399 + } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, 1.3400 + biggest_tsn_newly_acked)) { 1.3401 + /* 1.3402 + * We don't strike these: This is the HTNA 1.3403 + * algorithm i.e. we don't strike If our TSN is 1.3404 + * larger than the Highest TSN Newly Acked. 1.3405 + */ 1.3406 + ; 1.3407 + } else { 1.3408 + /* Strike the TSN */ 1.3409 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 1.3410 + sctp_log_fr(biggest_tsn_newly_acked, 1.3411 + tp1->rec.data.TSN_seq, 1.3412 + tp1->sent, 1.3413 + SCTP_FR_LOG_STRIKE_CHUNK); 1.3414 + } 1.3415 + if (tp1->sent < SCTP_DATAGRAM_RESEND) { 1.3416 + tp1->sent++; 1.3417 + } 1.3418 + if ((asoc->sctp_cmt_on_off > 0) && 1.3419 + SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 1.3420 + /* 1.3421 + * CMT DAC algorithm: If SACK flag is set to 1.3422 + * 0, then lowest_newack test will not pass 1.3423 + * because it would have been set to the 1.3424 + * cumack earlier. If not already to be 1.3425 + * rtx'd, If not a mixed sack and if tp1 is 1.3426 + * not between two sacked TSNs, then mark by 1.3427 + * one more. 1.3428 + * NOTE that we are marking by one additional time since the SACK DAC flag indicates that 1.3429 + * two packets have been received after this missing TSN. 1.3430 + */ 1.3431 + if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 1.3432 + SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) { 1.3433 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 1.3434 + sctp_log_fr(48 + num_dests_sacked, 1.3435 + tp1->rec.data.TSN_seq, 1.3436 + tp1->sent, 1.3437 + SCTP_FR_LOG_STRIKE_CHUNK); 1.3438 + } 1.3439 + tp1->sent++; 1.3440 + } 1.3441 + } 1.3442 + } 1.3443 + if (tp1->sent == SCTP_DATAGRAM_RESEND) { 1.3444 + struct sctp_nets *alt; 1.3445 + 1.3446 + /* fix counts and things */ 1.3447 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 1.3448 + sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 1.3449 + (tp1->whoTo ? 
(tp1->whoTo->flight_size) : 0), 1.3450 + tp1->book_size, 1.3451 + (uintptr_t)tp1->whoTo, 1.3452 + tp1->rec.data.TSN_seq); 1.3453 + } 1.3454 + if (tp1->whoTo) { 1.3455 + tp1->whoTo->net_ack++; 1.3456 + sctp_flight_size_decrease(tp1); 1.3457 + if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 1.3458 + (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, 1.3459 + tp1); 1.3460 + } 1.3461 + } 1.3462 + 1.3463 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 1.3464 + sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 1.3465 + asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 1.3466 + } 1.3467 + /* add back to the rwnd */ 1.3468 + asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 1.3469 + 1.3470 + /* remove from the total flight */ 1.3471 + sctp_total_flight_decrease(stcb, tp1); 1.3472 + 1.3473 + if ((stcb->asoc.peer_supports_prsctp) && 1.3474 + (PR_SCTP_RTX_ENABLED(tp1->flags))) { 1.3475 + /* Has it been retransmitted tv_sec times? - we store the retran count there. */ 1.3476 + if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 1.3477 + /* Yes, so drop it */ 1.3478 + if (tp1->data != NULL) { 1.3479 + (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 1.3480 + SCTP_SO_NOT_LOCKED); 1.3481 + } 1.3482 + /* Make sure to flag we had a FR */ 1.3483 + tp1->whoTo->net_ack++; 1.3484 + continue; 1.3485 + } 1.3486 + } 1.3487 + /* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */ 1.3488 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 1.3489 + sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count, 1.3490 + 0, SCTP_FR_MARKED); 1.3491 + } 1.3492 + if (strike_flag) { 1.3493 + /* This is a subsequent FR */ 1.3494 + SCTP_STAT_INCR(sctps_sendmultfastretrans); 1.3495 + } 1.3496 + sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1.3497 + if (asoc->sctp_cmt_on_off > 0) { 1.3498 + /* 1.3499 + * CMT: Using RTX_SSTHRESH policy for CMT. 1.3500 + * If CMT is being used, then pick dest with 1.3501 + * largest ssthresh for any retransmission. 1.3502 + */ 1.3503 + tp1->no_fr_allowed = 1; 1.3504 + alt = tp1->whoTo; 1.3505 + /*sa_ignore NO_NULL_CHK*/ 1.3506 + if (asoc->sctp_cmt_pf > 0) { 1.3507 + /* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */ 1.3508 + alt = sctp_find_alternate_net(stcb, alt, 2); 1.3509 + } else { 1.3510 + /* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */ 1.3511 + /*sa_ignore NO_NULL_CHK*/ 1.3512 + alt = sctp_find_alternate_net(stcb, alt, 1); 1.3513 + } 1.3514 + if (alt == NULL) { 1.3515 + alt = tp1->whoTo; 1.3516 + } 1.3517 + /* 1.3518 + * CUCv2: If a different dest is picked for 1.3519 + * the retransmission, then new 1.3520 + * (rtx-)pseudo_cumack needs to be tracked 1.3521 + * for orig dest. Let CUCv2 track new (rtx-) 1.3522 + * pseudo-cumack always. 1.3523 + */ 1.3524 + if (tp1->whoTo) { 1.3525 + tp1->whoTo->find_pseudo_cumack = 1; 1.3526 + tp1->whoTo->find_rtx_pseudo_cumack = 1; 1.3527 + } 1.3528 + 1.3529 + } else {/* CMT is OFF */ 1.3530 + 1.3531 +#ifdef SCTP_FR_TO_ALTERNATE 1.3532 + /* Can we find an alternate? */ 1.3533 + alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 1.3534 +#else 1.3535 + /* 1.3536 + * default behavior is to NOT retransmit 1.3537 + * FR's to an alternate. Armando Caro's 1.3538 + * paper details why. 
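 * (That is, unless SCTP_FR_TO_ALTERNATE is defined at build time, fast
 * retransmissions are sent to the same destination as the original
 * transmission.)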
1.3539 + */
1.3540 + alt = tp1->whoTo;
1.3541 +#endif
1.3542 + }
1.3543 +
1.3544 + tp1->rec.data.doing_fast_retransmit = 1;
1.3545 + tot_retrans++;
1.3546 + /* mark the sending seq for possible subsequent FR's */
1.3547 + /*
1.3548 + * SCTP_PRINTF("Marking TSN for FR new value %x\n",
1.3549 + * (uint32_t)tpi->rec.data.TSN_seq);
1.3550 + */
1.3551 + if (TAILQ_EMPTY(&asoc->send_queue)) {
1.3552 + /*
1.3553 + * If the send queue is empty then it's
1.3554 + * the next sequence number that will be
1.3555 + * assigned, so we subtract one from this to
1.3556 + * get the one we last sent.
1.3557 + */
1.3558 + tp1->rec.data.fast_retran_tsn = sending_seq;
1.3559 + } else {
1.3560 + /*
1.3561 + * If there are chunks on the send queue
1.3562 + * (unsent data that has made it from the
1.3563 + * stream queues but not out the door), we
1.3564 + * take the first one (which will have the
1.3565 + * lowest TSN) and subtract one to get the
1.3566 + * one we last sent.
1.3567 + */
1.3568 + struct sctp_tmit_chunk *ttt;
1.3569 +
1.3570 + ttt = TAILQ_FIRST(&asoc->send_queue);
1.3571 + tp1->rec.data.fast_retran_tsn =
1.3572 + ttt->rec.data.TSN_seq;
1.3573 + }
1.3574 +
1.3575 + if (tp1->do_rtt) {
1.3576 + /*
1.3577 + * this guy had a RTO calculation pending on
1.3578 + * it, cancel it
1.3579 + */
1.3580 + if ((tp1->whoTo != NULL) &&
1.3581 + (tp1->whoTo->rto_needed == 0)) {
1.3582 + tp1->whoTo->rto_needed = 1;
1.3583 + }
1.3584 + tp1->do_rtt = 0;
1.3585 + }
1.3586 + if (alt != tp1->whoTo) {
1.3587 + /* yes, there is an alternate. */
1.3588 + sctp_free_remote_addr(tp1->whoTo);
1.3589 + /*sa_ignore FREED_MEMORY*/
1.3590 + tp1->whoTo = alt;
1.3591 + atomic_add_int(&alt->ref_count, 1);
1.3592 + }
1.3593 + }
1.3594 + }
1.3595 +}
1.3596 +
1.3597 +struct sctp_tmit_chunk *
1.3598 +sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
1.3599 + struct sctp_association *asoc)
1.3600 +{
1.3601 + struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
1.3602 + struct timeval now;
1.3603 + int now_filled = 0;
1.3604 +
1.3605 + if (asoc->peer_supports_prsctp == 0) {
1.3606 + return (NULL);
1.3607 + }
1.3608 + TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
1.3609 + if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
1.3610 + tp1->sent != SCTP_DATAGRAM_RESEND &&
1.3611 + tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
1.3612 + /* no chance to advance, out of here */
1.3613 + break;
1.3614 + }
1.3615 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
1.3616 + if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
1.3617 + (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
1.3618 + sctp_misc_ints(SCTP_FWD_TSN_CHECK,
1.3619 + asoc->advanced_peer_ack_point,
1.3620 + tp1->rec.data.TSN_seq, 0, 0);
1.3621 + }
1.3622 + }
1.3623 + if (!PR_SCTP_ENABLED(tp1->flags)) {
1.3624 + /*
1.3625 + * We can't fwd-tsn past any that are reliable aka
1.3626 + * retransmitted until the asoc fails.
1.3627 + */
1.3628 + break;
1.3629 + }
1.3630 + if (!now_filled) {
1.3631 + (void)SCTP_GETTIME_TIMEVAL(&now);
1.3632 + now_filled = 1;
1.3633 + }
1.3634 + /*
1.3635 + * now we got a chunk which is marked for another
1.3636 + * retransmission to a PR-stream but has already run out
1.3637 + * of chances, OR has been marked to skip now. Can we skip
1.3638 + * it if it's a resend?
1.3639 + */
1.3640 + if (tp1->sent == SCTP_DATAGRAM_RESEND &&
1.3641 + (PR_SCTP_TTL_ENABLED(tp1->flags))) {
1.3642 + /*
1.3643 + * Now is this one marked for resend and its time is
1.3644 + * now up?
1.3645 + */
1.3646 +#ifndef __FreeBSD__
1.3647 + if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
1.3648 +#else
1.3649 + if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
1.3650 +#endif
1.3651 + /* Yes so drop it */
1.3652 + if (tp1->data) {
1.3653 + (void)sctp_release_pr_sctp_chunk(stcb, tp1,
1.3654 + 1, SCTP_SO_NOT_LOCKED);
1.3655 + }
1.3656 + } else {
1.3657 + /*
1.3658 + * No, we are done when we hit one marked
1.3659 + * for resend whose time has not expired.
1.3660 + */
1.3661 + break;
1.3662 + }
1.3663 + }
1.3664 + /*
1.3665 + * Ok now if this chunk is marked to drop it we can clean up
1.3666 + * the chunk, advance our peer ack point and we can check
1.3667 + * the next chunk.
1.3668 + */
1.3669 + if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
1.3670 + (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
1.3671 + /* advance PeerAckPoint goes forward */
1.3672 + if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
1.3673 + asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
1.3674 + a_adv = tp1;
1.3675 + } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
1.3676 + /* No update but we do save the chk */
1.3677 + a_adv = tp1;
1.3678 + }
1.3679 + } else {
1.3680 + /*
1.3681 + * If it is still in RESEND we can advance no
1.3682 + * further
1.3683 + */
1.3684 + break;
1.3685 + }
1.3686 + }
1.3687 + return (a_adv);
1.3688 +}
1.3689 +
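/*
 * Illustrative sketch (not part of the original sources): the advance
 * test above relies on serial-number arithmetic, because TSNs wrap at
 * 2^32 and plain '<'/'>' comparisons misbehave near the wrap. The
 * helper names below are made up for illustration; tsn_gt() plays the
 * role of SCTP_TSN_GT(), and advance_peer_ack_point() mirrors the
 * forward-only update in sctp_try_advance_peer_ack_point(). One common
 * idiom, assuming two's-complement conversion on the cast:
 */
#include <stdint.h>

static int
tsn_gt(uint32_t a, uint32_t b)
{
	/* a is "after" b iff the unsigned difference, read as a
	 * signed 32-bit value, is positive (RFC 1982 style). */
	return ((int32_t)(a - b) > 0);
}

static uint32_t
advance_peer_ack_point(uint32_t adv_point, uint32_t abandoned_tsn)
{
	/* Only ever move the advanced peer-ack point forward. */
	return (tsn_gt(abandoned_tsn, adv_point) ? abandoned_tsn : adv_point);
}

/*
 * E.g. advance_peer_ack_point(0xfffffff0, 0x00000010) yields 0x00000010,
 * moving the point forward correctly across the 2^32 wrap.
 */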
1.3690 +static int
1.3691 +sctp_fs_audit(struct sctp_association *asoc)
1.3692 +{
1.3693 + struct sctp_tmit_chunk *chk;
1.3694 + int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
1.3695 + int entry_flight, entry_cnt, ret;
1.3696 +
1.3697 + entry_flight = asoc->total_flight;
1.3698 + entry_cnt = asoc->total_flight_count;
1.3699 + ret = 0;
1.3700 +
1.3701 + if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
1.3702 + return (0);
1.3703 +
1.3704 + TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
1.3705 + if (chk->sent < SCTP_DATAGRAM_RESEND) {
1.3706 + SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
1.3707 + chk->rec.data.TSN_seq,
1.3708 + chk->send_size,
1.3709 + chk->snd_count);
1.3710 + inflight++;
1.3711 + } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
1.3712 + resend++;
1.3713 + } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
1.3714 + inbetween++;
1.3715 + } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
1.3716 + above++;
1.3717 + } else {
1.3718 + acked++;
1.3719 + }
1.3720 + }
1.3721 +
1.3722 + if ((inflight > 0) || (inbetween > 0)) {
1.3723 +#ifdef INVARIANTS
1.3724 + panic("Flight size-express incorrect? \n");
1.3725 +#else
1.3726 + SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
1.3727 + entry_flight, entry_cnt);
1.3728 +
1.3729 + SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
1.3730 + inflight, inbetween, resend, above, acked);
1.3731 + ret = 1;
1.3732 +#endif
1.3733 + }
1.3734 + return (ret);
1.3735 +}
1.3736 +
1.3737 +
1.3738 +static void
1.3739 +sctp_window_probe_recovery(struct sctp_tcb *stcb,
1.3740 + struct sctp_association *asoc,
1.3741 + struct sctp_tmit_chunk *tp1)
1.3742 +{
1.3743 + tp1->window_probe = 0;
1.3744 + if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
1.3745 + /* TSN's skipped we do NOT move back.
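 * (In that case the probe chunk has already been acked or its data
 * freed, so there is nothing to put back in flight; we only log and
 * return. Otherwise the chunk is pulled out of flight below and
 * re-marked for resend.)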
*/ 1.3746 + sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, 1.3747 + tp1->whoTo->flight_size, 1.3748 + tp1->book_size, 1.3749 + (uintptr_t)tp1->whoTo, 1.3750 + tp1->rec.data.TSN_seq); 1.3751 + return; 1.3752 + } 1.3753 + /* First setup this by shrinking flight */ 1.3754 + if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 1.3755 + (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, 1.3756 + tp1); 1.3757 + } 1.3758 + sctp_flight_size_decrease(tp1); 1.3759 + sctp_total_flight_decrease(stcb, tp1); 1.3760 + /* Now mark for resend */ 1.3761 + tp1->sent = SCTP_DATAGRAM_RESEND; 1.3762 + sctp_ucount_incr(asoc->sent_queue_retran_cnt); 1.3763 + 1.3764 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 1.3765 + sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 1.3766 + tp1->whoTo->flight_size, 1.3767 + tp1->book_size, 1.3768 + (uintptr_t)tp1->whoTo, 1.3769 + tp1->rec.data.TSN_seq); 1.3770 + } 1.3771 +} 1.3772 + 1.3773 +void 1.3774 +sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 1.3775 + uint32_t rwnd, int *abort_now, int ecne_seen) 1.3776 +{ 1.3777 + struct sctp_nets *net; 1.3778 + struct sctp_association *asoc; 1.3779 + struct sctp_tmit_chunk *tp1, *tp2; 1.3780 + uint32_t old_rwnd; 1.3781 + int win_probe_recovery = 0; 1.3782 + int win_probe_recovered = 0; 1.3783 + int j, done_once = 0; 1.3784 + int rto_ok = 1; 1.3785 + 1.3786 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 1.3787 + sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 1.3788 + rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 1.3789 + } 1.3790 + SCTP_TCB_LOCK_ASSERT(stcb); 1.3791 +#ifdef SCTP_ASOCLOG_OF_TSNS 1.3792 + stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 1.3793 + stcb->asoc.cumack_log_at++; 1.3794 + if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 1.3795 + stcb->asoc.cumack_log_at = 0; 1.3796 + } 1.3797 +#endif 1.3798 + asoc = &stcb->asoc; 1.3799 + old_rwnd = asoc->peers_rwnd; 1.3800 + if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { 1.3801 + /* old ack */ 1.3802 + return; 1.3803 + } else if (asoc->last_acked_seq == cumack) { 1.3804 + /* Window update sack */ 1.3805 + asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 1.3806 + (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 1.3807 + if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 1.3808 + /* SWS sender side engages */ 1.3809 + asoc->peers_rwnd = 0; 1.3810 + } 1.3811 + if (asoc->peers_rwnd > old_rwnd) { 1.3812 + goto again; 1.3813 + } 1.3814 + return; 1.3815 + } 1.3816 + 1.3817 + /* First setup for CC stuff */ 1.3818 + TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 1.3819 + if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { 1.3820 + /* Drag along the window_tsn for cwr's */ 1.3821 + net->cwr_window_tsn = cumack; 1.3822 + } 1.3823 + net->prev_cwnd = net->cwnd; 1.3824 + net->net_ack = 0; 1.3825 + net->net_ack2 = 0; 1.3826 + 1.3827 + /* 1.3828 + * CMT: Reset CUC and Fast recovery algo variables before 1.3829 + * SACK processing 1.3830 + */ 1.3831 + net->new_pseudo_cumack = 0; 1.3832 + net->will_exit_fast_recovery = 0; 1.3833 + if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 1.3834 + (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net); 1.3835 + } 1.3836 + } 1.3837 + if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { 1.3838 + uint32_t send_s; 1.3839 + 1.3840 + if (!TAILQ_EMPTY(&asoc->sent_queue)) { 1.3841 + tp1 = TAILQ_LAST(&asoc->sent_queue, 1.3842 + sctpchunk_listhead); 1.3843 + send_s = 
tp1->rec.data.TSN_seq + 1;
1.3844 + } else {
1.3845 + send_s = asoc->sending_seq;
1.3846 + }
1.3847 + if (SCTP_TSN_GE(cumack, send_s)) {
1.3848 +#ifndef INVARIANTS
1.3849 + struct mbuf *oper;
1.3850 +
1.3851 +#endif
1.3852 +#ifdef INVARIANTS
1.3853 + panic("Impossible sack 1");
1.3854 +#else
1.3855 +
1.3856 + *abort_now = 1;
1.3857 + /* XXX */
1.3858 + oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1.3859 + 0, M_NOWAIT, 1, MT_DATA);
1.3860 + if (oper) {
1.3861 + struct sctp_paramhdr *ph;
1.3862 + uint32_t *ippp;
1.3863 +
1.3864 + SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1.3865 + sizeof(uint32_t);
1.3866 + ph = mtod(oper, struct sctp_paramhdr *);
1.3867 + ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1.3868 + ph->param_length = htons(SCTP_BUF_LEN(oper));
1.3869 + ippp = (uint32_t *) (ph + 1);
1.3870 + *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
1.3871 + }
1.3872 + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
1.3873 + sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1.3874 + return;
1.3875 +#endif
1.3876 + }
1.3877 + }
1.3878 + asoc->this_sack_highest_gap = cumack;
1.3879 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
1.3880 + sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
1.3881 + stcb->asoc.overall_error_count,
1.3882 + 0,
1.3883 + SCTP_FROM_SCTP_INDATA,
1.3884 + __LINE__);
1.3885 + }
1.3886 + stcb->asoc.overall_error_count = 0;
1.3887 + if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
1.3888 + /* process the new consecutive TSN first */
1.3889 + TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
1.3890 + if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
1.3891 + if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
1.3892 + SCTP_PRINTF("Warning, an unsent is now acked?\n");
1.3893 + }
1.3894 + if (tp1->sent < SCTP_DATAGRAM_ACKED) {
1.3895 + /*
1.3896 + * If it is less than ACKED, it is
1.3897 + * now no longer in flight. Higher
1.3898 + * values may occur during marking
1.3899 + */
1.3900 + if (tp1->sent < SCTP_DATAGRAM_RESEND) {
1.3901 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
1.3902 + sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
1.3903 + tp1->whoTo->flight_size,
1.3904 + tp1->book_size,
1.3905 + (uintptr_t)tp1->whoTo,
1.3906 + tp1->rec.data.TSN_seq);
1.3907 + }
1.3908 + sctp_flight_size_decrease(tp1);
1.3909 + if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
1.3910 + (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
1.3911 + tp1);
1.3912 + }
1.3913 + /* sa_ignore NO_NULL_CHK */
1.3914 + sctp_total_flight_decrease(stcb, tp1);
1.3915 + }
1.3916 + tp1->whoTo->net_ack += tp1->send_size;
1.3917 + if (tp1->snd_count < 2) {
1.3918 + /*
1.3919 + * True non-retransmitted
1.3920 + * chunk
1.3921 + */
1.3922 + tp1->whoTo->net_ack2 +=
1.3923 + tp1->send_size;
1.3924 +
1.3925 + /* update RTO too? */
1.3926 + if (tp1->do_rtt) {
1.3927 + if (rto_ok) {
1.3928 + tp1->whoTo->RTO =
1.3929 + /*
1.3930 + * sa_ignore
1.3931 + * NO_NULL_CHK
1.3932 + */
1.3933 + sctp_calculate_rto(stcb,
1.3934 + asoc, tp1->whoTo,
1.3935 + &tp1->sent_rcv_time,
1.3936 + sctp_align_safe_nocopy,
1.3937 + SCTP_RTT_FROM_DATA);
1.3938 + rto_ok = 0;
1.3939 + }
1.3940 + if (tp1->whoTo->rto_needed == 0) {
1.3941 + tp1->whoTo->rto_needed = 1;
1.3942 + }
1.3943 + tp1->do_rtt = 0;
1.3944 + }
1.3945 + }
1.3946 + /*
1.3947 + * CMT: CUCv2 algorithm.
From the 1.3948 + * cumack'd TSNs, for each TSN being 1.3949 + * acked for the first time, set the 1.3950 + * following variables for the 1.3951 + * corresp destination. 1.3952 + * new_pseudo_cumack will trigger a 1.3953 + * cwnd update. 1.3954 + * find_(rtx_)pseudo_cumack will 1.3955 + * trigger search for the next 1.3956 + * expected (rtx-)pseudo-cumack. 1.3957 + */ 1.3958 + tp1->whoTo->new_pseudo_cumack = 1; 1.3959 + tp1->whoTo->find_pseudo_cumack = 1; 1.3960 + tp1->whoTo->find_rtx_pseudo_cumack = 1; 1.3961 + 1.3962 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1.3963 + /* sa_ignore NO_NULL_CHK */ 1.3964 + sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 1.3965 + } 1.3966 + } 1.3967 + if (tp1->sent == SCTP_DATAGRAM_RESEND) { 1.3968 + sctp_ucount_decr(asoc->sent_queue_retran_cnt); 1.3969 + } 1.3970 + if (tp1->rec.data.chunk_was_revoked) { 1.3971 + /* deflate the cwnd */ 1.3972 + tp1->whoTo->cwnd -= tp1->book_size; 1.3973 + tp1->rec.data.chunk_was_revoked = 0; 1.3974 + } 1.3975 + if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 1.3976 + if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { 1.3977 + asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--; 1.3978 +#ifdef INVARIANTS 1.3979 + } else { 1.3980 + panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); 1.3981 +#endif 1.3982 + } 1.3983 + } 1.3984 + TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 1.3985 + if (tp1->data) { 1.3986 + /* sa_ignore NO_NULL_CHK */ 1.3987 + sctp_free_bufspace(stcb, asoc, tp1, 1); 1.3988 + sctp_m_freem(tp1->data); 1.3989 + tp1->data = NULL; 1.3990 + } 1.3991 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 1.3992 + sctp_log_sack(asoc->last_acked_seq, 1.3993 + cumack, 1.3994 + tp1->rec.data.TSN_seq, 1.3995 + 0, 1.3996 + 0, 1.3997 + SCTP_LOG_FREE_SENT); 1.3998 + } 1.3999 + asoc->sent_queue_cnt--; 1.4000 + sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 1.4001 + } else { 1.4002 + break; 1.4003 + } 1.4004 + } 1.4005 + 1.4006 + } 1.4007 +#if defined(__Userspace__) 1.4008 + if (stcb->sctp_ep->recv_callback) { 1.4009 + if (stcb->sctp_socket) { 1.4010 + uint32_t inqueue_bytes, sb_free_now; 1.4011 + struct sctp_inpcb *inp; 1.4012 + 1.4013 + inp = stcb->sctp_ep; 1.4014 + inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 1.4015 + sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv); 1.4016 + 1.4017 + /* check if the amount free in the send socket buffer crossed the threshold */ 1.4018 + if (inp->send_callback && 1.4019 + (((inp->send_sb_threshold > 0) && 1.4020 + (sb_free_now >= inp->send_sb_threshold) && 1.4021 + (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) || 1.4022 + (inp->send_sb_threshold == 0))) { 1.4023 + atomic_add_int(&stcb->asoc.refcnt, 1); 1.4024 + SCTP_TCB_UNLOCK(stcb); 1.4025 + inp->send_callback(stcb->sctp_socket, sb_free_now); 1.4026 + SCTP_TCB_LOCK(stcb); 1.4027 + atomic_subtract_int(&stcb->asoc.refcnt, 1); 1.4028 + } 1.4029 + } 1.4030 + } else if (stcb->sctp_socket) { 1.4031 +#else 1.4032 + /* sa_ignore NO_NULL_CHK */ 1.4033 + if (stcb->sctp_socket) { 1.4034 +#endif 1.4035 +#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1.4036 + struct socket *so; 1.4037 + 1.4038 +#endif 1.4039 + SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 1.4040 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 1.4041 + /* sa_ignore 
NO_NULL_CHK */ 1.4042 + sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); 1.4043 + } 1.4044 +#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1.4045 + so = SCTP_INP_SO(stcb->sctp_ep); 1.4046 + atomic_add_int(&stcb->asoc.refcnt, 1); 1.4047 + SCTP_TCB_UNLOCK(stcb); 1.4048 + SCTP_SOCKET_LOCK(so, 1); 1.4049 + SCTP_TCB_LOCK(stcb); 1.4050 + atomic_subtract_int(&stcb->asoc.refcnt, 1); 1.4051 + if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1.4052 + /* assoc was freed while we were unlocked */ 1.4053 + SCTP_SOCKET_UNLOCK(so, 1); 1.4054 + return; 1.4055 + } 1.4056 +#endif 1.4057 + sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 1.4058 +#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1.4059 + SCTP_SOCKET_UNLOCK(so, 1); 1.4060 +#endif 1.4061 + } else { 1.4062 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 1.4063 + sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); 1.4064 + } 1.4065 + } 1.4066 + 1.4067 + /* JRS - Use the congestion control given in the CC module */ 1.4068 + if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { 1.4069 + TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 1.4070 + if (net->net_ack2 > 0) { 1.4071 + /* 1.4072 + * Karn's rule applies to clearing error count, this 1.4073 + * is optional. 1.4074 + */ 1.4075 + net->error_count = 0; 1.4076 + if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 1.4077 + /* addr came good */ 1.4078 + net->dest_state |= SCTP_ADDR_REACHABLE; 1.4079 + sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 1.4080 + 0, (void *)net, SCTP_SO_NOT_LOCKED); 1.4081 + } 1.4082 + if (net == stcb->asoc.primary_destination) { 1.4083 + if (stcb->asoc.alternate) { 1.4084 + /* release the alternate, primary is good */ 1.4085 + sctp_free_remote_addr(stcb->asoc.alternate); 1.4086 + stcb->asoc.alternate = NULL; 1.4087 + } 1.4088 + } 1.4089 + if (net->dest_state & SCTP_ADDR_PF) { 1.4090 + net->dest_state &= ~SCTP_ADDR_PF; 1.4091 + sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3); 1.4092 + sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 1.4093 + asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 1.4094 + /* Done with this net */ 1.4095 + net->net_ack = 0; 1.4096 + } 1.4097 + /* restore any doubled timers */ 1.4098 + net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 1.4099 + if (net->RTO < stcb->asoc.minrto) { 1.4100 + net->RTO = stcb->asoc.minrto; 1.4101 + } 1.4102 + if (net->RTO > stcb->asoc.maxrto) { 1.4103 + net->RTO = stcb->asoc.maxrto; 1.4104 + } 1.4105 + } 1.4106 + } 1.4107 + asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 1.4108 + } 1.4109 + asoc->last_acked_seq = cumack; 1.4110 + 1.4111 + if (TAILQ_EMPTY(&asoc->sent_queue)) { 1.4112 + /* nothing left in-flight */ 1.4113 + TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 1.4114 + net->flight_size = 0; 1.4115 + net->partial_bytes_acked = 0; 1.4116 + } 1.4117 + asoc->total_flight = 0; 1.4118 + asoc->total_flight_count = 0; 1.4119 + } 1.4120 + 1.4121 + /* RWND update */ 1.4122 + asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 1.4123 + (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 1.4124 + if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 1.4125 + /* SWS sender side engages */ 1.4126 + asoc->peers_rwnd = 0; 1.4127 + } 1.4128 + if (asoc->peers_rwnd > old_rwnd) { 1.4129 + win_probe_recovery = 1; 1.4130 + } 1.4131 + /* Now assure a timer where data is queued at */ 1.4132 +again: 1.4133 + j = 0; 1.4134 + 
TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 1.4135 + int to_ticks; 1.4136 + if (win_probe_recovery && (net->window_probe)) { 1.4137 + win_probe_recovered = 1; 1.4138 + /* 1.4139 + * Find first chunk that was used with window probe 1.4140 + * and clear the sent 1.4141 + */ 1.4142 + /* sa_ignore FREED_MEMORY */ 1.4143 + TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 1.4144 + if (tp1->window_probe) { 1.4145 + /* move back to data send queue */ 1.4146 + sctp_window_probe_recovery(stcb, asoc, tp1); 1.4147 + break; 1.4148 + } 1.4149 + } 1.4150 + } 1.4151 + if (net->RTO == 0) { 1.4152 + to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); 1.4153 + } else { 1.4154 + to_ticks = MSEC_TO_TICKS(net->RTO); 1.4155 + } 1.4156 + if (net->flight_size) { 1.4157 + j++; 1.4158 + (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, 1.4159 + sctp_timeout_handler, &net->rxt_timer); 1.4160 + if (net->window_probe) { 1.4161 + net->window_probe = 0; 1.4162 + } 1.4163 + } else { 1.4164 + if (net->window_probe) { 1.4165 + /* In window probes we must assure a timer is still running there */ 1.4166 + net->window_probe = 0; 1.4167 + if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 1.4168 + SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, 1.4169 + sctp_timeout_handler, &net->rxt_timer); 1.4170 + } 1.4171 + } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 1.4172 + sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 1.4173 + stcb, net, 1.4174 + SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 1.4175 + } 1.4176 + } 1.4177 + } 1.4178 + if ((j == 0) && 1.4179 + (!TAILQ_EMPTY(&asoc->sent_queue)) && 1.4180 + (asoc->sent_queue_retran_cnt == 0) && 1.4181 + (win_probe_recovered == 0) && 1.4182 + (done_once == 0)) { 1.4183 + /* huh, this should not happen unless all packets 1.4184 + * are PR-SCTP and marked to skip of course. 1.4185 + */ 1.4186 + if (sctp_fs_audit(asoc)) { 1.4187 + TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 1.4188 + net->flight_size = 0; 1.4189 + } 1.4190 + asoc->total_flight = 0; 1.4191 + asoc->total_flight_count = 0; 1.4192 + asoc->sent_queue_retran_cnt = 0; 1.4193 + TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 1.4194 + if (tp1->sent < SCTP_DATAGRAM_RESEND) { 1.4195 + sctp_flight_size_increase(tp1); 1.4196 + sctp_total_flight_increase(stcb, tp1); 1.4197 + } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 1.4198 + sctp_ucount_incr(asoc->sent_queue_retran_cnt); 1.4199 + } 1.4200 + } 1.4201 + } 1.4202 + done_once = 1; 1.4203 + goto again; 1.4204 + } 1.4205 + /**********************************/ 1.4206 + /* Now what about shutdown issues */ 1.4207 + /**********************************/ 1.4208 + if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 1.4209 + /* nothing left on sendqueue.. consider done */ 1.4210 + /* clean up */ 1.4211 + if ((asoc->stream_queue_cnt == 1) && 1.4212 + ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 1.4213 + (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 1.4214 + (asoc->locked_on_sending) 1.4215 + ) { 1.4216 + struct sctp_stream_queue_pending *sp; 1.4217 + /* I may be in a state where we got 1.4218 + * all across.. but cannot write more due 1.4219 + * to a shutdown... we abort since the 1.4220 + * user did not indicate EOR in this case. The 1.4221 + * sp will be cleaned during free of the asoc. 
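 * (An empty entry that is not msg_is_complete means the application
 * never finished the message: SCTP_STATE_PARTIAL_MSG_LEFT is set below,
 * which feeds the abort path that follows.)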
1.4222 + */ 1.4223 + sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), 1.4224 + sctp_streamhead); 1.4225 + if ((sp) && (sp->length == 0)) { 1.4226 + /* Let cleanup code purge it */ 1.4227 + if (sp->msg_is_complete) { 1.4228 + asoc->stream_queue_cnt--; 1.4229 + } else { 1.4230 + asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 1.4231 + asoc->locked_on_sending = NULL; 1.4232 + asoc->stream_queue_cnt--; 1.4233 + } 1.4234 + } 1.4235 + } 1.4236 + if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 1.4237 + (asoc->stream_queue_cnt == 0)) { 1.4238 + if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 1.4239 + /* Need to abort here */ 1.4240 + struct mbuf *oper; 1.4241 + 1.4242 + abort_out_now: 1.4243 + *abort_now = 1; 1.4244 + /* XXX */ 1.4245 + oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 1.4246 + 0, M_NOWAIT, 1, MT_DATA); 1.4247 + if (oper) { 1.4248 + struct sctp_paramhdr *ph; 1.4249 + 1.4250 + SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr); 1.4251 + ph = mtod(oper, struct sctp_paramhdr *); 1.4252 + ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 1.4253 + ph->param_length = htons(SCTP_BUF_LEN(oper)); 1.4254 + } 1.4255 + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 1.4256 + sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); 1.4257 + } else { 1.4258 + struct sctp_nets *netp; 1.4259 + 1.4260 + if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 1.4261 + (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1.4262 + SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1.4263 + } 1.4264 + SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 1.4265 + SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 1.4266 + sctp_stop_timers_for_shutdown(stcb); 1.4267 + if (asoc->alternate) { 1.4268 + netp = asoc->alternate; 1.4269 + } else { 1.4270 + netp = asoc->primary_destination; 1.4271 + } 1.4272 + sctp_send_shutdown(stcb, netp); 1.4273 + sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 1.4274 + stcb->sctp_ep, stcb, netp); 1.4275 + sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1.4276 + stcb->sctp_ep, stcb, netp); 1.4277 + } 1.4278 + } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 1.4279 + (asoc->stream_queue_cnt == 0)) { 1.4280 + struct sctp_nets *netp; 1.4281 + 1.4282 + if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { 1.4283 + goto abort_out_now; 1.4284 + } 1.4285 + SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1.4286 + SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 1.4287 + SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 1.4288 + sctp_stop_timers_for_shutdown(stcb); 1.4289 + if (asoc->alternate) { 1.4290 + netp = asoc->alternate; 1.4291 + } else { 1.4292 + netp = asoc->primary_destination; 1.4293 + } 1.4294 + sctp_send_shutdown_ack(stcb, netp); 1.4295 + sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 1.4296 + stcb->sctp_ep, stcb, netp); 1.4297 + } 1.4298 + } 1.4299 + /*********************************************/ 1.4300 + /* Here we perform PR-SCTP procedures */ 1.4301 + /* (section 4.2) */ 1.4302 + /*********************************************/ 1.4303 + /* C1. 
update advancedPeerAckPoint */
1.4304 + if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
1.4305 + asoc->advanced_peer_ack_point = cumack;
1.4306 + }
1.4307 + /* PR-SCTP issues need to be addressed too */
1.4308 + if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
1.4309 + struct sctp_tmit_chunk *lchk;
1.4310 + uint32_t old_adv_peer_ack_point;
1.4311 +
1.4312 + old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
1.4313 + lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
1.4314 + /* C3. See if we need to send a Fwd-TSN */
1.4315 + if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
1.4316 + /*
1.4317 + * ISSUE with ECN, see FWD-TSN processing.
1.4318 + */
1.4319 + if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
1.4320 + send_forward_tsn(stcb, asoc);
1.4321 + } else if (lchk) {
1.4322 + /* try to FR fwd-tsn's that get lost too */
1.4323 + if (lchk->rec.data.fwd_tsn_cnt >= 3) {
1.4324 + send_forward_tsn(stcb, asoc);
1.4325 + }
1.4326 + }
1.4327 + }
1.4328 + if (lchk) {
1.4329 + /* Assure a timer is up */
1.4330 + sctp_timer_start(SCTP_TIMER_TYPE_SEND,
1.4331 + stcb->sctp_ep, stcb, lchk->whoTo);
1.4332 + }
1.4333 + }
1.4334 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
1.4335 + sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
1.4336 + rwnd,
1.4337 + stcb->asoc.peers_rwnd,
1.4338 + stcb->asoc.total_flight,
1.4339 + stcb->asoc.total_output_queue_size);
1.4340 + }
1.4341 +}
1.4342 +
1.4343 +void
1.4344 +sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
1.4345 + struct sctp_tcb *stcb,
1.4346 + uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
1.4347 + int *abort_now, uint8_t flags,
1.4348 + uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
1.4349 +{
1.4350 + struct sctp_association *asoc;
1.4351 + struct sctp_tmit_chunk *tp1, *tp2;
1.4352 + uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
1.4353 + uint16_t wake_him = 0;
1.4354 + uint32_t send_s = 0;
1.4355 + long j;
1.4356 + int accum_moved = 0;
1.4357 + int will_exit_fast_recovery = 0;
1.4358 + uint32_t a_rwnd, old_rwnd;
1.4359 + int win_probe_recovery = 0;
1.4360 + int win_probe_recovered = 0;
1.4361 + struct sctp_nets *net = NULL;
1.4362 + int done_once;
1.4363 + int rto_ok = 1;
1.4364 + uint8_t reneged_all = 0;
1.4365 + uint8_t cmt_dac_flag;
1.4366 + /*
1.4367 + * we take any chance we can to service our queues since we cannot
1.4368 + * get awoken when the socket is read from :<
1.4369 + */
1.4370 + /*
1.4371 + * Now perform the actual SACK handling: 1) Verify that it is not an
1.4372 + * old sack, if so discard. 2) If there is nothing left in the send
1.4373 + * queue (cum-ack is equal to last acked) then you have a duplicate
1.4374 + * too, update any rwnd change and verify no timers are running,
1.4375 + * then return. 3) Process any new consecutive data, i.e. the cum-ack
1.4376 + * moved; process these first and note that it moved. 4) Process any
1.4377 + * sack blocks. 5) Drop any acked from the queue. 6) Check for any
1.4378 + * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
1.4379 + * sync up flightsizes and things, stop all timers and also check
1.4380 + * for shutdown_pending state. If so then go ahead and send off the
1.4381 + * shutdown. If in shutdown recv, send off the shutdown-ack and
1.4382 + * start that timer, Ret. 9) Strike any non-acked things and do FR
1.4383 + * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
1.4384 + * procedures. 11) Apply any FR penalties.
12) Assure we will SACK 1.4385 + * if in shutdown_recv state. 1.4386 + */ 1.4387 + SCTP_TCB_LOCK_ASSERT(stcb); 1.4388 + /* CMT DAC algo */ 1.4389 + this_sack_lowest_newack = 0; 1.4390 + SCTP_STAT_INCR(sctps_slowpath_sack); 1.4391 + last_tsn = cum_ack; 1.4392 + cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; 1.4393 +#ifdef SCTP_ASOCLOG_OF_TSNS 1.4394 + stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 1.4395 + stcb->asoc.cumack_log_at++; 1.4396 + if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 1.4397 + stcb->asoc.cumack_log_at = 0; 1.4398 + } 1.4399 +#endif 1.4400 + a_rwnd = rwnd; 1.4401 + 1.4402 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 1.4403 + sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 1.4404 + rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 1.4405 + } 1.4406 + 1.4407 + old_rwnd = stcb->asoc.peers_rwnd; 1.4408 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 1.4409 + sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 1.4410 + stcb->asoc.overall_error_count, 1.4411 + 0, 1.4412 + SCTP_FROM_SCTP_INDATA, 1.4413 + __LINE__); 1.4414 + } 1.4415 + stcb->asoc.overall_error_count = 0; 1.4416 + asoc = &stcb->asoc; 1.4417 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 1.4418 + sctp_log_sack(asoc->last_acked_seq, 1.4419 + cum_ack, 1.4420 + 0, 1.4421 + num_seg, 1.4422 + num_dup, 1.4423 + SCTP_LOG_NEW_SACK); 1.4424 + } 1.4425 + if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { 1.4426 + uint16_t i; 1.4427 + uint32_t *dupdata, dblock; 1.4428 + 1.4429 + for (i = 0; i < num_dup; i++) { 1.4430 + dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), 1.4431 + sizeof(uint32_t), (uint8_t *)&dblock); 1.4432 + if (dupdata == NULL) { 1.4433 + break; 1.4434 + } 1.4435 + sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 1.4436 + } 1.4437 + } 1.4438 + if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { 1.4439 + /* reality check */ 1.4440 + if (!TAILQ_EMPTY(&asoc->sent_queue)) { 1.4441 + tp1 = TAILQ_LAST(&asoc->sent_queue, 1.4442 + sctpchunk_listhead); 1.4443 + send_s = tp1->rec.data.TSN_seq + 1; 1.4444 + } else { 1.4445 + tp1 = NULL; 1.4446 + send_s = asoc->sending_seq; 1.4447 + } 1.4448 + if (SCTP_TSN_GE(cum_ack, send_s)) { 1.4449 + struct mbuf *oper; 1.4450 + /* 1.4451 + * no way, we have not even sent this TSN out yet. 1.4452 + * Peer is hopelessly messed up with us. 
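 * (A cum-ack at or beyond sending_seq would acknowledge data we never
 * transmitted, so it is treated as a protocol violation and the
 * association is aborted below.)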
1.4453 + */
1.4454 + SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
1.4455 + cum_ack, send_s);
1.4456 + if (tp1) {
1.4457 + SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
1.4458 + tp1->rec.data.TSN_seq, (void *)tp1);
1.4459 + }
1.4460 + hopeless_peer:
1.4461 + *abort_now = 1;
1.4462 + /* XXX */
1.4463 + oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1.4464 + 0, M_NOWAIT, 1, MT_DATA);
1.4465 + if (oper) {
1.4466 + struct sctp_paramhdr *ph;
1.4467 + uint32_t *ippp;
1.4468 +
1.4469 + SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1.4470 + sizeof(uint32_t);
1.4471 + ph = mtod(oper, struct sctp_paramhdr *);
1.4472 + ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1.4473 + ph->param_length = htons(SCTP_BUF_LEN(oper));
1.4474 + ippp = (uint32_t *) (ph + 1);
1.4475 + *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
1.4476 + }
1.4477 + stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
1.4478 + sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1.4479 + return;
1.4480 + }
1.4481 + }
1.4482 + /**********************/
1.4483 + /* 1) check the range */
1.4484 + /**********************/
1.4485 + if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
1.4486 + /* acking something behind */
1.4487 + return;
1.4488 + }
1.4489 +
1.4490 + /* update the Rwnd of the peer */
1.4491 + if (TAILQ_EMPTY(&asoc->sent_queue) &&
1.4492 + TAILQ_EMPTY(&asoc->send_queue) &&
1.4493 + (asoc->stream_queue_cnt == 0)) {
1.4494 + /* nothing left on send/sent and strmq */
1.4495 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
1.4496 + sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
1.4497 + asoc->peers_rwnd, 0, 0, a_rwnd);
1.4498 + }
1.4499 + asoc->peers_rwnd = a_rwnd;
1.4500 + if (asoc->sent_queue_retran_cnt) {
1.4501 + asoc->sent_queue_retran_cnt = 0;
1.4502 + }
1.4503 + if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
1.4504 + /* SWS sender side engages */
1.4505 + asoc->peers_rwnd = 0;
1.4506 + }
1.4507 + /* stop any timers */
1.4508 + TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
1.4509 + sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
1.4510 + stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
1.4511 + net->partial_bytes_acked = 0;
1.4512 + net->flight_size = 0;
1.4513 + }
1.4514 + asoc->total_flight = 0;
1.4515 + asoc->total_flight_count = 0;
1.4516 + return;
1.4517 + }
1.4518 + /*
1.4519 + * We init netAckSz and netAckSz2 to 0. These are used to track 2
1.4520 + * things. The total byte count acked is tracked in netAckSz AND
1.4521 + * netAckSz2 is used to track the total bytes acked that are
1.4522 + * unambiguous and were never retransmitted. We track these on a per
1.4523 + * destination address basis.
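 * For example, if this SACK newly acks a 1200-byte chunk sent once and
 * an 800-byte chunk that had been retransmitted, both to the same
 * destination, that destination's net_ack grows by 2000 while net_ack2
 * grows by only 1200; only first-transmission bytes are unambiguous
 * enough for the RTO update and Karn's rule below.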
1.4524 + */
1.4525 + TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
1.4526 + if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
1.4527 + /* Drag along the window_tsn for cwr's */
1.4528 + net->cwr_window_tsn = cum_ack;
1.4529 + }
1.4530 + net->prev_cwnd = net->cwnd;
1.4531 + net->net_ack = 0;
1.4532 + net->net_ack2 = 0;
1.4533 +
1.4534 + /*
1.4535 + * CMT: Reset CUC and Fast recovery algo variables before
1.4536 + * SACK processing
1.4537 + */
1.4538 + net->new_pseudo_cumack = 0;
1.4539 + net->will_exit_fast_recovery = 0;
1.4540 + if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
1.4541 + (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
1.4542 + }
1.4543 + }
1.4544 + /* process the new consecutive TSN first */
1.4545 + TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
1.4546 + if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
1.4547 + if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
1.4548 + accum_moved = 1;
1.4549 + if (tp1->sent < SCTP_DATAGRAM_ACKED) {
1.4550 + /*
1.4551 + * If it is less than ACKED, it is
1.4552 + * now no longer in flight. Higher
1.4553 + * values may occur during marking
1.4554 + */
1.4555 + if ((tp1->whoTo->dest_state &
1.4556 + SCTP_ADDR_UNCONFIRMED) &&
1.4557 + (tp1->snd_count < 2)) {
1.4558 + /*
1.4559 + * If there was no retran
1.4560 + * and the address is
1.4561 + * un-confirmed and we sent
1.4562 + * there and are now
1.4563 + * sacked... it's confirmed,
1.4564 + * mark it so.
1.4565 + */
1.4566 + tp1->whoTo->dest_state &=
1.4567 + ~SCTP_ADDR_UNCONFIRMED;
1.4568 + }
1.4569 + if (tp1->sent < SCTP_DATAGRAM_RESEND) {
1.4570 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
1.4571 + sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
1.4572 + tp1->whoTo->flight_size,
1.4573 + tp1->book_size,
1.4574 + (uintptr_t)tp1->whoTo,
1.4575 + tp1->rec.data.TSN_seq);
1.4576 + }
1.4577 + sctp_flight_size_decrease(tp1);
1.4578 + sctp_total_flight_decrease(stcb, tp1);
1.4579 + if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
1.4580 + (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
1.4581 + tp1);
1.4582 + }
1.4583 + }
1.4584 + tp1->whoTo->net_ack += tp1->send_size;
1.4585 +
1.4586 + /* CMT SFR and DAC algos */
1.4587 + this_sack_lowest_newack = tp1->rec.data.TSN_seq;
1.4588 + tp1->whoTo->saw_newack = 1;
1.4589 +
1.4590 + if (tp1->snd_count < 2) {
1.4591 + /*
1.4592 + * True non-retransmitted
1.4593 + * chunk
1.4594 + */
1.4595 + tp1->whoTo->net_ack2 +=
1.4596 + tp1->send_size;
1.4597 +
1.4598 + /* update RTO too? */
1.4599 + if (tp1->do_rtt) {
1.4600 + if (rto_ok) {
1.4601 + tp1->whoTo->RTO =
1.4602 + sctp_calculate_rto(stcb,
1.4603 + asoc, tp1->whoTo,
1.4604 + &tp1->sent_rcv_time,
1.4605 + sctp_align_safe_nocopy,
1.4606 + SCTP_RTT_FROM_DATA);
1.4607 + rto_ok = 0;
1.4608 + }
1.4609 + if (tp1->whoTo->rto_needed == 0) {
1.4610 + tp1->whoTo->rto_needed = 1;
1.4611 + }
1.4612 + tp1->do_rtt = 0;
1.4613 + }
1.4614 + }
1.4615 + /*
1.4616 + * CMT: CUCv2 algorithm. From the
1.4617 + * cumack'd TSNs, for each TSN being
1.4618 + * acked for the first time, set the
1.4619 + * following variables for the
1.4620 + * corresp destination.
1.4621 + * new_pseudo_cumack will trigger a
1.4622 + * cwnd update.
1.4623 + * find_(rtx_)pseudo_cumack will
1.4624 + * trigger search for the next
1.4625 + * expected (rtx-)pseudo-cumack.
1.4626 + */ 1.4627 + tp1->whoTo->new_pseudo_cumack = 1; 1.4628 + tp1->whoTo->find_pseudo_cumack = 1; 1.4629 + tp1->whoTo->find_rtx_pseudo_cumack = 1; 1.4630 + 1.4631 + 1.4632 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 1.4633 + sctp_log_sack(asoc->last_acked_seq, 1.4634 + cum_ack, 1.4635 + tp1->rec.data.TSN_seq, 1.4636 + 0, 1.4637 + 0, 1.4638 + SCTP_LOG_TSN_ACKED); 1.4639 + } 1.4640 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1.4641 + sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); 1.4642 + } 1.4643 + } 1.4644 + if (tp1->sent == SCTP_DATAGRAM_RESEND) { 1.4645 + sctp_ucount_decr(asoc->sent_queue_retran_cnt); 1.4646 +#ifdef SCTP_AUDITING_ENABLED 1.4647 + sctp_audit_log(0xB3, 1.4648 + (asoc->sent_queue_retran_cnt & 0x000000ff)); 1.4649 +#endif 1.4650 + } 1.4651 + if (tp1->rec.data.chunk_was_revoked) { 1.4652 + /* deflate the cwnd */ 1.4653 + tp1->whoTo->cwnd -= tp1->book_size; 1.4654 + tp1->rec.data.chunk_was_revoked = 0; 1.4655 + } 1.4656 + if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 1.4657 + tp1->sent = SCTP_DATAGRAM_ACKED; 1.4658 + } 1.4659 + } 1.4660 + } else { 1.4661 + break; 1.4662 + } 1.4663 + } 1.4664 + biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 1.4665 + /* always set this up to cum-ack */ 1.4666 + asoc->this_sack_highest_gap = last_tsn; 1.4667 + 1.4668 + if ((num_seg > 0) || (num_nr_seg > 0)) { 1.4669 + 1.4670 + /* 1.4671 + * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 1.4672 + * to be greater than the cumack. Also reset saw_newack to 0 1.4673 + * for all dests. 1.4674 + */ 1.4675 + TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 1.4676 + net->saw_newack = 0; 1.4677 + net->this_sack_highest_newack = last_tsn; 1.4678 + } 1.4679 + 1.4680 + /* 1.4681 + * thisSackHighestGap will increase while handling NEW 1.4682 + * segments this_sack_highest_newack will increase while 1.4683 + * handling NEWLY ACKED chunks. this_sack_lowest_newack is 1.4684 + * used for CMT DAC algo. saw_newack will also change. 1.4685 + */ 1.4686 + if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, 1.4687 + &biggest_tsn_newly_acked, &this_sack_lowest_newack, 1.4688 + num_seg, num_nr_seg, &rto_ok)) { 1.4689 + wake_him++; 1.4690 + } 1.4691 + if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { 1.4692 + /* 1.4693 + * validate the biggest_tsn_acked in the gap acks if 1.4694 + * strict adherence is wanted. 1.4695 + */ 1.4696 + if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) { 1.4697 + /* 1.4698 + * peer is either confused or we are under 1.4699 + * attack. We must abort. 1.4700 + */ 1.4701 + SCTP_PRINTF("Hopeless peer! 
biggest_tsn_acked:%x largest seq:%x\n",
1.4702 + biggest_tsn_acked, send_s);
1.4703 + goto hopeless_peer;
1.4704 + }
1.4705 + }
1.4706 + }
1.4707 + /*******************************************/
1.4708 + /* cancel ALL T3-send timers if accum moved */
1.4709 + /*******************************************/
1.4710 + if (asoc->sctp_cmt_on_off > 0) {
1.4711 + TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
1.4712 + if (net->new_pseudo_cumack)
1.4713 + sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
1.4714 + stcb, net,
1.4715 + SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
1.4716 +
1.4717 + }
1.4718 + } else {
1.4719 + if (accum_moved) {
1.4720 + TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
1.4721 + sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
1.4722 + stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
1.4723 + }
1.4724 + }
1.4725 + }
1.4726 + /********************************************/
1.4727 + /* drop the acked chunks from the sentqueue */
1.4728 + /********************************************/
1.4729 + asoc->last_acked_seq = cum_ack;
1.4730 +
1.4731 + TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
1.4732 + if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
1.4733 + break;
1.4734 + }
1.4735 + if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
1.4736 + if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
1.4737 + asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
1.4738 +#ifdef INVARIANTS
1.4739 + } else {
1.4740 + panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
1.4741 +#endif
1.4742 + }
1.4743 + }
1.4744 + TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
1.4745 + if (PR_SCTP_ENABLED(tp1->flags)) {
1.4746 + if (asoc->pr_sctp_cnt != 0)
1.4747 + asoc->pr_sctp_cnt--;
1.4748 + }
1.4749 + asoc->sent_queue_cnt--;
1.4750 + if (tp1->data) {
1.4751 + /* sa_ignore NO_NULL_CHK */
1.4752 + sctp_free_bufspace(stcb, asoc, tp1, 1);
1.4753 + sctp_m_freem(tp1->data);
1.4754 + tp1->data = NULL;
1.4755 + if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
1.4756 + asoc->sent_queue_cnt_removeable--;
1.4757 + }
1.4758 + }
1.4759 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
1.4760 + sctp_log_sack(asoc->last_acked_seq,
1.4761 + cum_ack,
1.4762 + tp1->rec.data.TSN_seq,
1.4763 + 0,
1.4764 + 0,
1.4765 + SCTP_LOG_FREE_SENT);
1.4766 + }
1.4767 + sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
1.4768 + wake_him++;
1.4769 + }
1.4770 + if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
1.4771 +#ifdef INVARIANTS
1.4772 + panic("Warning flight size is positive and should be 0");
1.4773 +#else
1.4774 + SCTP_PRINTF("Warning flight size incorrect should be 0 but is %d\n",
1.4775 + asoc->total_flight);
1.4776 +#endif
1.4777 + asoc->total_flight = 0;
1.4778 + }
1.4779 +
1.4780 +#if defined(__Userspace__)
1.4781 + if (stcb->sctp_ep->recv_callback) {
1.4782 + if (stcb->sctp_socket) {
1.4783 + uint32_t inqueue_bytes, sb_free_now;
1.4784 + struct sctp_inpcb *inp;
1.4785 +
1.4786 + inp = stcb->sctp_ep;
1.4787 + inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
1.4788 + sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
1.4789 +
1.4790 + /* check if the amount free in the send socket buffer crossed the threshold */
1.4791 + if (inp->send_callback &&
1.4792 + (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
1.4793 + (inp->send_sb_threshold == 0))) {
1.4794 + 
1.4779 +
1.4780 +#if defined(__Userspace__)
1.4781 +    if (stcb->sctp_ep->recv_callback) {
1.4782 +        if (stcb->sctp_socket) {
1.4783 +            uint32_t inqueue_bytes, sb_free_now;
1.4784 +            struct sctp_inpcb *inp;
1.4785 +
1.4786 +            inp = stcb->sctp_ep;
1.4787 +            inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
1.4788 +            sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
1.4789 +
1.4790 +            /* check if the amount free in the send socket buffer crossed the threshold */
1.4791 +            if (inp->send_callback &&
1.4792 +                (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
1.4793 +                 (inp->send_sb_threshold == 0))) {
1.4794 +                atomic_add_int(&stcb->asoc.refcnt, 1);
1.4795 +                SCTP_TCB_UNLOCK(stcb);
1.4796 +                inp->send_callback(stcb->sctp_socket, sb_free_now);
1.4797 +                SCTP_TCB_LOCK(stcb);
1.4798 +                atomic_subtract_int(&stcb->asoc.refcnt, 1);
1.4799 +            }
1.4800 +        }
1.4801 +    } else if ((wake_him) && (stcb->sctp_socket)) {
1.4802 +#else
1.4803 +    /* sa_ignore NO_NULL_CHK */
1.4804 +    if ((wake_him) && (stcb->sctp_socket)) {
1.4805 +#endif
1.4806 +#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1.4807 +        struct socket *so;
1.4808 +
1.4809 +#endif
1.4810 +        SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
1.4811 +        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
1.4812 +            sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
1.4813 +        }
1.4814 +#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1.4815 +        so = SCTP_INP_SO(stcb->sctp_ep);
1.4816 +        atomic_add_int(&stcb->asoc.refcnt, 1);
1.4817 +        SCTP_TCB_UNLOCK(stcb);
1.4818 +        SCTP_SOCKET_LOCK(so, 1);
1.4819 +        SCTP_TCB_LOCK(stcb);
1.4820 +        atomic_subtract_int(&stcb->asoc.refcnt, 1);
1.4821 +        if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1.4822 +            /* assoc was freed while we were unlocked */
1.4823 +            SCTP_SOCKET_UNLOCK(so, 1);
1.4824 +            return;
1.4825 +        }
1.4826 +#endif
1.4827 +        sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
1.4828 +#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1.4829 +        SCTP_SOCKET_UNLOCK(so, 1);
1.4830 +#endif
1.4831 +    } else {
1.4832 +        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
1.4833 +            sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
1.4834 +        }
1.4835 +    }
1.4836 +
1.4837 +    if (asoc->fast_retran_loss_recovery && accum_moved) {
1.4838 +        if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
1.4839 +            /* Setup so we will exit RFC2582 fast recovery */
1.4840 +            will_exit_fast_recovery = 1;
1.4841 +        }
1.4842 +    }
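/*
 * A small sketch (not part of the patched file) of the notification test
 * the __Userspace__ branch above applies: the send callback fires only
 * when the free space in the send buffer has reached the configured
 * threshold, and a threshold of 0 means "notify on every opportunity".
 */
#include <stdint.h>

static int
should_notify_sender(uint32_t sb_free_now, uint32_t send_sb_threshold)
{
    return (send_sb_threshold == 0) || (sb_free_now >= send_sb_threshold);
}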
1.4843 +    /*
1.4844 +     * Check for revoked fragments:
1.4845 +     *
1.4846 +     * If the previous SACK had no frags, then nothing can have been
1.4847 +     * revoked. If the previous SACK had frags and we now have frags
1.4848 +     * (num_seg > 0), call sctp_check_for_revoked() to see if the peer
1.4849 +     * revoked some of them. Otherwise the peer revoked all ACKED
1.4850 +     * fragments, since we had some before and now we have NONE.
1.4851 +     */
1.4852 +
1.4853 +    if (num_seg) {
1.4854 +        sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
1.4855 +        asoc->saw_sack_with_frags = 1;
1.4856 +    } else if (asoc->saw_sack_with_frags) {
1.4857 +        int cnt_revoked = 0;
1.4858 +
1.4859 +        /* Peer revoked all dg's marked or acked */
1.4860 +        TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
1.4861 +            if (tp1->sent == SCTP_DATAGRAM_ACKED) {
1.4862 +                tp1->sent = SCTP_DATAGRAM_SENT;
1.4863 +                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
1.4864 +                    sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
1.4865 +                                   tp1->whoTo->flight_size,
1.4866 +                                   tp1->book_size,
1.4867 +                                   (uintptr_t)tp1->whoTo,
1.4868 +                                   tp1->rec.data.TSN_seq);
1.4869 +                }
1.4870 +                sctp_flight_size_increase(tp1);
1.4871 +                sctp_total_flight_increase(stcb, tp1);
1.4872 +                tp1->rec.data.chunk_was_revoked = 1;
1.4873 +                /*
1.4874 +                 * To ensure that this increase in
1.4875 +                 * flightsize, which is artificial,
1.4876 +                 * does not throttle the sender, we
1.4877 +                 * also increase the cwnd
1.4878 +                 * artificially.
1.4879 +                 */
1.4880 +                tp1->whoTo->cwnd += tp1->book_size;
1.4881 +                cnt_revoked++;
1.4882 +            }
1.4883 +        }
1.4884 +        if (cnt_revoked) {
1.4885 +            reneged_all = 1;
1.4886 +        }
1.4887 +        asoc->saw_sack_with_frags = 0;
1.4888 +    }
1.4889 +    if (num_nr_seg > 0)
1.4890 +        asoc->saw_sack_with_nr_frags = 1;
1.4891 +    else
1.4892 +        asoc->saw_sack_with_nr_frags = 0;
1.4893 +
1.4894 +    /* JRS - Use the congestion control given in the CC module */
1.4895 +    if (ecne_seen == 0) {
1.4896 +        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
1.4897 +            if (net->net_ack2 > 0) {
1.4898 +                /*
1.4899 +                 * Karn's rule applies to clearing error count; this
1.4900 +                 * is optional.
1.4901 +                 */
1.4902 +                net->error_count = 0;
1.4903 +                if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
1.4904 +                    /* addr came good */
1.4905 +                    net->dest_state |= SCTP_ADDR_REACHABLE;
1.4906 +                    sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
1.4907 +                                    0, (void *)net, SCTP_SO_NOT_LOCKED);
1.4908 +                }
1.4909 +
1.4910 +                if (net == stcb->asoc.primary_destination) {
1.4911 +                    if (stcb->asoc.alternate) {
1.4912 +                        /* release the alternate, primary is good */
1.4913 +                        sctp_free_remote_addr(stcb->asoc.alternate);
1.4914 +                        stcb->asoc.alternate = NULL;
1.4915 +                    }
1.4916 +                }
1.4917 +
1.4918 +                if (net->dest_state & SCTP_ADDR_PF) {
1.4919 +                    net->dest_state &= ~SCTP_ADDR_PF;
1.4920 +                    sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
1.4921 +                    sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
1.4922 +                    asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
1.4923 +                    /* Done with this net */
1.4924 +                    net->net_ack = 0;
1.4925 +                }
1.4926 +                /* restore any doubled timers */
1.4927 +                net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
1.4928 +                if (net->RTO < stcb->asoc.minrto) {
1.4929 +                    net->RTO = stcb->asoc.minrto;
1.4930 +                }
1.4931 +                if (net->RTO > stcb->asoc.maxrto) {
1.4932 +                    net->RTO = stcb->asoc.maxrto;
1.4933 +                }
1.4934 +            }
1.4935 +        }
1.4936 +        asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
1.4937 +    }
1.4938 +
1.4939 +    if (TAILQ_EMPTY(&asoc->sent_queue)) {
1.4940 +        /* nothing left in-flight */
1.4941 +        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
1.4942 +            /* stop all timers */
1.4943 +            sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
1.4944 +                            stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
1.4945 +            net->flight_size = 0;
1.4946 +            net->partial_bytes_acked = 0;
1.4947 +        }
1.4948 +        asoc->total_flight = 0;
1.4949 +        asoc->total_flight_count = 0;
1.4950 +    }
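/*
 * A standalone sketch (not part of the patched file) of the RTO
 * restoration done above under "restore any doubled timers": recompute
 * the RTO from the smoothed RTT state and clamp it to [minrto, maxrto].
 * It assumes SCTP_RTT_SHIFT is 3, i.e. lastsa carries a left-shifted
 * smoothed RTT, and lastsv already holds the scaled variance term.
 */
#include <stdint.h>

#define RTT_SHIFT 3

static uint32_t
recompute_rto(int32_t lastsa, int32_t lastsv, uint32_t minrto, uint32_t maxrto)
{
    uint32_t rto = (uint32_t)((lastsa >> RTT_SHIFT) + lastsv);

    if (rto < minrto)
        rto = minrto;
    if (rto > maxrto)
        rto = maxrto;
    return rto;
}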
1.4951 +
1.4952 +    /**********************************/
1.4953 +    /* Now what about shutdown issues */
1.4954 +    /**********************************/
1.4955 +    if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
1.4956 +        /* nothing left on sendqueue.. consider done */
1.4957 +        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
1.4958 +            sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
1.4959 +                              asoc->peers_rwnd, 0, 0, a_rwnd);
1.4960 +        }
1.4961 +        asoc->peers_rwnd = a_rwnd;
1.4962 +        if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
1.4963 +            /* SWS sender side engages */
1.4964 +            asoc->peers_rwnd = 0;
1.4965 +        }
1.4966 +        /* clean up */
1.4967 +        if ((asoc->stream_queue_cnt == 1) &&
1.4968 +            ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
1.4969 +             (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
1.4970 +            (asoc->locked_on_sending)
1.4971 +            ) {
1.4972 +            struct sctp_stream_queue_pending *sp;
1.4973 +            /* We may be in a state where everything got
1.4974 +             * across but we cannot write more due
1.4975 +             * to a shutdown... we abort, since the
1.4976 +             * user did not indicate EOR in this case.
1.4977 +             */
1.4978 +            sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
1.4979 +                            sctp_streamhead);
1.4980 +            if ((sp) && (sp->length == 0)) {
1.4981 +                asoc->locked_on_sending = NULL;
1.4982 +                if (sp->msg_is_complete) {
1.4983 +                    asoc->stream_queue_cnt--;
1.4984 +                } else {
1.4985 +                    asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
1.4986 +                    asoc->stream_queue_cnt--;
1.4987 +                }
1.4988 +            }
1.4989 +        }
1.4990 +        if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
1.4991 +            (asoc->stream_queue_cnt == 0)) {
1.4992 +            if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
1.4993 +                /* Need to abort here */
1.4994 +                struct mbuf *oper;
1.4995 +        abort_out_now:
1.4996 +                *abort_now = 1;
1.4997 +                /* XXX */
1.4998 +                oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
1.4999 +                                             0, M_NOWAIT, 1, MT_DATA);
1.5000 +                if (oper) {
1.5001 +                    struct sctp_paramhdr *ph;
1.5002 +
1.5003 +                    SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
1.5004 +                    ph = mtod(oper, struct sctp_paramhdr *);
1.5005 +                    ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
1.5006 +                    ph->param_length = htons(SCTP_BUF_LEN(oper));
1.5007 +                }
1.5008 +                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
1.5009 +                sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1.5010 +                return;
1.5011 +            } else {
1.5012 +                struct sctp_nets *netp;
1.5013 +
1.5014 +                if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
1.5015 +                    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
1.5016 +                    SCTP_STAT_DECR_GAUGE32(sctps_currestab);
1.5017 +                }
1.5018 +                SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
1.5019 +                SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
1.5020 +                sctp_stop_timers_for_shutdown(stcb);
1.5021 +                if (asoc->alternate) {
1.5022 +                    netp = asoc->alternate;
1.5023 +                } else {
1.5024 +                    netp = asoc->primary_destination;
1.5025 +                }
1.5026 +                sctp_send_shutdown(stcb, netp);
1.5027 +                sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1.5028 +                                 stcb->sctp_ep, stcb, netp);
1.5029 +                sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1.5030 +                                 stcb->sctp_ep, stcb, netp);
1.5031 +            }
1.5032 +            return;
1.5033 +        } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
1.5034 +                   (asoc->stream_queue_cnt == 0)) {
1.5035 +            struct sctp_nets *netp;
1.5036 +
1.5037 +            if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
1.5038 +                goto abort_out_now;
1.5039 +            }
1.5040 +            SCTP_STAT_DECR_GAUGE32(sctps_currestab);
1.5041 +            SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
1.5042 +            SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
1.5043 +            sctp_stop_timers_for_shutdown(stcb);
1.5044 +            if (asoc->alternate) {
1.5045 +                netp = asoc->alternate;
1.5046 +            } else {
1.5047 +                netp = asoc->primary_destination;
1.5048 +            }
1.5049 +            sctp_send_shutdown_ack(stcb, netp);
1.5050 +            sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
1.5051 +                             stcb->sctp_ep, stcb, netp);
1.5052 +            return;
1.5053 +        }
1.5054 +    }
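/*
 * A sketch (not part of the patched file) of the error-cause layout built
 * in the abort path above: a bare parameter header with type and length
 * in network byte order. The buffer-based helper and its names are
 * hypothetical; the kernel code writes into an mbuf instead.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct param_hdr {          /* mirrors struct sctp_paramhdr */
    uint16_t param_type;
    uint16_t param_length;
};

/* write a cause header into buf; returns bytes used, or 0 if too small */
static size_t
build_cause(uint8_t *buf, size_t len, uint16_t cause_code)
{
    struct param_hdr ph;

    if (len < sizeof(ph))
        return 0;
    ph.param_type = htons(cause_code);
    ph.param_length = htons((uint16_t)sizeof(ph));
    memcpy(buf, &ph, sizeof(ph));   /* memcpy avoids alignment assumptions */
    return sizeof(ph);
}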
1.5055 +    /*
1.5056 +     * Now here we are going to recycle net_ack for a different use...
1.5057 +     * HEADS UP.
1.5058 +     */
1.5059 +    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
1.5060 +        net->net_ack = 0;
1.5061 +    }
1.5062 +
1.5063 +    /*
1.5064 +     * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
1.5065 +     * to be done. Setting this_sack_lowest_newack to the cum_ack will
1.5066 +     * automatically ensure that.
1.5067 +     */
1.5068 +    if ((asoc->sctp_cmt_on_off > 0) &&
1.5069 +        SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
1.5070 +        (cmt_dac_flag == 0)) {
1.5071 +        this_sack_lowest_newack = cum_ack;
1.5072 +    }
1.5073 +    if ((num_seg > 0) || (num_nr_seg > 0)) {
1.5074 +        sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
1.5075 +                                   biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
1.5076 +    }
1.5077 +    /* JRS - Use the congestion control given in the CC module */
1.5078 +    asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
1.5079 +
1.5080 +    /* Now are we exiting loss recovery ? */
1.5081 +    if (will_exit_fast_recovery) {
1.5082 +        /* Ok, we must exit fast recovery */
1.5083 +        asoc->fast_retran_loss_recovery = 0;
1.5084 +    }
1.5085 +    if ((asoc->sat_t3_loss_recovery) &&
1.5086 +        SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
1.5087 +        /* end satellite t3 loss recovery */
1.5088 +        asoc->sat_t3_loss_recovery = 0;
1.5089 +    }
1.5090 +    /*
1.5091 +     * CMT Fast recovery
1.5092 +     */
1.5093 +    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
1.5094 +        if (net->will_exit_fast_recovery) {
1.5095 +            /* Ok, we must exit fast recovery */
1.5096 +            net->fast_retran_loss_recovery = 0;
1.5097 +        }
1.5098 +    }
1.5099 +
1.5100 +    /* Adjust and set the new rwnd value */
1.5101 +    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
1.5102 +        sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
1.5103 +                          asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
1.5104 +    }
1.5105 +    asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
1.5106 +                                        (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
1.5107 +    if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
1.5108 +        /* SWS sender side engages */
1.5109 +        asoc->peers_rwnd = 0;
1.5110 +    }
1.5111 +    if (asoc->peers_rwnd > old_rwnd) {
1.5112 +        win_probe_recovery = 1;
1.5113 +    }
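/*
 * A sketch (not part of the patched file) of the peers_rwnd update above:
 * the advertised window minus the bytes in flight and a per-chunk
 * overhead allowance, floored at zero. It assumes sctp_sbspace_sub() is
 * a saturating subtraction, which is how it is used throughout this file.
 */
#include <stdint.h>

static uint32_t
sat_sub32(uint32_t a, uint32_t b)
{
    return (a > b) ? (a - b) : 0;
}

/* peers_rwnd = a_rwnd - (flight + flight_count * per-chunk overhead) */
static uint32_t
derive_peers_rwnd(uint32_t a_rwnd, uint32_t flight,
                  uint32_t flight_count, uint32_t chunk_oh)
{
    return sat_sub32(a_rwnd, flight + flight_count * chunk_oh);
}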
1.5114 +
1.5115 +    /*
1.5116 +     * Now we must setup so we have a timer up for anyone with
1.5117 +     * outstanding data.
1.5118 +     */
1.5119 +    done_once = 0;
1.5120 +again:
1.5121 +    j = 0;
1.5122 +    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
1.5123 +        if (win_probe_recovery && (net->window_probe)) {
1.5124 +            win_probe_recovered = 1;
1.5125 +            /*-
1.5126 +             * Find first chunk that was used with
1.5127 +             * window probe and clear the event. Put
1.5128 +             * it back into the send queue as if it had
1.5129 +             * not been sent.
1.5130 +             */
1.5131 +            TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
1.5132 +                if (tp1->window_probe) {
1.5133 +                    sctp_window_probe_recovery(stcb, asoc, tp1);
1.5134 +                    break;
1.5135 +                }
1.5136 +            }
1.5137 +        }
1.5138 +        if (net->flight_size) {
1.5139 +            j++;
1.5140 +            if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
1.5141 +                sctp_timer_start(SCTP_TIMER_TYPE_SEND,
1.5142 +                                 stcb->sctp_ep, stcb, net);
1.5143 +            }
1.5144 +            if (net->window_probe) {
1.5145 +                net->window_probe = 0;
1.5146 +            }
1.5147 +        } else {
1.5148 +            if (net->window_probe) {
1.5149 +                /* In window probes we must assure a timer is still running there */
1.5150 +                if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
1.5151 +                    sctp_timer_start(SCTP_TIMER_TYPE_SEND,
1.5152 +                                     stcb->sctp_ep, stcb, net);
1.5153 +
1.5154 +                }
1.5155 +            } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
1.5156 +                sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
1.5157 +                                stcb, net,
1.5158 +                                SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
1.5159 +            }
1.5160 +        }
1.5161 +    }
1.5162 +    if ((j == 0) &&
1.5163 +        (!TAILQ_EMPTY(&asoc->sent_queue)) &&
1.5164 +        (asoc->sent_queue_retran_cnt == 0) &&
1.5165 +        (win_probe_recovered == 0) &&
1.5166 +        (done_once == 0)) {
1.5167 +        /* huh, this should not happen unless all packets
1.5168 +         * are PR-SCTP and marked to skip, of course.
1.5169 +         */
1.5170 +        if (sctp_fs_audit(asoc)) {
1.5171 +            TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
1.5172 +                net->flight_size = 0;
1.5173 +            }
1.5174 +            asoc->total_flight = 0;
1.5175 +            asoc->total_flight_count = 0;
1.5176 +            asoc->sent_queue_retran_cnt = 0;
1.5177 +            TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
1.5178 +                if (tp1->sent < SCTP_DATAGRAM_RESEND) {
1.5179 +                    sctp_flight_size_increase(tp1);
1.5180 +                    sctp_total_flight_increase(stcb, tp1);
1.5181 +                } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
1.5182 +                    sctp_ucount_incr(asoc->sent_queue_retran_cnt);
1.5183 +                }
1.5184 +            }
1.5185 +        }
1.5186 +        done_once = 1;
1.5187 +        goto again;
1.5188 +    }
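/*
 * A sketch (not part of the patched file) of the recovery idea behind the
 * audit branch above: when the flight-size counters are suspect, rebuild
 * them from the sent queue, which is the single source of truth. The
 * queue type and field names here are hypothetical.
 */
#include <stdint.h>
#include <stddef.h>

struct tx_chunk {
    uint32_t book_size;
    int marked_for_resend;      /* retransmit-marked: not in flight */
    struct tx_chunk *next;
};

/* recompute the total flight instead of trusting incremental counters */
static uint32_t
rebuild_flight(const struct tx_chunk *head)
{
    uint32_t flight = 0;
    const struct tx_chunk *c;

    for (c = head; c != NULL; c = c->next) {
        if (!c->marked_for_resend)
            flight += c->book_size;
    }
    return flight;
}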
1.5189 +    /*********************************************/
1.5190 +    /* Here we perform PR-SCTP procedures        */
1.5191 +    /* (section 4.2)                             */
1.5192 +    /*********************************************/
1.5193 +    /* C1. update advancedPeerAckPoint */
1.5194 +    if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
1.5195 +        asoc->advanced_peer_ack_point = cum_ack;
1.5196 +    }
1.5197 +    /* C2. try to further move advancedPeerAckPoint ahead */
1.5198 +    if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
1.5199 +        struct sctp_tmit_chunk *lchk;
1.5200 +        uint32_t old_adv_peer_ack_point;
1.5201 +
1.5202 +        old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
1.5203 +        lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
1.5204 +        /* C3. See if we need to send a Fwd-TSN */
1.5205 +        if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
1.5206 +            /*
1.5207 +             * ISSUE with ECN, see FWD-TSN processing.
1.5208 +             */
1.5209 +            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
1.5210 +                sctp_misc_ints(SCTP_FWD_TSN_CHECK,
1.5211 +                               0xee, cum_ack, asoc->advanced_peer_ack_point,
1.5212 +                               old_adv_peer_ack_point);
1.5213 +            }
1.5214 +            if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
1.5215 +                send_forward_tsn(stcb, asoc);
1.5216 +            } else if (lchk) {
1.5217 +                /* try to FR fwd-tsn's that get lost too */
1.5218 +                if (lchk->rec.data.fwd_tsn_cnt >= 3) {
1.5219 +                    send_forward_tsn(stcb, asoc);
1.5220 +                }
1.5221 +            }
1.5222 +        }
1.5223 +        if (lchk) {
1.5224 +            /* Assure a timer is up */
1.5225 +            sctp_timer_start(SCTP_TIMER_TYPE_SEND,
1.5226 +                             stcb->sctp_ep, stcb, lchk->whoTo);
1.5227 +        }
1.5228 +    }
1.5229 +    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
1.5230 +        sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
1.5231 +                       a_rwnd,
1.5232 +                       stcb->asoc.peers_rwnd,
1.5233 +                       stcb->asoc.total_flight,
1.5234 +                       stcb->asoc.total_output_queue_size);
1.5235 +    }
1.5236 +}
1.5237 +
1.5238 +void
1.5239 +sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
1.5240 +{
1.5241 +    /* Copy cum-ack */
1.5242 +    uint32_t cum_ack, a_rwnd;
1.5243 +
1.5244 +    cum_ack = ntohl(cp->cumulative_tsn_ack);
1.5245 +    /* Arrange so a_rwnd does NOT change */
1.5246 +    a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
1.5247 +
1.5248 +    /* Now call the express sack handling */
1.5249 +    sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
1.5250 +}
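/*
 * A skeleton (not part of the patched file) of the RFC 3758 step C2
 * applied above: the Advanced.Peer.Ack.Point may slide past the
 * cumulative ack across consecutive TSNs the sender has abandoned (or
 * that were acked), and a FORWARD-TSN is only worth sending when the
 * point actually moved. Types and names here are hypothetical.
 */
#include <stdint.h>
#include <stddef.h>

struct sent_item {
    uint32_t tsn;
    int skippable;              /* abandoned (PR-SCTP) or already acked */
    struct sent_item *next;
};

/* C2: slide adv_ack_point forward over a consecutive skippable prefix */
static uint32_t
advance_ack_point(uint32_t adv_ack_point, const struct sent_item *q)
{
    const struct sent_item *s;

    for (s = q; s != NULL; s = s->next) {
        if (s->tsn == adv_ack_point + 1 && s->skippable)
            adv_ack_point = s->tsn;
        else
            break;
    }
    return adv_ack_point;
}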
1.5251 +
1.5252 +static void
1.5253 +sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
1.5254 +                               struct sctp_stream_in *strmin)
1.5255 +{
1.5256 +    struct sctp_queued_to_read *ctl, *nctl;
1.5257 +    struct sctp_association *asoc;
1.5258 +    uint16_t tt;
1.5259 +
1.5260 +    asoc = &stcb->asoc;
1.5261 +    tt = strmin->last_sequence_delivered;
1.5262 +    /*
1.5263 +     * First deliver anything prior to and including the stream
1.5264 +     * sequence number that came in.
1.5265 +     */
1.5266 +    TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
1.5267 +        if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
1.5268 +            /* this is deliverable now */
1.5269 +            TAILQ_REMOVE(&strmin->inqueue, ctl, next);
1.5270 +            /* subtract pending on streams */
1.5271 +            asoc->size_on_all_streams -= ctl->length;
1.5272 +            sctp_ucount_decr(asoc->cnt_on_all_streams);
1.5273 +            /* deliver it to at least the delivery-q */
1.5274 +            if (stcb->sctp_socket) {
1.5275 +                sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
1.5276 +                sctp_add_to_readq(stcb->sctp_ep, stcb,
1.5277 +                                  ctl,
1.5278 +                                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
1.5279 +            }
1.5280 +        } else {
1.5281 +            /* no more delivery now. */
1.5282 +            break;
1.5283 +        }
1.5284 +    }
1.5285 +    /*
1.5286 +     * now we must deliver things in queue the normal way if any are
1.5287 +     * now ready.
1.5288 +     */
1.5289 +    tt = strmin->last_sequence_delivered + 1;
1.5290 +    TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
1.5291 +        if (tt == ctl->sinfo_ssn) {
1.5292 +            /* this is deliverable now */
1.5293 +            TAILQ_REMOVE(&strmin->inqueue, ctl, next);
1.5294 +            /* subtract pending on streams */
1.5295 +            asoc->size_on_all_streams -= ctl->length;
1.5296 +            sctp_ucount_decr(asoc->cnt_on_all_streams);
1.5297 +            /* deliver it to at least the delivery-q */
1.5298 +            strmin->last_sequence_delivered = ctl->sinfo_ssn;
1.5299 +            if (stcb->sctp_socket) {
1.5300 +                sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
1.5301 +                sctp_add_to_readq(stcb->sctp_ep, stcb,
1.5302 +                                  ctl,
1.5303 +                                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
1.5304 +
1.5305 +            }
1.5306 +            tt = strmin->last_sequence_delivered + 1;
1.5307 +        } else {
1.5308 +            break;
1.5309 +        }
1.5310 +    }
1.5311 +}
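/*
 * A skeleton (not part of the patched file) of the reorder-buffer drain
 * in the second loop above: keep delivering the entry whose 16-bit SSN is
 * exactly last_delivered + 1 from an SSN-sorted list, advancing the
 * expectation each time; the 16-bit arithmetic wraps naturally at 65535.
 * The list type and the 'deliver' hook are hypothetical.
 */
#include <stdint.h>
#include <stddef.h>

struct ordered_msg {
    uint16_t ssn;
    struct ordered_msg *next;
};

static uint16_t
drain_in_order(struct ordered_msg **headp, uint16_t last_delivered,
               void (*deliver)(struct ordered_msg *))
{
    while (*headp != NULL &&
           (*headp)->ssn == (uint16_t)(last_delivered + 1)) {
        struct ordered_msg *m = *headp;

        *headp = m->next;
        last_delivered = m->ssn;
        deliver(m);
    }
    return last_delivered;
}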
1.5312 +
1.5313 +static void
1.5314 +sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
1.5315 +                              struct sctp_association *asoc,
1.5316 +                              uint16_t stream, uint16_t seq)
1.5317 +{
1.5318 +    struct sctp_tmit_chunk *chk, *nchk;
1.5319 +
1.5320 +    /* For each one on here see if we need to toss it */
1.5321 +    /*
1.5322 +     * For now large messages held on the reasmqueue that are
1.5323 +     * complete will be tossed too. We could in theory do more
1.5324 +     * work to spin through and stop after dumping one msg aka
1.5325 +     * seeing the start of a new msg at the head, and call the
1.5326 +     * delivery function... to see if it can be delivered... But
1.5327 +     * for now we just dump everything on the queue.
1.5328 +     */
1.5329 +    TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
1.5330 +        /* Do not toss it if it is on a different stream or
1.5331 +         * marked for unordered delivery, in which case
1.5332 +         * the stream sequence number has no meaning.
1.5333 +         */
1.5334 +        if ((chk->rec.data.stream_number != stream) ||
1.5335 +            ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
1.5336 +            continue;
1.5337 +        }
1.5338 +        if (chk->rec.data.stream_seq == seq) {
1.5339 +            /* It needs to be tossed */
1.5340 +            TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
1.5341 +            if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
1.5342 +                asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
1.5343 +                asoc->str_of_pdapi = chk->rec.data.stream_number;
1.5344 +                asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
1.5345 +                asoc->fragment_flags = chk->rec.data.rcv_flags;
1.5346 +            }
1.5347 +            asoc->size_on_reasm_queue -= chk->send_size;
1.5348 +            sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1.5349 +
1.5350 +            /* Clear up any stream problem */
1.5351 +            if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
1.5352 +                SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
1.5353 +                /*
1.5354 +                 * We must advance this stream's
1.5355 +                 * sequence number if the chunk
1.5356 +                 * being skipped is not unordered.
1.5357 +                 * There is a chance that
1.5358 +                 * if the peer does not include the
1.5359 +                 * last fragment in its FWD-TSN we
1.5360 +                 * WILL have a problem here, since
1.5361 +                 * you would have a partial chunk in
1.5362 +                 * the queue that may not be
1.5363 +                 * deliverable. Also, if a partial
1.5364 +                 * delivery API has started, the user
1.5365 +                 * may get a partial chunk with the next
1.5366 +                 * read returning a new chunk...
1.5367 +                 * really ugly, but I see no way
1.5368 +                 * around it! Maybe a notify??
1.5369 +                 */
1.5370 +                asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
1.5371 +            }
1.5372 +            if (chk->data) {
1.5373 +                sctp_m_freem(chk->data);
1.5374 +                chk->data = NULL;
1.5375 +            }
1.5376 +            sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1.5377 +        } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
1.5378 +            /* If the stream_seq is > than the purging one, we are done */
1.5379 +            break;
1.5380 +        }
1.5381 +    }
1.5382 +}
1.5383 +
1.5384 +
1.5385 +void
1.5386 +sctp_handle_forward_tsn(struct sctp_tcb *stcb,
1.5387 +                        struct sctp_forward_tsn_chunk *fwd,
1.5388 +                        int *abort_flag, struct mbuf *m, int offset)
1.5389 +{
1.5390 +    /* The pr-sctp fwd tsn */
1.5391 +    /*
1.5392 +     * here we will perform all the data receiver side steps for
1.5393 +     * processing FwdTSN, as required by the PR-SCTP draft.
1.5394 +     *
1.5395 +     * Assume we get FwdTSN(x):
1.5396 +     *
1.5397 +     * 1) update local cumTSN to x
1.5398 +     * 2) try to further advance cumTSN past other TSNs we hold
1.5399 +     * 3) examine and update the re-ordering queue on pr-in-streams
1.5400 +     * 4) clean up the re-assembly queue, and 5) send a SACK to report where we are.
1.5401 +     */
1.5402 +    struct sctp_association *asoc;
1.5403 +    uint32_t new_cum_tsn, gap;
1.5404 +    unsigned int i, fwd_sz, m_size;
1.5405 +    uint32_t str_seq;
1.5406 +    struct sctp_stream_in *strm;
1.5407 +    struct sctp_tmit_chunk *chk, *nchk;
1.5408 +    struct sctp_queued_to_read *ctl, *sv;
1.5409 +
1.5410 +    asoc = &stcb->asoc;
1.5411 +    if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
1.5412 +        SCTPDBG(SCTP_DEBUG_INDATA1,
1.5413 +                "Bad size too small/big fwd-tsn\n");
1.5414 +        return;
1.5415 +    }
1.5416 +    m_size = (stcb->asoc.mapping_array_size << 3);
1.5417 +    /*************************************************************/
1.5418 +    /* 1. Here we update local cumTSN and shift the bitmap array */
1.5419 +    /*************************************************************/
1.5420 +    new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
1.5421 +
1.5422 +    if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
1.5423 +        /* Already got there ... */
1.5424 +        return;
1.5425 +    }
1.5426 +    /*
1.5427 +     * now we know the new TSN is more advanced, let's find the actual
1.5428 +     * gap
1.5429 +     */
1.5430 +    SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
1.5431 +    asoc->cumulative_tsn = new_cum_tsn;
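/*
 * A sketch (not part of the patched file) of what SCTP_CALC_TSN_TO_GAP
 * above computes: the bit index of a TSN relative to the mapping array's
 * base TSN. With 32-bit unsigned arithmetic the two cases of the real
 * macro (wrapped and unwrapped) collapse into one modular subtraction.
 */
#include <stdint.h>

/* gap = (tsn - base_tsn) mod 2^32; callers bound-check against map size */
static uint32_t
tsn_to_gap(uint32_t tsn, uint32_t base_tsn)
{
    return tsn - base_tsn;
}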
1.5432 +    if (gap >= m_size) {
1.5433 +        if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
1.5434 +            struct mbuf *oper;
1.5435 +            /*
1.5436 +             * out of range (of single byte chunks in the rwnd I
1.5437 +             * give out). This must be an attacker.
1.5438 +             */
1.5439 +            *abort_flag = 1;
1.5440 +            oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1.5441 +                                         0, M_NOWAIT, 1, MT_DATA);
1.5442 +            if (oper) {
1.5443 +                struct sctp_paramhdr *ph;
1.5444 +                uint32_t *ippp;
1.5445 +                SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1.5446 +                                     (sizeof(uint32_t) * 3);
1.5447 +                ph = mtod(oper, struct sctp_paramhdr *);
1.5448 +                ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1.5449 +                ph->param_length = htons(SCTP_BUF_LEN(oper));
1.5450 +                ippp = (uint32_t *)(ph + 1);
1.5451 +                *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
1.5452 +                ippp++;
1.5453 +                *ippp = asoc->highest_tsn_inside_map;
1.5454 +                ippp++;
1.5455 +                *ippp = new_cum_tsn;
1.5456 +            }
1.5457 +            stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
1.5458 +            sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1.5459 +            return;
1.5460 +        }
1.5461 +        SCTP_STAT_INCR(sctps_fwdtsn_map_over);
1.5462 +
1.5463 +        memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
1.5464 +        asoc->mapping_array_base_tsn = new_cum_tsn + 1;
1.5465 +        asoc->highest_tsn_inside_map = new_cum_tsn;
1.5466 +
1.5467 +        memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
1.5468 +        asoc->highest_tsn_inside_nr_map = new_cum_tsn;
1.5469 +
1.5470 +        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1.5471 +            sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
1.5472 +        }
1.5473 +    } else {
1.5474 +        SCTP_TCB_LOCK_ASSERT(stcb);
1.5475 +        for (i = 0; i <= gap; i++) {
1.5476 +            if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
1.5477 +                !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
1.5478 +                SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
1.5479 +                if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
1.5480 +                    asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
1.5481 +                }
1.5482 +            }
1.5483 +        }
1.5484 +    }
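/*
 * Minimal versions (not part of the patched file) of the bitmap accessors
 * used in the loop above: the mapping array is a plain bitmap over TSN
 * gaps, so SCTP_IS_TSN_PRESENT and SCTP_SET_TSN_PRESENT reduce to
 * byte/bit indexing.
 */
#include <stdint.h>

static int
tsn_bit_is_set(const uint8_t *map, uint32_t gap)
{
    return (map[gap >> 3] >> (gap & 0x07)) & 0x01;
}

static void
tsn_bit_set(uint8_t *map, uint32_t gap)
{
    map[gap >> 3] |= (uint8_t)(0x01 << (gap & 0x07));
}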
1.5485 +    /*************************************************************/
1.5486 +    /* 2. Clear up re-assembly queue                             */
1.5487 +    /*************************************************************/
1.5488 +    /*
1.5489 +     * First service it if pd-api is up, just in case we can progress it
1.5490 +     * forward
1.5491 +     */
1.5492 +    if (asoc->fragmented_delivery_inprogress) {
1.5493 +        sctp_service_reassembly(stcb, asoc);
1.5494 +    }
1.5495 +    /* For each one on here see if we need to toss it */
1.5496 +    /*
1.5497 +     * For now large messages held on the reasmqueue that are
1.5498 +     * complete will be tossed too. We could in theory do more
1.5499 +     * work to spin through and stop after dumping one msg aka
1.5500 +     * seeing the start of a new msg at the head, and call the
1.5501 +     * delivery function... to see if it can be delivered... But
1.5502 +     * for now we just dump everything on the queue.
1.5503 +     */
1.5504 +    TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
1.5505 +        if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
1.5506 +            /* It needs to be tossed */
1.5507 +            TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
1.5508 +            if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
1.5509 +                asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
1.5510 +                asoc->str_of_pdapi = chk->rec.data.stream_number;
1.5511 +                asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
1.5512 +                asoc->fragment_flags = chk->rec.data.rcv_flags;
1.5513 +            }
1.5514 +            asoc->size_on_reasm_queue -= chk->send_size;
1.5515 +            sctp_ucount_decr(asoc->cnt_on_reasm_queue);
1.5516 +
1.5517 +            /* Clear up any stream problem */
1.5518 +            if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
1.5519 +                SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
1.5520 +                /*
1.5521 +                 * We must advance this stream's
1.5522 +                 * sequence number if the chunk
1.5523 +                 * being skipped is not unordered.
1.5524 +                 * There is a chance that
1.5525 +                 * if the peer does not include the
1.5526 +                 * last fragment in its FWD-TSN we
1.5527 +                 * WILL have a problem here, since
1.5528 +                 * you would have a partial chunk in
1.5529 +                 * the queue that may not be
1.5530 +                 * deliverable. Also, if a partial
1.5531 +                 * delivery API has started, the user
1.5532 +                 * may get a partial chunk with the next
1.5533 +                 * read returning a new chunk...
1.5534 +                 * really ugly, but I see no way
1.5535 +                 * around it! Maybe a notify??
1.5536 +                 */
1.5537 +                asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
1.5538 +            }
1.5539 +            if (chk->data) {
1.5540 +                sctp_m_freem(chk->data);
1.5541 +                chk->data = NULL;
1.5542 +            }
1.5543 +            sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1.5544 +        } else {
1.5545 +            /*
1.5546 +             * Ok we have gone beyond the end of the
1.5547 +             * fwd-tsn's mark.
1.5548 +             */
1.5549 +            break;
1.5550 +        }
1.5551 +    }
1.5552 +    /*******************************************************/
1.5553 +    /* 3. Update the PR-stream re-ordering queues and fix  */
1.5554 +    /*    delivery issues as needed.                       */
1.5555 +    /*******************************************************/
1.5556 +    fwd_sz -= sizeof(*fwd);
1.5557 +    if (m && fwd_sz) {
1.5558 +        /* New method. */
1.5559 +        unsigned int num_str;
1.5560 +        struct sctp_strseq *stseq, strseqbuf;
1.5561 +        offset += sizeof(*fwd);
1.5562 +
1.5563 +        SCTP_INP_READ_LOCK(stcb->sctp_ep);
1.5564 +        num_str = fwd_sz / sizeof(struct sctp_strseq);
1.5565 +        for (i = 0; i < num_str; i++) {
1.5566 +            uint16_t st;
1.5567 +            stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
1.5568 +                                                        sizeof(struct sctp_strseq),
1.5569 +                                                        (uint8_t *)&strseqbuf);
1.5570 +            offset += sizeof(struct sctp_strseq);
1.5571 +            if (stseq == NULL) {
1.5572 +                break;
1.5573 +            }
1.5574 +            /* Convert */
1.5575 +            st = ntohs(stseq->stream);
1.5576 +            stseq->stream = st;
1.5577 +            st = ntohs(stseq->sequence);
1.5578 +            stseq->sequence = st;
1.5579 +
1.5580 +            /* now process */
1.5581 +
1.5582 +            /*
1.5583 +             * Ok we now look for the stream/seq on the read queue
1.5584 +             * where it's not all delivered. If we find it we transmute the
1.5585 +             * read entry into a PDI_ABORTED.
1.5586 +             */
1.5587 +            if (stseq->stream >= asoc->streamincnt) {
1.5588 +                /* screwed up streams, stop! */
1.5589 +                break;
1.5590 +            }
1.5591 +            if ((asoc->str_of_pdapi == stseq->stream) &&
1.5592 +                (asoc->ssn_of_pdapi == stseq->sequence)) {
1.5593 +                /* If this is the one we were partially delivering
1.5594 +                 * now, then we no longer are. Note this will change
1.5595 +                 * with the reassembly re-write.
1.5596 +                 */
1.5597 +                asoc->fragmented_delivery_inprogress = 0;
1.5598 +            }
1.5599 +            sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
1.5600 +            TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
1.5601 +                if ((ctl->sinfo_stream == stseq->stream) &&
1.5602 +                    (ctl->sinfo_ssn == stseq->sequence)) {
1.5603 +                    str_seq = (stseq->stream << 16) | stseq->sequence;
1.5604 +                    ctl->end_added = 1;
1.5605 +                    ctl->pdapi_aborted = 1;
1.5606 +                    sv = stcb->asoc.control_pdapi;
1.5607 +                    stcb->asoc.control_pdapi = ctl;
1.5608 +                    sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
1.5609 +                                    stcb,
1.5610 +                                    SCTP_PARTIAL_DELIVERY_ABORTED,
1.5611 +                                    (void *)&str_seq,
1.5612 +                                    SCTP_SO_NOT_LOCKED);
1.5613 +                    stcb->asoc.control_pdapi = sv;
1.5614 +                    break;
1.5615 +                } else if ((ctl->sinfo_stream == stseq->stream) &&
1.5616 +                           SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
1.5617 +                    /* We are past our victim SSN */
1.5618 +                    break;
1.5619 +                }
1.5620 +            }
1.5621 +            strm = &asoc->strmin[stseq->stream];
1.5622 +            if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
1.5623 +                /* Update the sequence number */
1.5624 +                strm->last_sequence_delivered = stseq->sequence;
1.5625 +            }
1.5626 +            /* now kick the stream the new way */
1.5627 +            /* sa_ignore NO_NULL_CHK */
1.5628 +            sctp_kick_prsctp_reorder_queue(stcb, strm);
1.5629 +        }
1.5630 +        SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
1.5631 +    }
1.5632 +    /*
1.5633 +     * Now slide things forward.
1.5634 +     */
1.5635 +    sctp_slide_mapping_arrays(stcb);
1.5636 +
1.5637 +    if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1.5638 +        /* now let's kick out and check for more fragmented delivery */
1.5639 +        /* sa_ignore NO_NULL_CHK */
1.5640 +        sctp_deliver_reasm_check(stcb, &stcb->asoc);
1.5641 +    }
1.5642 +}
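/*
 * A sketch (not part of the patched file) of the operation behind the
 * final sctp_slide_mapping_arrays() call: keep the bitmap anchored near
 * the cumulative TSN by discarding fully-acked leading bytes and shifting
 * the remainder down. This assumes a byte-aligned slide with
 * slide_bytes <= map_len; the real routine slides both mapping arrays.
 */
#include <stdint.h>
#include <string.h>

/* drop slide_bytes leading bytes; the base TSN advances 8 per byte */
static uint32_t
slide_map(uint8_t *map, size_t map_len, size_t slide_bytes, uint32_t base_tsn)
{
    memmove(map, map + slide_bytes, map_len - slide_bytes);
    memset(map + map_len - slide_bytes, 0, slide_bytes);
    return base_tsn + (uint32_t)(slide_bytes * 8);
}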