netwerk/sctp/src/netinet/sctp_indata.c

author:      Michael Schloh von Bennewitz <michael@schloh.com>
date:        Thu, 15 Jan 2015 15:59:08 +0100
branch:      TOR_BUG_9701
changeset:   10:ac0c01689b40
permissions: -rwxr-xr-x

Implement a real Private Browsing Mode condition by changing the API/ABI;
this solves Tor bug #9701, complying with the disk avoidance documented at
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.

/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef __FreeBSD__
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 258228 2013-11-16 16:09:09Z tuexen $");
#endif

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is) and will be sending it, for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a one-to-many socket,
	 * since the sb_cc is the count that everyone has put up. When we
	 * re-write sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on the socket queue and we still
	 * hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	    asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	    asoc->cnt_on_all_streams * MSIZE));

	if (calc == 0) {
		/* out of space */
		return (calc);
	}

	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
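
/*
 * Editor's note (not in the upstream source), a worked illustration of the
 * computation above: with 2000 bytes in 2 chunks sitting on the reassembly
 * queue and nothing on the stream queues, the window becomes roughly
 * sbspace - (2000 + 2 * MSIZE) - my_rwnd_control_len; every queued chunk is
 * charged an extra MSIZE of mbuf overhead on top of its payload, and a
 * window squeezed below the control overhead is clamped to 1 (SWS guard).
 */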


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}
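
/*
 * Editor's note (not in the upstream source): the chunk's receive flags are
 * shifted into the upper byte of sinfo_flags above, which is presumably how
 * e.g. an unordered DATA chunk (SCTP_DATA_UNORDERED) surfaces to the
 * application as SCTP_UNORDERED in the sctp_sndrcvinfo it reads back.
 */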


/*
 * Build out our readq entry based on the incoming packet.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
#if defined(__Userspace_os_Windows)
	WSACMSGHDR *cmh;
#else
	struct cmsghdr *cmh;
#endif
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}

	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
#if defined(__Userspace_os_Windows)
	cmh = mtod(ret, WSACMSGHDR *);
#else
	cmh = mtod(ret, struct cmsghdr *);
#endif
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
#if defined(__Userspace_os_Windows)
		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
#else
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
#endif
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
#if defined(__Userspace_os_Windows)
		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
#else
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
#endif
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
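
/*
 * Editor's note (not in the upstream source): the mbuf built above carries a
 * sequence of ancillary data objects, each a cmsghdr followed by its payload
 * and padded out with CMSG_SPACE():
 *
 *   [cmsghdr|sctp_rcvinfo] [cmsghdr|sctp_nxtinfo] [cmsghdr|sctp_sndrcvinfo]
 *
 * where each piece is present only if the corresponding feature flag
 * (SCTP_PCB_FLAGS_RECVRCVINFO, ..._RECVNXTINFO, ..._RECVDATAIOEVNT) is on.
 */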


static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one to the other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
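
/*
 * Editor's note (not in the upstream source): SCTP_CALC_TSN_TO_GAP converts
 * a TSN into a bit offset relative to mapping_array_base_tsn; for example,
 * with base_tsn = 100 and tsn = 103 the gap is 3, so the code above clears
 * bit 3 in mapping_array and sets it in nr_mapping_array, marking TSN 103
 * as non-renegable (the receiver promises not to revoke it in a later SACK).
 */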


/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of sequential
 * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *nctl;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going... */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			/*sa_ignore FREED_MEMORY*/
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream
			 * and not unordered, so we must wait.
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong: either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it, we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
				/* Deliver more if we can. */
				if (nxt_todel == ctl->sinfo_ssn) {
					TAILQ_REMOVE(&strm->inqueue, ctl, next);
					asoc->size_on_all_streams -= ctl->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					strm->last_sequence_delivered++;
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    ctl,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				} else {
					break;
				}
				nxt_todel = strm->last_sequence_delivered + 1;
			}
			break;
		}
	}
}
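
/*
 * Editor's note (not in the upstream source): in the partial delivery path
 * above, a FIRST fragment opens a fresh read queue entry saved in
 * control_pdapi and each in-TSN-sequence continuation is appended to it;
 * the walk stops at the first TSN gap, and a LAST fragment closes the
 * message and lets any ordered stream data queued behind it drain to the
 * socket buffer.
 */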

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains: what to do when the socket buffer is FULL?
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4 billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about,
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		    0, M_NOWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;
	}
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY: it won't be queued if it could be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate stream
					 * seq number.
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh, for now just blow
					 * away the chunk!
					 */
					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}
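
/*
 * Editor's note (not in the upstream source): the per-stream inqueue is kept
 * sorted by SSN using serial-number comparison (SCTP_SSN_GT), so with SSNs 3
 * and 5 queued an arriving SSN 4 is inserted between them, while a chunk
 * whose SSN equals nxt_todel skips the queue entirely and goes straight to
 * the socket buffer, pulling any now-in-order successors along with it.
 */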

/*
 * Returns two things: the total size of the deliverable parts of the first
 * fragmented message on the reassembly queue, and a 1 if all of the message
 * is ready or a 0 if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t *t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
	}
	return (0);
}
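
/*
 * Editor's note (not in the upstream source): e.g. a queue holding TSNs 10
 * (FIRST), 11, and 12 (LAST) sums the three send_sizes into *t_size and
 * returns 1; if TSN 12 had not arrived yet, the walk would run off the
 * contiguous run and return 0 with the partial size left in *t_size.
 */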

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and it's ok to deliver,
			 * but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
				    stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
				/*
				 * Yes, we set up to start reception by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery... but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again.
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery, could there be
			 * more waiting?
			 */
			goto doit_again;
		}
	}
}
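
/*
 * Editor's note (not in the upstream source): pd_point is the threshold for
 * starting partial delivery of a still-incomplete message, the smaller of a
 * fixed fraction of the receive buffer (SCTP_SB_LIMIT_RCV(so) >>
 * SCTP_PARTIAL_DELIVERY_SHIFT) and the endpoint's configured
 * partial_delivery_point; an incomplete message starts being handed up only
 * once its contiguous queued bytes (tsize) reach that threshold.
 */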

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can and
 * set the appropriate flag to say we queued what we could.
 */
michael@0 853 static void
michael@0 854 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
michael@0 855 struct sctp_tmit_chunk *chk, int *abort_flag)
michael@0 856 {
michael@0 857 struct mbuf *oper;
michael@0 858 uint32_t cum_ackp1, prev_tsn, post_tsn;
michael@0 859 struct sctp_tmit_chunk *at, *prev, *next;
michael@0 860
michael@0 861 prev = next = NULL;
michael@0 862 cum_ackp1 = asoc->tsn_last_delivered + 1;
michael@0 863 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
michael@0 864 /* This is the first one on the queue */
michael@0 865 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
michael@0 866 /*
michael@0 867 * we do not check for delivery of anything when only one
michael@0 868 * fragment is here
michael@0 869 */
michael@0 870 asoc->size_on_reasm_queue = chk->send_size;
michael@0 871 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
michael@0 872 if (chk->rec.data.TSN_seq == cum_ackp1) {
michael@0 873 if (asoc->fragmented_delivery_inprogress == 0 &&
michael@0 874 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
michael@0 875 SCTP_DATA_FIRST_FRAG) {
michael@0 876 /*
michael@0 877 * An empty queue, no delivery inprogress,
michael@0 878 * we hit the next one and it does NOT have
michael@0 879 * a FIRST fragment mark.
michael@0 880 */
michael@0 881 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
michael@0 882 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
michael@0 883 0, M_NOWAIT, 1, MT_DATA);
michael@0 884
michael@0 885 if (oper) {
michael@0 886 struct sctp_paramhdr *ph;
michael@0 887 uint32_t *ippp;
michael@0 888
michael@0 889 SCTP_BUF_LEN(oper) =
michael@0 890 sizeof(struct sctp_paramhdr) +
michael@0 891 (sizeof(uint32_t) * 3);
michael@0 892 ph = mtod(oper, struct sctp_paramhdr *);
michael@0 893 ph->param_type =
michael@0 894 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 895 ph->param_length = htons(SCTP_BUF_LEN(oper));
michael@0 896 ippp = (uint32_t *) (ph + 1);
michael@0 897 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_2);
michael@0 898 ippp++;
michael@0 899 *ippp = chk->rec.data.TSN_seq;
michael@0 900 ippp++;
michael@0 901 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
michael@0 902
michael@0 903 }
michael@0 904 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_2;
michael@0 905 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 906 *abort_flag = 1;
michael@0 907 } else if (asoc->fragmented_delivery_inprogress &&
michael@0 908 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
michael@0 909 /*
michael@0 910 * We are doing a partial delivery and the
michael@0 911 * NEXT chunk MUST be either the LAST or
michael@0 912 * MIDDLE fragment NOT a FIRST
michael@0 913 */
michael@0 914 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
michael@0 915 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
michael@0 916 0, M_NOWAIT, 1, MT_DATA);
michael@0 917 if (oper) {
michael@0 918 struct sctp_paramhdr *ph;
michael@0 919 uint32_t *ippp;
michael@0 920
michael@0 921 SCTP_BUF_LEN(oper) =
michael@0 922 sizeof(struct sctp_paramhdr) +
michael@0 923 (3 *sizeof(uint32_t));
michael@0 924 ph = mtod(oper, struct sctp_paramhdr *);
michael@0 925 ph->param_type =
michael@0 926 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 927 ph->param_length = htons(SCTP_BUF_LEN(oper));
michael@0 928 ippp = (uint32_t *) (ph + 1);
michael@0 929 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_3);
michael@0 930 ippp++;
michael@0 931 *ippp = chk->rec.data.TSN_seq;
michael@0 932 ippp++;
michael@0 933 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
michael@0 934 }
michael@0 935 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_3;
michael@0 936 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 937 *abort_flag = 1;
michael@0 938 } else if (asoc->fragmented_delivery_inprogress) {
michael@0 939 /*
michael@0 940 * Here we are ok with a MIDDLE or LAST
michael@0 941 * piece
michael@0 942 */
michael@0 943 if (chk->rec.data.stream_number !=
michael@0 944 asoc->str_of_pdapi) {
michael@0 945 /* Got to be the right STR No */
michael@0 946 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
michael@0 947 chk->rec.data.stream_number,
michael@0 948 asoc->str_of_pdapi);
michael@0 949 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
michael@0 950 0, M_NOWAIT, 1, MT_DATA);
michael@0 951 if (oper) {
michael@0 952 struct sctp_paramhdr *ph;
michael@0 953 uint32_t *ippp;
michael@0 954
michael@0 955 SCTP_BUF_LEN(oper) =
michael@0 956 sizeof(struct sctp_paramhdr) +
michael@0 957 (sizeof(uint32_t) * 3);
michael@0 958 ph = mtod(oper,
michael@0 959 struct sctp_paramhdr *);
michael@0 960 ph->param_type =
michael@0 961 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 962 ph->param_length =
michael@0 963 htons(SCTP_BUF_LEN(oper));
michael@0 964 ippp = (uint32_t *) (ph + 1);
michael@0 965 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_4);
michael@0 966 ippp++;
michael@0 967 *ippp = chk->rec.data.TSN_seq;
michael@0 968 ippp++;
michael@0 969 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
michael@0 970 }
michael@0 971 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_4;
michael@0 972 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 973 *abort_flag = 1;
michael@0 974 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
michael@0 975 SCTP_DATA_UNORDERED &&
michael@0 976 chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
michael@0 977 /* Got to be the right STR Seq */
michael@0 978 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
michael@0 979 chk->rec.data.stream_seq,
michael@0 980 asoc->ssn_of_pdapi);
michael@0 981 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
michael@0 982 0, M_NOWAIT, 1, MT_DATA);
michael@0 983 if (oper) {
michael@0 984 struct sctp_paramhdr *ph;
michael@0 985 uint32_t *ippp;
michael@0 986
michael@0 987 SCTP_BUF_LEN(oper) =
michael@0 988 sizeof(struct sctp_paramhdr) +
michael@0 989 (3 * sizeof(uint32_t));
michael@0 990 ph = mtod(oper,
michael@0 991 struct sctp_paramhdr *);
michael@0 992 ph->param_type =
michael@0 993 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 994 ph->param_length =
michael@0 995 htons(SCTP_BUF_LEN(oper));
michael@0 996 ippp = (uint32_t *) (ph + 1);
michael@0 997 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_5);
michael@0 998 ippp++;
michael@0 999 *ippp = chk->rec.data.TSN_seq;
michael@0 1000 ippp++;
michael@0 1001 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
michael@0 1002
michael@0 1003 }
michael@0 1004 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_5;
michael@0 1005 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 1006 *abort_flag = 1;
michael@0 1007 }
michael@0 1008 }
michael@0 1009 }
michael@0 1010 return;
michael@0 1011 }
michael@0 1012 /* Find its place */
michael@0 1013 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
michael@0 1014 if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
michael@0 1015 /*
michael@0 1016 * one in queue is bigger than the new one, insert
michael@0 1017 * before this one
michael@0 1018 */
michael@0 1019 /* A check */
michael@0 1020 asoc->size_on_reasm_queue += chk->send_size;
michael@0 1021 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
michael@0 1022 next = at;
michael@0 1023 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
michael@0 1024 break;
michael@0 1025 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
michael@0 1026 /* Gak, He sent me a duplicate str seq number */
michael@0 1027 /*
michael@0 1028 * foo bar, I guess I will just free this new guy,
michael@0 1029 * should we abort too? FIX ME MAYBE? Or it COULD be
michael@0 1030 * that the SSN's have wrapped. Maybe I should
michael@0 1031 * compare to TSN somehow... sigh for now just blow
michael@0 1032 * away the chunk!
michael@0 1033 */
michael@0 1034 if (chk->data) {
michael@0 1035 sctp_m_freem(chk->data);
michael@0 1036 chk->data = NULL;
michael@0 1037 }
michael@0 1038 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
michael@0 1039 return;
michael@0 1040 } else {
michael@0 1041 prev = at;
michael@0 1042 if (TAILQ_NEXT(at, sctp_next) == NULL) {
michael@0 1043 /*
michael@0 1044 * We are at the end, insert it after this
michael@0 1045 * one
michael@0 1046 */
michael@0 1047 /* check it first */
michael@0 1048 asoc->size_on_reasm_queue += chk->send_size;
michael@0 1049 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
michael@0 1050 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
michael@0 1051 break;
michael@0 1052 }
michael@0 1053 }
michael@0 1054 }
michael@0 1055 /* Now the audits */
michael@0 1056 if (prev) {
michael@0 1057 prev_tsn = chk->rec.data.TSN_seq - 1;
michael@0 1058 if (prev_tsn == prev->rec.data.TSN_seq) {
michael@0 1059 /*
michael@0 1060 * Ok the one I am dropping onto the end is the
michael@0 1061 * NEXT. A bit of valdiation here.
michael@0 1062 */
michael@0 1063 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
michael@0 1064 SCTP_DATA_FIRST_FRAG ||
michael@0 1065 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
michael@0 1066 SCTP_DATA_MIDDLE_FRAG) {
michael@0 1067 /*
michael@0 1068 * Insert chk MUST be a MIDDLE or LAST
michael@0 1069 * fragment
michael@0 1070 */
michael@0 1071 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
michael@0 1072 SCTP_DATA_FIRST_FRAG) {
michael@0 1073 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
michael@0 1074 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
michael@0 1075 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
michael@0 1076 0, M_NOWAIT, 1, MT_DATA);
michael@0 1077 if (oper) {
michael@0 1078 struct sctp_paramhdr *ph;
michael@0 1079 uint32_t *ippp;
michael@0 1080
michael@0 1081 SCTP_BUF_LEN(oper) =
michael@0 1082 sizeof(struct sctp_paramhdr) +
michael@0 1083 (3 * sizeof(uint32_t));
michael@0 1084 ph = mtod(oper,
michael@0 1085 struct sctp_paramhdr *);
michael@0 1086 ph->param_type =
michael@0 1087 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 1088 ph->param_length =
michael@0 1089 htons(SCTP_BUF_LEN(oper));
michael@0 1090 ippp = (uint32_t *) (ph + 1);
michael@0 1091 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_6);
michael@0 1092 ippp++;
michael@0 1093 *ippp = chk->rec.data.TSN_seq;
michael@0 1094 ippp++;
michael@0 1095 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
michael@0 1096
michael@0 1097 }
michael@0 1098 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_6;
michael@0 1099 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 1100 *abort_flag = 1;
michael@0 1101 return;
michael@0 1102 }
michael@0 1103 if (chk->rec.data.stream_number !=
michael@0 1104 prev->rec.data.stream_number) {
michael@0 1105 /*
michael@0 1106 * Huh, need the correct STR here,
michael@0 1107 * they must be the same.
michael@0 1108 */
michael@0 1109 SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
michael@0 1110 chk->rec.data.stream_number,
michael@0 1111 prev->rec.data.stream_number);
michael@0 1112 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
michael@0 1113 0, M_NOWAIT, 1, MT_DATA);
michael@0 1114 if (oper) {
michael@0 1115 struct sctp_paramhdr *ph;
michael@0 1116 uint32_t *ippp;
michael@0 1117
michael@0 1118 SCTP_BUF_LEN(oper) =
michael@0 1119 sizeof(struct sctp_paramhdr) +
michael@0 1120 (3 * sizeof(uint32_t));
michael@0 1121 ph = mtod(oper,
michael@0 1122 struct sctp_paramhdr *);
michael@0 1123 ph->param_type =
michael@0 1124 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 1125 ph->param_length =
michael@0 1126 htons(SCTP_BUF_LEN(oper));
michael@0 1127 ippp = (uint32_t *) (ph + 1);
michael@0 1128 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_7);
michael@0 1129 ippp++;
michael@0 1130 *ippp = chk->rec.data.TSN_seq;
michael@0 1131 ippp++;
michael@0 1132 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
michael@0 1133 }
michael@0 1134 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_7;
michael@0 1135 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 1136 *abort_flag = 1;
michael@0 1137 return;
michael@0 1138 }
michael@0 1139 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
michael@0 1140 chk->rec.data.stream_seq !=
michael@0 1141 prev->rec.data.stream_seq) {
michael@0 1142 /*
michael@0 1143 * Huh, need the correct STR here,
michael@0 1144 * they must be the same.
michael@0 1145 */
michael@0 1146 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
michael@0 1147 chk->rec.data.stream_seq,
michael@0 1148 prev->rec.data.stream_seq);
michael@0 1149 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
michael@0 1150 0, M_NOWAIT, 1, MT_DATA);
michael@0 1151 if (oper) {
michael@0 1152 struct sctp_paramhdr *ph;
michael@0 1153 uint32_t *ippp;
michael@0 1154
michael@0 1155 SCTP_BUF_LEN(oper) =
michael@0 1156 sizeof(struct sctp_paramhdr) +
michael@0 1157 (3 * sizeof(uint32_t));
michael@0 1158 ph = mtod(oper,
michael@0 1159 struct sctp_paramhdr *);
michael@0 1160 ph->param_type =
michael@0 1161 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 1162 ph->param_length =
michael@0 1163 htons(SCTP_BUF_LEN(oper));
michael@0 1164 ippp = (uint32_t *) (ph + 1);
michael@0 1165 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_8);
michael@0 1166 ippp++;
michael@0 1167 *ippp = chk->rec.data.TSN_seq;
michael@0 1168 ippp++;
michael@0 1169 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
michael@0 1170 }
michael@0 1171 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_8;
michael@0 1172 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 1173 *abort_flag = 1;
michael@0 1174 return;
michael@0 1175 }
michael@0 1176 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
michael@0 1177 SCTP_DATA_LAST_FRAG) {
michael@0 1178 /* Insert chk MUST be a FIRST */
michael@0 1179 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
michael@0 1180 SCTP_DATA_FIRST_FRAG) {
michael@0 1181 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
michael@0 1182 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
michael@0 1183 0, M_NOWAIT, 1, MT_DATA);
michael@0 1184 if (oper) {
michael@0 1185 struct sctp_paramhdr *ph;
michael@0 1186 uint32_t *ippp;
michael@0 1187
michael@0 1188 SCTP_BUF_LEN(oper) =
michael@0 1189 sizeof(struct sctp_paramhdr) +
michael@0 1190 (3 * sizeof(uint32_t));
michael@0 1191 ph = mtod(oper,
michael@0 1192 struct sctp_paramhdr *);
michael@0 1193 ph->param_type =
michael@0 1194 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 1195 ph->param_length =
michael@0 1196 htons(SCTP_BUF_LEN(oper));
michael@0 1197 ippp = (uint32_t *) (ph + 1);
michael@0 1198 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_9);
michael@0 1199 ippp++;
michael@0 1200 *ippp = chk->rec.data.TSN_seq;
michael@0 1201 ippp++;
michael@0 1202 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
michael@0 1203
michael@0 1204 }
michael@0 1205 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_9;
michael@0 1206 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 1207 *abort_flag = 1;
michael@0 1208 return;
michael@0 1209 }
michael@0 1210 }
michael@0 1211 }
michael@0 1212 }
michael@0 1213 if (next) {
michael@0 1214 post_tsn = chk->rec.data.TSN_seq + 1;
michael@0 1215 if (post_tsn == next->rec.data.TSN_seq) {
michael@0 1216 /*
michael@0 1217 * Ok the one I am inserting ahead of is my NEXT
michael@0 1218 * one. A bit of valdiation here.
michael@0 1219 */
michael@0 1220 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
michael@0 1221 /* Insert chk MUST be a last fragment */
michael@0 1222 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
michael@0 1223 != SCTP_DATA_LAST_FRAG) {
michael@0 1224 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
michael@0 1225 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
michael@0 1226 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
michael@0 1227 0, M_NOWAIT, 1, MT_DATA);
michael@0 1228 if (oper) {
michael@0 1229 struct sctp_paramhdr *ph;
michael@0 1230 uint32_t *ippp;
michael@0 1231
michael@0 1232 SCTP_BUF_LEN(oper) =
michael@0 1233 sizeof(struct sctp_paramhdr) +
michael@0 1234 ( 3 * sizeof(uint32_t));
michael@0 1235 ph = mtod(oper,
michael@0 1236 struct sctp_paramhdr *);
michael@0 1237 ph->param_type =
michael@0 1238 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 1239 ph->param_length =
michael@0 1240 htons(SCTP_BUF_LEN(oper));
michael@0 1241 ippp = (uint32_t *) (ph + 1);
michael@0 1242 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_10);
michael@0 1243 ippp++;
michael@0 1244 *ippp = chk->rec.data.TSN_seq;
michael@0 1245 ippp++;
michael@0 1246 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
michael@0 1247 }
michael@0 1248 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_10;
michael@0 1249 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 1250 *abort_flag = 1;
michael@0 1251 return;
michael@0 1252 }
michael@0 1253 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
michael@0 1254 SCTP_DATA_MIDDLE_FRAG ||
michael@0 1255 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
michael@0 1256 SCTP_DATA_LAST_FRAG) {
michael@0 1257 /*
michael@0 1258 * Insert chk CAN be MIDDLE or FIRST NOT
michael@0 1259 * LAST
michael@0 1260 */
michael@0 1261 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
michael@0 1262 SCTP_DATA_LAST_FRAG) {
michael@0 1263 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
michael@0 1264 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
michael@0 1265 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
michael@0 1266 0, M_NOWAIT, 1, MT_DATA);
michael@0 1267 if (oper) {
michael@0 1268 struct sctp_paramhdr *ph;
michael@0 1269 uint32_t *ippp;
michael@0 1270
michael@0 1271 SCTP_BUF_LEN(oper) =
michael@0 1272 sizeof(struct sctp_paramhdr) +
michael@0 1273 (3 * sizeof(uint32_t));
michael@0 1274 ph = mtod(oper,
michael@0 1275 struct sctp_paramhdr *);
michael@0 1276 ph->param_type =
michael@0 1277 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 1278 ph->param_length =
michael@0 1279 htons(SCTP_BUF_LEN(oper));
michael@0 1280 ippp = (uint32_t *) (ph + 1);
michael@0 1281 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_11);
michael@0 1282 ippp++;
michael@0 1283 *ippp = chk->rec.data.TSN_seq;
michael@0 1284 ippp++;
michael@0 1285 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
michael@0 1286
michael@0 1287 }
michael@0 1288 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_11;
michael@0 1289 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 1290 *abort_flag = 1;
michael@0 1291 return;
michael@0 1292 }
michael@0 1293 if (chk->rec.data.stream_number !=
michael@0 1294 next->rec.data.stream_number) {
michael@0 1295 /*
michael@0 1296 * Huh, need the correct STR here,
michael@0 1297 * they must be the same.
michael@0 1298 */
michael@0 1299 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
michael@0 1300 chk->rec.data.stream_number,
michael@0 1301 next->rec.data.stream_number);
michael@0 1302 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
michael@0 1303 0, M_NOWAIT, 1, MT_DATA);
michael@0 1304 if (oper) {
michael@0 1305 struct sctp_paramhdr *ph;
michael@0 1306 uint32_t *ippp;
michael@0 1307
michael@0 1308 SCTP_BUF_LEN(oper) =
michael@0 1309 sizeof(struct sctp_paramhdr) +
michael@0 1310 (3 * sizeof(uint32_t));
michael@0 1311 ph = mtod(oper,
michael@0 1312 struct sctp_paramhdr *);
michael@0 1313 ph->param_type =
michael@0 1314 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 1315 ph->param_length =
michael@0 1316 htons(SCTP_BUF_LEN(oper));
michael@0 1317 ippp = (uint32_t *) (ph + 1);
michael@0 1318 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_12);
michael@0 1319 ippp++;
michael@0 1320 *ippp = chk->rec.data.TSN_seq;
michael@0 1321 ippp++;
michael@0 1322 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
michael@0 1323
michael@0 1324 }
michael@0 1325 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_12;
michael@0 1326 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 1327 *abort_flag = 1;
michael@0 1328 return;
michael@0 1329 }
michael@0 1330 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
michael@0 1331 chk->rec.data.stream_seq !=
michael@0 1332 next->rec.data.stream_seq) {
michael@0 1333 /*
michael@0 1334 * Huh, need the correct SSN here,
michael@0 1335 * they must be the same.
michael@0 1336 */
michael@0 1337 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
michael@0 1338 chk->rec.data.stream_seq,
michael@0 1339 next->rec.data.stream_seq);
michael@0 1340 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
michael@0 1341 0, M_NOWAIT, 1, MT_DATA);
michael@0 1342 if (oper) {
michael@0 1343 struct sctp_paramhdr *ph;
michael@0 1344 uint32_t *ippp;
michael@0 1345
michael@0 1346 SCTP_BUF_LEN(oper) =
michael@0 1347 sizeof(struct sctp_paramhdr) +
michael@0 1348 (3 * sizeof(uint32_t));
michael@0 1349 ph = mtod(oper,
michael@0 1350 struct sctp_paramhdr *);
michael@0 1351 ph->param_type =
michael@0 1352 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 1353 ph->param_length =
michael@0 1354 htons(SCTP_BUF_LEN(oper));
michael@0 1355 ippp = (uint32_t *) (ph + 1);
michael@0 1356 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_13);
michael@0 1357 ippp++;
michael@0 1358 *ippp = chk->rec.data.TSN_seq;
michael@0 1359 ippp++;
michael@0 1360 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
michael@0 1361 }
michael@0 1362 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_13;
michael@0 1363 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 1364 *abort_flag = 1;
michael@0 1365 return;
michael@0 1366 }
michael@0 1367 }
michael@0 1368 }
michael@0 1369 }
michael@0 1370 /* Do we need to do some delivery? check */
michael@0 1371 sctp_deliver_reasm_check(stcb, asoc);
michael@0 1372 }
michael@0 1373
michael@0 1374 /*
michael@0 1375 * This is an unfortunate routine. It checks to make sure an evil guy is not
michael@0 1376 * stuffing us full of bad packet fragments. A broken peer could also do this,
michael@0 1377 * but that is doubtful. It is too bad I must worry about evil crackers, sigh
michael@0 1378 * :< more cycles.
michael@0 1379 */
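/*
 * A worked example of the adjacency rules below (TSNs and flags are
 * hypothetical): if TSN 10 sits in the queue flagged MIDDLE, a
 * self-contained chunk claiming TSN 11 is bogus, because whatever
 * follows a non-LAST fragment must itself be a MIDDLE or LAST.
 * Likewise a self-contained TSN 9 is bogus when 10 is not a FIRST.
 */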
michael@0 1380 static int
michael@0 1381 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
michael@0 1382 uint32_t TSN_seq)
michael@0 1383 {
michael@0 1384 struct sctp_tmit_chunk *at;
michael@0 1385 uint32_t tsn_est;
michael@0 1386
michael@0 1387 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
michael@0 1388 if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
michael@0 1389 /* is it one bigger? */
michael@0 1390 tsn_est = at->rec.data.TSN_seq + 1;
michael@0 1391 if (tsn_est == TSN_seq) {
michael@0 1392 /* yep. It better be a last then */
michael@0 1393 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
michael@0 1394 SCTP_DATA_LAST_FRAG) {
michael@0 1395 /*
michael@0 1396 * Ok, this guy belongs next to a guy
michael@0 1397 * that is NOT a last, so it should be
michael@0 1398 * a middle/last, not a complete
michael@0 1399 * chunk.
michael@0 1400 */
michael@0 1401 return (1);
michael@0 1402 } else {
michael@0 1403 /*
michael@0 1404 * This guy is ok since it's a LAST
michael@0 1405 * and the new chunk is a fully
michael@0 1406 * self-contained one.
michael@0 1407 */
michael@0 1408 return (0);
michael@0 1409 }
michael@0 1410 }
michael@0 1411 } else if (TSN_seq == at->rec.data.TSN_seq) {
michael@0 1412 /* Software error since I have a dup? */
michael@0 1413 return (1);
michael@0 1414 } else {
michael@0 1415 /*
michael@0 1416 * Ok, 'at' is larger than the new chunk, but does
michael@0 1417 * the new chunk need to sit right before it?
michael@0 1418 */
michael@0 1419 tsn_est = TSN_seq + 1;
michael@0 1420 if (tsn_est == at->rec.data.TSN_seq) {
michael@0 1421 /* Yep, It better be a first */
michael@0 1422 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
michael@0 1423 SCTP_DATA_FIRST_FRAG) {
michael@0 1424 return (1);
michael@0 1425 } else {
michael@0 1426 return (0);
michael@0 1427 }
michael@0 1428 }
michael@0 1429 }
michael@0 1430 }
michael@0 1431 return (0);
michael@0 1432 }
michael@0 1433
michael@0 1434 static int
michael@0 1435 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
michael@0 1436 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
michael@0 1437 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
michael@0 1438 int *break_flag, int last_chunk)
michael@0 1439 {
michael@0 1440 /* Process a data chunk */
michael@0 1442 struct sctp_tmit_chunk *chk;
michael@0 1443 uint32_t tsn, gap;
michael@0 1444 struct mbuf *dmbuf;
michael@0 1445 int the_len;
michael@0 1446 int need_reasm_check = 0;
michael@0 1447 uint16_t strmno, strmseq;
michael@0 1448 struct mbuf *oper;
michael@0 1449 struct sctp_queued_to_read *control;
michael@0 1450 int ordered;
michael@0 1451 uint32_t protocol_id;
michael@0 1452 uint8_t chunk_flags;
michael@0 1453 struct sctp_stream_reset_list *liste;
michael@0 1454
michael@0 1455 chk = NULL;
michael@0 1456 tsn = ntohl(ch->dp.tsn);
michael@0 1457 chunk_flags = ch->ch.chunk_flags;
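/*
 * The 'I' bit (SACK-IMMEDIATELY): when set, the sender is asking for
 * an immediate SACK instead of a delayed one, so we just latch
 * send_sack below.
 */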
michael@0 1458 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
michael@0 1459 asoc->send_sack = 1;
michael@0 1460 }
michael@0 1461 protocol_id = ch->dp.protocol_id;
michael@0 1462 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
michael@0 1463 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
michael@0 1464 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
michael@0 1465 }
michael@0 1466 if (stcb == NULL) {
michael@0 1467 return (0);
michael@0 1468 }
michael@0 1469 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
michael@0 1470 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
michael@0 1471 /* It is a duplicate */
michael@0 1472 SCTP_STAT_INCR(sctps_recvdupdata);
michael@0 1473 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
michael@0 1474 /* Record a dup for the next outbound sack */
michael@0 1475 asoc->dup_tsns[asoc->numduptsns] = tsn;
michael@0 1476 asoc->numduptsns++;
michael@0 1477 }
michael@0 1478 asoc->send_sack = 1;
michael@0 1479 return (0);
michael@0 1480 }
michael@0 1481 /* Calculate the number of TSNs between the base and this TSN */
michael@0 1482 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
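/*
 * The gap is a serial-number distance, so it survives TSN wrap; e.g.
 * (hypothetical values) a base of 0xfffffffe and a tsn of 0x00000001
 * give a gap of 3, not a huge negative number.
 */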
michael@0 1483 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
michael@0 1484 /* Can't hold the bit in the mapping at max array, toss it */
michael@0 1485 return (0);
michael@0 1486 }
michael@0 1487 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
michael@0 1488 SCTP_TCB_LOCK_ASSERT(stcb);
michael@0 1489 if (sctp_expand_mapping_array(asoc, gap)) {
michael@0 1490 /* Can't expand, drop it */
michael@0 1491 return (0);
michael@0 1492 }
michael@0 1493 }
michael@0 1494 if (SCTP_TSN_GT(tsn, *high_tsn)) {
michael@0 1495 *high_tsn = tsn;
michael@0 1496 }
michael@0 1497 /* See if we have received this one already */
michael@0 1498 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
michael@0 1499 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
michael@0 1500 SCTP_STAT_INCR(sctps_recvdupdata);
michael@0 1501 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
michael@0 1502 /* Record a dup for the next outbound sack */
michael@0 1503 asoc->dup_tsns[asoc->numduptsns] = tsn;
michael@0 1504 asoc->numduptsns++;
michael@0 1505 }
michael@0 1506 asoc->send_sack = 1;
michael@0 1507 return (0);
michael@0 1508 }
michael@0 1509 /*
michael@0 1510 * Check to see about the GONE flag, duplicates would cause a sack
michael@0 1511 * to be sent up above
michael@0 1512 */
michael@0 1513 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
michael@0 1514 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
michael@0 1515 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
michael@0 1516 ) {
michael@0 1517 /*
michael@0 1518 * wait a minute, this guy is gone, there is no longer a
michael@0 1519 * receiver. Send peer an ABORT!
michael@0 1520 */
michael@0 1521 struct mbuf *op_err;
michael@0 1522
michael@0 1523 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
michael@0 1524 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
michael@0 1525 *abort_flag = 1;
michael@0 1526 return (0);
michael@0 1527 }
michael@0 1528 /*
michael@0 1529 * Now before going further we see if there is room. If NOT then we
michael@0 1530 * MAY let one through only IF this TSN is the one we are waiting
michael@0 1531 * for on a partial delivery API.
michael@0 1532 */
michael@0 1533
michael@0 1534 /* now do the tests */
michael@0 1535 if (((asoc->cnt_on_all_streams +
michael@0 1536 asoc->cnt_on_reasm_queue +
michael@0 1537 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
michael@0 1538 (((int)asoc->my_rwnd) <= 0)) {
michael@0 1539 /*
michael@0 1540 * When we have NO room in the rwnd we check to make sure
michael@0 1541 * the reader is doing its job...
michael@0 1542 */
michael@0 1543 if (stcb->sctp_socket->so_rcv.sb_cc) {
michael@0 1544 /* some to read, wake-up */
michael@0 1545 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 1546 struct socket *so;
michael@0 1547
michael@0 1548 so = SCTP_INP_SO(stcb->sctp_ep);
michael@0 1549 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 1550 SCTP_TCB_UNLOCK(stcb);
michael@0 1551 SCTP_SOCKET_LOCK(so, 1);
michael@0 1552 SCTP_TCB_LOCK(stcb);
michael@0 1553 atomic_subtract_int(&stcb->asoc.refcnt, 1);
michael@0 1554 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
michael@0 1555 /* assoc was freed while we were unlocked */
michael@0 1556 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 1557 return (0);
michael@0 1558 }
michael@0 1559 #endif
michael@0 1560 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
michael@0 1561 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 1562 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 1563 #endif
michael@0 1564 }
michael@0 1565 /* now is it in the mapping array of what we have accepted? */
michael@0 1566 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
michael@0 1567 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
michael@0 1568 /* Nope, not in the valid range, dump it */
michael@0 1569 sctp_set_rwnd(stcb, asoc);
michael@0 1570 if ((asoc->cnt_on_all_streams +
michael@0 1571 asoc->cnt_on_reasm_queue +
michael@0 1572 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
michael@0 1573 SCTP_STAT_INCR(sctps_datadropchklmt);
michael@0 1574 } else {
michael@0 1575 SCTP_STAT_INCR(sctps_datadroprwnd);
michael@0 1576 }
michael@0 1577 *break_flag = 1;
michael@0 1578 return (0);
michael@0 1579 }
michael@0 1580 }
michael@0 1581 strmno = ntohs(ch->dp.stream_id);
michael@0 1582 if (strmno >= asoc->streamincnt) {
michael@0 1583 struct sctp_paramhdr *phdr;
michael@0 1584 struct mbuf *mb;
michael@0 1585
michael@0 1586 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
michael@0 1587 0, M_NOWAIT, 1, MT_DATA);
michael@0 1588 if (mb != NULL) {
michael@0 1589 /* add some space up front so prepend will work well */
michael@0 1590 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
michael@0 1591 phdr = mtod(mb, struct sctp_paramhdr *);
michael@0 1592 /*
michael@0 1593 * Error causes are just params, and this one has
michael@0 1594 * two back-to-back phdrs: one with the error type
michael@0 1595 * and size, the other with the stream id and a rsvd
michael@0 1596 */
michael@0 1597 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
michael@0 1598 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
michael@0 1599 phdr->param_length =
michael@0 1600 htons(sizeof(struct sctp_paramhdr) * 2);
michael@0 1601 phdr++;
michael@0 1602 /* We insert the stream in the type field */
michael@0 1603 phdr->param_type = ch->dp.stream_id;
michael@0 1604 /* And set the length to 0 for the rsvd field */
michael@0 1605 phdr->param_length = 0;
michael@0 1606 sctp_queue_op_err(stcb, mb);
michael@0 1607 }
michael@0 1608 SCTP_STAT_INCR(sctps_badsid);
michael@0 1609 SCTP_TCB_LOCK_ASSERT(stcb);
michael@0 1610 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
michael@0 1611 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
michael@0 1612 asoc->highest_tsn_inside_nr_map = tsn;
michael@0 1613 }
michael@0 1614 if (tsn == (asoc->cumulative_tsn + 1)) {
michael@0 1615 /* Update cum-ack */
michael@0 1616 asoc->cumulative_tsn = tsn;
michael@0 1617 }
michael@0 1618 return (0);
michael@0 1619 }
michael@0 1620 /*
michael@0 1621 * Before we continue let's validate that we are not being fooled by
michael@0 1622 * an evil attacker. We can only have 4k chunks based on our TSN
michael@0 1623 * spread allowed by the mapping array 512 * 8 bits, so there is no
michael@0 1624 * way our stream sequence numbers could have wrapped. We of course
michael@0 1625 * only validate the FIRST fragment so the bit must be set.
michael@0 1626 */
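/*
 * The arithmetic behind that claim: the mapping array bounds the TSN
 * spread to 512 bytes * 8 bits = 4096 TSNs, while the 16-bit SSN
 * space holds 65536 values, so even a stream of single-chunk
 * messages cannot wrap its SSN inside the window.
 */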
michael@0 1627 strmseq = ntohs(ch->dp.stream_sequence);
michael@0 1628 #ifdef SCTP_ASOCLOG_OF_TSNS
michael@0 1629 SCTP_TCB_LOCK_ASSERT(stcb);
michael@0 1630 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
michael@0 1631 asoc->tsn_in_at = 0;
michael@0 1632 asoc->tsn_in_wrapped = 1;
michael@0 1633 }
michael@0 1634 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
michael@0 1635 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
michael@0 1636 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
michael@0 1637 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
michael@0 1638 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
michael@0 1639 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
michael@0 1640 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
michael@0 1641 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
michael@0 1642 asoc->tsn_in_at++;
michael@0 1643 #endif
michael@0 1644 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
michael@0 1645 (TAILQ_EMPTY(&asoc->resetHead)) &&
michael@0 1646 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
michael@0 1647 SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
michael@0 1648 /* The incoming sseq is behind where we last delivered? */
michael@0 1649 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
michael@0 1650 strmseq, asoc->strmin[strmno].last_sequence_delivered);
michael@0 1651 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
michael@0 1652 0, M_NOWAIT, 1, MT_DATA);
michael@0 1653 if (oper) {
michael@0 1654 struct sctp_paramhdr *ph;
michael@0 1655 uint32_t *ippp;
michael@0 1656
michael@0 1657 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
michael@0 1658 (3 * sizeof(uint32_t));
michael@0 1659 ph = mtod(oper, struct sctp_paramhdr *);
michael@0 1660 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 1661 ph->param_length = htons(SCTP_BUF_LEN(oper));
michael@0 1662 ippp = (uint32_t *) (ph + 1);
michael@0 1663 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_14);
michael@0 1664 ippp++;
michael@0 1665 *ippp = tsn;
michael@0 1666 ippp++;
michael@0 1667 *ippp = ((strmno << 16) | strmseq);
michael@0 1668
michael@0 1669 }
michael@0 1670 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_14;
michael@0 1671 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 1672 *abort_flag = 1;
michael@0 1673 return (0);
michael@0 1674 }
michael@0 1675 /************************************
michael@0 1676 * From here down we may find ch-> invalid
michael@0 1677 * so it's a good idea NOT to use it.
michael@0 1678 *************************************/
michael@0 1679
michael@0 1680 the_len = (chk_length - sizeof(struct sctp_data_chunk));
michael@0 1681 if (last_chunk == 0) {
michael@0 1682 dmbuf = SCTP_M_COPYM(*m,
michael@0 1683 (offset + sizeof(struct sctp_data_chunk)),
michael@0 1684 the_len, M_NOWAIT);
michael@0 1685 #ifdef SCTP_MBUF_LOGGING
michael@0 1686 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
michael@0 1687 struct mbuf *mat;
michael@0 1688
michael@0 1689 for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
michael@0 1690 if (SCTP_BUF_IS_EXTENDED(mat)) {
michael@0 1691 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
michael@0 1692 }
michael@0 1693 }
michael@0 1694 }
michael@0 1695 #endif
michael@0 1696 } else {
michael@0 1697 /* We can steal the last chunk */
michael@0 1698 int l_len;
michael@0 1699 dmbuf = *m;
michael@0 1700 /* lop off the top part */
michael@0 1701 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
michael@0 1702 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
michael@0 1703 l_len = SCTP_BUF_LEN(dmbuf);
michael@0 1704 } else {
michael@0 1705 /* need to count up the size; hopefully
michael@0 1706 * we do not hit this too often :-0
michael@0 1707 */
michael@0 1708 struct mbuf *lat;
michael@0 1709
michael@0 1710 l_len = 0;
michael@0 1711 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
michael@0 1712 l_len += SCTP_BUF_LEN(lat);
michael@0 1713 }
michael@0 1714 }
michael@0 1715 if (l_len > the_len) {
michael@0 1716 /* Trim the extra bytes off the end too */
michael@0 1717 m_adj(dmbuf, -(l_len - the_len));
michael@0 1718 }
michael@0 1719 }
michael@0 1720 if (dmbuf == NULL) {
michael@0 1721 SCTP_STAT_INCR(sctps_nomem);
michael@0 1722 return (0);
michael@0 1723 }
michael@0 1724 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
michael@0 1725 asoc->fragmented_delivery_inprogress == 0 &&
michael@0 1726 TAILQ_EMPTY(&asoc->resetHead) &&
michael@0 1727 ((ordered == 0) ||
michael@0 1728 ((uint16_t)(asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
michael@0 1729 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
michael@0 1730 /* Candidate for express delivery */
michael@0 1731 /*
michael@0 1732 * It's not fragmented, no PD-API is up, nothing is in the
michael@0 1733 * delivery queue, it's un-ordered OR ordered and the next to
michael@0 1734 * deliver AND nothing else is stuck on the stream queue,
michael@0 1735 * and there is room for it in the socket buffer. Let's just
michael@0 1736 * stuff it up the buffer....
michael@0 1737 */
michael@0 1738
michael@0 1739 /* It would be nice to avoid this copy if we could :< */
michael@0 1740 sctp_alloc_a_readq(stcb, control);
michael@0 1741 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
michael@0 1742 protocol_id,
michael@0 1743 strmno, strmseq,
michael@0 1744 chunk_flags,
michael@0 1745 dmbuf);
michael@0 1746 if (control == NULL) {
michael@0 1747 goto failed_express_del;
michael@0 1748 }
michael@0 1749 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
michael@0 1750 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
michael@0 1751 asoc->highest_tsn_inside_nr_map = tsn;
michael@0 1752 }
michael@0 1753 sctp_add_to_readq(stcb->sctp_ep, stcb,
michael@0 1754 control, &stcb->sctp_socket->so_rcv,
michael@0 1755 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
michael@0 1756
michael@0 1757 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
michael@0 1758 /* for ordered, bump what we delivered */
michael@0 1759 asoc->strmin[strmno].last_sequence_delivered++;
michael@0 1760 }
michael@0 1761 SCTP_STAT_INCR(sctps_recvexpress);
michael@0 1762 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
michael@0 1763 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
michael@0 1764 SCTP_STR_LOG_FROM_EXPRS_DEL);
michael@0 1765 }
michael@0 1766 control = NULL;
michael@0 1767
michael@0 1768 goto finish_express_del;
michael@0 1769 }
michael@0 1770 failed_express_del:
michael@0 1771 /* If we reach here this is a new chunk */
michael@0 1772 chk = NULL;
michael@0 1773 control = NULL;
michael@0 1774 /* Express for fragmented delivery? */
michael@0 1775 if ((asoc->fragmented_delivery_inprogress) &&
michael@0 1776 (stcb->asoc.control_pdapi) &&
michael@0 1777 (asoc->str_of_pdapi == strmno) &&
michael@0 1778 (asoc->ssn_of_pdapi == strmseq)
michael@0 1779 ) {
michael@0 1780 control = stcb->asoc.control_pdapi;
michael@0 1781 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
michael@0 1782 /* Can't be another first? */
michael@0 1783 goto failed_pdapi_express_del;
michael@0 1784 }
michael@0 1785 if (tsn == (control->sinfo_tsn + 1)) {
michael@0 1786 /* Yep, we can add it on */
michael@0 1787 int end = 0;
michael@0 1788
michael@0 1789 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
michael@0 1790 end = 1;
michael@0 1791 }
michael@0 1792 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
michael@0 1793 tsn,
michael@0 1794 &stcb->sctp_socket->so_rcv)) {
michael@0 1795 SCTP_PRINTF("Append fails end:%d\n", end);
michael@0 1796 goto failed_pdapi_express_del;
michael@0 1797 }
michael@0 1798
michael@0 1799 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
michael@0 1800 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
michael@0 1801 asoc->highest_tsn_inside_nr_map = tsn;
michael@0 1802 }
michael@0 1803 SCTP_STAT_INCR(sctps_recvexpressm);
michael@0 1804 asoc->tsn_last_delivered = tsn;
michael@0 1805 asoc->fragment_flags = chunk_flags;
michael@0 1806 asoc->tsn_of_pdapi_last_delivered = tsn;
michael@0 1807 asoc->last_flags_delivered = chunk_flags;
michael@0 1808 asoc->last_strm_seq_delivered = strmseq;
michael@0 1809 asoc->last_strm_no_delivered = strmno;
michael@0 1810 if (end) {
michael@0 1811 /* clean up the flags and such */
michael@0 1812 asoc->fragmented_delivery_inprogress = 0;
michael@0 1813 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
michael@0 1814 asoc->strmin[strmno].last_sequence_delivered++;
michael@0 1815 }
michael@0 1816 stcb->asoc.control_pdapi = NULL;
michael@0 1817 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
michael@0 1818 /* There could be another message ready */
michael@0 1819 need_reasm_check = 1;
michael@0 1820 }
michael@0 1821 }
michael@0 1822 control = NULL;
michael@0 1823 goto finish_express_del;
michael@0 1824 }
michael@0 1825 }
michael@0 1826 failed_pdapi_express_del:
michael@0 1827 control = NULL;
michael@0 1828 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
michael@0 1829 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
michael@0 1830 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
michael@0 1831 asoc->highest_tsn_inside_nr_map = tsn;
michael@0 1832 }
michael@0 1833 } else {
michael@0 1834 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
michael@0 1835 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
michael@0 1836 asoc->highest_tsn_inside_map = tsn;
michael@0 1837 }
michael@0 1838 }
michael@0 1839 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
michael@0 1840 sctp_alloc_a_chunk(stcb, chk);
michael@0 1841 if (chk == NULL) {
michael@0 1842 /* No memory so we drop the chunk */
michael@0 1843 SCTP_STAT_INCR(sctps_nomem);
michael@0 1844 if (last_chunk == 0) {
michael@0 1845 /* we copied it, free the copy */
michael@0 1846 sctp_m_freem(dmbuf);
michael@0 1847 }
michael@0 1848 return (0);
michael@0 1849 }
michael@0 1850 chk->rec.data.TSN_seq = tsn;
michael@0 1851 chk->no_fr_allowed = 0;
michael@0 1852 chk->rec.data.stream_seq = strmseq;
michael@0 1853 chk->rec.data.stream_number = strmno;
michael@0 1854 chk->rec.data.payloadtype = protocol_id;
michael@0 1855 chk->rec.data.context = stcb->asoc.context;
michael@0 1856 chk->rec.data.doing_fast_retransmit = 0;
michael@0 1857 chk->rec.data.rcv_flags = chunk_flags;
michael@0 1858 chk->asoc = asoc;
michael@0 1859 chk->send_size = the_len;
michael@0 1860 chk->whoTo = net;
michael@0 1861 atomic_add_int(&net->ref_count, 1);
michael@0 1862 chk->data = dmbuf;
michael@0 1863 } else {
michael@0 1864 sctp_alloc_a_readq(stcb, control);
michael@0 1865 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
michael@0 1866 protocol_id,
michael@0 1867 strmno, strmseq,
michael@0 1868 chunk_flags,
michael@0 1869 dmbuf);
michael@0 1870 if (control == NULL) {
michael@0 1871 /* No memory so we drop the chunk */
michael@0 1872 SCTP_STAT_INCR(sctps_nomem);
michael@0 1873 if (last_chunk == 0) {
michael@0 1874 /* we copied it, free the copy */
michael@0 1875 sctp_m_freem(dmbuf);
michael@0 1876 }
michael@0 1877 return (0);
michael@0 1878 }
michael@0 1879 control->length = the_len;
michael@0 1880 }
michael@0 1881
michael@0 1882 /* Mark it as received */
michael@0 1883 /* Now queue it where it belongs */
michael@0 1884 if (control != NULL) {
michael@0 1885 /* First a sanity check */
michael@0 1886 if (asoc->fragmented_delivery_inprogress) {
michael@0 1887 /*
michael@0 1888 * Ok, we have a fragmented delivery in progress; if
michael@0 1889 * this chunk is next to deliver OR belongs in our
michael@0 1890 * view to the reassembly queue, the peer is evil or
michael@0 1891 * broken.
michael@0 1892 */
michael@0 1893 uint32_t estimate_tsn;
michael@0 1894
michael@0 1895 estimate_tsn = asoc->tsn_last_delivered + 1;
michael@0 1896 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
michael@0 1897 (estimate_tsn == control->sinfo_tsn)) {
michael@0 1898 /* Evil/Broken peer */
michael@0 1899 sctp_m_freem(control->data);
michael@0 1900 control->data = NULL;
michael@0 1901 if (control->whoFrom) {
michael@0 1902 sctp_free_remote_addr(control->whoFrom);
michael@0 1903 control->whoFrom = NULL;
michael@0 1904 }
michael@0 1905 sctp_free_a_readq(stcb, control);
michael@0 1906 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
michael@0 1907 0, M_NOWAIT, 1, MT_DATA);
michael@0 1908 if (oper) {
michael@0 1909 struct sctp_paramhdr *ph;
michael@0 1910 uint32_t *ippp;
michael@0 1911
michael@0 1912 SCTP_BUF_LEN(oper) =
michael@0 1913 sizeof(struct sctp_paramhdr) +
michael@0 1914 (3 * sizeof(uint32_t));
michael@0 1915 ph = mtod(oper, struct sctp_paramhdr *);
michael@0 1916 ph->param_type =
michael@0 1917 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 1918 ph->param_length = htons(SCTP_BUF_LEN(oper));
michael@0 1919 ippp = (uint32_t *) (ph + 1);
michael@0 1920 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_15);
michael@0 1921 ippp++;
michael@0 1922 *ippp = tsn;
michael@0 1923 ippp++;
michael@0 1924 *ippp = ((strmno << 16) | strmseq);
michael@0 1925 }
michael@0 1926 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_15;
michael@0 1927 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 1928 *abort_flag = 1;
michael@0 1929 return (0);
michael@0 1930 } else {
michael@0 1931 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
michael@0 1932 sctp_m_freem(control->data);
michael@0 1933 control->data = NULL;
michael@0 1934 if (control->whoFrom) {
michael@0 1935 sctp_free_remote_addr(control->whoFrom);
michael@0 1936 control->whoFrom = NULL;
michael@0 1937 }
michael@0 1938 sctp_free_a_readq(stcb, control);
michael@0 1939
michael@0 1940 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
michael@0 1941 0, M_NOWAIT, 1, MT_DATA);
michael@0 1942 if (oper) {
michael@0 1943 struct sctp_paramhdr *ph;
michael@0 1944 uint32_t *ippp;
michael@0 1945
michael@0 1946 SCTP_BUF_LEN(oper) =
michael@0 1947 sizeof(struct sctp_paramhdr) +
michael@0 1948 ( 3 * sizeof(uint32_t));
michael@0 1949 ph = mtod(oper,
michael@0 1950 struct sctp_paramhdr *);
michael@0 1951 ph->param_type =
michael@0 1952 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 1953 ph->param_length =
michael@0 1954 htons(SCTP_BUF_LEN(oper));
michael@0 1955 ippp = (uint32_t *) (ph + 1);
michael@0 1956 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_16);
michael@0 1957 ippp++;
michael@0 1958 *ippp = tsn;
michael@0 1959 ippp++;
michael@0 1960 *ippp = ((strmno << 16) | strmseq);
michael@0 1961 }
michael@0 1962 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_16;
michael@0 1963 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 1964 *abort_flag = 1;
michael@0 1965 return (0);
michael@0 1966 }
michael@0 1967 }
michael@0 1968 } else {
michael@0 1969 /* No PDAPI running */
michael@0 1970 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
michael@0 1971 /*
michael@0 1972 * The reassembly queue is NOT empty; validate
michael@0 1973 * that this tsn does not need to be in the
michael@0 1974 * reassembly queue. If it does then our peer
michael@0 1975 * is broken or evil.
michael@0 1976 */
michael@0 1977 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
michael@0 1978 sctp_m_freem(control->data);
michael@0 1979 control->data = NULL;
michael@0 1980 if (control->whoFrom) {
michael@0 1981 sctp_free_remote_addr(control->whoFrom);
michael@0 1982 control->whoFrom = NULL;
michael@0 1983 }
michael@0 1984 sctp_free_a_readq(stcb, control);
michael@0 1985 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
michael@0 1986 0, M_NOWAIT, 1, MT_DATA);
michael@0 1987 if (oper) {
michael@0 1988 struct sctp_paramhdr *ph;
michael@0 1989 uint32_t *ippp;
michael@0 1990
michael@0 1991 SCTP_BUF_LEN(oper) =
michael@0 1992 sizeof(struct sctp_paramhdr) +
michael@0 1993 (3 * sizeof(uint32_t));
michael@0 1994 ph = mtod(oper,
michael@0 1995 struct sctp_paramhdr *);
michael@0 1996 ph->param_type =
michael@0 1997 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 1998 ph->param_length =
michael@0 1999 htons(SCTP_BUF_LEN(oper));
michael@0 2000 ippp = (uint32_t *) (ph + 1);
michael@0 2001 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_17);
michael@0 2002 ippp++;
michael@0 2003 *ippp = tsn;
michael@0 2004 ippp++;
michael@0 2005 *ippp = ((strmno << 16) | strmseq);
michael@0 2006 }
michael@0 2007 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_17;
michael@0 2008 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 2009 *abort_flag = 1;
michael@0 2010 return (0);
michael@0 2011 }
michael@0 2012 }
michael@0 2013 }
michael@0 2014 /* ok, if we reach here we have passed the sanity checks */
michael@0 2015 if (chunk_flags & SCTP_DATA_UNORDERED) {
michael@0 2016 /* queue directly into socket buffer */
michael@0 2017 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
michael@0 2018 sctp_add_to_readq(stcb->sctp_ep, stcb,
michael@0 2019 control,
michael@0 2020 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
michael@0 2021 } else {
michael@0 2022 /*
michael@0 2023 * Special check for when streams are resetting. We
michael@0 2024 * could be smarter about this and check the
michael@0 2025 * actual stream to see if it is not being reset..
michael@0 2026 * that way we would not create a HOLB when amongst
michael@0 2027 * streams being reset and those not being reset.
michael@0 2028 *
michael@0 2029 * We take complete messages that have a stream reset
michael@0 2030 * intervening (aka the TSN is after where our
michael@0 2031 * cum-ack needs to be) off and put them on a
michael@0 2032 * pending_reply_queue. The reassembly ones we do
michael@0 2033 * not have to worry about since they are all sorted
michael@0 2034 * and processed in TSN order. It is only the
michael@0 2035 * singletons I must worry about.
michael@0 2036 */
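/*
 * e.g. (hypothetical TSNs): with a pending reset at liste->tsn = 100,
 * a complete ordered message arriving with TSN 103 is parked on the
 * pending_reply_queue and only handed to the stream once the cum-ack
 * has passed 100 and the reset has been applied.
 */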
michael@0 2037 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
michael@0 2038 SCTP_TSN_GT(tsn, liste->tsn)) {
michael@0 2039 /*
michael@0 2040 * yep, it's past where we need to reset... go
michael@0 2041 * ahead and queue it.
michael@0 2042 */
michael@0 2043 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
michael@0 2044 /* first one on */
michael@0 2045 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
michael@0 2046 } else {
michael@0 2047 struct sctp_queued_to_read *ctlOn, *nctlOn;
michael@0 2048 unsigned char inserted = 0;
michael@0 2049
michael@0 2050 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
michael@0 2051 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
michael@0 2052 continue;
michael@0 2053 } else {
michael@0 2054 /* found it */
michael@0 2055 TAILQ_INSERT_BEFORE(ctlOn, control, next);
michael@0 2056 inserted = 1;
michael@0 2057 break;
michael@0 2058 }
michael@0 2059 }
michael@0 2060 if (inserted == 0) {
michael@0 2061 /*
michael@0 2062 * nothing in the queue
michael@0 2063 * sorts after it, so put
michael@0 2064 * it at the tail.
michael@0 2065 */
michael@0 2066 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
michael@0 2067 }
michael@0 2068 }
michael@0 2069 } else {
michael@0 2070 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
michael@0 2071 if (*abort_flag) {
michael@0 2072 return (0);
michael@0 2073 }
michael@0 2074 }
michael@0 2075 }
michael@0 2076 } else {
michael@0 2077 /* Into the re-assembly queue */
michael@0 2078 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
michael@0 2079 if (*abort_flag) {
michael@0 2080 /*
michael@0 2081 * the assoc is now gone and chk was put onto the
michael@0 2082 * reasm queue, which has all been freed.
michael@0 2083 */
michael@0 2084 *m = NULL;
michael@0 2085 return (0);
michael@0 2086 }
michael@0 2087 }
michael@0 2088 finish_express_del:
michael@0 2089 if (tsn == (asoc->cumulative_tsn + 1)) {
michael@0 2090 /* Update cum-ack */
michael@0 2091 asoc->cumulative_tsn = tsn;
michael@0 2092 }
michael@0 2093 if (last_chunk) {
michael@0 2094 *m = NULL;
michael@0 2095 }
michael@0 2096 if (ordered) {
michael@0 2097 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
michael@0 2098 } else {
michael@0 2099 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
michael@0 2100 }
michael@0 2101 SCTP_STAT_INCR(sctps_recvdata);
michael@0 2102 /* Set it present please */
michael@0 2103 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
michael@0 2104 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
michael@0 2105 }
michael@0 2106 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
michael@0 2107 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
michael@0 2108 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
michael@0 2109 }
michael@0 2110 /* check the special flag for stream resets */
michael@0 2111 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
michael@0 2112 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
michael@0 2113 /*
michael@0 2114 * we have finished working through the backlogged TSNs; now it
michael@0 2115 * is time to reset streams. 1: call the reset function. 2: free
michael@0 2116 * the pending_reply space. 3: distribute any chunks in the
michael@0 2117 * pending_reply_queue.
michael@0 2118 */
michael@0 2119 struct sctp_queued_to_read *ctl, *nctl;
michael@0 2120
michael@0 2121 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
michael@0 2122 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
michael@0 2123 SCTP_FREE(liste, SCTP_M_STRESET);
michael@0 2124 /*sa_ignore FREED_MEMORY*/
michael@0 2125 liste = TAILQ_FIRST(&asoc->resetHead);
michael@0 2126 if (TAILQ_EMPTY(&asoc->resetHead)) {
michael@0 2127 /* All can be removed */
michael@0 2128 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
michael@0 2129 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
michael@0 2130 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
michael@0 2131 if (*abort_flag) {
michael@0 2132 return (0);
michael@0 2133 }
michael@0 2134 }
michael@0 2135 } else {
michael@0 2136 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
michael@0 2137 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
michael@0 2138 break;
michael@0 2139 }
michael@0 2140 /*
michael@0 2141 * if ctl->sinfo_tsn is <= liste->tsn we can
michael@0 2142 * process it which is the NOT of
michael@0 2143 * ctl->sinfo_tsn > liste->tsn
michael@0 2144 */
michael@0 2145 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
michael@0 2146 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
michael@0 2147 if (*abort_flag) {
michael@0 2148 return (0);
michael@0 2149 }
michael@0 2150 }
michael@0 2151 }
michael@0 2152 /*
michael@0 2153 * Now service re-assembly to pick up anything that has been
michael@0 2154 * held on the reassembly queue.
michael@0 2155 */
michael@0 2156 sctp_deliver_reasm_check(stcb, asoc);
michael@0 2157 need_reasm_check = 0;
michael@0 2158 }
michael@0 2159
michael@0 2160 if (need_reasm_check) {
michael@0 2161 /* Another one waits ? */
michael@0 2162 sctp_deliver_reasm_check(stcb, asoc);
michael@0 2163 }
michael@0 2164 return (1);
michael@0 2165 }
michael@0 2166
michael@0 2167 int8_t sctp_map_lookup_tab[256] = {
michael@0 2168 0, 1, 0, 2, 0, 1, 0, 3,
michael@0 2169 0, 1, 0, 2, 0, 1, 0, 4,
michael@0 2170 0, 1, 0, 2, 0, 1, 0, 3,
michael@0 2171 0, 1, 0, 2, 0, 1, 0, 5,
michael@0 2172 0, 1, 0, 2, 0, 1, 0, 3,
michael@0 2173 0, 1, 0, 2, 0, 1, 0, 4,
michael@0 2174 0, 1, 0, 2, 0, 1, 0, 3,
michael@0 2175 0, 1, 0, 2, 0, 1, 0, 6,
michael@0 2176 0, 1, 0, 2, 0, 1, 0, 3,
michael@0 2177 0, 1, 0, 2, 0, 1, 0, 4,
michael@0 2178 0, 1, 0, 2, 0, 1, 0, 3,
michael@0 2179 0, 1, 0, 2, 0, 1, 0, 5,
michael@0 2180 0, 1, 0, 2, 0, 1, 0, 3,
michael@0 2181 0, 1, 0, 2, 0, 1, 0, 4,
michael@0 2182 0, 1, 0, 2, 0, 1, 0, 3,
michael@0 2183 0, 1, 0, 2, 0, 1, 0, 7,
michael@0 2184 0, 1, 0, 2, 0, 1, 0, 3,
michael@0 2185 0, 1, 0, 2, 0, 1, 0, 4,
michael@0 2186 0, 1, 0, 2, 0, 1, 0, 3,
michael@0 2187 0, 1, 0, 2, 0, 1, 0, 5,
michael@0 2188 0, 1, 0, 2, 0, 1, 0, 3,
michael@0 2189 0, 1, 0, 2, 0, 1, 0, 4,
michael@0 2190 0, 1, 0, 2, 0, 1, 0, 3,
michael@0 2191 0, 1, 0, 2, 0, 1, 0, 6,
michael@0 2192 0, 1, 0, 2, 0, 1, 0, 3,
michael@0 2193 0, 1, 0, 2, 0, 1, 0, 4,
michael@0 2194 0, 1, 0, 2, 0, 1, 0, 3,
michael@0 2195 0, 1, 0, 2, 0, 1, 0, 5,
michael@0 2196 0, 1, 0, 2, 0, 1, 0, 3,
michael@0 2197 0, 1, 0, 2, 0, 1, 0, 4,
michael@0 2198 0, 1, 0, 2, 0, 1, 0, 3,
michael@0 2199 0, 1, 0, 2, 0, 1, 0, 8
michael@0 2200 };
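/*
 * sctp_map_lookup_tab[val] is the number of consecutive 1-bits in val
 * counting up from bit 0, i.e. how many in-sequence TSNs a mapping
 * byte contributes before its first gap. A minimal equivalent is kept
 * below purely as an illustrative sketch (the table wins on speed):
 */
#if 0
static int
sctp_map_trailing_ones(uint8_t val)
{
	int n = 0;

	while (val & 0x01) {	/* count 1-bits from the LSB up */
		n++;
		val >>= 1;
	}
	/* e.g. 0x17 (00010111) -> 3, 0xff -> 8, 0xfe -> 0 */
	return (n);
}
#endif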
michael@0 2201
michael@0 2202
michael@0 2203 void
michael@0 2204 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
michael@0 2205 {
michael@0 2206 /*
michael@0 2207 * Now we also need to check the mapping array in a couple of ways.
michael@0 2208 * 1) Did we move the cum-ack point?
michael@0 2209 *
michael@0 2210 * When you first glance at this you might think
michael@0 2211 * that all entries that make up the position
michael@0 2212 * of the cum-ack would be in the nr-mapping array
michael@0 2213 * only.. i.e. things up to the cum-ack are always
michael@0 2214 * deliverable. That's true with one exception: when
michael@0 2215 * it's a fragmented message we may not deliver the data
michael@0 2216 * until some threshold (or all of it) is in place. So
michael@0 2217 * we must OR the nr_mapping_array and mapping_array to
michael@0 2218 * get a true picture of the cum-ack.
michael@0 2219 */
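/*
 * e.g. (hypothetical bytes): nr_mapping_array[0] = 0x0f with
 * mapping_array[0] = 0x30 ORs to 0x3f, so six TSNs count toward the
 * cum-ack even though two of them (bits 4 and 5) are still held for
 * reassembly.
 */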
michael@0 2220 struct sctp_association *asoc;
michael@0 2221 int at;
michael@0 2222 uint8_t val;
michael@0 2223 int slide_from, slide_end, lgap, distance;
michael@0 2224 uint32_t old_cumack, old_base, old_highest, highest_tsn;
michael@0 2225
michael@0 2226 asoc = &stcb->asoc;
michael@0 2227
michael@0 2228 old_cumack = asoc->cumulative_tsn;
michael@0 2229 old_base = asoc->mapping_array_base_tsn;
michael@0 2230 old_highest = asoc->highest_tsn_inside_map;
michael@0 2231 /*
michael@0 2232 * We could probably improve this a small bit by calculating the
michael@0 2233 * offset of the current cum-ack as the starting point.
michael@0 2234 */
michael@0 2235 at = 0;
michael@0 2236 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
michael@0 2237 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
michael@0 2238 if (val == 0xff) {
michael@0 2239 at += 8;
michael@0 2240 } else {
michael@0 2241 /* there is a 0 bit */
michael@0 2242 at += sctp_map_lookup_tab[val];
michael@0 2243 break;
michael@0 2244 }
michael@0 2245 }
michael@0 2246 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1);
michael@0 2247
michael@0 2248 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
michael@0 2249 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
michael@0 2250 #ifdef INVARIANTS
michael@0 2251 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
michael@0 2252 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
michael@0 2253 #else
michael@0 2254 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
michael@0 2255 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
michael@0 2256 sctp_print_mapping_array(asoc);
michael@0 2257 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
michael@0 2258 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
michael@0 2259 }
michael@0 2260 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
michael@0 2261 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
michael@0 2262 #endif
michael@0 2263 }
michael@0 2264 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
michael@0 2265 highest_tsn = asoc->highest_tsn_inside_nr_map;
michael@0 2266 } else {
michael@0 2267 highest_tsn = asoc->highest_tsn_inside_map;
michael@0 2268 }
michael@0 2269 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
michael@0 2270 /* The complete array was completed by a single FR */
michael@0 2271 /* highest becomes the cum-ack */
michael@0 2272 int clr;
michael@0 2273 #ifdef INVARIANTS
michael@0 2274 unsigned int i;
michael@0 2275 #endif
michael@0 2276
michael@0 2277 /* clear the array */
michael@0 2278 clr = ((at+7) >> 3);
michael@0 2279 if (clr > asoc->mapping_array_size) {
michael@0 2280 clr = asoc->mapping_array_size;
michael@0 2281 }
michael@0 2282 memset(asoc->mapping_array, 0, clr);
michael@0 2283 memset(asoc->nr_mapping_array, 0, clr);
michael@0 2284 #ifdef INVARIANTS
michael@0 2285 for (i = 0; i < asoc->mapping_array_size; i++) {
michael@0 2286 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
michael@0 2287 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
michael@0 2288 sctp_print_mapping_array(asoc);
michael@0 2289 }
michael@0 2290 }
michael@0 2291 #endif
michael@0 2292 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
michael@0 2293 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
michael@0 2294 } else if (at >= 8) {
michael@0 2295 /* we can slide the mapping array down */
michael@0 2296 /* slide_from holds where we hit the first NON 0xff byte */
michael@0 2297
michael@0 2298 /*
michael@0 2299 * now calculate the ceiling of the move using our highest
michael@0 2300 * TSN value
michael@0 2301 */
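/*
 * e.g. (hypothetical values): with mapping_array_base_tsn = 1000, a
 * highest_tsn of 1020 gives lgap = 20 and slide_end = 2; if the scan
 * above stopped at slide_from = 2, the distance is one byte copied to
 * the front, and base_tsn then advances by slide_from * 8 to 1016.
 */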
michael@0 2302 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
michael@0 2303 slide_end = (lgap >> 3);
michael@0 2304 if (slide_end < slide_from) {
michael@0 2305 sctp_print_mapping_array(asoc);
michael@0 2306 #ifdef INVARIANTS
michael@0 2307 panic("impossible slide");
michael@0 2308 #else
michael@0 2309 SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
michael@0 2310 lgap, slide_end, slide_from, at);
michael@0 2311 return;
michael@0 2312 #endif
michael@0 2313 }
michael@0 2314 if (slide_end > asoc->mapping_array_size) {
michael@0 2315 #ifdef INVARIANTS
michael@0 2316 panic("would overrun buffer");
michael@0 2317 #else
michael@0 2318 SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
michael@0 2319 asoc->mapping_array_size, slide_end);
michael@0 2320 slide_end = asoc->mapping_array_size;
michael@0 2321 #endif
michael@0 2322 }
michael@0 2323 distance = (slide_end - slide_from) + 1;
michael@0 2324 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
michael@0 2325 sctp_log_map(old_base, old_cumack, old_highest,
michael@0 2326 SCTP_MAP_PREPARE_SLIDE);
michael@0 2327 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
michael@0 2328 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
michael@0 2329 }
michael@0 2330 if (distance + slide_from > asoc->mapping_array_size ||
michael@0 2331 distance < 0) {
michael@0 2332 /*
michael@0 2333 * Here we do NOT slide forward the array so that
michael@0 2334 * hopefully when more data comes in to fill it up
michael@0 2335 * we will be able to slide it forward. Really I
michael@0 2336 * don't think this should happen :-0
michael@0 2337 */
michael@0 2338
michael@0 2339 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
michael@0 2340 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
michael@0 2341 (uint32_t) asoc->mapping_array_size,
michael@0 2342 SCTP_MAP_SLIDE_NONE);
michael@0 2343 }
michael@0 2344 } else {
michael@0 2345 int ii;
michael@0 2346
michael@0 2347 for (ii = 0; ii < distance; ii++) {
michael@0 2348 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
michael@0 2349 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
michael@0 2350
michael@0 2351 }
michael@0 2352 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
michael@0 2353 asoc->mapping_array[ii] = 0;
michael@0 2354 asoc->nr_mapping_array[ii] = 0;
michael@0 2355 }
michael@0 2356 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
michael@0 2357 asoc->highest_tsn_inside_map += (slide_from << 3);
michael@0 2358 }
michael@0 2359 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
michael@0 2360 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
michael@0 2361 }
michael@0 2362 asoc->mapping_array_base_tsn += (slide_from << 3);
michael@0 2363 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
michael@0 2364 sctp_log_map(asoc->mapping_array_base_tsn,
michael@0 2365 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
michael@0 2366 SCTP_MAP_SLIDE_RESULT);
michael@0 2367 }
michael@0 2368 }
michael@0 2369 }
michael@0 2370 }
michael@0 2371
michael@0 2372 void
michael@0 2373 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
michael@0 2374 {
michael@0 2375 struct sctp_association *asoc;
michael@0 2376 uint32_t highest_tsn;
michael@0 2377
michael@0 2378 asoc = &stcb->asoc;
michael@0 2379 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
michael@0 2380 highest_tsn = asoc->highest_tsn_inside_nr_map;
michael@0 2381 } else {
michael@0 2382 highest_tsn = asoc->highest_tsn_inside_map;
michael@0 2383 }
michael@0 2384
michael@0 2385 /*
michael@0 2386 * Now we need to see if we need to queue a sack or just start the
michael@0 2387 * timer (if allowed).
michael@0 2388 */
michael@0 2389 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
michael@0 2390 /*
michael@0 2391 * Ok, special case: in the SHUTDOWN-SENT case we
michael@0 2392 * make sure the SACK timer is off and instead send a
michael@0 2393 * SHUTDOWN and a SACK
michael@0 2394 */
michael@0 2395 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
michael@0 2396 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
michael@0 2397 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA+SCTP_LOC_18);
michael@0 2398 }
michael@0 2399 sctp_send_shutdown(stcb,
michael@0 2400 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
michael@0 2401 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
michael@0 2402 } else {
michael@0 2403 int is_a_gap;
michael@0 2404
michael@0 2405 /* is there a gap now ? */
michael@0 2406 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
michael@0 2407
michael@0 2408 /*
michael@0 2409 * CMT DAC algorithm: increase number of packets
michael@0 2410 * received since last ack
michael@0 2411 */
michael@0 2412 stcb->asoc.cmt_dac_pkts_rcvd++;
michael@0 2413
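/*
 * Summary of the decision below: send a SACK right away when one is
 * already owed, a gap exists or just closed, dup TSNs are queued,
 * delayed acks are disabled, or sack_freq packets have been seen;
 * the lone exception is CMT DAC, which still delays a gap-only ack.
 */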
michael@0 2414 if ((stcb->asoc.send_sack == 1) || /* We need to send a SACK */
michael@0 2415 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
michael@0 2416 * longer is one */
michael@0 2417 (stcb->asoc.numduptsns) || /* we have dup's */
michael@0 2418 (is_a_gap) || /* is still a gap */
michael@0 2419 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
michael@0 2420 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
michael@0 2421 ) {
michael@0 2422
michael@0 2423 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
michael@0 2424 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
michael@0 2425 (stcb->asoc.send_sack == 0) &&
michael@0 2426 (stcb->asoc.numduptsns == 0) &&
michael@0 2427 (stcb->asoc.delayed_ack) &&
michael@0 2428 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
michael@0 2429
michael@0 2430 /*
michael@0 2431 * CMT DAC algorithm: with CMT,
michael@0 2432 * delay acks even in the face of
michael@0 2433 * reordering. Therefore, acks
michael@0 2434 * that are not forced out by one
michael@0 2435 * of the reasons above will be
michael@0 2436 * delayed. That is, acks that
michael@0 2437 * would have been sent due to
michael@0 2438 * gap reports will be delayed
michael@0 2439 * with DAC. Start the delayed
michael@0 2440 * ack timer.
michael@0 2441 */
michael@0 2442 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
michael@0 2443 stcb->sctp_ep, stcb, NULL);
michael@0 2444 } else {
michael@0 2445 /*
michael@0 2446 * Ok we must build a SACK since the
michael@0 2447 * timer is pending, we got our
michael@0 2448 * first packet OR there are gaps or
michael@0 2449 * duplicates.
michael@0 2450 */
michael@0 2451 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
michael@0 2452 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
michael@0 2453 }
michael@0 2454 } else {
michael@0 2455 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
michael@0 2456 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
michael@0 2457 stcb->sctp_ep, stcb, NULL);
michael@0 2458 }
michael@0 2459 }
michael@0 2460 }
michael@0 2461 }
michael@0 2462
michael@0 2463 void
michael@0 2464 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
michael@0 2465 {
michael@0 2466 struct sctp_tmit_chunk *chk;
michael@0 2467 uint32_t tsize, pd_point;
michael@0 2468 uint16_t nxt_todel;
michael@0 2469
michael@0 2470 if (asoc->fragmented_delivery_inprogress) {
michael@0 2471 sctp_service_reassembly(stcb, asoc);
michael@0 2472 }
michael@0 2473 /* Can we proceed further, i.e. the PD-API is complete */
michael@0 2474 if (asoc->fragmented_delivery_inprogress) {
michael@0 2475 /* no */
michael@0 2476 return;
michael@0 2477 }
michael@0 2478 /*
michael@0 2479 * Now is there some other chunk I can deliver from the reassembly
michael@0 2480 * queue?
michael@0 2481 */
michael@0 2482 doit_again:
michael@0 2483 chk = TAILQ_FIRST(&asoc->reasmqueue);
michael@0 2484 if (chk == NULL) {
michael@0 2485 asoc->size_on_reasm_queue = 0;
michael@0 2486 asoc->cnt_on_reasm_queue = 0;
michael@0 2487 return;
michael@0 2488 }
michael@0 2489 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
michael@0 2490 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
michael@0 2491 ((nxt_todel == chk->rec.data.stream_seq) ||
michael@0 2492 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
michael@0 2493 /*
michael@0 2494 * Yep, the first one is here. We set up to start reception
michael@0 2495 * by backing down the TSN, just in case we can't deliver.
michael@0 2496 */
michael@0 2497
michael@0 2498 /*
michael@0 2499 * Before we start, though, either all of the message should
michael@0 2500 * be here or at least pd_point bytes of it, so that
michael@0 2501 * something can actually be delivered.
michael@0 2502 */
michael@0 2503 if (stcb->sctp_socket) {
michael@0 2504 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
michael@0 2505 stcb->sctp_ep->partial_delivery_point);
michael@0 2506 } else {
michael@0 2507 pd_point = stcb->sctp_ep->partial_delivery_point;
michael@0 2508 }
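/*
 * e.g. (hypothetical sizes): a 64 KB receive buffer shifted down by
 * SCTP_PARTIAL_DELIVERY_SHIFT competes with the endpoint's
 * partial_delivery_point, and the smaller of the two becomes the
 * threshold at which a partial message starts being delivered.
 */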
michael@0 2509 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
michael@0 2510 asoc->fragmented_delivery_inprogress = 1;
michael@0 2511 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
michael@0 2512 asoc->str_of_pdapi = chk->rec.data.stream_number;
michael@0 2513 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
michael@0 2514 asoc->pdapi_ppid = chk->rec.data.payloadtype;
michael@0 2515 asoc->fragment_flags = chk->rec.data.rcv_flags;
michael@0 2516 sctp_service_reassembly(stcb, asoc);
michael@0 2517 if (asoc->fragmented_delivery_inprogress == 0) {
michael@0 2518 goto doit_again;
michael@0 2519 }
michael@0 2520 }
michael@0 2521 }
michael@0 2522 }
michael@0 2523
michael@0 2524 int
michael@0 2525 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
michael@0 2526 struct sockaddr *src, struct sockaddr *dst,
michael@0 2527 struct sctphdr *sh, struct sctp_inpcb *inp,
michael@0 2528 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t *high_tsn,
michael@0 2529 #if defined(__FreeBSD__)
michael@0 2530 uint8_t use_mflowid, uint32_t mflowid,
michael@0 2531 #endif
michael@0 2532 uint32_t vrf_id, uint16_t port)
michael@0 2533 {
michael@0 2534 struct sctp_data_chunk *ch, chunk_buf;
michael@0 2535 struct sctp_association *asoc;
michael@0 2536 int num_chunks = 0; /* number of data chunks processed */
michael@0 2537 int stop_proc = 0;
michael@0 2538 int chk_length, break_flag, last_chunk;
michael@0 2539 int abort_flag = 0, was_a_gap;
michael@0 2540 struct mbuf *m;
michael@0 2541 uint32_t highest_tsn;
michael@0 2542
michael@0 2543 /* set the rwnd */
michael@0 2544 sctp_set_rwnd(stcb, &stcb->asoc);
michael@0 2545
michael@0 2546 m = *mm;
michael@0 2547 SCTP_TCB_LOCK_ASSERT(stcb);
michael@0 2548 asoc = &stcb->asoc;
michael@0 2549 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
michael@0 2550 highest_tsn = asoc->highest_tsn_inside_nr_map;
michael@0 2551 } else {
michael@0 2552 highest_tsn = asoc->highest_tsn_inside_map;
michael@0 2553 }
michael@0 2554 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
michael@0 2555 /*
michael@0 2556 * set up where we got the last DATA packet from for any SACK that
michael@0 2557 * may need to go out. Don't bump the net. This is done ONLY when a
michael@0 2558 * chunk is assigned.
michael@0 2559 */
michael@0 2560 asoc->last_data_chunk_from = net;
michael@0 2561
michael@0 2562 #ifndef __Panda__
michael@0 2563 /*-
michael@0 2564 * Now before we proceed we must figure out if this is a wasted
michael@0 2565 * cluster... i.e. it is a small packet sent in and yet the driver
michael@0 2566 * underneath allocated a full cluster for it. If so we must copy it
michael@0 2567 * to a smaller mbuf and free up the cluster mbuf. This will help
michael@0 2568 * with cluster starvation. Note for __Panda__ we don't do this
michael@0 2569 * since it has clusters all the way down to 64 bytes.
michael@0 2570 */
michael@0 2571 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
michael@0 2572 /* we only handle mbufs that are singletons.. not chains */
michael@0 2573 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
michael@0 2574 if (m) {
michael@0 2575 /* ok let's see if we can copy the data up */
michael@0 2576 caddr_t *from, *to;
michael@0 2577 /* get the pointers and copy */
michael@0 2578 to = mtod(m, caddr_t *);
michael@0 2579 from = mtod((*mm), caddr_t *);
michael@0 2580 memcpy(to, from, SCTP_BUF_LEN((*mm)));
michael@0 2581 /* copy the length and free up the old */
michael@0 2582 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
michael@0 2583 sctp_m_freem(*mm);
michael@0 2584 			/* success, back copy */
michael@0 2585 *mm = m;
michael@0 2586 } else {
michael@0 2587 /* We are in trouble in the mbuf world .. yikes */
michael@0 2588 m = *mm;
michael@0 2589 }
michael@0 2590 }
michael@0 2591 #endif
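michael@10 	/*
michael@10 	 * Illustrative note (added): on a typical BSD mbuf layout a plain
michael@10 	 * mbuf holds a couple hundred bytes of data while a cluster is 2KB
michael@10 	 * (exact sizes are platform-dependent), so a 60-byte DATA packet
michael@10 	 * that arrived in a cluster gets copied above into a plain mbuf
michael@10 	 * and the cluster is freed right away.
michael@10 	 */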
michael@0 2592 /* get pointer to the first chunk header */
michael@0 2593 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
michael@0 2594 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
michael@0 2595 if (ch == NULL) {
michael@0 2596 return (1);
michael@0 2597 }
michael@0 2598 /*
michael@0 2599 * process all DATA chunks...
michael@0 2600 */
michael@0 2601 *high_tsn = asoc->cumulative_tsn;
michael@0 2602 break_flag = 0;
michael@0 2603 asoc->data_pkts_seen++;
michael@0 2604 while (stop_proc == 0) {
michael@0 2605 /* validate chunk length */
michael@0 2606 chk_length = ntohs(ch->ch.chunk_length);
michael@0 2607 if (length - *offset < chk_length) {
michael@0 2608 			/* all done, mutilated chunk */
michael@0 2609 stop_proc = 1;
michael@0 2610 continue;
michael@0 2611 }
michael@0 2612 if (ch->ch.chunk_type == SCTP_DATA) {
michael@0 2613 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
michael@0 2614 /*
michael@0 2615 				 * Need to send an abort since we had an
michael@0 2616 				 * invalid data chunk.
michael@0 2617 */
michael@0 2618 struct mbuf *op_err;
michael@0 2619
michael@0 2620 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
michael@0 2621 0, M_NOWAIT, 1, MT_DATA);
michael@0 2622
michael@0 2623 if (op_err) {
michael@0 2624 struct sctp_paramhdr *ph;
michael@0 2625 uint32_t *ippp;
michael@0 2626
michael@0 2627 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
michael@0 2628 (2 * sizeof(uint32_t));
michael@0 2629 ph = mtod(op_err, struct sctp_paramhdr *);
michael@0 2630 ph->param_type =
michael@0 2631 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 2632 ph->param_length = htons(SCTP_BUF_LEN(op_err));
michael@0 2633 ippp = (uint32_t *) (ph + 1);
michael@0 2634 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_19);
michael@0 2635 ippp++;
michael@0 2636 *ippp = asoc->cumulative_tsn;
michael@0 2637
michael@0 2638 }
michael@0 2639 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_19;
michael@0 2640 sctp_abort_association(inp, stcb, m, iphlen,
michael@0 2641 src, dst, sh, op_err,
michael@0 2642 #if defined(__FreeBSD__)
michael@0 2643 use_mflowid, mflowid,
michael@0 2644 #endif
michael@0 2645 vrf_id, port);
michael@0 2646 return (2);
michael@0 2647 }
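michael@10 			/*
michael@10 			 * Why 16 + 1 above (added for clarity): struct
michael@10 			 * sctp_data_chunk is the 4-byte chunk header plus
michael@10 			 * TSN (4), stream id (2), stream seq (2) and PPID
michael@10 			 * (4), i.e. 16 bytes, so a valid DATA chunk must be
michael@10 			 * at least 17 bytes long to carry one byte of data.
michael@10 			 */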
michael@0 2648 #ifdef SCTP_AUDITING_ENABLED
michael@0 2649 sctp_audit_log(0xB1, 0);
michael@0 2650 #endif
michael@0 2651 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
michael@0 2652 last_chunk = 1;
michael@0 2653 } else {
michael@0 2654 last_chunk = 0;
michael@0 2655 }
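michael@10 			/*
michael@10 			 * Note (added): chunks are padded to a 4-byte
michael@10 			 * boundary, so SCTP_SIZE32() rounds up, e.g. a
michael@10 			 * chk_length of 17 occupies 20 bytes on the wire;
michael@10 			 * last_chunk is set when the padded length reaches
michael@10 			 * exactly the end of the packet.
michael@10 			 */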
michael@0 2656 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
michael@0 2657 chk_length, net, high_tsn, &abort_flag, &break_flag,
michael@0 2658 last_chunk)) {
michael@0 2659 num_chunks++;
michael@0 2660 }
michael@0 2661 if (abort_flag)
michael@0 2662 return (2);
michael@0 2663
michael@0 2664 if (break_flag) {
michael@0 2665 /*
michael@0 2666 * Set because of out of rwnd space and no
michael@0 2667 * drop rep space left.
michael@0 2668 */
michael@0 2669 stop_proc = 1;
michael@0 2670 continue;
michael@0 2671 }
michael@0 2672 } else {
michael@0 2673 /* not a data chunk in the data region */
michael@0 2674 switch (ch->ch.chunk_type) {
michael@0 2675 case SCTP_INITIATION:
michael@0 2676 case SCTP_INITIATION_ACK:
michael@0 2677 case SCTP_SELECTIVE_ACK:
michael@0 2678 case SCTP_NR_SELECTIVE_ACK:
michael@0 2679 case SCTP_HEARTBEAT_REQUEST:
michael@0 2680 case SCTP_HEARTBEAT_ACK:
michael@0 2681 case SCTP_ABORT_ASSOCIATION:
michael@0 2682 case SCTP_SHUTDOWN:
michael@0 2683 case SCTP_SHUTDOWN_ACK:
michael@0 2684 case SCTP_OPERATION_ERROR:
michael@0 2685 case SCTP_COOKIE_ECHO:
michael@0 2686 case SCTP_COOKIE_ACK:
michael@0 2687 case SCTP_ECN_ECHO:
michael@0 2688 case SCTP_ECN_CWR:
michael@0 2689 case SCTP_SHUTDOWN_COMPLETE:
michael@0 2690 case SCTP_AUTHENTICATION:
michael@0 2691 case SCTP_ASCONF_ACK:
michael@0 2692 case SCTP_PACKET_DROPPED:
michael@0 2693 case SCTP_STREAM_RESET:
michael@0 2694 case SCTP_FORWARD_CUM_TSN:
michael@0 2695 case SCTP_ASCONF:
michael@0 2696 /*
michael@0 2697 * Now, what do we do with KNOWN chunks that
michael@0 2698 * are NOT in the right place?
michael@0 2699 *
michael@0 2700 * For now, I do nothing but ignore them. We
michael@0 2701 * may later want to add sysctl stuff to
michael@0 2702 * switch out and do either an ABORT() or
michael@0 2703 * possibly process them.
michael@0 2704 */
michael@0 2705 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
michael@0 2706 struct mbuf *op_err;
michael@0 2707
michael@0 2708 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 2709 sctp_abort_association(inp, stcb,
michael@0 2710 m, iphlen,
michael@0 2711 src, dst,
michael@0 2712 sh, op_err,
michael@0 2713 #if defined(__FreeBSD__)
michael@0 2714 use_mflowid, mflowid,
michael@0 2715 #endif
michael@0 2716 vrf_id, port);
michael@0 2717 return (2);
michael@0 2718 }
michael@0 2719 break;
michael@0 2720 default:
michael@0 2721 /* unknown chunk type, use bit rules */
michael@0 2722 if (ch->ch.chunk_type & 0x40) {
michael@0 2723 					/* Add an error report to the queue */
michael@0 2724 struct mbuf *merr;
michael@0 2725 struct sctp_paramhdr *phd;
michael@0 2726
michael@0 2727 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
michael@0 2728 if (merr) {
michael@0 2729 phd = mtod(merr, struct sctp_paramhdr *);
michael@0 2730 /*
michael@0 2731 * We cheat and use param
michael@0 2732 * type since we did not
michael@0 2733 						 * bother to define an error
michael@0 2734 * cause struct. They are
michael@0 2735 * the same basic format
michael@0 2736 * with different names.
michael@0 2737 */
michael@0 2738 phd->param_type =
michael@0 2739 htons(SCTP_CAUSE_UNRECOG_CHUNK);
michael@0 2740 phd->param_length =
michael@0 2741 htons(chk_length + sizeof(*phd));
michael@0 2742 SCTP_BUF_LEN(merr) = sizeof(*phd);
michael@0 2743 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
michael@0 2744 if (SCTP_BUF_NEXT(merr)) {
michael@0 2745 if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
michael@0 2746 sctp_m_freem(merr);
michael@0 2747 } else {
michael@0 2748 sctp_queue_op_err(stcb, merr);
michael@0 2749 }
michael@0 2750 } else {
michael@0 2751 sctp_m_freem(merr);
michael@0 2752 }
michael@0 2753 }
michael@0 2754 }
michael@0 2755 if ((ch->ch.chunk_type & 0x80) == 0) {
michael@0 2756 /* discard the rest of this packet */
michael@0 2757 stop_proc = 1;
michael@0 2758 } /* else skip this bad chunk and
michael@0 2759 * continue... */
michael@0 2760 break;
michael@0 2761 } /* switch of chunk type */
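michael@10 			/*
michael@10 			 * The "bit rules" above follow RFC 4960, section
michael@10 			 * 3.2: the two high bits of an unrecognized chunk
michael@10 			 * type encode the action. 0x40 set means "report it
michael@10 			 * in an ERROR chunk", 0x80 set means "skip it and
michael@10 			 * keep processing". E.g. type 0xC1 (both bits set)
michael@10 			 * is reported and the rest of the packet is still
michael@10 			 * processed, while type 0x41 is reported and the
michael@10 			 * remainder of the packet is discarded.
michael@10 			 */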
michael@0 2762 }
michael@0 2763 *offset += SCTP_SIZE32(chk_length);
michael@0 2764 if ((*offset >= length) || stop_proc) {
michael@0 2765 /* no more data left in the mbuf chain */
michael@0 2766 stop_proc = 1;
michael@0 2767 continue;
michael@0 2768 }
michael@0 2769 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
michael@0 2770 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
michael@0 2771 if (ch == NULL) {
michael@0 2772 *offset = length;
michael@0 2773 stop_proc = 1;
michael@0 2774 continue;
michael@0 2775 }
michael@0 2776 }
michael@0 2777 if (break_flag) {
michael@0 2778 /*
michael@0 2779 * we need to report rwnd overrun drops.
michael@0 2780 */
michael@0 2781 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
michael@0 2782 }
michael@0 2783 if (num_chunks) {
michael@0 2784 /*
michael@0 2785 		 * We got data, so update the time for auto-close and
michael@0 2786 		 * give the peer credit for being alive.
michael@0 2787 */
michael@0 2788 SCTP_STAT_INCR(sctps_recvpktwithdata);
michael@0 2789 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
michael@0 2790 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
michael@0 2791 stcb->asoc.overall_error_count,
michael@0 2792 0,
michael@0 2793 SCTP_FROM_SCTP_INDATA,
michael@0 2794 __LINE__);
michael@0 2795 }
michael@0 2796 stcb->asoc.overall_error_count = 0;
michael@0 2797 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
michael@0 2798 }
michael@0 2799 /* now service all of the reassm queue if needed */
michael@0 2800 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
michael@0 2801 sctp_service_queues(stcb, asoc);
michael@0 2802
michael@0 2803 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
michael@0 2804 /* Assure that we ack right away */
michael@0 2805 stcb->asoc.send_sack = 1;
michael@0 2806 }
michael@0 2807 /* Start a sack timer or QUEUE a SACK for sending */
michael@0 2808 sctp_sack_check(stcb, was_a_gap);
michael@0 2809 return (0);
michael@0 2810 }
michael@0 2811
michael@0 2812 static int
michael@0 2813 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
michael@0 2814 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
michael@0 2815 int *num_frs,
michael@0 2816 uint32_t *biggest_newly_acked_tsn,
michael@0 2817 uint32_t *this_sack_lowest_newack,
michael@0 2818 int *rto_ok)
michael@0 2819 {
michael@0 2820 struct sctp_tmit_chunk *tp1;
michael@0 2821 unsigned int theTSN;
michael@0 2822 int j, wake_him = 0, circled = 0;
michael@0 2823
michael@0 2824 /* Recover the tp1 we last saw */
michael@0 2825 tp1 = *p_tp1;
michael@0 2826 if (tp1 == NULL) {
michael@0 2827 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
michael@0 2828 }
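michael@10 	/*
michael@10 	 * Worked example (added): frag_strt/frag_end are offsets relative
michael@10 	 * to the cumulative TSN carried in the SACK (last_tsn here). With
michael@10 	 * last_tsn = 100, frag_strt = 2 and frag_end = 3, the loop below
michael@10 	 * walks theTSN = 102 and 103.
michael@10 	 */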
michael@0 2829 for (j = frag_strt; j <= frag_end; j++) {
michael@0 2830 theTSN = j + last_tsn;
michael@0 2831 while (tp1) {
michael@0 2832 if (tp1->rec.data.doing_fast_retransmit)
michael@0 2833 (*num_frs) += 1;
michael@0 2834
michael@0 2835 /*-
michael@0 2836 * CMT: CUCv2 algorithm. For each TSN being
michael@0 2837 * processed from the sent queue, track the
michael@0 2838 * next expected pseudo-cumack, or
michael@0 2839 * rtx_pseudo_cumack, if required. Separate
michael@0 2840 * cumack trackers for first transmissions,
michael@0 2841 * and retransmissions.
michael@0 2842 */
michael@0 2843 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
michael@0 2844 (tp1->snd_count == 1)) {
michael@0 2845 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
michael@0 2846 tp1->whoTo->find_pseudo_cumack = 0;
michael@0 2847 }
michael@0 2848 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
michael@0 2849 (tp1->snd_count > 1)) {
michael@0 2850 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
michael@0 2851 tp1->whoTo->find_rtx_pseudo_cumack = 0;
michael@0 2852 }
michael@0 2853 if (tp1->rec.data.TSN_seq == theTSN) {
michael@0 2854 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
michael@0 2855 /*-
michael@0 2856 * must be held until
michael@0 2857 * cum-ack passes
michael@0 2858 */
michael@0 2859 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
michael@0 2860 /*-
michael@0 2861 * If it is less than RESEND, it is
michael@0 2862 * now no-longer in flight.
michael@0 2863 * Higher values may already be set
michael@0 2864 * via previous Gap Ack Blocks...
michael@0 2865 * i.e. ACKED or RESEND.
michael@0 2866 */
michael@0 2867 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
michael@0 2868 *biggest_newly_acked_tsn)) {
michael@0 2869 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
michael@0 2870 }
michael@0 2871 /*-
michael@0 2872 * CMT: SFR algo (and HTNA) - set
michael@0 2873 * saw_newack to 1 for dest being
michael@0 2874 * newly acked. update
michael@0 2875 * this_sack_highest_newack if
michael@0 2876 * appropriate.
michael@0 2877 */
michael@0 2878 if (tp1->rec.data.chunk_was_revoked == 0)
michael@0 2879 tp1->whoTo->saw_newack = 1;
michael@0 2880
michael@0 2881 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
michael@0 2882 tp1->whoTo->this_sack_highest_newack)) {
michael@0 2883 tp1->whoTo->this_sack_highest_newack =
michael@0 2884 tp1->rec.data.TSN_seq;
michael@0 2885 }
michael@0 2886 /*-
michael@0 2887 * CMT DAC algo: also update
michael@0 2888 * this_sack_lowest_newack
michael@0 2889 */
michael@0 2890 if (*this_sack_lowest_newack == 0) {
michael@0 2891 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
michael@0 2892 sctp_log_sack(*this_sack_lowest_newack,
michael@0 2893 last_tsn,
michael@0 2894 tp1->rec.data.TSN_seq,
michael@0 2895 0,
michael@0 2896 0,
michael@0 2897 SCTP_LOG_TSN_ACKED);
michael@0 2898 }
michael@0 2899 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
michael@0 2900 }
michael@0 2901 /*-
michael@0 2902 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
michael@0 2903 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
michael@0 2904 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
michael@0 2905 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
michael@0 2906 * Separate pseudo_cumack trackers for first transmissions and
michael@0 2907 * retransmissions.
michael@0 2908 */
michael@0 2909 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
michael@0 2910 if (tp1->rec.data.chunk_was_revoked == 0) {
michael@0 2911 tp1->whoTo->new_pseudo_cumack = 1;
michael@0 2912 }
michael@0 2913 tp1->whoTo->find_pseudo_cumack = 1;
michael@0 2914 }
michael@0 2915 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
michael@0 2916 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
michael@0 2917 }
michael@0 2918 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
michael@0 2919 if (tp1->rec.data.chunk_was_revoked == 0) {
michael@0 2920 tp1->whoTo->new_pseudo_cumack = 1;
michael@0 2921 }
michael@0 2922 tp1->whoTo->find_rtx_pseudo_cumack = 1;
michael@0 2923 }
michael@0 2924 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
michael@0 2925 sctp_log_sack(*biggest_newly_acked_tsn,
michael@0 2926 last_tsn,
michael@0 2927 tp1->rec.data.TSN_seq,
michael@0 2928 frag_strt,
michael@0 2929 frag_end,
michael@0 2930 SCTP_LOG_TSN_ACKED);
michael@0 2931 }
michael@0 2932 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
michael@0 2933 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
michael@0 2934 tp1->whoTo->flight_size,
michael@0 2935 tp1->book_size,
michael@0 2936 (uintptr_t)tp1->whoTo,
michael@0 2937 tp1->rec.data.TSN_seq);
michael@0 2938 }
michael@0 2939 sctp_flight_size_decrease(tp1);
michael@0 2940 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
michael@0 2941 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
michael@0 2942 tp1);
michael@0 2943 }
michael@0 2944 sctp_total_flight_decrease(stcb, tp1);
michael@0 2945
michael@0 2946 tp1->whoTo->net_ack += tp1->send_size;
michael@0 2947 if (tp1->snd_count < 2) {
michael@0 2948 /*-
michael@0 2949 							 * True non-retransmitted chunk
michael@0 2950 */
michael@0 2951 tp1->whoTo->net_ack2 += tp1->send_size;
michael@0 2952
michael@0 2953 /*-
michael@0 2954 * update RTO too ?
michael@0 2955 */
michael@0 2956 if (tp1->do_rtt) {
michael@0 2957 if (*rto_ok) {
michael@0 2958 tp1->whoTo->RTO =
michael@0 2959 sctp_calculate_rto(stcb,
michael@0 2960 &stcb->asoc,
michael@0 2961 tp1->whoTo,
michael@0 2962 &tp1->sent_rcv_time,
michael@0 2963 sctp_align_safe_nocopy,
michael@0 2964 SCTP_RTT_FROM_DATA);
michael@0 2965 *rto_ok = 0;
michael@0 2966 }
michael@0 2967 if (tp1->whoTo->rto_needed == 0) {
michael@0 2968 tp1->whoTo->rto_needed = 1;
michael@0 2969 }
michael@0 2970 tp1->do_rtt = 0;
michael@0 2971 }
michael@0 2972 }
michael@0 2973
michael@0 2974 }
michael@0 2975 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
michael@0 2976 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
michael@0 2977 stcb->asoc.this_sack_highest_gap)) {
michael@0 2978 stcb->asoc.this_sack_highest_gap =
michael@0 2979 tp1->rec.data.TSN_seq;
michael@0 2980 }
michael@0 2981 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
michael@0 2982 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
michael@0 2983 #ifdef SCTP_AUDITING_ENABLED
michael@0 2984 sctp_audit_log(0xB2,
michael@0 2985 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
michael@0 2986 #endif
michael@0 2987 }
michael@0 2988 }
michael@0 2989 /*-
michael@0 2990 * All chunks NOT UNSENT fall through here and are marked
michael@0 2991 * (leave PR-SCTP ones that are to skip alone though)
michael@0 2992 */
michael@0 2993 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
michael@0 2994 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
michael@0 2995 tp1->sent = SCTP_DATAGRAM_MARKED;
michael@0 2996 }
michael@0 2997 if (tp1->rec.data.chunk_was_revoked) {
michael@0 2998 /* deflate the cwnd */
michael@0 2999 tp1->whoTo->cwnd -= tp1->book_size;
michael@0 3000 tp1->rec.data.chunk_was_revoked = 0;
michael@0 3001 }
michael@0 3002 /* NR Sack code here */
michael@0 3003 if (nr_sacking &&
michael@0 3004 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
michael@0 3005 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
michael@0 3006 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
michael@0 3007 #ifdef INVARIANTS
michael@0 3008 } else {
michael@0 3009 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
michael@0 3010 #endif
michael@0 3011 }
michael@0 3012 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
michael@0 3013 if (tp1->data) {
michael@0 3014 /* sa_ignore NO_NULL_CHK */
michael@0 3015 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
michael@0 3016 sctp_m_freem(tp1->data);
michael@0 3017 tp1->data = NULL;
michael@0 3018 }
michael@0 3019 wake_him++;
michael@0 3020 }
michael@0 3021 }
michael@0 3022 break;
michael@0 3023 } /* if (tp1->TSN_seq == theTSN) */
michael@0 3024 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
michael@0 3025 break;
michael@0 3026 }
michael@0 3027 tp1 = TAILQ_NEXT(tp1, sctp_next);
michael@0 3028 if ((tp1 == NULL) && (circled == 0)) {
michael@0 3029 circled++;
michael@0 3030 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
michael@0 3031 }
michael@0 3032 } /* end while (tp1) */
michael@0 3033 if (tp1 == NULL) {
michael@0 3034 circled = 0;
michael@0 3035 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
michael@0 3036 }
michael@0 3037 /* In case the fragments were not in order we must reset */
michael@0 3038 } /* end for (j = fragStart */
michael@0 3039 *p_tp1 = tp1;
michael@0 3040 return (wake_him); /* Return value only used for nr-sack */
michael@0 3041 }
michael@0 3042
michael@0 3043
michael@0 3044 static int
michael@0 3045 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
michael@0 3046 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
michael@0 3047 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
michael@0 3048 int num_seg, int num_nr_seg, int *rto_ok)
michael@0 3049 {
michael@0 3050 struct sctp_gap_ack_block *frag, block;
michael@0 3051 struct sctp_tmit_chunk *tp1;
michael@0 3052 int i;
michael@0 3053 int num_frs = 0;
michael@0 3054 int chunk_freed;
michael@0 3055 int non_revocable;
michael@0 3056 uint16_t frag_strt, frag_end, prev_frag_end;
michael@0 3057
michael@0 3058 tp1 = TAILQ_FIRST(&asoc->sent_queue);
michael@0 3059 prev_frag_end = 0;
michael@0 3060 chunk_freed = 0;
michael@0 3061
michael@0 3062 for (i = 0; i < (num_seg + num_nr_seg); i++) {
michael@0 3063 if (i == num_seg) {
michael@0 3064 prev_frag_end = 0;
michael@0 3065 tp1 = TAILQ_FIRST(&asoc->sent_queue);
michael@0 3066 }
michael@0 3067 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
michael@0 3068 sizeof(struct sctp_gap_ack_block), (uint8_t *) &block);
michael@0 3069 *offset += sizeof(block);
michael@0 3070 if (frag == NULL) {
michael@0 3071 return (chunk_freed);
michael@0 3072 }
michael@0 3073 frag_strt = ntohs(frag->start);
michael@0 3074 frag_end = ntohs(frag->end);
michael@0 3075
michael@0 3076 if (frag_strt > frag_end) {
michael@0 3077 /* This gap report is malformed, skip it. */
michael@0 3078 continue;
michael@0 3079 }
michael@0 3080 if (frag_strt <= prev_frag_end) {
michael@0 3081 /* This gap report is not in order, so restart. */
michael@0 3082 tp1 = TAILQ_FIRST(&asoc->sent_queue);
michael@0 3083 }
michael@0 3084 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
michael@0 3085 *biggest_tsn_acked = last_tsn + frag_end;
michael@0 3086 }
michael@0 3087 if (i < num_seg) {
michael@0 3088 non_revocable = 0;
michael@0 3089 } else {
michael@0 3090 non_revocable = 1;
michael@0 3091 }
michael@0 3092 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
michael@0 3093 non_revocable, &num_frs, biggest_newly_acked_tsn,
michael@0 3094 this_sack_lowest_newack, rto_ok)) {
michael@0 3095 chunk_freed = 1;
michael@0 3096 }
michael@0 3097 prev_frag_end = frag_end;
michael@0 3098 }
michael@0 3099 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
michael@0 3100 if (num_frs)
michael@0 3101 sctp_log_fr(*biggest_tsn_acked,
michael@0 3102 *biggest_newly_acked_tsn,
michael@0 3103 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
michael@0 3104 }
michael@0 3105 return (chunk_freed);
michael@0 3106 }
michael@0 3107
michael@0 3108 static void
michael@0 3109 sctp_check_for_revoked(struct sctp_tcb *stcb,
michael@0 3110 struct sctp_association *asoc, uint32_t cumack,
michael@0 3111 uint32_t biggest_tsn_acked)
michael@0 3112 {
michael@0 3113 struct sctp_tmit_chunk *tp1;
michael@0 3114
michael@0 3115 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
michael@0 3116 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
michael@0 3117 /*
michael@0 3118 			 * ok this guy is either ACKED or MARKED. If it is
michael@0 3119 * ACKED it has been previously acked but not this
michael@0 3120 * time i.e. revoked. If it is MARKED it was ACK'ed
michael@0 3121 * again.
michael@0 3122 */
michael@0 3123 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
michael@0 3124 break;
michael@0 3125 }
michael@0 3126 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
michael@0 3127 /* it has been revoked */
michael@0 3128 tp1->sent = SCTP_DATAGRAM_SENT;
michael@0 3129 tp1->rec.data.chunk_was_revoked = 1;
michael@0 3130 /* We must add this stuff back in to
michael@0 3131 * assure timers and such get started.
michael@0 3132 */
michael@0 3133 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
michael@0 3134 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
michael@0 3135 tp1->whoTo->flight_size,
michael@0 3136 tp1->book_size,
michael@0 3137 (uintptr_t)tp1->whoTo,
michael@0 3138 tp1->rec.data.TSN_seq);
michael@0 3139 }
michael@0 3140 sctp_flight_size_increase(tp1);
michael@0 3141 sctp_total_flight_increase(stcb, tp1);
michael@0 3142 /* We inflate the cwnd to compensate for our
michael@0 3143 * artificial inflation of the flight_size.
michael@0 3144 */
michael@0 3145 tp1->whoTo->cwnd += tp1->book_size;
michael@0 3146 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
michael@0 3147 sctp_log_sack(asoc->last_acked_seq,
michael@0 3148 cumack,
michael@0 3149 tp1->rec.data.TSN_seq,
michael@0 3150 0,
michael@0 3151 0,
michael@0 3152 SCTP_LOG_TSN_REVOKED);
michael@0 3153 }
michael@0 3154 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
michael@0 3155 /* it has been re-acked in this SACK */
michael@0 3156 tp1->sent = SCTP_DATAGRAM_ACKED;
michael@0 3157 }
michael@0 3158 }
michael@0 3159 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
michael@0 3160 break;
michael@0 3161 }
michael@0 3162 }
michael@0 3163
michael@0 3164
michael@0 3165 static void
michael@0 3166 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
michael@0 3167 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
michael@0 3168 {
michael@0 3169 struct sctp_tmit_chunk *tp1;
michael@0 3170 int strike_flag = 0;
michael@0 3171 struct timeval now;
michael@0 3172 int tot_retrans = 0;
michael@0 3173 uint32_t sending_seq;
michael@0 3174 struct sctp_nets *net;
michael@0 3175 int num_dests_sacked = 0;
michael@0 3176
michael@0 3177 /*
michael@0 3178 	 * select the sending_seq: this is either the next thing ready to be
michael@0 3179 	 * sent but not transmitted, OR the next seq we will assign.
michael@0 3180 */
michael@0 3181 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
michael@0 3182 if (tp1 == NULL) {
michael@0 3183 sending_seq = asoc->sending_seq;
michael@0 3184 } else {
michael@0 3185 sending_seq = tp1->rec.data.TSN_seq;
michael@0 3186 }
michael@0 3187
michael@0 3188 /* CMT DAC algo: finding out if SACK is a mixed SACK */
michael@0 3189 if ((asoc->sctp_cmt_on_off > 0) &&
michael@0 3190 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
michael@0 3191 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
michael@0 3192 if (net->saw_newack)
michael@0 3193 num_dests_sacked++;
michael@0 3194 }
michael@0 3195 }
michael@0 3196 if (stcb->asoc.peer_supports_prsctp) {
michael@0 3197 (void)SCTP_GETTIME_TIMEVAL(&now);
michael@0 3198 }
michael@0 3199 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
michael@0 3200 strike_flag = 0;
michael@0 3201 if (tp1->no_fr_allowed) {
michael@0 3202 /* this one had a timeout or something */
michael@0 3203 continue;
michael@0 3204 }
michael@0 3205 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
michael@0 3206 if (tp1->sent < SCTP_DATAGRAM_RESEND)
michael@0 3207 sctp_log_fr(biggest_tsn_newly_acked,
michael@0 3208 tp1->rec.data.TSN_seq,
michael@0 3209 tp1->sent,
michael@0 3210 SCTP_FR_LOG_CHECK_STRIKE);
michael@0 3211 }
michael@0 3212 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
michael@0 3213 tp1->sent == SCTP_DATAGRAM_UNSENT) {
michael@0 3214 /* done */
michael@0 3215 break;
michael@0 3216 }
michael@0 3217 if (stcb->asoc.peer_supports_prsctp) {
michael@0 3218 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
michael@0 3219 /* Is it expired? */
michael@0 3220 #ifndef __FreeBSD__
michael@0 3221 if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
michael@0 3222 #else
michael@0 3223 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
michael@0 3224 #endif
michael@0 3225 /* Yes so drop it */
michael@0 3226 if (tp1->data != NULL) {
michael@0 3227 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
michael@0 3228 SCTP_SO_NOT_LOCKED);
michael@0 3229 }
michael@0 3230 continue;
michael@0 3231 }
michael@0 3232 }
michael@0 3233
michael@0 3234 }
michael@0 3235 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
michael@0 3236 /* we are beyond the tsn in the sack */
michael@0 3237 break;
michael@0 3238 }
michael@0 3239 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
michael@0 3240 /* either a RESEND, ACKED, or MARKED */
michael@0 3241 /* skip */
michael@0 3242 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
michael@0 3243 				/* Continue striking FWD-TSN chunks */
michael@0 3244 tp1->rec.data.fwd_tsn_cnt++;
michael@0 3245 }
michael@0 3246 continue;
michael@0 3247 }
michael@0 3248 /*
michael@0 3249 * CMT : SFR algo (covers part of DAC and HTNA as well)
michael@0 3250 */
michael@0 3251 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
michael@0 3252 /*
michael@0 3253 			 * No new acks were received for data sent to this
michael@0 3254 * dest. Therefore, according to the SFR algo for
michael@0 3255 * CMT, no data sent to this dest can be marked for
michael@0 3256 * FR using this SACK.
michael@0 3257 */
michael@0 3258 continue;
michael@0 3259 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
michael@0 3260 tp1->whoTo->this_sack_highest_newack)) {
michael@0 3261 /*
michael@0 3262 			 * CMT: New acks were received for data sent to
michael@0 3263 * this dest. But no new acks were seen for data
michael@0 3264 * sent after tp1. Therefore, according to the SFR
michael@0 3265 * algo for CMT, tp1 cannot be marked for FR using
michael@0 3266 * this SACK. This step covers part of the DAC algo
michael@0 3267 * and the HTNA algo as well.
michael@0 3268 */
michael@0 3269 continue;
michael@0 3270 }
michael@0 3271 /*
michael@0 3272 		 * Here we check to see if we have already done a FR
michael@0 3273 * and if so we see if the biggest TSN we saw in the sack is
michael@0 3274 * smaller than the recovery point. If so we don't strike
michael@0 3275 * the tsn... otherwise we CAN strike the TSN.
michael@0 3276 */
michael@0 3277 /*
michael@0 3278 * @@@ JRI: Check for CMT
michael@0 3279 * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
michael@0 3280 */
michael@0 3281 if (accum_moved && asoc->fast_retran_loss_recovery) {
michael@0 3282 /*
michael@0 3283 * Strike the TSN if in fast-recovery and cum-ack
michael@0 3284 * moved.
michael@0 3285 */
michael@0 3286 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
michael@0 3287 sctp_log_fr(biggest_tsn_newly_acked,
michael@0 3288 tp1->rec.data.TSN_seq,
michael@0 3289 tp1->sent,
michael@0 3290 SCTP_FR_LOG_STRIKE_CHUNK);
michael@0 3291 }
michael@0 3292 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
michael@0 3293 tp1->sent++;
michael@0 3294 }
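michael@10 			/*
michael@10 			 * Note (added): each SACK that implies this TSN is
michael@10 			 * missing bumps tp1->sent by one; with the standard
michael@10 			 * constants that is three strikes from
michael@10 			 * SCTP_DATAGRAM_SENT to SCTP_DATAGRAM_RESEND, at
michael@10 			 * which point the chunk is queued for fast
michael@10 			 * retransmission further below.
michael@10 			 */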
michael@0 3295 if ((asoc->sctp_cmt_on_off > 0) &&
michael@0 3296 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
michael@0 3297 /*
michael@0 3298 * CMT DAC algorithm: If SACK flag is set to
michael@0 3299 * 0, then lowest_newack test will not pass
michael@0 3300 * because it would have been set to the
michael@0 3301 				 * cumack earlier. If it is not already to be
michael@0 3302 				 * rtx'd, the SACK is not a mixed sack, and tp1 is
michael@0 3303 				 * not between two sacked TSNs, then mark it by
michael@0 3304 				 * one more.
michael@0 3305 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
michael@0 3306 * two packets have been received after this missing TSN.
michael@0 3307 */
michael@0 3308 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
michael@0 3309 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
michael@0 3310 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
michael@0 3311 sctp_log_fr(16 + num_dests_sacked,
michael@0 3312 tp1->rec.data.TSN_seq,
michael@0 3313 tp1->sent,
michael@0 3314 SCTP_FR_LOG_STRIKE_CHUNK);
michael@0 3315 }
michael@0 3316 tp1->sent++;
michael@0 3317 }
michael@0 3318 }
michael@0 3319 } else if ((tp1->rec.data.doing_fast_retransmit) &&
michael@0 3320 (asoc->sctp_cmt_on_off == 0)) {
michael@0 3321 /*
michael@0 3322 * For those that have done a FR we must take
michael@0 3323 			 * special consideration if we strike. I.e. the
michael@0 3324 * biggest_newly_acked must be higher than the
michael@0 3325 * sending_seq at the time we did the FR.
michael@0 3326 */
michael@0 3327 if (
michael@0 3328 #ifdef SCTP_FR_TO_ALTERNATE
michael@0 3329 /*
michael@0 3330 * If FR's go to new networks, then we must only do
michael@0 3331 * this for singly homed asoc's. However if the FR's
michael@0 3332 			 * go to the same network (Armando's work) then it's
michael@0 3333 * ok to FR multiple times.
michael@0 3334 */
michael@0 3335 (asoc->numnets < 2)
michael@0 3336 #else
michael@0 3337 (1)
michael@0 3338 #endif
michael@0 3339 ) {
michael@0 3340
michael@0 3341 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
michael@0 3342 tp1->rec.data.fast_retran_tsn)) {
michael@0 3343 /*
michael@0 3344 * Strike the TSN, since this ack is
michael@0 3345 * beyond where things were when we
michael@0 3346 * did a FR.
michael@0 3347 */
michael@0 3348 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
michael@0 3349 sctp_log_fr(biggest_tsn_newly_acked,
michael@0 3350 tp1->rec.data.TSN_seq,
michael@0 3351 tp1->sent,
michael@0 3352 SCTP_FR_LOG_STRIKE_CHUNK);
michael@0 3353 }
michael@0 3354 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
michael@0 3355 tp1->sent++;
michael@0 3356 }
michael@0 3357 strike_flag = 1;
michael@0 3358 if ((asoc->sctp_cmt_on_off > 0) &&
michael@0 3359 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
michael@0 3360 /*
michael@0 3361 * CMT DAC algorithm: If
michael@0 3362 * SACK flag is set to 0,
michael@0 3363 * then lowest_newack test
michael@0 3364 * will not pass because it
michael@0 3365 * would have been set to
michael@0 3366 * the cumack earlier. If
michael@0 3367 						 * not already to be rtx'd,
michael@0 3368 						 * the SACK is not a mixed
michael@0 3369 						 * sack, and tp1 is not
michael@0 3370 						 * between two sacked TSNs,
michael@0 3371 						 * then mark it by one more.
michael@0 3372 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
michael@0 3373 * two packets have been received after this missing TSN.
michael@0 3374 */
michael@0 3375 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
michael@0 3376 (num_dests_sacked == 1) &&
michael@0 3377 SCTP_TSN_GT(this_sack_lowest_newack,
michael@0 3378 tp1->rec.data.TSN_seq)) {
michael@0 3379 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
michael@0 3380 sctp_log_fr(32 + num_dests_sacked,
michael@0 3381 tp1->rec.data.TSN_seq,
michael@0 3382 tp1->sent,
michael@0 3383 SCTP_FR_LOG_STRIKE_CHUNK);
michael@0 3384 }
michael@0 3385 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
michael@0 3386 tp1->sent++;
michael@0 3387 }
michael@0 3388 }
michael@0 3389 }
michael@0 3390 }
michael@0 3391 }
michael@0 3392 /*
michael@0 3393 * JRI: TODO: remove code for HTNA algo. CMT's
michael@0 3394 * SFR algo covers HTNA.
michael@0 3395 */
michael@0 3396 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
michael@0 3397 biggest_tsn_newly_acked)) {
michael@0 3398 /*
michael@0 3399 			 * We don't strike these: this is the HTNA
michael@0 3400 			 * algorithm, i.e. we don't strike if our TSN is
michael@0 3401 			 * larger than the Highest TSN Newly Acked.
michael@0 3402 */
michael@0 3403 ;
michael@0 3404 } else {
michael@0 3405 /* Strike the TSN */
michael@0 3406 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
michael@0 3407 sctp_log_fr(biggest_tsn_newly_acked,
michael@0 3408 tp1->rec.data.TSN_seq,
michael@0 3409 tp1->sent,
michael@0 3410 SCTP_FR_LOG_STRIKE_CHUNK);
michael@0 3411 }
michael@0 3412 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
michael@0 3413 tp1->sent++;
michael@0 3414 }
michael@0 3415 if ((asoc->sctp_cmt_on_off > 0) &&
michael@0 3416 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
michael@0 3417 /*
michael@0 3418 * CMT DAC algorithm: If SACK flag is set to
michael@0 3419 * 0, then lowest_newack test will not pass
michael@0 3420 * because it would have been set to the
michael@0 3421 				 * cumack earlier. If it is not already to be
michael@0 3422 				 * rtx'd, the SACK is not a mixed sack, and tp1 is
michael@0 3423 				 * not between two sacked TSNs, then mark it by
michael@0 3424 				 * one more.
michael@0 3425 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
michael@0 3426 * two packets have been received after this missing TSN.
michael@0 3427 */
michael@0 3428 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
michael@0 3429 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
michael@0 3430 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
michael@0 3431 sctp_log_fr(48 + num_dests_sacked,
michael@0 3432 tp1->rec.data.TSN_seq,
michael@0 3433 tp1->sent,
michael@0 3434 SCTP_FR_LOG_STRIKE_CHUNK);
michael@0 3435 }
michael@0 3436 tp1->sent++;
michael@0 3437 }
michael@0 3438 }
michael@0 3439 }
michael@0 3440 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
michael@0 3441 struct sctp_nets *alt;
michael@0 3442
michael@0 3443 /* fix counts and things */
michael@0 3444 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
michael@0 3445 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
michael@0 3446 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
michael@0 3447 tp1->book_size,
michael@0 3448 (uintptr_t)tp1->whoTo,
michael@0 3449 tp1->rec.data.TSN_seq);
michael@0 3450 }
michael@0 3451 if (tp1->whoTo) {
michael@0 3452 tp1->whoTo->net_ack++;
michael@0 3453 sctp_flight_size_decrease(tp1);
michael@0 3454 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
michael@0 3455 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
michael@0 3456 tp1);
michael@0 3457 }
michael@0 3458 }
michael@0 3459
michael@0 3460 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
michael@0 3461 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
michael@0 3462 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
michael@0 3463 }
michael@0 3464 /* add back to the rwnd */
michael@0 3465 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
michael@0 3466
michael@0 3467 /* remove from the total flight */
michael@0 3468 sctp_total_flight_decrease(stcb, tp1);
michael@0 3469
michael@0 3470 if ((stcb->asoc.peer_supports_prsctp) &&
michael@0 3471 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
michael@0 3472 /* Has it been retransmitted tv_sec times? - we store the retran count there. */
michael@0 3473 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
michael@0 3474 /* Yes, so drop it */
michael@0 3475 if (tp1->data != NULL) {
michael@0 3476 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
michael@0 3477 SCTP_SO_NOT_LOCKED);
michael@0 3478 }
michael@0 3479 /* Make sure to flag we had a FR */
michael@0 3480 tp1->whoTo->net_ack++;
michael@0 3481 continue;
michael@0 3482 }
michael@0 3483 }
michael@0 3484 /* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */
michael@0 3485 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
michael@0 3486 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
michael@0 3487 0, SCTP_FR_MARKED);
michael@0 3488 }
michael@0 3489 if (strike_flag) {
michael@0 3490 /* This is a subsequent FR */
michael@0 3491 SCTP_STAT_INCR(sctps_sendmultfastretrans);
michael@0 3492 }
michael@0 3493 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
michael@0 3494 if (asoc->sctp_cmt_on_off > 0) {
michael@0 3495 /*
michael@0 3496 * CMT: Using RTX_SSTHRESH policy for CMT.
michael@0 3497 * If CMT is being used, then pick dest with
michael@0 3498 * largest ssthresh for any retransmission.
michael@0 3499 */
michael@0 3500 tp1->no_fr_allowed = 1;
michael@0 3501 alt = tp1->whoTo;
michael@0 3502 /*sa_ignore NO_NULL_CHK*/
michael@0 3503 if (asoc->sctp_cmt_pf > 0) {
michael@0 3504 /* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */
michael@0 3505 alt = sctp_find_alternate_net(stcb, alt, 2);
michael@0 3506 } else {
michael@0 3507 /* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */
michael@0 3508 /*sa_ignore NO_NULL_CHK*/
michael@0 3509 alt = sctp_find_alternate_net(stcb, alt, 1);
michael@0 3510 }
michael@0 3511 if (alt == NULL) {
michael@0 3512 alt = tp1->whoTo;
michael@0 3513 }
michael@0 3514 /*
michael@0 3515 * CUCv2: If a different dest is picked for
michael@0 3516 * the retransmission, then new
michael@0 3517 * (rtx-)pseudo_cumack needs to be tracked
michael@0 3518 * for orig dest. Let CUCv2 track new (rtx-)
michael@0 3519 * pseudo-cumack always.
michael@0 3520 */
michael@0 3521 if (tp1->whoTo) {
michael@0 3522 tp1->whoTo->find_pseudo_cumack = 1;
michael@0 3523 tp1->whoTo->find_rtx_pseudo_cumack = 1;
michael@0 3524 }
michael@0 3525
michael@0 3526 } else {/* CMT is OFF */
michael@0 3527
michael@0 3528 #ifdef SCTP_FR_TO_ALTERNATE
michael@0 3529 /* Can we find an alternate? */
michael@0 3530 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
michael@0 3531 #else
michael@0 3532 /*
michael@0 3533 * default behavior is to NOT retransmit
michael@0 3534 * FR's to an alternate. Armando Caro's
michael@0 3535 * paper details why.
michael@0 3536 */
michael@0 3537 alt = tp1->whoTo;
michael@0 3538 #endif
michael@0 3539 }
michael@0 3540
michael@0 3541 tp1->rec.data.doing_fast_retransmit = 1;
michael@0 3542 tot_retrans++;
michael@0 3543 /* mark the sending seq for possible subsequent FR's */
michael@0 3544 /*
michael@0 3545 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
michael@0 3546 * (uint32_t)tpi->rec.data.TSN_seq);
michael@0 3547 */
michael@0 3548 if (TAILQ_EMPTY(&asoc->send_queue)) {
michael@0 3549 /*
michael@0 3550 				 * If the send queue is empty then sending_seq is
michael@0 3551 				 * the next sequence number that will be
michael@0 3552 				 * assigned, so one less than it is the
michael@0 3553 				 * one we last sent.
michael@0 3554 */
michael@0 3555 tp1->rec.data.fast_retran_tsn = sending_seq;
michael@0 3556 } else {
michael@0 3557 /*
michael@0 3558 * If there are chunks on the send queue
michael@0 3559 * (unsent data that has made it from the
michael@0 3560 				 * stream queues but not out the door), we
michael@0 3561 * take the first one (which will have the
michael@0 3562 * lowest TSN) and subtract one to get the
michael@0 3563 * one we last sent.
michael@0 3564 */
michael@0 3565 struct sctp_tmit_chunk *ttt;
michael@0 3566
michael@0 3567 ttt = TAILQ_FIRST(&asoc->send_queue);
michael@0 3568 tp1->rec.data.fast_retran_tsn =
michael@0 3569 ttt->rec.data.TSN_seq;
michael@0 3570 }
michael@0 3571
michael@0 3572 if (tp1->do_rtt) {
michael@0 3573 /*
michael@0 3574 * this guy had a RTO calculation pending on
michael@0 3575 * it, cancel it
michael@0 3576 */
michael@0 3577 if ((tp1->whoTo != NULL) &&
michael@0 3578 (tp1->whoTo->rto_needed == 0)) {
michael@0 3579 tp1->whoTo->rto_needed = 1;
michael@0 3580 }
michael@0 3581 tp1->do_rtt = 0;
michael@0 3582 }
michael@0 3583 if (alt != tp1->whoTo) {
michael@0 3584 /* yes, there is an alternate. */
michael@0 3585 sctp_free_remote_addr(tp1->whoTo);
michael@0 3586 /*sa_ignore FREED_MEMORY*/
michael@0 3587 tp1->whoTo = alt;
michael@0 3588 atomic_add_int(&alt->ref_count, 1);
michael@0 3589 }
michael@0 3590 }
michael@0 3591 }
michael@0 3592 }
michael@0 3593
michael@0 3594 struct sctp_tmit_chunk *
michael@0 3595 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
michael@0 3596 struct sctp_association *asoc)
michael@0 3597 {
michael@0 3598 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
michael@0 3599 struct timeval now;
michael@0 3600 int now_filled = 0;
michael@0 3601
michael@0 3602 if (asoc->peer_supports_prsctp == 0) {
michael@0 3603 return (NULL);
michael@0 3604 }
michael@0 3605 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
michael@0 3606 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
michael@0 3607 tp1->sent != SCTP_DATAGRAM_RESEND &&
michael@0 3608 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
michael@0 3609 /* no chance to advance, out of here */
michael@0 3610 break;
michael@0 3611 }
michael@0 3612 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
michael@0 3613 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
michael@0 3614 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
michael@0 3615 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
michael@0 3616 asoc->advanced_peer_ack_point,
michael@0 3617 tp1->rec.data.TSN_seq, 0, 0);
michael@0 3618 }
michael@0 3619 }
michael@0 3620 if (!PR_SCTP_ENABLED(tp1->flags)) {
michael@0 3621 /*
michael@0 3622 * We can't fwd-tsn past any that are reliable aka
michael@0 3623 * retransmitted until the asoc fails.
michael@0 3624 */
michael@0 3625 break;
michael@0 3626 }
michael@0 3627 if (!now_filled) {
michael@0 3628 (void)SCTP_GETTIME_TIMEVAL(&now);
michael@0 3629 now_filled = 1;
michael@0 3630 }
michael@0 3631 /*
michael@0 3632 		 * Now we have a chunk which is marked for another
michael@0 3633 		 * retransmission to a PR-stream but may have run out of its
michael@0 3634 		 * chances already OR has been marked to be skipped now. Can
michael@0 3635 		 * we skip it if it's a resend?
michael@0 3636 */
michael@0 3637 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
michael@0 3638 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
michael@0 3639 /*
michael@0 3640 * Now is this one marked for resend and its time is
michael@0 3641 * now up?
michael@0 3642 */
michael@0 3643 #ifndef __FreeBSD__
michael@0 3644 if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
michael@0 3645 #else
michael@0 3646 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
michael@0 3647 #endif
michael@0 3648 /* Yes so drop it */
michael@0 3649 if (tp1->data) {
michael@0 3650 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
michael@0 3651 1, SCTP_SO_NOT_LOCKED);
michael@0 3652 }
michael@0 3653 } else {
michael@0 3654 /*
michael@0 3655 				 * No, we are done when we hit one marked for resend
michael@0 3656 				 * whose time has not expired.
michael@0 3657 */
michael@0 3658 break;
michael@0 3659 }
michael@0 3660 }
michael@0 3661 /*
michael@0 3662 * Ok now if this chunk is marked to drop it we can clean up
michael@0 3663 * the chunk, advance our peer ack point and we can check
michael@0 3664 * the next chunk.
michael@0 3665 */
michael@0 3666 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
michael@0 3667 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
michael@0 3668 			/* the advanced peer ack point moves forward */
michael@0 3669 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
michael@0 3670 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
michael@0 3671 a_adv = tp1;
michael@0 3672 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
michael@0 3673 /* No update but we do save the chk */
michael@0 3674 a_adv = tp1;
michael@0 3675 }
michael@0 3676 } else {
michael@0 3677 /*
michael@0 3678 * If it is still in RESEND we can advance no
michael@0 3679 * further
michael@0 3680 */
michael@0 3681 break;
michael@0 3682 }
michael@0 3683 }
michael@0 3684 return (a_adv);
michael@0 3685 }
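michael@10 /*
michael@10  * Illustrative example for the routine above (added): with a sent
michael@10  * queue of TSNs 10 (FORWARD_TSN_SKIP), 11 (NR_ACKED) and 12 (RESEND
michael@10  * whose PR-TTL expired on this pass, so it is released and skipped),
michael@10  * the advanced peer ack point moves to 12 and the returned chunk
michael@10  * seeds the FORWARD-TSN the caller may send; a reliable chunk at the
michael@10  * head of the queue would have stopped the walk immediately.
michael@10  */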
michael@0 3686
michael@0 3687 static int
michael@0 3688 sctp_fs_audit(struct sctp_association *asoc)
michael@0 3689 {
michael@0 3690 struct sctp_tmit_chunk *chk;
michael@0 3691 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
michael@0 3692 int entry_flight, entry_cnt, ret;
michael@0 3693
michael@0 3694 entry_flight = asoc->total_flight;
michael@0 3695 entry_cnt = asoc->total_flight_count;
michael@0 3696 ret = 0;
michael@0 3697
michael@0 3698 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
michael@0 3699 return (0);
michael@0 3700
michael@0 3701 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
michael@0 3702 if (chk->sent < SCTP_DATAGRAM_RESEND) {
michael@0 3703 SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
michael@0 3704 chk->rec.data.TSN_seq,
michael@0 3705 chk->send_size,
michael@0 3706 chk->snd_count);
michael@0 3707 inflight++;
michael@0 3708 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
michael@0 3709 resend++;
michael@0 3710 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
michael@0 3711 inbetween++;
michael@0 3712 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
michael@0 3713 above++;
michael@0 3714 } else {
michael@0 3715 acked++;
michael@0 3716 }
michael@0 3717 }
michael@0 3718
michael@0 3719 if ((inflight > 0) || (inbetween > 0)) {
michael@0 3720 #ifdef INVARIANTS
michael@0 3721 panic("Flight size-express incorrect? \n");
michael@0 3722 #else
michael@0 3723 SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
michael@0 3724 entry_flight, entry_cnt);
michael@0 3725
michael@0 3726 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
michael@0 3727 inflight, inbetween, resend, above, acked);
michael@0 3728 ret = 1;
michael@0 3729 #endif
michael@0 3730 }
michael@0 3731 return (ret);
michael@0 3732 }
michael@0 3733
michael@0 3734
michael@0 3735 static void
michael@0 3736 sctp_window_probe_recovery(struct sctp_tcb *stcb,
michael@0 3737 struct sctp_association *asoc,
michael@0 3738 struct sctp_tmit_chunk *tp1)
michael@0 3739 {
michael@0 3740 tp1->window_probe = 0;
michael@0 3741 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
michael@0 3742 		/* TSNs that were skipped we do NOT move back. */
michael@0 3743 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
michael@0 3744 tp1->whoTo->flight_size,
michael@0 3745 tp1->book_size,
michael@0 3746 (uintptr_t)tp1->whoTo,
michael@0 3747 tp1->rec.data.TSN_seq);
michael@0 3748 return;
michael@0 3749 }
michael@0 3750 /* First setup this by shrinking flight */
michael@0 3751 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
michael@0 3752 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
michael@0 3753 tp1);
michael@0 3754 }
michael@0 3755 sctp_flight_size_decrease(tp1);
michael@0 3756 sctp_total_flight_decrease(stcb, tp1);
michael@0 3757 /* Now mark for resend */
michael@0 3758 tp1->sent = SCTP_DATAGRAM_RESEND;
michael@0 3759 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
michael@0 3760
michael@0 3761 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
michael@0 3762 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
michael@0 3763 tp1->whoTo->flight_size,
michael@0 3764 tp1->book_size,
michael@0 3765 (uintptr_t)tp1->whoTo,
michael@0 3766 tp1->rec.data.TSN_seq);
michael@0 3767 }
michael@0 3768 }
michael@0 3769
michael@0 3770 void
michael@0 3771 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
michael@0 3772 uint32_t rwnd, int *abort_now, int ecne_seen)
michael@0 3773 {
michael@0 3774 struct sctp_nets *net;
michael@0 3775 struct sctp_association *asoc;
michael@0 3776 struct sctp_tmit_chunk *tp1, *tp2;
michael@0 3777 uint32_t old_rwnd;
michael@0 3778 int win_probe_recovery = 0;
michael@0 3779 int win_probe_recovered = 0;
michael@0 3780 int j, done_once = 0;
michael@0 3781 int rto_ok = 1;
michael@0 3782
michael@0 3783 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
michael@0 3784 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
michael@0 3785 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
michael@0 3786 }
michael@0 3787 SCTP_TCB_LOCK_ASSERT(stcb);
michael@0 3788 #ifdef SCTP_ASOCLOG_OF_TSNS
michael@0 3789 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
michael@0 3790 stcb->asoc.cumack_log_at++;
michael@0 3791 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
michael@0 3792 stcb->asoc.cumack_log_at = 0;
michael@0 3793 }
michael@0 3794 #endif
michael@0 3795 asoc = &stcb->asoc;
michael@0 3796 old_rwnd = asoc->peers_rwnd;
michael@0 3797 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
michael@0 3798 /* old ack */
michael@0 3799 return;
michael@0 3800 } else if (asoc->last_acked_seq == cumack) {
michael@0 3801 /* Window update sack */
michael@0 3802 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
michael@0 3803 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
michael@0 3804 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
michael@0 3805 /* SWS sender side engages */
michael@0 3806 asoc->peers_rwnd = 0;
michael@0 3807 }
michael@0 3808 if (asoc->peers_rwnd > old_rwnd) {
michael@0 3809 goto again;
michael@0 3810 }
michael@0 3811 return;
michael@0 3812 }
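michael@10 	/*
michael@10 	 * Worked example (added; values assume FreeBSD defaults and are
michael@10 	 * only illustrative): with rwnd = 10000 from the SACK, 4000 bytes
michael@10 	 * in flight in 8 chunks and a per-chunk overhead sysctl of 256,
michael@10 	 * peers_rwnd becomes 10000 - (4000 + 8 * 256) = 3952; any result
michael@10 	 * below the sender-side SWS limit is clamped to 0 above.
michael@10 	 */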
michael@0 3813
michael@0 3814 /* First setup for CC stuff */
michael@0 3815 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
michael@0 3816 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
michael@0 3817 /* Drag along the window_tsn for cwr's */
michael@0 3818 net->cwr_window_tsn = cumack;
michael@0 3819 }
michael@0 3820 net->prev_cwnd = net->cwnd;
michael@0 3821 net->net_ack = 0;
michael@0 3822 net->net_ack2 = 0;
michael@0 3823
michael@0 3824 /*
michael@0 3825 * CMT: Reset CUC and Fast recovery algo variables before
michael@0 3826 * SACK processing
michael@0 3827 */
michael@0 3828 net->new_pseudo_cumack = 0;
michael@0 3829 net->will_exit_fast_recovery = 0;
michael@0 3830 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
michael@0 3831 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
michael@0 3832 }
michael@0 3833 }
michael@0 3834 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
michael@0 3835 uint32_t send_s;
michael@0 3836
michael@0 3837 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
michael@0 3838 tp1 = TAILQ_LAST(&asoc->sent_queue,
michael@0 3839 sctpchunk_listhead);
michael@0 3840 send_s = tp1->rec.data.TSN_seq + 1;
michael@0 3841 } else {
michael@0 3842 send_s = asoc->sending_seq;
michael@0 3843 }
michael@0 3844 if (SCTP_TSN_GE(cumack, send_s)) {
michael@0 3845 #ifndef INVARIANTS
michael@0 3846 struct mbuf *oper;
michael@0 3847
michael@0 3848 #endif
michael@0 3849 #ifdef INVARIANTS
michael@0 3850 panic("Impossible sack 1");
michael@0 3851 #else
michael@0 3852
michael@0 3853 *abort_now = 1;
michael@0 3854 /* XXX */
michael@0 3855 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
michael@0 3856 0, M_NOWAIT, 1, MT_DATA);
michael@0 3857 if (oper) {
michael@0 3858 struct sctp_paramhdr *ph;
michael@0 3859 uint32_t *ippp;
michael@0 3860
michael@0 3861 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
michael@0 3862 sizeof(uint32_t);
michael@0 3863 ph = mtod(oper, struct sctp_paramhdr *);
michael@0 3864 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 3865 ph->param_length = htons(SCTP_BUF_LEN(oper));
michael@0 3866 ippp = (uint32_t *) (ph + 1);
michael@0 3867 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
michael@0 3868 }
michael@0 3869 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
michael@0 3870 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 3871 return;
michael@0 3872 #endif
michael@0 3873 }
michael@0 3874 }
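michael@10 	/*
michael@10 	 * Illustrative note (added): send_s is one past the highest TSN
michael@10 	 * ever handed to the peer (or the next TSN to be assigned if the
michael@10 	 * sent queue is empty), so a cumack >= send_s acknowledges data we
michael@10 	 * never sent; e.g. with TSNs up to 500 outstanding, send_s is 501
michael@10 	 * and a cumack of 501 is the "impossible sack" aborted on above.
michael@10 	 */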
michael@0 3875 asoc->this_sack_highest_gap = cumack;
michael@0 3876 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
michael@0 3877 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
michael@0 3878 stcb->asoc.overall_error_count,
michael@0 3879 0,
michael@0 3880 SCTP_FROM_SCTP_INDATA,
michael@0 3881 __LINE__);
michael@0 3882 }
michael@0 3883 stcb->asoc.overall_error_count = 0;
michael@0 3884 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
michael@0 3885 /* process the new consecutive TSN first */
michael@0 3886 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
michael@0 3887 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
michael@0 3888 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
michael@0 3889 SCTP_PRINTF("Warning, an unsent is now acked?\n");
michael@0 3890 }
michael@0 3891 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
michael@0 3892 /*
michael@0 3893 * If it is less than ACKED, it is
michael@0 3894 * now no-longer in flight. Higher
michael@0 3895 * values may occur during marking
michael@0 3896 */
michael@0 3897 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
michael@0 3898 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
michael@0 3899 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
michael@0 3900 tp1->whoTo->flight_size,
michael@0 3901 tp1->book_size,
michael@0 3902 (uintptr_t)tp1->whoTo,
michael@0 3903 tp1->rec.data.TSN_seq);
michael@0 3904 }
michael@0 3905 sctp_flight_size_decrease(tp1);
michael@0 3906 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
michael@0 3907 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
michael@0 3908 tp1);
michael@0 3909 }
michael@0 3910 /* sa_ignore NO_NULL_CHK */
michael@0 3911 sctp_total_flight_decrease(stcb, tp1);
michael@0 3912 }
michael@0 3913 tp1->whoTo->net_ack += tp1->send_size;
michael@0 3914 if (tp1->snd_count < 2) {
michael@0 3915 /*
michael@0 3916 						 * True non-retransmitted
michael@0 3917 * chunk
michael@0 3918 */
michael@0 3919 tp1->whoTo->net_ack2 +=
michael@0 3920 tp1->send_size;
michael@0 3921
michael@0 3922 /* update RTO too? */
michael@0 3923 if (tp1->do_rtt) {
michael@0 3924 if (rto_ok) {
michael@0 3925 tp1->whoTo->RTO =
michael@0 3926 /*
michael@0 3927 * sa_ignore
michael@0 3928 * NO_NULL_CHK
michael@0 3929 */
michael@0 3930 sctp_calculate_rto(stcb,
michael@0 3931 asoc, tp1->whoTo,
michael@0 3932 &tp1->sent_rcv_time,
michael@0 3933 sctp_align_safe_nocopy,
michael@0 3934 SCTP_RTT_FROM_DATA);
michael@0 3935 rto_ok = 0;
michael@0 3936 }
michael@0 3937 if (tp1->whoTo->rto_needed == 0) {
michael@0 3938 tp1->whoTo->rto_needed = 1;
michael@0 3939 }
michael@0 3940 tp1->do_rtt = 0;
michael@0 3941 }
michael@0 3942 }
michael@0 3943 /*
michael@0 3944 * CMT: CUCv2 algorithm. From the
michael@0 3945 * cumack'd TSNs, for each TSN being
michael@0 3946 * acked for the first time, set the
michael@0 3947 * following variables for the
michael@0 3948 * corresp destination.
michael@0 3949 * new_pseudo_cumack will trigger a
michael@0 3950 * cwnd update.
michael@0 3951 * find_(rtx_)pseudo_cumack will
michael@0 3952 * trigger search for the next
michael@0 3953 * expected (rtx-)pseudo-cumack.
michael@0 3954 */
michael@0 3955 tp1->whoTo->new_pseudo_cumack = 1;
michael@0 3956 tp1->whoTo->find_pseudo_cumack = 1;
michael@0 3957 tp1->whoTo->find_rtx_pseudo_cumack = 1;
michael@0 3958
michael@0 3959 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
michael@0 3960 /* sa_ignore NO_NULL_CHK */
michael@0 3961 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
michael@0 3962 }
michael@0 3963 }
michael@0 3964 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
michael@0 3965 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
michael@0 3966 }
michael@0 3967 if (tp1->rec.data.chunk_was_revoked) {
michael@0 3968 /* deflate the cwnd */
michael@0 3969 tp1->whoTo->cwnd -= tp1->book_size;
michael@0 3970 tp1->rec.data.chunk_was_revoked = 0;
michael@0 3971 }
michael@0 3972 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
michael@0 3973 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
michael@0 3974 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
michael@0 3975 #ifdef INVARIANTS
michael@0 3976 } else {
michael@0 3977 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
michael@0 3978 #endif
michael@0 3979 }
michael@0 3980 }
michael@0 3981 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
michael@0 3982 if (tp1->data) {
michael@0 3983 /* sa_ignore NO_NULL_CHK */
michael@0 3984 sctp_free_bufspace(stcb, asoc, tp1, 1);
michael@0 3985 sctp_m_freem(tp1->data);
michael@0 3986 tp1->data = NULL;
michael@0 3987 }
michael@0 3988 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
michael@0 3989 sctp_log_sack(asoc->last_acked_seq,
michael@0 3990 cumack,
michael@0 3991 tp1->rec.data.TSN_seq,
michael@0 3992 0,
michael@0 3993 0,
michael@0 3994 SCTP_LOG_FREE_SENT);
michael@0 3995 }
michael@0 3996 asoc->sent_queue_cnt--;
michael@0 3997 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
michael@0 3998 } else {
michael@0 3999 break;
michael@0 4000 }
michael@0 4001 }
michael@0 4002
michael@0 4003 }
michael@0 4004 #if defined(__Userspace__)
michael@0 4005 if (stcb->sctp_ep->recv_callback) {
michael@0 4006 if (stcb->sctp_socket) {
michael@0 4007 uint32_t inqueue_bytes, sb_free_now;
michael@0 4008 struct sctp_inpcb *inp;
michael@0 4009
michael@0 4010 inp = stcb->sctp_ep;
michael@0 4011 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
michael@0 4012 sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
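/*
 * Editor's note (illustrative figures, not from the source): with a
 * send-buffer limit of 256000 bytes, 100000 bytes still queued and
 * 20000 bytes reserved, sb_free_now = 256000 - (100000 + 20000) =
 * 136000. The user's send_callback below fires once this free space
 * reaches inp->send_sb_threshold, or on every pass if the threshold
 * is 0.
 */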
michael@0 4013
michael@0 4014 /* check if the amount free in the send socket buffer crossed the threshold */
michael@0 4015 if (inp->send_callback &&
michael@0 4016 (((inp->send_sb_threshold > 0) &&
michael@0 4017 (sb_free_now >= inp->send_sb_threshold) &&
michael@0 4018 (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
michael@0 4019 (inp->send_sb_threshold == 0))) {
michael@0 4020 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 4021 SCTP_TCB_UNLOCK(stcb);
michael@0 4022 inp->send_callback(stcb->sctp_socket, sb_free_now);
michael@0 4023 SCTP_TCB_LOCK(stcb);
michael@0 4024 atomic_subtract_int(&stcb->asoc.refcnt, 1);
michael@0 4025 }
michael@0 4026 }
michael@0 4027 } else if (stcb->sctp_socket) {
michael@0 4028 #else
michael@0 4029 /* sa_ignore NO_NULL_CHK */
michael@0 4030 if (stcb->sctp_socket) {
michael@0 4031 #endif
michael@0 4032 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 4033 struct socket *so;
michael@0 4034
michael@0 4035 #endif
michael@0 4036 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
michael@0 4037 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
michael@0 4038 /* sa_ignore NO_NULL_CHK */
michael@0 4039 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
michael@0 4040 }
michael@0 4041 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 4042 so = SCTP_INP_SO(stcb->sctp_ep);
michael@0 4043 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 4044 SCTP_TCB_UNLOCK(stcb);
michael@0 4045 SCTP_SOCKET_LOCK(so, 1);
michael@0 4046 SCTP_TCB_LOCK(stcb);
michael@0 4047 atomic_subtract_int(&stcb->asoc.refcnt, 1);
michael@0 4048 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
michael@0 4049 /* assoc was freed while we were unlocked */
michael@0 4050 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 4051 return;
michael@0 4052 }
michael@0 4053 #endif
michael@0 4054 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
michael@0 4055 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 4056 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 4057 #endif
michael@0 4058 } else {
michael@0 4059 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
michael@0 4060 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
michael@0 4061 }
michael@0 4062 }
michael@0 4063
michael@0 4064 /* JRS - Use the congestion control given in the CC module */
michael@0 4065 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
michael@0 4066 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
michael@0 4067 if (net->net_ack2 > 0) {
michael@0 4068 /*
michael@0 4069 * Karn's rule applies to clearing error count, this
michael@0 4070 * is optional.
michael@0 4071 */
michael@0 4072 net->error_count = 0;
michael@0 4073 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
michael@0 4074 /* addr came good */
michael@0 4075 net->dest_state |= SCTP_ADDR_REACHABLE;
michael@0 4076 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
michael@0 4077 0, (void *)net, SCTP_SO_NOT_LOCKED);
michael@0 4078 }
michael@0 4079 if (net == stcb->asoc.primary_destination) {
michael@0 4080 if (stcb->asoc.alternate) {
michael@0 4081 /* release the alternate, primary is good */
michael@0 4082 sctp_free_remote_addr(stcb->asoc.alternate);
michael@0 4083 stcb->asoc.alternate = NULL;
michael@0 4084 }
michael@0 4085 }
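/*
 * Editor's note: SCTP_ADDR_PF marks a path in the "potentially
 * failed" state used by quick-failover. An unambiguous ack credited
 * to this path is taken as evidence it works again, so the PF flag
 * is cleared, the heartbeat timer is restarted, and the CC module
 * is told to rescale the cwnd on leaving PF.
 */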
michael@0 4086 if (net->dest_state & SCTP_ADDR_PF) {
michael@0 4087 net->dest_state &= ~SCTP_ADDR_PF;
michael@0 4088 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
michael@0 4089 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
michael@0 4090 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
michael@0 4091 /* Done with this net */
michael@0 4092 net->net_ack = 0;
michael@0 4093 }
michael@0 4094 /* restore any doubled timers */
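/*
 * Editor's note: lastsa appears to hold the smoothed RTT scaled up
 * by SCTP_RTT_SHIFT and lastsv a pre-scaled RTT variance term, so
 * this is the classic Van Jacobson style RTO = SRTT + k*RTTVAR,
 * clamped below to the association's [minrto, maxrto] bounds.
 */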
michael@0 4095 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
michael@0 4096 if (net->RTO < stcb->asoc.minrto) {
michael@0 4097 net->RTO = stcb->asoc.minrto;
michael@0 4098 }
michael@0 4099 if (net->RTO > stcb->asoc.maxrto) {
michael@0 4100 net->RTO = stcb->asoc.maxrto;
michael@0 4101 }
michael@0 4102 }
michael@0 4103 }
michael@0 4104 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
michael@0 4105 }
michael@0 4106 asoc->last_acked_seq = cumack;
michael@0 4107
michael@0 4108 if (TAILQ_EMPTY(&asoc->sent_queue)) {
michael@0 4109 /* nothing left in-flight */
michael@0 4110 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
michael@0 4111 net->flight_size = 0;
michael@0 4112 net->partial_bytes_acked = 0;
michael@0 4113 }
michael@0 4114 asoc->total_flight = 0;
michael@0 4115 asoc->total_flight_count = 0;
michael@0 4116 }
michael@0 4117
michael@0 4118 /* RWND update */
michael@0 4119 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
michael@0 4120 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
michael@0 4121 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
michael@0 4122 /* SWS sender side engages */
michael@0 4123 asoc->peers_rwnd = 0;
michael@0 4124 }
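/*
 * Worked example (editor's sketch, numbers hypothetical): with an
 * advertised rwnd of 65536, total_flight = 12000 bytes in 10 chunks
 * and sctp_peer_chunk_oh = 256, peers_rwnd becomes
 * 65536 - (12000 + 10 * 256) = 50976. Anything below the SWS limit
 * above is clamped to 0 to avoid silly-window-syndrome sends.
 */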
michael@0 4125 if (asoc->peers_rwnd > old_rwnd) {
michael@0 4126 win_probe_recovery = 1;
michael@0 4127 }
michael@0 4128 /* Now ensure a timer is running wherever data is queued */
michael@0 4129 again:
michael@0 4130 j = 0;
michael@0 4131 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
michael@0 4132 int to_ticks;
michael@0 4133 if (win_probe_recovery && (net->window_probe)) {
michael@0 4134 win_probe_recovered = 1;
michael@0 4135 /*
michael@0 4136 * Find the first chunk that was used as a window probe
michael@0 4137 * and clear its sent state
michael@0 4138 */
michael@0 4139 /* sa_ignore FREED_MEMORY */
michael@0 4140 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
michael@0 4141 if (tp1->window_probe) {
michael@0 4142 /* move back to data send queue */
michael@0 4143 sctp_window_probe_recovery(stcb, asoc, tp1);
michael@0 4144 break;
michael@0 4145 }
michael@0 4146 }
michael@0 4147 }
michael@0 4148 if (net->RTO == 0) {
michael@0 4149 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
michael@0 4150 } else {
michael@0 4151 to_ticks = MSEC_TO_TICKS(net->RTO);
michael@0 4152 }
michael@0 4153 if (net->flight_size) {
michael@0 4154 j++;
michael@0 4155 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
michael@0 4156 sctp_timeout_handler, &net->rxt_timer);
michael@0 4157 if (net->window_probe) {
michael@0 4158 net->window_probe = 0;
michael@0 4159 }
michael@0 4160 } else {
michael@0 4161 if (net->window_probe) {
michael@0 4162 /* In window probes we must ensure a timer is still running there */
michael@0 4163 net->window_probe = 0;
michael@0 4164 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
michael@0 4165 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
michael@0 4166 sctp_timeout_handler, &net->rxt_timer);
michael@0 4167 }
michael@0 4168 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
michael@0 4169 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
michael@0 4170 stcb, net,
michael@0 4171 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
michael@0 4172 }
michael@0 4173 }
michael@0 4174 }
michael@0 4175 if ((j == 0) &&
michael@0 4176 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
michael@0 4177 (asoc->sent_queue_retran_cnt == 0) &&
michael@0 4178 (win_probe_recovered == 0) &&
michael@0 4179 (done_once == 0)) {
michael@0 4180 /* huh, this should not happen unless all packets
michael@0 4181 * are PR-SCTP and marked to be skipped, of course.
michael@0 4182 */
michael@0 4183 if (sctp_fs_audit(asoc)) {
michael@0 4184 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
michael@0 4185 net->flight_size = 0;
michael@0 4186 }
michael@0 4187 asoc->total_flight = 0;
michael@0 4188 asoc->total_flight_count = 0;
michael@0 4189 asoc->sent_queue_retran_cnt = 0;
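/*
 * Editor's note: the flight-size audit failed, so the accounting is
 * rebuilt from the sent queue itself: chunks below the RESEND state
 * are counted back into the flight, and RESEND-marked chunks are
 * re-counted into sent_queue_retran_cnt.
 */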
michael@0 4190 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
michael@0 4191 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
michael@0 4192 sctp_flight_size_increase(tp1);
michael@0 4193 sctp_total_flight_increase(stcb, tp1);
michael@0 4194 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
michael@0 4195 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
michael@0 4196 }
michael@0 4197 }
michael@0 4198 }
michael@0 4199 done_once = 1;
michael@0 4200 goto again;
michael@0 4201 }
michael@0 4202 /**********************************/
michael@0 4203 /* Now what about shutdown issues */
michael@0 4204 /**********************************/
michael@0 4205 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
michael@0 4206 /* nothing left on sendqueue.. consider done */
michael@0 4207 /* clean up */
michael@0 4208 if ((asoc->stream_queue_cnt == 1) &&
michael@0 4209 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
michael@0 4210 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
michael@0 4211 (asoc->locked_on_sending)
michael@0 4212 ) {
michael@0 4213 struct sctp_stream_queue_pending *sp;
michael@0 4214 /* I may be in a state where we got
michael@0 4215 * everything across.. but cannot write more due
michael@0 4216 * to a shutdown... we abort, since the
michael@0 4217 * user did not indicate EOR in this case. The
michael@0 4218 * sp will be cleaned up when the asoc is freed.
michael@0 4219 */
michael@0 4220 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
michael@0 4221 sctp_streamhead);
michael@0 4222 if ((sp) && (sp->length == 0)) {
michael@0 4223 /* Let cleanup code purge it */
michael@0 4224 if (sp->msg_is_complete) {
michael@0 4225 asoc->stream_queue_cnt--;
michael@0 4226 } else {
michael@0 4227 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
michael@0 4228 asoc->locked_on_sending = NULL;
michael@0 4229 asoc->stream_queue_cnt--;
michael@0 4230 }
michael@0 4231 }
michael@0 4232 }
michael@0 4233 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
michael@0 4234 (asoc->stream_queue_cnt == 0)) {
michael@0 4235 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
michael@0 4236 /* Need to abort here */
michael@0 4237 struct mbuf *oper;
michael@0 4238
michael@0 4239 abort_out_now:
michael@0 4240 *abort_now = 1;
michael@0 4241 /* XXX */
michael@0 4242 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
michael@0 4243 0, M_NOWAIT, 1, MT_DATA);
michael@0 4244 if (oper) {
michael@0 4245 struct sctp_paramhdr *ph;
michael@0 4246
michael@0 4247 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
michael@0 4248 ph = mtod(oper, struct sctp_paramhdr *);
michael@0 4249 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
michael@0 4250 ph->param_length = htons(SCTP_BUF_LEN(oper));
michael@0 4251 }
michael@0 4252 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
michael@0 4253 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 4254 } else {
michael@0 4255 struct sctp_nets *netp;
michael@0 4256
michael@0 4257 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
michael@0 4258 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
michael@0 4259 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
michael@0 4260 }
michael@0 4261 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
michael@0 4262 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
michael@0 4263 sctp_stop_timers_for_shutdown(stcb);
michael@0 4264 if (asoc->alternate) {
michael@0 4265 netp = asoc->alternate;
michael@0 4266 } else {
michael@0 4267 netp = asoc->primary_destination;
michael@0 4268 }
michael@0 4269 sctp_send_shutdown(stcb, netp);
michael@0 4270 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
michael@0 4271 stcb->sctp_ep, stcb, netp);
michael@0 4272 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
michael@0 4273 stcb->sctp_ep, stcb, netp);
michael@0 4274 }
michael@0 4275 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
michael@0 4276 (asoc->stream_queue_cnt == 0)) {
michael@0 4277 struct sctp_nets *netp;
michael@0 4278
michael@0 4279 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
michael@0 4280 goto abort_out_now;
michael@0 4281 }
michael@0 4282 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
michael@0 4283 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
michael@0 4284 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
michael@0 4285 sctp_stop_timers_for_shutdown(stcb);
michael@0 4286 if (asoc->alternate) {
michael@0 4287 netp = asoc->alternate;
michael@0 4288 } else {
michael@0 4289 netp = asoc->primary_destination;
michael@0 4290 }
michael@0 4291 sctp_send_shutdown_ack(stcb, netp);
michael@0 4292 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
michael@0 4293 stcb->sctp_ep, stcb, netp);
michael@0 4294 }
michael@0 4295 }
michael@0 4296 /*********************************************/
michael@0 4297 /* Here we perform PR-SCTP procedures */
michael@0 4298 /* (section 4.2) */
michael@0 4299 /*********************************************/
michael@0 4300 /* C1. update advancedPeerAckPoint */
michael@0 4301 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
michael@0 4302 asoc->advanced_peer_ack_point = cumack;
michael@0 4303 }
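/*
 * Editor's note: the "advanced peer ack point" is PR-SCTP's
 * (RFC 3758) highest TSN the peer may skip ahead to. Hypothetical
 * example: with cumack = 100 and abandoned TSNs 101-103, the ack
 * point can advance to 103 and a FORWARD-TSN chunk tells the peer
 * to move its cumulative ack past the skipped data.
 */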
michael@0 4304 /* PR-SCTP issues need to be addressed too */
michael@0 4305 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
michael@0 4306 struct sctp_tmit_chunk *lchk;
michael@0 4307 uint32_t old_adv_peer_ack_point;
michael@0 4308
michael@0 4309 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
michael@0 4310 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
michael@0 4311 /* C3. See if we need to send a Fwd-TSN */
michael@0 4312 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
michael@0 4313 /*
michael@0 4314 * ISSUE with ECN, see FWD-TSN processing.
michael@0 4315 */
michael@0 4316 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
michael@0 4317 send_forward_tsn(stcb, asoc);
michael@0 4318 } else if (lchk) {
michael@0 4319 /* try to FR fwd-tsn's that get lost too */
michael@0 4320 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
michael@0 4321 send_forward_tsn(stcb, asoc);
michael@0 4322 }
michael@0 4323 }
michael@0 4324 }
michael@0 4325 if (lchk) {
michael@0 4326 /* Assure a timer is up */
michael@0 4327 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
michael@0 4328 stcb->sctp_ep, stcb, lchk->whoTo);
michael@0 4329 }
michael@0 4330 }
michael@0 4331 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
michael@0 4332 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
michael@0 4333 rwnd,
michael@0 4334 stcb->asoc.peers_rwnd,
michael@0 4335 stcb->asoc.total_flight,
michael@0 4336 stcb->asoc.total_output_queue_size);
michael@0 4337 }
michael@0 4338 }
michael@0 4339
michael@0 4340 void
michael@0 4341 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
michael@0 4342 struct sctp_tcb *stcb,
michael@0 4343 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
michael@0 4344 int *abort_now, uint8_t flags,
michael@0 4345 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
michael@0 4346 {
michael@0 4347 struct sctp_association *asoc;
michael@0 4348 struct sctp_tmit_chunk *tp1, *tp2;
michael@0 4349 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
michael@0 4350 uint16_t wake_him = 0;
michael@0 4351 uint32_t send_s = 0;
michael@0 4352 long j;
michael@0 4353 int accum_moved = 0;
michael@0 4354 int will_exit_fast_recovery = 0;
michael@0 4355 uint32_t a_rwnd, old_rwnd;
michael@0 4356 int win_probe_recovery = 0;
michael@0 4357 int win_probe_recovered = 0;
michael@0 4358 struct sctp_nets *net = NULL;
michael@0 4359 int done_once;
michael@0 4360 int rto_ok = 1;
michael@0 4361 uint8_t reneged_all = 0;
michael@0 4362 uint8_t cmt_dac_flag;
michael@0 4363 /*
michael@0 4364 * we take any chance we can to service our queues since we cannot
michael@0 4365 * get awoken when the socket is read from :<
michael@0 4366 */
michael@0 4367 /*
michael@0 4368 * Now perform the actual SACK handling: 1) Verify that it is not an
michael@0 4369 * old sack; if so, discard. 2) If there is nothing left in the send
michael@0 4370 * queue (cum-ack is equal to last acked) then you have a duplicate
michael@0 4371 * too; update any rwnd change, verify no timers are running, and
michael@0 4372 * return. 3) Process any new consecutive data, i.e. the cum-ack
michael@0 4373 * moved; process these first and note that it moved. 4) Process any
michael@0 4374 * sack blocks. 5) Drop any acked chunks from the queue. 6) Check for
michael@0 4375 * any revoked blocks and mark them. 7) Update the cwnd. 8) If nothing
michael@0 4376 * is left, sync up flightsizes and things, stop all timers, and also
michael@0 4377 * check for the shutdown_pending state; if so, go ahead and send off
michael@0 4378 * the shutdown. If in shutdown recv, send off the shutdown-ack,
michael@0 4379 * start that timer, and return. 9) Strike any non-acked things and
michael@0 4380 * do the FR procedure if needed, being sure to set the FR flag.
michael@0 4381 * 10) Do pr-sctp procedures. 11) Apply any FR penalties. 12) Assure
michael@0 4382 * we will SACK if in shutdown_recv state.
michael@0 4383 */
michael@0 4384 SCTP_TCB_LOCK_ASSERT(stcb);
michael@0 4385 /* CMT DAC algo */
michael@0 4386 this_sack_lowest_newack = 0;
michael@0 4387 SCTP_STAT_INCR(sctps_slowpath_sack);
michael@0 4388 last_tsn = cum_ack;
michael@0 4389 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
michael@0 4390 #ifdef SCTP_ASOCLOG_OF_TSNS
michael@0 4391 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
michael@0 4392 stcb->asoc.cumack_log_at++;
michael@0 4393 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
michael@0 4394 stcb->asoc.cumack_log_at = 0;
michael@0 4395 }
michael@0 4396 #endif
michael@0 4397 a_rwnd = rwnd;
michael@0 4398
michael@0 4399 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
michael@0 4400 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
michael@0 4401 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
michael@0 4402 }
michael@0 4403
michael@0 4404 old_rwnd = stcb->asoc.peers_rwnd;
michael@0 4405 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
michael@0 4406 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
michael@0 4407 stcb->asoc.overall_error_count,
michael@0 4408 0,
michael@0 4409 SCTP_FROM_SCTP_INDATA,
michael@0 4410 __LINE__);
michael@0 4411 }
michael@0 4412 stcb->asoc.overall_error_count = 0;
michael@0 4413 asoc = &stcb->asoc;
michael@0 4414 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
michael@0 4415 sctp_log_sack(asoc->last_acked_seq,
michael@0 4416 cum_ack,
michael@0 4417 0,
michael@0 4418 num_seg,
michael@0 4419 num_dup,
michael@0 4420 SCTP_LOG_NEW_SACK);
michael@0 4421 }
michael@0 4422 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
michael@0 4423 uint16_t i;
michael@0 4424 uint32_t *dupdata, dblock;
michael@0 4425
michael@0 4426 for (i = 0; i < num_dup; i++) {
michael@0 4427 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
michael@0 4428 sizeof(uint32_t), (uint8_t *)&dblock);
michael@0 4429 if (dupdata == NULL) {
michael@0 4430 break;
michael@0 4431 }
michael@0 4432 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
michael@0 4433 }
michael@0 4434 }
michael@0 4435 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
michael@0 4436 /* reality check */
michael@0 4437 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
michael@0 4438 tp1 = TAILQ_LAST(&asoc->sent_queue,
michael@0 4439 sctpchunk_listhead);
michael@0 4440 send_s = tp1->rec.data.TSN_seq + 1;
michael@0 4441 } else {
michael@0 4442 tp1 = NULL;
michael@0 4443 send_s = asoc->sending_seq;
michael@0 4444 }
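/*
 * Editor's note: send_s is one past the highest TSN handed to this
 * peer so far. A SACK with cum_ack at or beyond send_s acknowledges
 * data that was never sent (hypothetical example: the last queued
 * TSN is 199, so send_s = 200, and a cum_ack of 200 is impossible);
 * such a peer is aborted below as hopelessly out of sync.
 */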
michael@0 4445 if (SCTP_TSN_GE(cum_ack, send_s)) {
michael@0 4446 struct mbuf *oper;
michael@0 4447 /*
michael@0 4448 * no way, we have not even sent this TSN out yet.
michael@0 4449 * Peer is hopelessly messed up with us.
michael@0 4450 */
michael@0 4451 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
michael@0 4452 cum_ack, send_s);
michael@0 4453 if (tp1) {
michael@0 4454 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
michael@0 4455 tp1->rec.data.TSN_seq, (void *)tp1);
michael@0 4456 }
michael@0 4457 hopeless_peer:
michael@0 4458 *abort_now = 1;
michael@0 4459 /* XXX */
michael@0 4460 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
michael@0 4461 0, M_NOWAIT, 1, MT_DATA);
michael@0 4462 if (oper) {
michael@0 4463 struct sctp_paramhdr *ph;
michael@0 4464 uint32_t *ippp;
michael@0 4465
michael@0 4466 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
michael@0 4467 sizeof(uint32_t);
michael@0 4468 ph = mtod(oper, struct sctp_paramhdr *);
michael@0 4469 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 4470 ph->param_length = htons(SCTP_BUF_LEN(oper));
michael@0 4471 ippp = (uint32_t *) (ph + 1);
michael@0 4472 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
michael@0 4473 }
michael@0 4474 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
michael@0 4475 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 4476 return;
michael@0 4477 }
michael@0 4478 }
michael@0 4479 /**********************/
michael@0 4480 /* 1) check the range */
michael@0 4481 /**********************/
michael@0 4482 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
michael@0 4483 /* acking something behind */
michael@0 4484 return;
michael@0 4485 }
michael@0 4486
michael@0 4487 /* update the Rwnd of the peer */
michael@0 4488 if (TAILQ_EMPTY(&asoc->sent_queue) &&
michael@0 4489 TAILQ_EMPTY(&asoc->send_queue) &&
michael@0 4490 (asoc->stream_queue_cnt == 0)) {
michael@0 4491 /* nothing left on send/sent and strmq */
michael@0 4492 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
michael@0 4493 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
michael@0 4494 asoc->peers_rwnd, 0, 0, a_rwnd);
michael@0 4495 }
michael@0 4496 asoc->peers_rwnd = a_rwnd;
michael@0 4497 if (asoc->sent_queue_retran_cnt) {
michael@0 4498 asoc->sent_queue_retran_cnt = 0;
michael@0 4499 }
michael@0 4500 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
michael@0 4501 /* SWS sender side engages */
michael@0 4502 asoc->peers_rwnd = 0;
michael@0 4503 }
michael@0 4504 /* stop any timers */
michael@0 4505 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
michael@0 4506 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
michael@0 4507 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
michael@0 4508 net->partial_bytes_acked = 0;
michael@0 4509 net->flight_size = 0;
michael@0 4510 }
michael@0 4511 asoc->total_flight = 0;
michael@0 4512 asoc->total_flight_count = 0;
michael@0 4513 return;
michael@0 4514 }
michael@0 4515 /*
michael@0 4516 * We init net_ack and net_ack2 to 0. These are used to track 2
michael@0 4517 * things. The total byte count acked is tracked in net_ack, AND
michael@0 4518 * net_ack2 is used to track the total bytes acked that are
michael@0 4519 * unambiguous and were never retransmitted. We track these on a
michael@0 4520 * per destination address basis.
michael@0 4521 */
michael@0 4522 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
michael@0 4523 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
michael@0 4524 /* Drag along the window_tsn for cwr's */
michael@0 4525 net->cwr_window_tsn = cum_ack;
michael@0 4526 }
michael@0 4527 net->prev_cwnd = net->cwnd;
michael@0 4528 net->net_ack = 0;
michael@0 4529 net->net_ack2 = 0;
michael@0 4530
michael@0 4531 /*
michael@0 4532 * CMT: Reset CUC and Fast recovery algo variables before
michael@0 4533 * SACK processing
michael@0 4534 */
michael@0 4535 net->new_pseudo_cumack = 0;
michael@0 4536 net->will_exit_fast_recovery = 0;
michael@0 4537 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
michael@0 4538 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
michael@0 4539 }
michael@0 4540 }
michael@0 4541 /* process the new consecutive TSN first */
michael@0 4542 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
michael@0 4543 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
michael@0 4544 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
michael@0 4545 accum_moved = 1;
michael@0 4546 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
michael@0 4547 /*
michael@0 4548 * If it is less than ACKED, it is
michael@0 4549 * now no longer in flight. Higher
michael@0 4550 * values may occur during marking
michael@0 4551 */
michael@0 4552 if ((tp1->whoTo->dest_state &
michael@0 4553 SCTP_ADDR_UNCONFIRMED) &&
michael@0 4554 (tp1->snd_count < 2)) {
michael@0 4555 /*
michael@0 4556 * If there was no retransmission
michael@0 4557 * and the address is
michael@0 4558 * un-confirmed and we sent
michael@0 4559 * there and are now
michael@0 4560 * sacked.. it is confirmed,
michael@0 4561 * mark it so.
michael@0 4562 */
michael@0 4563 tp1->whoTo->dest_state &=
michael@0 4564 ~SCTP_ADDR_UNCONFIRMED;
michael@0 4565 }
michael@0 4566 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
michael@0 4567 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
michael@0 4568 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
michael@0 4569 tp1->whoTo->flight_size,
michael@0 4570 tp1->book_size,
michael@0 4571 (uintptr_t)tp1->whoTo,
michael@0 4572 tp1->rec.data.TSN_seq);
michael@0 4573 }
michael@0 4574 sctp_flight_size_decrease(tp1);
michael@0 4575 sctp_total_flight_decrease(stcb, tp1);
michael@0 4576 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
michael@0 4577 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
michael@0 4578 tp1);
michael@0 4579 }
michael@0 4580 }
michael@0 4581 tp1->whoTo->net_ack += tp1->send_size;
michael@0 4582
michael@0 4583 /* CMT SFR and DAC algos */
michael@0 4584 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
michael@0 4585 tp1->whoTo->saw_newack = 1;
michael@0 4586
michael@0 4587 if (tp1->snd_count < 2) {
michael@0 4588 /*
michael@0 4589 * True non-retransmitted
michael@0 4590 * chunk
michael@0 4591 */
michael@0 4592 tp1->whoTo->net_ack2 +=
michael@0 4593 tp1->send_size;
michael@0 4594
michael@0 4595 /* update RTO too? */
michael@0 4596 if (tp1->do_rtt) {
michael@0 4597 if (rto_ok) {
michael@0 4598 tp1->whoTo->RTO =
michael@0 4599 sctp_calculate_rto(stcb,
michael@0 4600 asoc, tp1->whoTo,
michael@0 4601 &tp1->sent_rcv_time,
michael@0 4602 sctp_align_safe_nocopy,
michael@0 4603 SCTP_RTT_FROM_DATA);
michael@0 4604 rto_ok = 0;
michael@0 4605 }
michael@0 4606 if (tp1->whoTo->rto_needed == 0) {
michael@0 4607 tp1->whoTo->rto_needed = 1;
michael@0 4608 }
michael@0 4609 tp1->do_rtt = 0;
michael@0 4610 }
michael@0 4611 }
michael@0 4612 /*
michael@0 4613 * CMT: CUCv2 algorithm. From the
michael@0 4614 * cumack'd TSNs, for each TSN being
michael@0 4615 * acked for the first time, set the
michael@0 4616 * following variables for the
michael@0 4617 * corresp destination.
michael@0 4618 * new_pseudo_cumack will trigger a
michael@0 4619 * cwnd update.
michael@0 4620 * find_(rtx_)pseudo_cumack will
michael@0 4621 * trigger search for the next
michael@0 4622 * expected (rtx-)pseudo-cumack.
michael@0 4623 */
michael@0 4624 tp1->whoTo->new_pseudo_cumack = 1;
michael@0 4625 tp1->whoTo->find_pseudo_cumack = 1;
michael@0 4626 tp1->whoTo->find_rtx_pseudo_cumack = 1;
michael@0 4627
michael@0 4628
michael@0 4629 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
michael@0 4630 sctp_log_sack(asoc->last_acked_seq,
michael@0 4631 cum_ack,
michael@0 4632 tp1->rec.data.TSN_seq,
michael@0 4633 0,
michael@0 4634 0,
michael@0 4635 SCTP_LOG_TSN_ACKED);
michael@0 4636 }
michael@0 4637 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
michael@0 4638 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
michael@0 4639 }
michael@0 4640 }
michael@0 4641 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
michael@0 4642 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
michael@0 4643 #ifdef SCTP_AUDITING_ENABLED
michael@0 4644 sctp_audit_log(0xB3,
michael@0 4645 (asoc->sent_queue_retran_cnt & 0x000000ff));
michael@0 4646 #endif
michael@0 4647 }
michael@0 4648 if (tp1->rec.data.chunk_was_revoked) {
michael@0 4649 /* deflate the cwnd */
michael@0 4650 tp1->whoTo->cwnd -= tp1->book_size;
michael@0 4651 tp1->rec.data.chunk_was_revoked = 0;
michael@0 4652 }
michael@0 4653 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
michael@0 4654 tp1->sent = SCTP_DATAGRAM_ACKED;
michael@0 4655 }
michael@0 4656 }
michael@0 4657 } else {
michael@0 4658 break;
michael@0 4659 }
michael@0 4660 }
michael@0 4661 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
michael@0 4662 /* always set this up to cum-ack */
michael@0 4663 asoc->this_sack_highest_gap = last_tsn;
michael@0 4664
michael@0 4665 if ((num_seg > 0) || (num_nr_seg > 0)) {
michael@0 4666
michael@0 4667 /*
michael@0 4668 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
michael@0 4669 * to be greater than the cumack. Also reset saw_newack to 0
michael@0 4670 * for all dests.
michael@0 4671 */
michael@0 4672 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
michael@0 4673 net->saw_newack = 0;
michael@0 4674 net->this_sack_highest_newack = last_tsn;
michael@0 4675 }
michael@0 4676
michael@0 4677 /*
michael@0 4678 * thisSackHighestGap will increase while handling NEW
michael@0 4679 * segments. this_sack_highest_newack will increase while
michael@0 4680 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
michael@0 4681 * used for the CMT DAC algo. saw_newack will also change.
michael@0 4682 */
michael@0 4683 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
michael@0 4684 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
michael@0 4685 num_seg, num_nr_seg, &rto_ok)) {
michael@0 4686 wake_him++;
michael@0 4687 }
michael@0 4688 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
michael@0 4689 /*
michael@0 4690 * validate the biggest_tsn_acked in the gap acks if
michael@0 4691 * strict adherence is wanted.
michael@0 4692 */
michael@0 4693 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
michael@0 4694 /*
michael@0 4695 * peer is either confused or we are under
michael@0 4696 * attack. We must abort.
michael@0 4697 */
michael@0 4698 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
michael@0 4699 biggest_tsn_acked, send_s);
michael@0 4700 goto hopeless_peer;
michael@0 4701 }
michael@0 4702 }
michael@0 4703 }
michael@0 4704 /*******************************************/
michael@0 4705 /* cancel ALL T3-send timer if accum moved */
michael@0 4706 /*******************************************/
michael@0 4707 if (asoc->sctp_cmt_on_off > 0) {
michael@0 4708 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
michael@0 4709 if (net->new_pseudo_cumack)
michael@0 4710 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
michael@0 4711 stcb, net,
michael@0 4712 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
michael@0 4713
michael@0 4714 }
michael@0 4715 } else {
michael@0 4716 if (accum_moved) {
michael@0 4717 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
michael@0 4718 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
michael@0 4719 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
michael@0 4720 }
michael@0 4721 }
michael@0 4722 }
michael@0 4723 /********************************************/
michael@0 4724 /* drop the acked chunks from the sentqueue */
michael@0 4725 /********************************************/
michael@0 4726 asoc->last_acked_seq = cum_ack;
michael@0 4727
michael@0 4728 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
michael@0 4729 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
michael@0 4730 break;
michael@0 4731 }
michael@0 4732 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
michael@0 4733 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
michael@0 4734 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
michael@0 4735 #ifdef INVARIANTS
michael@0 4736 } else {
michael@0 4737 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
michael@0 4738 #endif
michael@0 4739 }
michael@0 4740 }
michael@0 4741 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
michael@0 4742 if (PR_SCTP_ENABLED(tp1->flags)) {
michael@0 4743 if (asoc->pr_sctp_cnt != 0)
michael@0 4744 asoc->pr_sctp_cnt--;
michael@0 4745 }
michael@0 4746 asoc->sent_queue_cnt--;
michael@0 4747 if (tp1->data) {
michael@0 4748 /* sa_ignore NO_NULL_CHK */
michael@0 4749 sctp_free_bufspace(stcb, asoc, tp1, 1);
michael@0 4750 sctp_m_freem(tp1->data);
michael@0 4751 tp1->data = NULL;
michael@0 4752 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
michael@0 4753 asoc->sent_queue_cnt_removeable--;
michael@0 4754 }
michael@0 4755 }
michael@0 4756 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
michael@0 4757 sctp_log_sack(asoc->last_acked_seq,
michael@0 4758 cum_ack,
michael@0 4759 tp1->rec.data.TSN_seq,
michael@0 4760 0,
michael@0 4761 0,
michael@0 4762 SCTP_LOG_FREE_SENT);
michael@0 4763 }
michael@0 4764 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
michael@0 4765 wake_him++;
michael@0 4766 }
michael@0 4767 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
michael@0 4768 #ifdef INVARIANTS
michael@0 4769 panic("Warning flight size is positive and should be 0");
michael@0 4770 #else
michael@0 4771 SCTP_PRINTF("Warning: flight size should be 0 but is %d\n",
michael@0 4772 asoc->total_flight);
michael@0 4773 #endif
michael@0 4774 asoc->total_flight = 0;
michael@0 4775 }
michael@0 4776
michael@0 4777 #if defined(__Userspace__)
michael@0 4778 if (stcb->sctp_ep->recv_callback) {
michael@0 4779 if (stcb->sctp_socket) {
michael@0 4780 uint32_t inqueue_bytes, sb_free_now;
michael@0 4781 struct sctp_inpcb *inp;
michael@0 4782
michael@0 4783 inp = stcb->sctp_ep;
michael@0 4784 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
michael@0 4785 sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
michael@0 4786
michael@0 4787 /* check if the amount free in the send socket buffer crossed the threshold */
michael@0 4788 if (inp->send_callback &&
michael@0 4789 (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
michael@0 4790 (inp->send_sb_threshold == 0))) {
michael@0 4791 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 4792 SCTP_TCB_UNLOCK(stcb);
michael@0 4793 inp->send_callback(stcb->sctp_socket, sb_free_now);
michael@0 4794 SCTP_TCB_LOCK(stcb);
michael@0 4795 atomic_subtract_int(&stcb->asoc.refcnt, 1);
michael@0 4796 }
michael@0 4797 }
michael@0 4798 } else if ((wake_him) && (stcb->sctp_socket)) {
michael@0 4799 #else
michael@0 4800 /* sa_ignore NO_NULL_CHK */
michael@0 4801 if ((wake_him) && (stcb->sctp_socket)) {
michael@0 4802 #endif
michael@0 4803 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 4804 struct socket *so;
michael@0 4805
michael@0 4806 #endif
michael@0 4807 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
michael@0 4808 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
michael@0 4809 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
michael@0 4810 }
michael@0 4811 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 4812 so = SCTP_INP_SO(stcb->sctp_ep);
michael@0 4813 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 4814 SCTP_TCB_UNLOCK(stcb);
michael@0 4815 SCTP_SOCKET_LOCK(so, 1);
michael@0 4816 SCTP_TCB_LOCK(stcb);
michael@0 4817 atomic_subtract_int(&stcb->asoc.refcnt, 1);
michael@0 4818 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
michael@0 4819 /* assoc was freed while we were unlocked */
michael@0 4820 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 4821 return;
michael@0 4822 }
michael@0 4823 #endif
michael@0 4824 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
michael@0 4825 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 4826 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 4827 #endif
michael@0 4828 } else {
michael@0 4829 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
michael@0 4830 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
michael@0 4831 }
michael@0 4832 }
michael@0 4833
michael@0 4834 if (asoc->fast_retran_loss_recovery && accum_moved) {
michael@0 4835 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
michael@0 4836 /* Setup so we will exit RFC2582 fast recovery */
michael@0 4837 will_exit_fast_recovery = 1;
michael@0 4838 }
michael@0 4839 }
michael@0 4840 /*
michael@0 4841 * Check for revoked fragments:
michael@0 4842 *
michael@0 4843 * If the previous SACK had no frags, we cannot have any revoked.
michael@0 4844 * If the previous SACK had frags: when we now have frags too
michael@0 4845 * (num_seg > 0), call sctp_check_for_revoked() to tell if the peer
michael@0 4846 * revoked some of them; otherwise the peer revoked all ACKED
michael@0 4847 * fragments, since we had some before and now we have NONE.
michael@0 4848 */
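/*
 * Hypothetical example: SACK #1 carries cum_ack 104 and gap-acks
 * TSNs 105-107; SACK #2 then arrives with cum_ack 104 and no gap
 * blocks. The peer has reneged on 105-107, so below they are moved
 * back to SCTP_DATAGRAM_SENT, counted into the flight again, and
 * the cwnd is inflated by their book size to compensate.
 */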
michael@0 4849
michael@0 4850 if (num_seg) {
michael@0 4851 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
michael@0 4852 asoc->saw_sack_with_frags = 1;
michael@0 4853 } else if (asoc->saw_sack_with_frags) {
michael@0 4854 int cnt_revoked = 0;
michael@0 4855
michael@0 4856 /* Peer revoked all dg's marked or acked */
michael@0 4857 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
michael@0 4858 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
michael@0 4859 tp1->sent = SCTP_DATAGRAM_SENT;
michael@0 4860 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
michael@0 4861 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
michael@0 4862 tp1->whoTo->flight_size,
michael@0 4863 tp1->book_size,
michael@0 4864 (uintptr_t)tp1->whoTo,
michael@0 4865 tp1->rec.data.TSN_seq);
michael@0 4866 }
michael@0 4867 sctp_flight_size_increase(tp1);
michael@0 4868 sctp_total_flight_increase(stcb, tp1);
michael@0 4869 tp1->rec.data.chunk_was_revoked = 1;
michael@0 4870 /*
michael@0 4871 * To ensure that this increase in
michael@0 4872 * flightsize, which is artificial,
michael@0 4873 * does not throttle the sender, we
michael@0 4874 * also increase the cwnd
michael@0 4875 * artificially.
michael@0 4876 */
michael@0 4877 tp1->whoTo->cwnd += tp1->book_size;
michael@0 4878 cnt_revoked++;
michael@0 4879 }
michael@0 4880 }
michael@0 4881 if (cnt_revoked) {
michael@0 4882 reneged_all = 1;
michael@0 4883 }
michael@0 4884 asoc->saw_sack_with_frags = 0;
michael@0 4885 }
michael@0 4886 if (num_nr_seg > 0)
michael@0 4887 asoc->saw_sack_with_nr_frags = 1;
michael@0 4888 else
michael@0 4889 asoc->saw_sack_with_nr_frags = 0;
michael@0 4890
michael@0 4891 /* JRS - Use the congestion control given in the CC module */
michael@0 4892 if (ecne_seen == 0) {
michael@0 4893 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
michael@0 4894 if (net->net_ack2 > 0) {
michael@0 4895 /*
michael@0 4896 * Karn's rule applies to clearing error count, this
michael@0 4897 * is optional.
michael@0 4898 */
michael@0 4899 net->error_count = 0;
michael@0 4900 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
michael@0 4901 /* addr came good */
michael@0 4902 net->dest_state |= SCTP_ADDR_REACHABLE;
michael@0 4903 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
michael@0 4904 0, (void *)net, SCTP_SO_NOT_LOCKED);
michael@0 4905 }
michael@0 4906
michael@0 4907 if (net == stcb->asoc.primary_destination) {
michael@0 4908 if (stcb->asoc.alternate) {
michael@0 4909 /* release the alternate, primary is good */
michael@0 4910 sctp_free_remote_addr(stcb->asoc.alternate);
michael@0 4911 stcb->asoc.alternate = NULL;
michael@0 4912 }
michael@0 4913 }
michael@0 4914
michael@0 4915 if (net->dest_state & SCTP_ADDR_PF) {
michael@0 4916 net->dest_state &= ~SCTP_ADDR_PF;
michael@0 4917 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
michael@0 4918 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
michael@0 4919 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
michael@0 4920 /* Done with this net */
michael@0 4921 net->net_ack = 0;
michael@0 4922 }
michael@0 4923 /* restore any doubled timers */
michael@0 4924 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
michael@0 4925 if (net->RTO < stcb->asoc.minrto) {
michael@0 4926 net->RTO = stcb->asoc.minrto;
michael@0 4927 }
michael@0 4928 if (net->RTO > stcb->asoc.maxrto) {
michael@0 4929 net->RTO = stcb->asoc.maxrto;
michael@0 4930 }
michael@0 4931 }
michael@0 4932 }
michael@0 4933 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
michael@0 4934 }
michael@0 4935
michael@0 4936 if (TAILQ_EMPTY(&asoc->sent_queue)) {
michael@0 4937 /* nothing left in-flight */
michael@0 4938 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
michael@0 4939 /* stop all timers */
michael@0 4940 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
michael@0 4941 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
michael@0 4942 net->flight_size = 0;
michael@0 4943 net->partial_bytes_acked = 0;
michael@0 4944 }
michael@0 4945 asoc->total_flight = 0;
michael@0 4946 asoc->total_flight_count = 0;
michael@0 4947 }
michael@0 4948
michael@0 4949 /**********************************/
michael@0 4950 /* Now what about shutdown issues */
michael@0 4951 /**********************************/
michael@0 4952 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
michael@0 4953 /* nothing left on sendqueue.. consider done */
michael@0 4954 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
michael@0 4955 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
michael@0 4956 asoc->peers_rwnd, 0, 0, a_rwnd);
michael@0 4957 }
michael@0 4958 asoc->peers_rwnd = a_rwnd;
michael@0 4959 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
michael@0 4960 /* SWS sender side engages */
michael@0 4961 asoc->peers_rwnd = 0;
michael@0 4962 }
michael@0 4963 /* clean up */
michael@0 4964 if ((asoc->stream_queue_cnt == 1) &&
michael@0 4965 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
michael@0 4966 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
michael@0 4967 (asoc->locked_on_sending)
michael@0 4968 ) {
michael@0 4969 struct sctp_stream_queue_pending *sp;
michael@0 4970 /* I may be in a state where we got
michael@0 4971 * everything across.. but cannot write more due
michael@0 4972 * to a shutdown... we abort, since the
michael@0 4973 * user did not indicate EOR in this case.
michael@0 4974 */
michael@0 4975 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
michael@0 4976 sctp_streamhead);
michael@0 4977 if ((sp) && (sp->length == 0)) {
michael@0 4978 asoc->locked_on_sending = NULL;
michael@0 4979 if (sp->msg_is_complete) {
michael@0 4980 asoc->stream_queue_cnt--;
michael@0 4981 } else {
michael@0 4982 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
michael@0 4983 asoc->stream_queue_cnt--;
michael@0 4984 }
michael@0 4985 }
michael@0 4986 }
michael@0 4987 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
michael@0 4988 (asoc->stream_queue_cnt == 0)) {
michael@0 4989 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
michael@0 4990 /* Need to abort here */
michael@0 4991 struct mbuf *oper;
michael@0 4992 abort_out_now:
michael@0 4993 *abort_now = 1;
michael@0 4994 /* XXX */
michael@0 4995 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
michael@0 4996 0, M_NOWAIT, 1, MT_DATA);
michael@0 4997 if (oper) {
michael@0 4998 struct sctp_paramhdr *ph;
michael@0 4999
michael@0 5000 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
michael@0 5001 ph = mtod(oper, struct sctp_paramhdr *);
michael@0 5002 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
michael@0 5003 ph->param_length = htons(SCTP_BUF_LEN(oper));
michael@0 5004 }
michael@0 5005 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
michael@0 5006 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 5007 return;
michael@0 5008 } else {
michael@0 5009 struct sctp_nets *netp;
michael@0 5010
michael@0 5011 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
michael@0 5012 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
michael@0 5013 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
michael@0 5014 }
michael@0 5015 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
michael@0 5016 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
michael@0 5017 sctp_stop_timers_for_shutdown(stcb);
michael@0 5018 if (asoc->alternate) {
michael@0 5019 netp = asoc->alternate;
michael@0 5020 } else {
michael@0 5021 netp = asoc->primary_destination;
michael@0 5022 }
michael@0 5023 sctp_send_shutdown(stcb, netp);
michael@0 5024 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
michael@0 5025 stcb->sctp_ep, stcb, netp);
michael@0 5026 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
michael@0 5027 stcb->sctp_ep, stcb, netp);
michael@0 5028 }
michael@0 5029 return;
michael@0 5030 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
michael@0 5031 (asoc->stream_queue_cnt == 0)) {
michael@0 5032 struct sctp_nets *netp;
michael@0 5033
michael@0 5034 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
michael@0 5035 goto abort_out_now;
michael@0 5036 }
michael@0 5037 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
michael@0 5038 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
michael@0 5039 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
michael@0 5040 sctp_stop_timers_for_shutdown(stcb);
michael@0 5041 if (asoc->alternate) {
michael@0 5042 netp = asoc->alternate;
michael@0 5043 } else {
michael@0 5044 netp = asoc->primary_destination;
michael@0 5045 }
michael@0 5046 sctp_send_shutdown_ack(stcb, netp);
michael@0 5047 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
michael@0 5048 stcb->sctp_ep, stcb, netp);
michael@0 5049 return;
michael@0 5050 }
michael@0 5051 }
michael@0 5052 /*
michael@0 5053 * Now here we are going to recycle net_ack for a different use...
michael@0 5054 * HEADS UP.
michael@0 5055 */
michael@0 5056 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
michael@0 5057 net->net_ack = 0;
michael@0 5058 }
michael@0 5059
michael@0 5060 /*
michael@0 5061 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
michael@0 5062 * to be done. Setting this_sack_lowest_newack to the cum_ack will
michael@0 5063 * automatically ensure that.
michael@0 5064 */
michael@0 5065 if ((asoc->sctp_cmt_on_off > 0) &&
michael@0 5066 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
michael@0 5067 (cmt_dac_flag == 0)) {
michael@0 5068 this_sack_lowest_newack = cum_ack;
michael@0 5069 }
michael@0 5070 if ((num_seg > 0) || (num_nr_seg > 0)) {
michael@0 5071 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
michael@0 5072 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
michael@0 5073 }
michael@0 5074 /* JRS - Use the congestion control given in the CC module */
michael@0 5075 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
michael@0 5076
michael@0 5077 /* Now are we exiting loss recovery ? */
michael@0 5078 if (will_exit_fast_recovery) {
michael@0 5079 /* Ok, we must exit fast recovery */
michael@0 5080 asoc->fast_retran_loss_recovery = 0;
michael@0 5081 }
michael@0 5082 if ((asoc->sat_t3_loss_recovery) &&
michael@0 5083 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
michael@0 5084 /* end satellite t3 loss recovery */
michael@0 5085 asoc->sat_t3_loss_recovery = 0;
michael@0 5086 }
michael@0 5087 /*
michael@0 5088 * CMT Fast recovery
michael@0 5089 */
michael@0 5090 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
michael@0 5091 if (net->will_exit_fast_recovery) {
michael@0 5092 /* Ok, we must exit fast recovery */
michael@0 5093 net->fast_retran_loss_recovery = 0;
michael@0 5094 }
michael@0 5095 }
michael@0 5096
michael@0 5097 /* Adjust and set the new rwnd value */
michael@0 5098 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
michael@0 5099 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
michael@0 5100 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
michael@0 5101 }
michael@0 5102 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
michael@0 5103 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
michael@0 5104 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
michael@0 5105 /* SWS sender side engages */
michael@0 5106 asoc->peers_rwnd = 0;
michael@0 5107 }
michael@0 5108 if (asoc->peers_rwnd > old_rwnd) {
michael@0 5109 win_probe_recovery = 1;
michael@0 5110 }
michael@0 5111
michael@0 5112 /*
michael@0 5113 * Now we must setup so we have a timer up for anyone with
michael@0 5114 * outstanding data.
michael@0 5115 */
michael@0 5116 done_once = 0;
michael@0 5117 again:
michael@0 5118 j = 0;
michael@0 5119 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
michael@0 5120 if (win_probe_recovery && (net->window_probe)) {
michael@0 5121 win_probe_recovered = 1;
michael@0 5122 /*-
michael@0 5123 * Find the first chunk that was used as a
michael@0 5124 * window probe and clear the event. Put
michael@0 5125 * it back into the send queue as if it had
michael@0 5126 * not been sent.
michael@0 5127 */
michael@0 5128 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
michael@0 5129 if (tp1->window_probe) {
michael@0 5130 sctp_window_probe_recovery(stcb, asoc, tp1);
michael@0 5131 break;
michael@0 5132 }
michael@0 5133 }
michael@0 5134 }
michael@0 5135 if (net->flight_size) {
michael@0 5136 j++;
michael@0 5137 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
michael@0 5138 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
michael@0 5139 stcb->sctp_ep, stcb, net);
michael@0 5140 }
michael@0 5141 if (net->window_probe) {
michael@0 5142 net->window_probe = 0;
michael@0 5143 }
michael@0 5144 } else {
michael@0 5145 if (net->window_probe) {
michael@0 5146 /* In window probes we must ensure a timer is still running there */
michael@0 5147 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
michael@0 5148 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
michael@0 5149 stcb->sctp_ep, stcb, net);
michael@0 5150
michael@0 5151 }
michael@0 5152 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
michael@0 5153 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
michael@0 5154 stcb, net,
michael@0 5155 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
michael@0 5156 }
michael@0 5157 }
michael@0 5158 }
michael@0 5159 if ((j == 0) &&
michael@0 5160 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
michael@0 5161 (asoc->sent_queue_retran_cnt == 0) &&
michael@0 5162 (win_probe_recovered == 0) &&
michael@0 5163 (done_once == 0)) {
michael@0 5164 /* huh, this should not happen unless all packets
michael@0 5165 * are PR-SCTP and marked to be skipped, of course.
michael@0 5166 */
michael@0 5167 if (sctp_fs_audit(asoc)) {
michael@0 5168 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
michael@0 5169 net->flight_size = 0;
michael@0 5170 }
michael@0 5171 asoc->total_flight = 0;
michael@0 5172 asoc->total_flight_count = 0;
michael@0 5173 asoc->sent_queue_retran_cnt = 0;
michael@0 5174 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
michael@0 5175 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
michael@0 5176 sctp_flight_size_increase(tp1);
michael@0 5177 sctp_total_flight_increase(stcb, tp1);
michael@0 5178 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
michael@0 5179 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
michael@0 5180 }
michael@0 5181 }
michael@0 5182 }
michael@0 5183 done_once = 1;
michael@0 5184 goto again;
michael@0 5185 }
michael@0 5186 /*********************************************/
michael@0 5187 /* Here we perform PR-SCTP procedures */
michael@0 5188 /* (section 4.2) */
michael@0 5189 /*********************************************/
michael@0 5190 /* C1. update advancedPeerAckPoint */
michael@0 5191 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
michael@0 5192 asoc->advanced_peer_ack_point = cum_ack;
michael@0 5193 }
michael@0 5194 /* C2. try to further move advancedPeerAckPoint ahead */
michael@0 5195 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
michael@0 5196 struct sctp_tmit_chunk *lchk;
michael@0 5197 uint32_t old_adv_peer_ack_point;
michael@0 5198
michael@0 5199 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
michael@0 5200 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
michael@0 5201 /* C3. See if we need to send a Fwd-TSN */
michael@0 5202 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
michael@0 5203 /*
michael@0 5204 * ISSUE with ECN, see FWD-TSN processing.
michael@0 5205 */
michael@0 5206 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
michael@0 5207 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
michael@0 5208 0xee, cum_ack, asoc->advanced_peer_ack_point,
michael@0 5209 old_adv_peer_ack_point);
michael@0 5210 }
michael@0 5211 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
michael@0 5212 send_forward_tsn(stcb, asoc);
michael@0 5213 } else if (lchk) {
michael@0 5214 /* try to FR fwd-tsn's that get lost too */
michael@0 5215 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
michael@0 5216 send_forward_tsn(stcb, asoc);
michael@0 5217 }
michael@0 5218 }
michael@0 5219 }
michael@0 5220 if (lchk) {
michael@0 5221 /* Assure a timer is up */
michael@0 5222 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
michael@0 5223 stcb->sctp_ep, stcb, lchk->whoTo);
michael@0 5224 }
michael@0 5225 }
michael@0 5226 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
michael@0 5227 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
michael@0 5228 a_rwnd,
michael@0 5229 stcb->asoc.peers_rwnd,
michael@0 5230 stcb->asoc.total_flight,
michael@0 5231 stcb->asoc.total_output_queue_size);
michael@0 5232 }
michael@0 5233 }
michael@0 5234
michael@0 5235 void
michael@0 5236 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
michael@0 5237 {
michael@0 5238 /* Copy cum-ack */
michael@0 5239 uint32_t cum_ack, a_rwnd;
michael@0 5240
michael@0 5241 cum_ack = ntohl(cp->cumulative_tsn_ack);
michael@0 5242 /* Arrange so a_rwnd does NOT change */
michael@0 5243 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
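/*
 * Editor's note: a SHUTDOWN chunk carries a cumulative TSN ack but
 * no window advertisement, so a_rwnd is synthesized here. The
 * express handler recomputes peers_rwnd as roughly a_rwnd minus the
 * bytes in flight, so passing peers_rwnd + total_flight leaves the
 * advertised window effectively unchanged.
 */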
michael@0 5244
michael@0 5245 /* Now call the express sack handling */
michael@0 5246 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
michael@0 5247 }
michael@0 5248
michael@0 5249 static void
michael@0 5250 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
michael@0 5251 struct sctp_stream_in *strmin)
michael@0 5252 {
michael@0 5253 struct sctp_queued_to_read *ctl, *nctl;
michael@0 5254 struct sctp_association *asoc;
michael@0 5255 uint16_t tt;
michael@0 5256
michael@0 5257 asoc = &stcb->asoc;
michael@0 5258 tt = strmin->last_sequence_delivered;
michael@0 5259 /*
michael@0 5260 * First deliver anything prior to and including the stream
michael@0 5261 * sequence number that came in
michael@0 5262 */
michael@0 5263 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
michael@0 5264 if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
michael@0 5265 /* this is deliverable now */
michael@0 5266 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
michael@0 5267 /* subtract pending on streams */
michael@0 5268 asoc->size_on_all_streams -= ctl->length;
michael@0 5269 sctp_ucount_decr(asoc->cnt_on_all_streams);
michael@0 5270 /* deliver it to at least the delivery-q */
michael@0 5271 if (stcb->sctp_socket) {
michael@0 5272 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
michael@0 5273 sctp_add_to_readq(stcb->sctp_ep, stcb,
michael@0 5274 ctl,
michael@0 5275 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
michael@0 5276 }
michael@0 5277 } else {
michael@0 5278 /* no more delivery now. */
michael@0 5279 break;
michael@0 5280 }
michael@0 5281 }
michael@0 5282 /*
michael@0 5283 * now we must deliver things in queue the normal way if any are
michael@0 5284 * now ready.
michael@0 5285 */
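/*
 * Hypothetical example: if SSN 5 was just delivered and this
 * stream's inqueue holds SSNs 6, 7 and 9, the loop below delivers
 * 6 and 7, then stops at the gap before 9.
 */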
michael@0 5286 tt = strmin->last_sequence_delivered + 1;
michael@0 5287 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
michael@0 5288 if (tt == ctl->sinfo_ssn) {
michael@0 5289 /* this is deliverable now */
michael@0 5290 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
michael@0 5291 /* subtract pending on streams */
michael@0 5292 asoc->size_on_all_streams -= ctl->length;
michael@0 5293 sctp_ucount_decr(asoc->cnt_on_all_streams);
michael@0 5294 /* deliver it to at least the delivery-q */
michael@0 5295 strmin->last_sequence_delivered = ctl->sinfo_ssn;
michael@0 5296 if (stcb->sctp_socket) {
michael@0 5297 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
michael@0 5298 sctp_add_to_readq(stcb->sctp_ep, stcb,
michael@0 5299 ctl,
michael@0 5300 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
michael@0 5301
michael@0 5302 }
michael@0 5303 tt = strmin->last_sequence_delivered + 1;
michael@0 5304 } else {
michael@0 5305 break;
michael@0 5306 }
michael@0 5307 }
michael@0 5308 }
michael@0 5309
michael@0 5310 static void
michael@0 5311 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
michael@0 5312 struct sctp_association *asoc,
michael@0 5313 uint16_t stream, uint16_t seq)
michael@0 5314 {
michael@0 5315 struct sctp_tmit_chunk *chk, *nchk;
michael@0 5316
michael@0 5317 /* For each chunk on the queue, see if we need to toss it */
michael@0 5318 /*
michael@0 5319 * For now, large messages held on the reasmqueue that are
michael@0 5320 * complete will be tossed too. In theory we could do more
michael@0 5321 * work: spin through, stop after dumping one msg (i.e., on
michael@0 5322 * seeing the start of a new msg at the head), and call the
michael@0 5323 * delivery function to see if it can be delivered. But
michael@0 5324 * for now we just dump everything on the queue.
michael@0 5325 */
michael@0 5326 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
michael@0 5327 /* Do not toss it if it is on a different stream, or
michael@0 5328 * if it is marked for unordered delivery, in which case
michael@0 5329 * the stream sequence number has no meaning.
michael@0 5330 */
michael@0 5331 if ((chk->rec.data.stream_number != stream) ||
michael@0 5332 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
michael@0 5333 continue;
michael@0 5334 }
michael@0 5335 if (chk->rec.data.stream_seq == seq) {
michael@0 5336 /* It needs to be tossed */
michael@0 5337 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
michael@0 5338 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
michael@0 5339 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
michael@0 5340 asoc->str_of_pdapi = chk->rec.data.stream_number;
michael@0 5341 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
michael@0 5342 asoc->fragment_flags = chk->rec.data.rcv_flags;
michael@0 5343 }
michael@0 5344 asoc->size_on_reasm_queue -= chk->send_size;
michael@0 5345 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
michael@0 5346
michael@0 5347 /* Clear up any stream problem */
michael@0 5348 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
michael@0 5349 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
michael@0 5350 /*
michael@0 5351 * We must advance this stream's
michael@0 5352 * sequence number if the chunk
michael@0 5353 * being skipped is not unordered.
michael@0 5354 * There is a chance that if the
michael@0 5355 * peer does not include the last
michael@0 5356 * fragment in its FWD-TSN we WILL
michael@0 5357 * have a problem here, since a
michael@0 5358 * partial chunk that may never be
michael@0 5359 * deliverable would be left in the
michael@0 5360 * queue. Also, if a partial
michael@0 5361 * delivery API has started, the
michael@0 5362 * user may get a partial chunk,
michael@0 5363 * with the next read returning a
michael@0 5364 * new chunk... really ugly, but I
michael@0 5365 * see no way around it! Maybe a notify??
michael@0 5366 */
michael@0 5367 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
michael@0 5368 }
michael@0 5369 if (chk->data) {
michael@0 5370 sctp_m_freem(chk->data);
michael@0 5371 chk->data = NULL;
michael@0 5372 }
michael@0 5373 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
michael@0 5374 } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
michael@0 5375 /* If the stream_seq is > than the purging one, we are done */
michael@0 5376 break;
michael@0 5377 }
michael@0 5378 }
michael@0 5379 }
michael@0 5380
michael@0 5381
michael@0 5382 void
michael@0 5383 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
michael@0 5384 struct sctp_forward_tsn_chunk *fwd,
michael@0 5385 int *abort_flag, struct mbuf *m, int offset)
michael@0 5386 {
michael@0 5387 /* The PR-SCTP FWD-TSN */
michael@0 5388 /*
michael@0 5389 * Here we perform all the data receiver side steps for
michael@0 5390 * processing a FwdTSN, as required by the PR-SCTP draft.
michael@0 5391 * Assume we get FwdTSN(x):
michael@0 5392 *
michael@0 5393 * 1) update local cumTSN to x
michael@0 5394 * 2) try to further advance cumTSN to x + others we have
michael@0 5395 * 3) examine and update re-ordering queue on pr-in-streams
michael@0 5396 * 4) clean up re-assembly queue
michael@0 5397 * 5) send a SACK to report where we are
michael@0 5398 */
michael@0 5399 struct sctp_association *asoc;
michael@0 5400 uint32_t new_cum_tsn, gap;
michael@0 5401 unsigned int i, fwd_sz, m_size;
michael@0 5402 uint32_t str_seq;
michael@0 5403 struct sctp_stream_in *strm;
michael@0 5404 struct sctp_tmit_chunk *chk, *nchk;
michael@0 5405 struct sctp_queued_to_read *ctl, *sv;
michael@0 5406
michael@0 5407 asoc = &stcb->asoc;
michael@0 5408 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
michael@0 5409 SCTPDBG(SCTP_DEBUG_INDATA1,
michael@0 5410 "Bad size too small/big fwd-tsn\n");
michael@0 5411 return;
michael@0 5412 }
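/*
 * mapping_array_size is in bytes and each bit tracks one TSN, so
 * shifting left by 3 gives the number of TSNs the map can cover.
 */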
michael@0 5413 m_size = (stcb->asoc.mapping_array_size << 3);
michael@0 5414 /*************************************************************/
michael@0 5415 /* 1. Here we update local cumTSN and shift the bitmap array */
michael@0 5416 /*************************************************************/
michael@0 5417 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
michael@0 5418
michael@0 5419 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
michael@0 5420 /* Already got there ... */
michael@0 5421 return;
michael@0 5422 }
michael@0 5423 /*
michael@0 5424 * now we know the new TSN is more advanced, let's find the actual
michael@0 5425 * gap
michael@0 5426 */
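/*
 * SCTP_CALC_TSN_TO_GAP() computes the distance of the forwarded
 * TSN from the base TSN of the mapping array in modulo-2^32 TSN
 * arithmetic, so a wrapped TSN space still yields a small offset.
 */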
michael@0 5427 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
michael@0 5428 asoc->cumulative_tsn = new_cum_tsn;
michael@0 5429 if (gap >= m_size) {
michael@0 5430 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
michael@0 5431 struct mbuf *oper;
michael@0 5432 /*
michael@0 5433 * The new cumulative TSN is out of range (beyond the rwnd
michael@0 5434 * of single-byte chunks I give out). This must be an attacker.
michael@0 5435 */
michael@0 5436 *abort_flag = 1;
michael@0 5437 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
michael@0 5438 0, M_NOWAIT, 1, MT_DATA);
michael@0 5439 if (oper) {
michael@0 5440 struct sctp_paramhdr *ph;
michael@0 5441 uint32_t *ippp;
michael@0 5442 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
michael@0 5443 (sizeof(uint32_t) * 3);
michael@0 5444 ph = mtod(oper, struct sctp_paramhdr *);
michael@0 5445 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0 5446 ph->param_length = htons(SCTP_BUF_LEN(oper));
michael@0 5447 ippp = (uint32_t *) (ph + 1);
michael@0 5448 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_33);
michael@0 5449 ippp++;
michael@0 5450 *ippp = asoc->highest_tsn_inside_map;
michael@0 5451 ippp++;
michael@0 5452 *ippp = new_cum_tsn;
michael@0 5453 }
michael@0 5454 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_33;
michael@0 5455 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0 5456 return;
michael@0 5457 }
michael@0 5458 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
michael@0 5459
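/*
 * The forwarded TSN lies beyond what the current map window can
 * represent, so rebase: clear both maps and restart them just past
 * new_cum_tsn, which the cumulative ack point now implicitly covers.
 */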
michael@0 5460 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
michael@0 5461 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
michael@0 5462 asoc->highest_tsn_inside_map = new_cum_tsn;
michael@0 5463
michael@0 5464 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
michael@0 5465 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
michael@0 5466
michael@0 5467 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
michael@0 5468 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
michael@0 5469 }
michael@0 5470 } else {
michael@0 5471 SCTP_TCB_LOCK_ASSERT(stcb);
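/*
 * The forwarded TSN still fits in the map: mark every TSN up to
 * the gap that we have not yet seen as present in the
 * non-renegable map, since the peer has declared them skipped and
 * we must not report them as missing (nor ever renege on them).
 */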
michael@0 5472 for (i = 0; i <= gap; i++) {
michael@0 5473 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
michael@0 5474 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
michael@0 5475 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
michael@0 5476 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
michael@0 5477 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
michael@0 5478 }
michael@0 5479 }
michael@0 5480 }
michael@0 5481 }
michael@0 5482 /*************************************************************/
michael@0 5483 /* 2. Clear up re-assembly queue */
michael@0 5484 /*************************************************************/
michael@0 5485 /*
michael@0 5486 * First, if a pd-api is in progress, service it, just in case
michael@0 5487 * we can progress it forward.
michael@0 5488 */
michael@0 5489 if (asoc->fragmented_delivery_inprogress) {
michael@0 5490 sctp_service_reassembly(stcb, asoc);
michael@0 5491 }
michael@0 5492 /* For each chunk on the queue, see if we need to toss it */
michael@0 5493 /*
michael@0 5494 * For now, large messages held on the reasmqueue that are
michael@0 5495 * complete will be tossed too. In theory we could do more
michael@0 5496 * work: spin through, stop after dumping one msg (i.e., on
michael@0 5497 * seeing the start of a new msg at the head), and call the
michael@0 5498 * delivery function to see if it can be delivered. But
michael@0 5499 * for now we just dump everything on the queue.
michael@0 5500 */
michael@0 5501 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
michael@0 5502 if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
michael@0 5503 /* It needs to be tossed */
michael@0 5504 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
michael@0 5505 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
michael@0 5506 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
michael@0 5507 asoc->str_of_pdapi = chk->rec.data.stream_number;
michael@0 5508 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
michael@0 5509 asoc->fragment_flags = chk->rec.data.rcv_flags;
michael@0 5510 }
michael@0 5511 asoc->size_on_reasm_queue -= chk->send_size;
michael@0 5512 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
michael@0 5513
michael@0 5514 /* Clear up any stream problem */
michael@0 5515 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
michael@0 5516 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
michael@0 5517 /*
michael@0 5518 * We must advance this stream's
michael@0 5519 * sequence number if the chunk
michael@0 5520 * being skipped is not unordered.
michael@0 5521 * There is a chance that if the
michael@0 5522 * peer does not include the last
michael@0 5523 * fragment in its FWD-TSN we WILL
michael@0 5524 * have a problem here, since a
michael@0 5525 * partial chunk that may never be
michael@0 5526 * deliverable would be left in the
michael@0 5527 * queue. Also, if a partial
michael@0 5528 * delivery API has started, the
michael@0 5529 * user may get a partial chunk,
michael@0 5530 * with the next read returning a
michael@0 5531 * new chunk... really ugly, but I
michael@0 5532 * see no way around it! Maybe a notify??
michael@0 5533 */
michael@0 5534 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
michael@0 5535 }
michael@0 5536 if (chk->data) {
michael@0 5537 sctp_m_freem(chk->data);
michael@0 5538 chk->data = NULL;
michael@0 5539 }
michael@0 5540 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
michael@0 5541 } else {
michael@0 5542 /*
michael@0 5543 * Ok we have gone beyond the end of the
michael@0 5544 * fwd-tsn's mark.
michael@0 5545 */
michael@0 5546 break;
michael@0 5547 }
michael@0 5548 }
michael@0 5549 /*******************************************************/
michael@0 5550 /* 3. Update the PR-stream re-ordering queues and fix */
michael@0 5551 /* delivery issues as needed. */
michael@0 5552 /*******************************************************/
michael@0 5553 fwd_sz -= sizeof(*fwd);
michael@0 5554 if (m && fwd_sz) {
michael@0 5555 /* New method. */
michael@0 5556 unsigned int num_str;
michael@0 5557 struct sctp_strseq *stseq, strseqbuf;
michael@0 5558 offset += sizeof(*fwd);
michael@0 5559
michael@0 5560 SCTP_INP_READ_LOCK(stcb->sctp_ep);
michael@0 5561 num_str = fwd_sz / sizeof(struct sctp_strseq);
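/*
 * Sketch of the FORWARD TSN chunk layout (RFC 3758): after the
 * common chunk header and the 32-bit New Cumulative TSN comes a
 * list of 32-bit pairs, one struct sctp_strseq per entry:
 *
 *   |     Stream-N (16 bits)     |  Stream Sequence-N (16 bits)  |
 *
 * num_str is therefore the count of skipped stream/SSN pairs.
 */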
michael@0 5562 for (i = 0; i < num_str; i++) {
michael@0 5564 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
michael@0 5565 sizeof(struct sctp_strseq),
michael@0 5566 (uint8_t *)&strseqbuf);
michael@0 5567 offset += sizeof(struct sctp_strseq);
michael@0 5568 if (stseq == NULL) {
michael@0 5569 break;
michael@0 5570 }
michael@0 5571 /* Convert to host byte order */
michael@0 5572 stseq->stream = ntohs(stseq->stream);
michael@0 5573 stseq->sequence = ntohs(stseq->sequence);
michael@0 5576
michael@0 5577 /* now process */
michael@0 5578
michael@0 5579 /*
michael@0 5580 * Ok, we now look for the stream/seq on the read queue
michael@0 5581 * where it is not all delivered. If we find it, we transmute
michael@0 5582 * the read entry into a PDI_ABORTED.
michael@0 5583 */
michael@0 5584 if (stseq->stream >= asoc->streamincnt) {
michael@0 5585 /* screwed up streams, stop! */
michael@0 5586 break;
michael@0 5587 }
michael@0 5588 if ((asoc->str_of_pdapi == stseq->stream) &&
michael@0 5589 (asoc->ssn_of_pdapi == stseq->sequence)) {
michael@0 5590 /* If this is the one we were partially delivering
michael@0 5591 * just now, then we no longer are. Note this will
michael@0 5592 * change with the reassembly re-write.
michael@0 5593 */
michael@0 5594 asoc->fragmented_delivery_inprogress = 0;
michael@0 5595 }
michael@0 5596 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
michael@0 5597 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
michael@0 5598 if ((ctl->sinfo_stream == stseq->stream) &&
michael@0 5599 (ctl->sinfo_ssn == stseq->sequence)) {
michael@0 5600 str_seq = (stseq->stream << 16) | stseq->sequence;
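/*
 * Pack the 16-bit stream id and SSN into one 32-bit cookie; the
 * PDAPI notification hands this to the ULP so it can tell which
 * partially delivered message was aborted.
 */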
michael@0 5601 ctl->end_added = 1;
michael@0 5602 ctl->pdapi_aborted = 1;
michael@0 5603 sv = stcb->asoc.control_pdapi;
michael@0 5604 stcb->asoc.control_pdapi = ctl;
michael@0 5605 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
michael@0 5606 stcb,
michael@0 5607 SCTP_PARTIAL_DELIVERY_ABORTED,
michael@0 5608 (void *)&str_seq,
michael@0 5609 SCTP_SO_NOT_LOCKED);
michael@0 5610 stcb->asoc.control_pdapi = sv;
michael@0 5611 break;
michael@0 5612 } else if ((ctl->sinfo_stream == stseq->stream) &&
michael@0 5613 SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
michael@0 5614 /* We are past our victim SSN */
michael@0 5615 break;
michael@0 5616 }
michael@0 5617 }
michael@0 5618 strm = &asoc->strmin[stseq->stream];
michael@0 5619 if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
michael@0 5620 /* Update the sequence number */
michael@0 5621 strm->last_sequence_delivered = stseq->sequence;
michael@0 5622 }
michael@0 5623 /* now kick the stream the new way */
michael@0 5624 /*sa_ignore NO_NULL_CHK*/
michael@0 5625 sctp_kick_prsctp_reorder_queue(stcb, strm);
michael@0 5626 }
michael@0 5627 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
michael@0 5628 }
michael@0 5629 /*
michael@0 5630 * Now slide the mapping arrays forward.
michael@0 5631 */
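/*
 * sctp_slide_mapping_arrays() advances mapping_array_base_tsn past
 * the new cumulative ack where possible and shifts the bitmaps
 * down accordingly, reclaiming map space for TSNs still to come.
 */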
michael@0 5632 sctp_slide_mapping_arrays(stcb);
michael@0 5633
michael@0 5634 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
michael@0 5635 /* now lets kick out and check for more fragmented delivery */
michael@0 5636 /*sa_ignore NO_NULL_CHK*/
michael@0 5637 sctp_deliver_reasm_check(stcb, &stcb->asoc);
michael@0 5638 }
michael@0 5639 }
