/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef __FreeBSD__
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 258228 2013-11-16 16:09:09Z tuexen $");
#endif

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>


/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it... for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	uint32_t calc = 0;

	/*
	 * This is really set wrong with respect to a 1-to-many socket,
	 * since sb_cc is the count that everyone has put up. When we
	 * rewrite sctp_soreceive then we will fix this so that ONLY this
	 * association's data is taken into account.
	 */
	if (stcb->sctp_socket == NULL)
		return (calc);

	if (stcb->asoc.sb_cc == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
		return (calc);
	}
	/* get actual space */
	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);

	/*
	 * take out what has NOT been put on the socket queue and that we
	 * still hold for putting up.
	 */
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
	                                         asoc->cnt_on_reasm_queue * MSIZE));
	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
	                                         asoc->cnt_on_all_streams * MSIZE));

	if (calc == 0) {
		/* out of space */
		return (calc);
	}

	/* what is the overhead of all these rwnd's */
	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
	/*
	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
	 * even if it is 0. SWS engaged.
	 */
	if (calc < stcb->asoc.my_rwnd_control_len) {
		calc = 1;
	}
	return (calc);
}
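
/*
 * A worked example of the math above (numbers invented for illustration):
 * with SCTP_SB_LIMIT_RCV() == 64000 and nothing queued anywhere, the peer
 * is granted the full 64000 (or SCTP_MINIMAL_RWND if that is larger).
 * With, say, 8000 bytes sitting in 10 chunks on the reassembly queue, we
 * charge 8000 + 10 * MSIZE against the free socket-buffer space, since
 * every queued chunk also pins an mbuf's worth of overhead.
 */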


/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = stream_no;
	read_queue_e->sinfo_ssn = stream_seq;
	read_queue_e->sinfo_flags = (flags << 8);
	read_queue_e->sinfo_ppid = ppid;
	read_queue_e->sinfo_context = context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = tsn;
	read_queue_e->sinfo_cumtsn = tsn;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = net;
	read_queue_e->length = 0;
	atomic_add_int(&net->ref_count, 1);
	read_queue_e->data = dm;
	read_queue_e->spec_flags = 0;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->aux_data = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}
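
/*
 * Why (flags << 8) above: the DATA chunk flags (e.g. SCTP_DATA_UNORDERED,
 * 0x04) live in the low byte on the wire, while the sinfo_flags handed to
 * the application keep them one byte up (SCTP_UNORDERED is 0x0400), so a
 * plain shift translates between the two (assuming the usual
 * FreeBSD/userspace header values; check your sctp.h/sctp_uio.h).
 */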


/*
 * Build out our readq entry based on the incoming chunk.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_queued_to_read *read_queue_e = NULL;

	sctp_alloc_a_readq(stcb, read_queue_e);
	if (read_queue_e == NULL) {
		goto failed_build;
	}
	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
	read_queue_e->sinfo_context = stcb->asoc.context;
	read_queue_e->sinfo_timetolive = 0;
	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
	read_queue_e->whoFrom = chk->whoTo;
	read_queue_e->aux_data = NULL;
	read_queue_e->length = 0;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	read_queue_e->data = chk->data;
	read_queue_e->tail_mbuf = NULL;
	read_queue_e->stcb = stcb;
	read_queue_e->port_from = stcb->rport;
	read_queue_e->spec_flags = 0;
	read_queue_e->do_not_ref_stcb = 0;
	read_queue_e->end_added = 0;
	read_queue_e->some_taken = 0;
	read_queue_e->pdapi_aborted = 0;
failed_build:
	return (read_queue_e);
}


struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
	struct sctp_extrcvinfo *seinfo;
	struct sctp_sndrcvinfo *outinfo;
	struct sctp_rcvinfo *rcvinfo;
	struct sctp_nxtinfo *nxtinfo;
#if defined(__Userspace_os_Windows)
	WSACMSGHDR *cmh;
#else
	struct cmsghdr *cmh;
#endif
	struct mbuf *ret;
	int len;
	int use_extended;
	int provide_nxt;

	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
		/* user does not want any ancillary data */
		return (NULL);
	}

	len = 0;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	seinfo = (struct sctp_extrcvinfo *)sinfo;
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
		provide_nxt = 1;
		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	} else {
		provide_nxt = 0;
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			use_extended = 1;
			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			use_extended = 0;
			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	} else {
		use_extended = 0;
	}

	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	SCTP_BUF_LEN(ret) = 0;

	/* We need a CMSG header followed by the struct */
#if defined(__Userspace_os_Windows)
	cmh = mtod(ret, WSACMSGHDR *);
#else
	cmh = mtod(ret, struct cmsghdr *);
#endif
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
		cmh->cmsg_type = SCTP_RCVINFO;
		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
		rcvinfo->rcv_sid = sinfo->sinfo_stream;
		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
		rcvinfo->rcv_flags = sinfo->sinfo_flags;
		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
		rcvinfo->rcv_context = sinfo->sinfo_context;
		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
#if defined(__Userspace_os_Windows)
		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
#else
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
#endif
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
	}
	if (provide_nxt) {
		cmh->cmsg_level = IPPROTO_SCTP;
		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
		cmh->cmsg_type = SCTP_NXTINFO;
		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
		nxtinfo->nxt_flags = 0;
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
			nxtinfo->nxt_flags |= SCTP_UNORDERED;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
		}
		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
			nxtinfo->nxt_flags |= SCTP_COMPLETE;
		}
		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
#if defined(__Userspace_os_Windows)
		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
#else
		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
#endif
		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
	}
	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
		cmh->cmsg_level = IPPROTO_SCTP;
		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
		if (use_extended) {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
			cmh->cmsg_type = SCTP_EXTRCV;
			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
		} else {
			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			cmh->cmsg_type = SCTP_SNDRCV;
			*outinfo = *sinfo;
			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
		}
	}
	return (ret);
}
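
/*
 * Shape of the control mbuf built above, when all three features are on
 * (segment sizes depend on the platform's CMSG_SPACE rounding):
 *
 *   [cmsghdr|sctp_rcvinfo][cmsghdr|sctp_nxtinfo][cmsghdr|sctp_sndrcvinfo]
 *
 * The cmh pointer is advanced by CMSG_SPACE(), not CMSG_LEN(), so each
 * following cmsghdr starts on an aligned boundary.
 */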


static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap, i, cumackp1;
	int fnd = 0;

	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
		return;
	}
	cumackp1 = asoc->cumulative_tsn + 1;
	if (SCTP_TSN_GT(cumackp1, tsn)) {
		/*
		 * this tsn is behind the cum ack and thus we don't need to
		 * worry about it being moved from one mapping array to the
		 * other.
		 */
		return;
	}
	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
		sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
		panic("Things are really messed up now!!");
#endif
	}
	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
		asoc->highest_tsn_inside_nr_map = tsn;
	}
	if (tsn == asoc->highest_tsn_inside_map) {
		/* We must back down to see what the new highest is */
		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
				asoc->highest_tsn_inside_map = i;
				fnd = 1;
				break;
			}
		}
		if (!fnd) {
			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
		}
	}
}
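
/*
 * Gap arithmetic, by example: with mapping_array_base_tsn == 1000, TSN
 * 1007 lands on gap 7, i.e. bit 7 of the mapping arrays.  When the TSN
 * has wrapped below the base, SCTP_CALC_TSN_TO_GAP works modulo 2^32,
 * which is why plain subtraction must not be used here.
 */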


/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either: 1) run out of space. 2) run out of sequential
 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;
	uint16_t nxt_todel;
	uint16_t stream_no;
	int end = 0;
	int cntDel;
	struct sctp_queued_to_read *control, *ctl, *nctl;

	if (stcb == NULL)
		return;

	cntDel = stream_no = 0;
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket above is long gone or going.. */
abandon:
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			/* Now free the address and data */
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			/*sa_ignore FREED_MEMORY*/
		}
		return;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream OR
			 * unordered
			 */
			return;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {

			control = sctp_build_readq_entry_chk(stcb, chk);
			if (control == NULL) {
				/* out of memory? */
				return;
			}
			/* save it off for our future deliveries */
			stcb->asoc.control_pdapi = control;
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			sctp_add_to_readq(stcb->sctp_ep,
			    stcb, control, &stcb->sctp_socket->so_rcv, end,
			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
			cntDel++;
		} else {
			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
				end = 1;
			else
				end = 0;
			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
			    stcb->asoc.control_pdapi,
			    chk->data, end, chk->rec.data.TSN_seq,
			    &stcb->sctp_socket->so_rcv)) {
				/*
				 * something is very wrong, either
				 * control_pdapi is NULL, or the tail_mbuf
				 * is corrupt, or there is an EOM already on
				 * the mbuf chain.
				 */
				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
					goto abandon;
				} else {
#ifdef INVARIANTS
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						panic("This should not happen control_pdapi NULL?");
					}
					/* if we did not panic, it was an EOM */
					panic("Bad chunking ??");
#else
					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
					}
					SCTP_PRINTF("Bad chunking ??\n");
					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");

#endif
					goto abandon;
				}
			}
			cntDel++;
		}
		/* pull it we did it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
			}
		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			/*
			 * turn the flag back on since we just delivered
			 * yet another one.
			 */
			asoc->fragmented_delivery_inprogress = 1;
		}
		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
		asoc->last_strm_no_delivered = chk->rec.data.stream_number;

		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		/* free up the chk */
		chk->data = NULL;
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);

		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now lets see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
				/* Deliver more if we can. */
				if (nxt_todel == ctl->sinfo_ssn) {
					TAILQ_REMOVE(&strm->inqueue, ctl, next);
					asoc->size_on_all_streams -= ctl->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					strm->last_sequence_delivered++;
					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    ctl,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
				} else {
					break;
				}
				nxt_todel = strm->last_sequence_delivered + 1;
			}
			break;
		}
	}
}

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains, what to do when the socket buffer is FULL??
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
	/*
	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
	 * all the data in one stream this could happen quite rapidly. One
	 * could use the TSN to keep track of things, but this scheme breaks
	 * down in the other type of stream usage that could occur. Send a
	 * single msg to stream 0, send 4Billion messages to stream 1, now
	 * send a message to stream 0. You have a situation where the TSN
	 * has wrapped but not in the stream. Is this worth worrying about
	 * or should we just change our queue sort at the bottom to be by
	 * TSN?
	 *
	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
	 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
	 * assignment this could happen... and I don't see how this would be
	 * a violation. So for now I am undecided and will leave the sort by
	 * SSN alone. Maybe a hybrid approach is the answer.
	 */
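	/*
	 * For concreteness (illustrative numbers, not from a trace): SSNs
	 * are 16-bit serial numbers, so SCTP_SSN_GE(5, 65530) is true; once
	 * the stream sequence wraps, 5 counts as "at or after" 65530.  The
	 * 32-bit TSN wraps 65536 times less often, which is the mismatch
	 * the note above worries about.
	 */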
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *at;
	int queue_needed;
	uint16_t nxt_todel;
	struct mbuf *oper;

	queue_needed = 1;
	asoc->size_on_all_streams += control->length;
	sctp_ucount_incr(asoc->cnt_on_all_streams);
	strm = &asoc->strmin[control->sinfo_stream];
	nxt_todel = strm->last_sequence_delivered + 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
	}
	SCTPDBG(SCTP_DEBUG_INDATA1,
	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
	    (uint32_t) control->sinfo_stream,
	    (uint32_t) strm->last_sequence_delivered,
	    (uint32_t) nxt_todel);
	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
		/* The incoming sseq is behind where we last delivered? */
		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
		    control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
		                             0, M_NOWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    (sizeof(uint32_t) * 3);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_1);
			ippp++;
			*ippp = control->sinfo_tsn;
			ippp++;
			*ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
		}
		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_1;
		sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
		*abort_flag = 1;
		return;

	}
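	/*
	 * Note the layout of the operational error built above (and by the
	 * similar abort paths below): a protocol-violation cause followed
	 * by three debug words -- the SCTP_FROM_SCTP_INDATA + SCTP_LOC_*
	 * source location, the TSN, and (stream << 16 | ssn) of the
	 * offending chunk.  Handy when decoding a peer's ABORT dump.
	 */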
	if (nxt_todel == control->sinfo_ssn) {
		/* can be delivered right away? */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
		}
		/* EY it won't be queued if it could be delivered directly */
		queue_needed = 0;
		asoc->size_on_all_streams -= control->length;
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		strm->last_sequence_delivered++;

		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == control->sinfo_ssn) {
				TAILQ_REMOVE(&strm->inqueue, control, next);
				asoc->size_on_all_streams -= control->length;
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
					sctp_log_strm_del(control, NULL,
					    SCTP_STR_LOG_FROM_IMMED_DEL);
				}
				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb,
				    control,
				    &stcb->sctp_socket->so_rcv, 1,
				    SCTP_READ_LOCK_NOT_HELD,
				    SCTP_SO_NOT_LOCKED);
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find the correct place
		 * to put it on the queue.
		 */
		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
			goto protocol_error;
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
			}
			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, next) {
				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
					/*
					 * one in queue is bigger than the
					 * new one, insert before this one
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
						sctp_log_strm_del(control, at,
						    SCTP_STR_LOG_FROM_INSERT_MD);
					}
					TAILQ_INSERT_BEFORE(at, control, next);
					break;
				} else if (at->sinfo_ssn == control->sinfo_ssn) {
					/*
					 * Gak, he sent me a duplicate str
					 * seq number
					 */
					/*
					 * foo bar, I guess I will just free
					 * this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped.
					 * Maybe I should compare to TSN
					 * somehow... sigh for now just blow
					 * away the chunk!
					 */

					if (control->data)
						sctp_m_freem(control->data);
					control->data = NULL;
					asoc->size_on_all_streams -= control->length;
					sctp_ucount_decr(asoc->cnt_on_all_streams);
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
					return;
				} else {
					if (TAILQ_NEXT(at, next) == NULL) {
						/*
						 * We are at the end, insert
						 * it after this one
						 */
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
							sctp_log_strm_del(control, at,
							    SCTP_STR_LOG_FROM_INSERT_TL);
						}
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, control, next);
						break;
					}
				}
			}
		}
	}
}

/*
 * Returns two things: You get the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue. And you get a 1 back if
 * all of the message is ready or a 0 back if the message is still incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t *t_size)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
	}
	return (0);
}
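
/*
 * For example: a message fragmented as TSN 100 (FIRST), 101 (MIDDLE),
 * 102 (LAST), all queued, yields *t_size = the three send_sizes summed
 * and returns 1.  If 102 has not arrived yet, the walk falls off the
 * consecutive run and returns 0, leaving the partial size in *t_size for
 * the partial-delivery-point test in the caller.
 */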

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint16_t nxt_todel;
	uint32_t tsize, pd_point;

doit_again:
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		     (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and its ok to deliver
			 * but should we?
			 */
			if (stcb->sctp_socket) {
				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
				               stcb->sctp_ep->partial_delivery_point);
			} else {
				pd_point = stcb->sctp_ep->partial_delivery_point;
			}
			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
				/*
				 * Yes, we setup to start reception, by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->pdapi_ppid = chk->rec.data.payloadtype;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc);
			}
		}
	} else {
		/*
		 * Service re-assembly will deliver stream data queued at
		 * the end of fragmented delivery.. but it won't know to go
		 * back and call itself again... we do that here with the
		 * goto doit_again
		 */
		sctp_service_reassembly(stcb, asoc);
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * finished our fragmented delivery, could be more
			 * waiting?
			 */
			goto doit_again;
		}
	}
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on the
 * queue, see if anything can be delivered. If so pull it off (or as much as
 * we can). If we run out of space then we must dump what we can and set the
 * appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	uint32_t cum_ackp1, prev_tsn, post_tsn;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;
	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when only one
		 * fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery inprogress,
				 * we hit the next one and it does NOT have
				 * a FIRST fragment mark.
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				                             0, M_NOWAIT, 1, MT_DATA);

				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (sizeof(uint32_t) * 3);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_2);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_2;
				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the
				 * NEXT chunk MUST be either the LAST or
				 * MIDDLE fragment NOT a FIRST
				 */
				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
				                             0, M_NOWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					uint32_t *ippp;

					SCTP_BUF_LEN(oper) =
					    sizeof(struct sctp_paramhdr) +
					    (3 * sizeof(uint32_t));
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
					ippp = (uint32_t *) (ph + 1);
					*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_3);
					ippp++;
					*ippp = chk->rec.data.TSN_seq;
					ippp++;
					*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_3;
				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/*
				 * Here we are ok with a MIDDLE or LAST
				 * piece
				 */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
					    chk->rec.data.stream_number,
					    asoc->str_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					                             0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (sizeof(uint32_t) * 3);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_4);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_4;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
					    chk->rec.data.stream_seq,
					    asoc->ssn_of_pdapi);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					                             0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_5);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_5;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			/* A check */
			asoc->size_on_reasm_queue += chk->send_size;
			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate str seq number */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should
			 * compare to TSN somehow... sigh for now just blow
			 * away the chunk!
			 */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			return;
		} else {
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this
				 * one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end is the
			 * NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST
				 * fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					                             0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_6);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_6;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    prev->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					                             0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_7);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_7;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    prev->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					                             0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_8);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_8;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					                             0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_9);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_9;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of is my NEXT
			 * one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					                             0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_10);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_10;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG ||
			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/*
				 * Insert chk CAN be MIDDLE or FIRST NOT
				 * LAST
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					                             0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_11);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_11;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
					    chk->rec.data.stream_number,
					    next->rec.data.stream_number);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					                             0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_12);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);

					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_12;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
					    chk->rec.data.stream_seq,
					    next->rec.data.stream_seq);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					                             0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_13);
						ippp++;
						*ippp = chk->rec.data.TSN_seq;
						ippp++;
						*ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_13;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/* Do we need to do some delivery? check */
	sctp_deliver_reasm_check(stcb, asoc);
}
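
/*
 * The audits above reduce to a simple adjacency rule: within one message,
 * TSN n followed by TSN n+1 must look like FIRST->MIDDLE, FIRST->LAST,
 * MIDDLE->MIDDLE or MIDDLE->LAST, with matching stream number and (for
 * ordered data) matching stream sequence; after a LAST only a FIRST may
 * follow.  Any other combination is treated as a protocol violation and
 * aborts the association.
 */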

/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this
 * but this is doubtful. It is too bad I must worry about evil crackers sigh
 * :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
	struct sctp_tmit_chunk *at;
	uint32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete
					 * chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since its a LAST
					 * and the new chunk is a fully
					 * self-contained one.
					 */
					return (0);
				}
			}
		} else if (TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than the new chunk but does it
			 * need to be right before it?
			 */
			tsn_est = TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, it better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}
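
/*
 * Example: the queue holds TSN 50 flagged MIDDLE.  A new chunk with TSN 51
 * that claims to be a complete message (FIRST|LAST set) cannot be legal,
 * since 51 would have to continue 50's message as a MIDDLE or LAST; the
 * function returns 1 so the caller can treat the chunk as bogus.
 */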
asoc->numduptsns++; michael@0: } michael@0: asoc->send_sack = 1; michael@0: return (0); michael@0: } michael@0: /* Calculate the number of TSN's between the base and this TSN */ michael@0: SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); michael@0: if (gap >= (SCTP_MAPPING_ARRAY << 3)) { michael@0: /* Can't hold the bit in the mapping at max array, toss it */ michael@0: return (0); michael@0: } michael@0: if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) { michael@0: SCTP_TCB_LOCK_ASSERT(stcb); michael@0: if (sctp_expand_mapping_array(asoc, gap)) { michael@0: /* Can't expand, drop it */ michael@0: return (0); michael@0: } michael@0: } michael@0: if (SCTP_TSN_GT(tsn, *high_tsn)) { michael@0: *high_tsn = tsn; michael@0: } michael@0: /* See if we have received this one already */ michael@0: if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) || michael@0: SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) { michael@0: SCTP_STAT_INCR(sctps_recvdupdata); michael@0: if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { michael@0: /* Record a dup for the next outbound sack */ michael@0: asoc->dup_tsns[asoc->numduptsns] = tsn; michael@0: asoc->numduptsns++; michael@0: } michael@0: asoc->send_sack = 1; michael@0: return (0); michael@0: } michael@0: /* michael@0: * Check to see about the GONE flag, duplicates would cause a sack michael@0: * to be sent up above michael@0: */ michael@0: if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || michael@0: (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || michael@0: (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) michael@0: ) { michael@0: /* michael@0: * wait a minute, this guy is gone, there is no longer a michael@0: * receiver. Send peer an ABORT! michael@0: */ michael@0: struct mbuf *op_err; michael@0: michael@0: op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); michael@0: sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); michael@0: *abort_flag = 1; michael@0: return (0); michael@0: } michael@0: /* michael@0: * Now before going further we see if there is room. If NOT then we michael@0: * MAY let one through only IF this TSN is the one we are waiting michael@0: * for on a partial delivery API. michael@0: */ michael@0: michael@0: /* now do the tests */ michael@0: if (((asoc->cnt_on_all_streams + michael@0: asoc->cnt_on_reasm_queue + michael@0: asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) || michael@0: (((int)asoc->my_rwnd) <= 0)) { michael@0: /* michael@0: * When we have NO room in the rwnd we check to make sure michael@0: * the reader is doing its job... 
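	 *
	 * (Editor's sketch, added for illustration: the guard just above
	 * amounts to
	 *
	 *	if (asoc->cnt_on_all_streams + asoc->cnt_on_reasm_queue +
	 *	    asoc->cnt_msg_on_sb >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) ||
	 *	    (int)asoc->my_rwnd <= 0)
	 *
	 * so a chunk is only questioned once either the queued-chunk budget
	 * or the advertised window is exhausted; the code below then wakes
	 * the reader and drops the chunk only if its TSN is above everything
	 * already accepted into the mapping arrays.)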
michael@0: */ michael@0: if (stcb->sctp_socket->so_rcv.sb_cc) { michael@0: /* some to read, wake-up */ michael@0: #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) michael@0: struct socket *so; michael@0: michael@0: so = SCTP_INP_SO(stcb->sctp_ep); michael@0: atomic_add_int(&stcb->asoc.refcnt, 1); michael@0: SCTP_TCB_UNLOCK(stcb); michael@0: SCTP_SOCKET_LOCK(so, 1); michael@0: SCTP_TCB_LOCK(stcb); michael@0: atomic_subtract_int(&stcb->asoc.refcnt, 1); michael@0: if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { michael@0: /* assoc was freed while we were unlocked */ michael@0: SCTP_SOCKET_UNLOCK(so, 1); michael@0: return (0); michael@0: } michael@0: #endif michael@0: sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); michael@0: #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) michael@0: SCTP_SOCKET_UNLOCK(so, 1); michael@0: #endif michael@0: } michael@0: /* now is it in the mapping array of what we have accepted? */ michael@0: if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) && michael@0: SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { michael@0: /* Nope not in the valid range dump it */ michael@0: sctp_set_rwnd(stcb, asoc); michael@0: if ((asoc->cnt_on_all_streams + michael@0: asoc->cnt_on_reasm_queue + michael@0: asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) { michael@0: SCTP_STAT_INCR(sctps_datadropchklmt); michael@0: } else { michael@0: SCTP_STAT_INCR(sctps_datadroprwnd); michael@0: } michael@0: *break_flag = 1; michael@0: return (0); michael@0: } michael@0: } michael@0: strmno = ntohs(ch->dp.stream_id); michael@0: if (strmno >= asoc->streamincnt) { michael@0: struct sctp_paramhdr *phdr; michael@0: struct mbuf *mb; michael@0: michael@0: mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2), michael@0: 0, M_NOWAIT, 1, MT_DATA); michael@0: if (mb != NULL) { michael@0: /* add some space up front so prepend will work well */ michael@0: SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr)); michael@0: phdr = mtod(mb, struct sctp_paramhdr *); michael@0: /* michael@0: * Error causes are just param's and this one has michael@0: * two back to back phdr, one with the error type michael@0: * and size, the other with the streamid and a rsvd michael@0: */ michael@0: SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2); michael@0: phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM); michael@0: phdr->param_length = michael@0: htons(sizeof(struct sctp_paramhdr) * 2); michael@0: phdr++; michael@0: /* We insert the stream in the type field */ michael@0: phdr->param_type = ch->dp.stream_id; michael@0: /* And set the length to 0 for the rsvd field */ michael@0: phdr->param_length = 0; michael@0: sctp_queue_op_err(stcb, mb); michael@0: } michael@0: SCTP_STAT_INCR(sctps_badsid); michael@0: SCTP_TCB_LOCK_ASSERT(stcb); michael@0: SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); michael@0: if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { michael@0: asoc->highest_tsn_inside_nr_map = tsn; michael@0: } michael@0: if (tsn == (asoc->cumulative_tsn + 1)) { michael@0: /* Update cum-ack */ michael@0: asoc->cumulative_tsn = tsn; michael@0: } michael@0: return (0); michael@0: } michael@0: /* michael@0: * Before we continue lets validate that we are not being fooled by michael@0: * an evil attacker. We can only have 4k chunks based on our TSN michael@0: * spread allowed by the mapping array 512 * 8 bits, so there is no michael@0: * way our stream sequence numbers could have wrapped. We of course michael@0: * only validate the FIRST fragment so the bit must be set. 
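	 *
	 * (Editor's note, added: the arithmetic here is that a mapping array
	 * of SCTP_MAPPING_ARRAY (512 bytes, per the comment above) covers at
	 * most 512 * 8 = 4096 TSNs, while stream sequence numbers are 16 bits
	 * wide and wrap only every 65536 messages; within one window the SSN
	 * therefore cannot wrap, and a FIRST fragment whose SSN is at or
	 * behind last_sequence_delivered cannot be legitimate.)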
michael@0: */ michael@0: strmseq = ntohs(ch->dp.stream_sequence); michael@0: #ifdef SCTP_ASOCLOG_OF_TSNS michael@0: SCTP_TCB_LOCK_ASSERT(stcb); michael@0: if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { michael@0: asoc->tsn_in_at = 0; michael@0: asoc->tsn_in_wrapped = 1; michael@0: } michael@0: asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; michael@0: asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno; michael@0: asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq; michael@0: asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length; michael@0: asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags; michael@0: asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb; michael@0: asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at; michael@0: asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1; michael@0: asoc->tsn_in_at++; michael@0: #endif michael@0: if ((chunk_flags & SCTP_DATA_FIRST_FRAG) && michael@0: (TAILQ_EMPTY(&asoc->resetHead)) && michael@0: (chunk_flags & SCTP_DATA_UNORDERED) == 0 && michael@0: SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) { michael@0: /* The incoming sseq is behind where we last delivered? */ michael@0: SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n", michael@0: strmseq, asoc->strmin[strmno].last_sequence_delivered); michael@0: oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), michael@0: 0, M_NOWAIT, 1, MT_DATA); michael@0: if (oper) { michael@0: struct sctp_paramhdr *ph; michael@0: uint32_t *ippp; michael@0: michael@0: SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + michael@0: (3 * sizeof(uint32_t)); michael@0: ph = mtod(oper, struct sctp_paramhdr *); michael@0: ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); michael@0: ph->param_length = htons(SCTP_BUF_LEN(oper)); michael@0: ippp = (uint32_t *) (ph + 1); michael@0: *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_14); michael@0: ippp++; michael@0: *ippp = tsn; michael@0: ippp++; michael@0: *ippp = ((strmno << 16) | strmseq); michael@0: michael@0: } michael@0: stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_14; michael@0: sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); michael@0: *abort_flag = 1; michael@0: return (0); michael@0: } michael@0: /************************************ michael@0: * From here down we may find ch-> invalid michael@0: * so its a good idea NOT to use it. 
 *************************************/

	the_len = (chk_length - sizeof(struct sctp_data_chunk));
	if (last_chunk == 0) {
		dmbuf = SCTP_M_COPYM(*m,
		    (offset + sizeof(struct sctp_data_chunk)),
		    the_len, M_NOWAIT);
#ifdef SCTP_MBUF_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
			struct mbuf *mat;

			for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
				if (SCTP_BUF_IS_EXTENDED(mat)) {
					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
				}
			}
		}
#endif
	} else {
		/* We can steal the last chunk */
		int l_len;

		dmbuf = *m;
		/* lop off the top part */
		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
			l_len = SCTP_BUF_LEN(dmbuf);
		} else {
			/*
			 * need to count up the size; hopefully
			 * we do not hit this too often :-0
			 */
			struct mbuf *lat;

			l_len = 0;
			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
				l_len += SCTP_BUF_LEN(lat);
			}
		}
		if (l_len > the_len) {
			/* Trim the excess (rounding) bytes off the end too */
			m_adj(dmbuf, -(l_len - the_len));
		}
	}
	if (dmbuf == NULL) {
		SCTP_STAT_INCR(sctps_nomem);
		return (0);
	}
	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
	    asoc->fragmented_delivery_inprogress == 0 &&
	    TAILQ_EMPTY(&asoc->resetHead) &&
	    ((ordered == 0) ||
	     ((uint16_t)(asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
	      TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
		/* Candidate for express delivery */
		/*
		 * It's not fragmented, no PD-API is up, nothing is in the
		 * delivery queue, it's un-ordered OR ordered and the next to
		 * deliver AND nothing else is stuck on the stream queue,
		 * and there is room for it in the socket buffer.  Let's just
		 * stuff it up the buffer....
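		 *
		 * (Editor's condensation, not in the original: the qualifying
		 * test above is, in shorthand,
		 *
		 *	not fragmented && no PD-API in progress &&
		 *	no pending stream resets &&
		 *	(unordered ||
		 *	 (strmseq == last_sequence_delivered + 1 &&
		 *	  the stream's in-queue is empty))
		 *
		 * and only such chunks may bypass the reassembly and
		 * per-stream machinery entirely.)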
michael@0: */ michael@0: michael@0: /* It would be nice to avoid this copy if we could :< */ michael@0: sctp_alloc_a_readq(stcb, control); michael@0: sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, michael@0: protocol_id, michael@0: strmno, strmseq, michael@0: chunk_flags, michael@0: dmbuf); michael@0: if (control == NULL) { michael@0: goto failed_express_del; michael@0: } michael@0: SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); michael@0: if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { michael@0: asoc->highest_tsn_inside_nr_map = tsn; michael@0: } michael@0: sctp_add_to_readq(stcb->sctp_ep, stcb, michael@0: control, &stcb->sctp_socket->so_rcv, michael@0: 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); michael@0: michael@0: if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { michael@0: /* for ordered, bump what we delivered */ michael@0: asoc->strmin[strmno].last_sequence_delivered++; michael@0: } michael@0: SCTP_STAT_INCR(sctps_recvexpress); michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { michael@0: sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, michael@0: SCTP_STR_LOG_FROM_EXPRS_DEL); michael@0: } michael@0: control = NULL; michael@0: michael@0: goto finish_express_del; michael@0: } michael@0: failed_express_del: michael@0: /* If we reach here this is a new chunk */ michael@0: chk = NULL; michael@0: control = NULL; michael@0: /* Express for fragmented delivery? */ michael@0: if ((asoc->fragmented_delivery_inprogress) && michael@0: (stcb->asoc.control_pdapi) && michael@0: (asoc->str_of_pdapi == strmno) && michael@0: (asoc->ssn_of_pdapi == strmseq) michael@0: ) { michael@0: control = stcb->asoc.control_pdapi; michael@0: if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) { michael@0: /* Can't be another first? 
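			 * (Editor's note: while control_pdapi is tracking
			 * str_of_pdapi/ssn_of_pdapi, a second FIRST fragment
			 * for that same message cannot be appended, so the
			 * code bails out to failed_pdapi_express_del below.)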
*/ michael@0: goto failed_pdapi_express_del; michael@0: } michael@0: if (tsn == (control->sinfo_tsn + 1)) { michael@0: /* Yep, we can add it on */ michael@0: int end = 0; michael@0: michael@0: if (chunk_flags & SCTP_DATA_LAST_FRAG) { michael@0: end = 1; michael@0: } michael@0: if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end, michael@0: tsn, michael@0: &stcb->sctp_socket->so_rcv)) { michael@0: SCTP_PRINTF("Append fails end:%d\n", end); michael@0: goto failed_pdapi_express_del; michael@0: } michael@0: michael@0: SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); michael@0: if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { michael@0: asoc->highest_tsn_inside_nr_map = tsn; michael@0: } michael@0: SCTP_STAT_INCR(sctps_recvexpressm); michael@0: asoc->tsn_last_delivered = tsn; michael@0: asoc->fragment_flags = chunk_flags; michael@0: asoc->tsn_of_pdapi_last_delivered = tsn; michael@0: asoc->last_flags_delivered = chunk_flags; michael@0: asoc->last_strm_seq_delivered = strmseq; michael@0: asoc->last_strm_no_delivered = strmno; michael@0: if (end) { michael@0: /* clean up the flags and such */ michael@0: asoc->fragmented_delivery_inprogress = 0; michael@0: if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { michael@0: asoc->strmin[strmno].last_sequence_delivered++; michael@0: } michael@0: stcb->asoc.control_pdapi = NULL; michael@0: if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) { michael@0: /* There could be another message ready */ michael@0: need_reasm_check = 1; michael@0: } michael@0: } michael@0: control = NULL; michael@0: goto finish_express_del; michael@0: } michael@0: } michael@0: failed_pdapi_express_del: michael@0: control = NULL; michael@0: if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { michael@0: SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); michael@0: if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { michael@0: asoc->highest_tsn_inside_nr_map = tsn; michael@0: } michael@0: } else { michael@0: SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); michael@0: if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { michael@0: asoc->highest_tsn_inside_map = tsn; michael@0: } michael@0: } michael@0: if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { michael@0: sctp_alloc_a_chunk(stcb, chk); michael@0: if (chk == NULL) { michael@0: /* No memory so we drop the chunk */ michael@0: SCTP_STAT_INCR(sctps_nomem); michael@0: if (last_chunk == 0) { michael@0: /* we copied it, free the copy */ michael@0: sctp_m_freem(dmbuf); michael@0: } michael@0: return (0); michael@0: } michael@0: chk->rec.data.TSN_seq = tsn; michael@0: chk->no_fr_allowed = 0; michael@0: chk->rec.data.stream_seq = strmseq; michael@0: chk->rec.data.stream_number = strmno; michael@0: chk->rec.data.payloadtype = protocol_id; michael@0: chk->rec.data.context = stcb->asoc.context; michael@0: chk->rec.data.doing_fast_retransmit = 0; michael@0: chk->rec.data.rcv_flags = chunk_flags; michael@0: chk->asoc = asoc; michael@0: chk->send_size = the_len; michael@0: chk->whoTo = net; michael@0: atomic_add_int(&net->ref_count, 1); michael@0: chk->data = dmbuf; michael@0: } else { michael@0: sctp_alloc_a_readq(stcb, control); michael@0: sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, michael@0: protocol_id, michael@0: strmno, strmseq, michael@0: chunk_flags, michael@0: dmbuf); michael@0: if (control == NULL) { michael@0: /* No memory so we drop the chunk */ michael@0: SCTP_STAT_INCR(sctps_nomem); michael@0: if (last_chunk == 0) { michael@0: /* we copied it, free the copy */ michael@0: 
sctp_m_freem(dmbuf); michael@0: } michael@0: return (0); michael@0: } michael@0: control->length = the_len; michael@0: } michael@0: michael@0: /* Mark it as received */ michael@0: /* Now queue it where it belongs */ michael@0: if (control != NULL) { michael@0: /* First a sanity check */ michael@0: if (asoc->fragmented_delivery_inprogress) { michael@0: /* michael@0: * Ok, we have a fragmented delivery in progress if michael@0: * this chunk is next to deliver OR belongs in our michael@0: * view to the reassembly, the peer is evil or michael@0: * broken. michael@0: */ michael@0: uint32_t estimate_tsn; michael@0: michael@0: estimate_tsn = asoc->tsn_last_delivered + 1; michael@0: if (TAILQ_EMPTY(&asoc->reasmqueue) && michael@0: (estimate_tsn == control->sinfo_tsn)) { michael@0: /* Evil/Broke peer */ michael@0: sctp_m_freem(control->data); michael@0: control->data = NULL; michael@0: if (control->whoFrom) { michael@0: sctp_free_remote_addr(control->whoFrom); michael@0: control->whoFrom = NULL; michael@0: } michael@0: sctp_free_a_readq(stcb, control); michael@0: oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), michael@0: 0, M_NOWAIT, 1, MT_DATA); michael@0: if (oper) { michael@0: struct sctp_paramhdr *ph; michael@0: uint32_t *ippp; michael@0: michael@0: SCTP_BUF_LEN(oper) = michael@0: sizeof(struct sctp_paramhdr) + michael@0: (3 * sizeof(uint32_t)); michael@0: ph = mtod(oper, struct sctp_paramhdr *); michael@0: ph->param_type = michael@0: htons(SCTP_CAUSE_PROTOCOL_VIOLATION); michael@0: ph->param_length = htons(SCTP_BUF_LEN(oper)); michael@0: ippp = (uint32_t *) (ph + 1); michael@0: *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_15); michael@0: ippp++; michael@0: *ippp = tsn; michael@0: ippp++; michael@0: *ippp = ((strmno << 16) | strmseq); michael@0: } michael@0: stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_15; michael@0: sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); michael@0: *abort_flag = 1; michael@0: return (0); michael@0: } else { michael@0: if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { michael@0: sctp_m_freem(control->data); michael@0: control->data = NULL; michael@0: if (control->whoFrom) { michael@0: sctp_free_remote_addr(control->whoFrom); michael@0: control->whoFrom = NULL; michael@0: } michael@0: sctp_free_a_readq(stcb, control); michael@0: michael@0: oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), michael@0: 0, M_NOWAIT, 1, MT_DATA); michael@0: if (oper) { michael@0: struct sctp_paramhdr *ph; michael@0: uint32_t *ippp; michael@0: michael@0: SCTP_BUF_LEN(oper) = michael@0: sizeof(struct sctp_paramhdr) + michael@0: ( 3 * sizeof(uint32_t)); michael@0: ph = mtod(oper, michael@0: struct sctp_paramhdr *); michael@0: ph->param_type = michael@0: htons(SCTP_CAUSE_PROTOCOL_VIOLATION); michael@0: ph->param_length = michael@0: htons(SCTP_BUF_LEN(oper)); michael@0: ippp = (uint32_t *) (ph + 1); michael@0: *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_16); michael@0: ippp++; michael@0: *ippp = tsn; michael@0: ippp++; michael@0: *ippp = ((strmno << 16) | strmseq); michael@0: } michael@0: stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_16; michael@0: sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); michael@0: *abort_flag = 1; michael@0: return (0); michael@0: } michael@0: } michael@0: } else { michael@0: /* No PDAPI running */ michael@0: if (!TAILQ_EMPTY(&asoc->reasmqueue)) { michael@0: /* michael@0: * Reassembly queue is NOT 
empty, validate that
				 * this TSN does not need to be in the
				 * reassembly queue.  If it does, then our
				 * peer is broken or evil.
				 */
				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
					sctp_m_freem(control->data);
					control->data = NULL;
					if (control->whoFrom) {
						sctp_free_remote_addr(control->whoFrom);
						control->whoFrom = NULL;
					}
					sctp_free_a_readq(stcb, control);
					oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
					    0, M_NOWAIT, 1, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						uint32_t *ippp;

						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr) +
						    (3 * sizeof(uint32_t));
						ph = mtod(oper, struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(SCTP_BUF_LEN(oper));
						ippp = (uint32_t *) (ph + 1);
						*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
						ippp++;
						*ippp = tsn;
						ippp++;
						*ippp = ((strmno << 16) | strmseq);
					}
					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
					sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
					*abort_flag = 1;
					return (0);
				}
			}
		}
		/* ok, if we reach here we have passed the sanity checks */
		if (chunk_flags & SCTP_DATA_UNORDERED) {
			/* queue directly into socket buffer */
			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
		} else {
			/*
			 * Special check for when streams are resetting.  We
			 * could be smarter about this and check the actual
			 * stream to see if it is not being reset...  That way
			 * we would not create a HOLB when amongst streams
			 * being reset and those not being reset.
			 *
			 * We take complete messages that have a stream reset
			 * intervening (aka the TSN is after where our cum-ack
			 * needs to be) off and put them on a
			 * pending_reply_queue.  The reassembly ones we do not
			 * have to worry about since they are all sorted and
			 * processed by TSN order.  It is only the singletons
			 * I must worry about.
			 */
			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
			    SCTP_TSN_GT(tsn, liste->tsn)) {
				/*
				 * yep, it's past where we need to reset... go
				 * ahead and queue it.
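				 *
				 * (Editor's sketch of the insertion that
				 * follows, for reference: pending_reply_queue
				 * is kept sorted by sinfo_tsn, so the new
				 * control is placed with, roughly,
				 *
				 *	TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn)
				 *		if (!SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
				 *			TAILQ_INSERT_BEFORE(ctlOn, control, next);
				 *			break;
				 *		}
				 *
				 * falling back to TAILQ_INSERT_TAIL when
				 * every queued TSN is smaller.)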
michael@0: */ michael@0: if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { michael@0: /* first one on */ michael@0: TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); michael@0: } else { michael@0: struct sctp_queued_to_read *ctlOn, *nctlOn; michael@0: unsigned char inserted = 0; michael@0: michael@0: TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) { michael@0: if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) { michael@0: continue; michael@0: } else { michael@0: /* found it */ michael@0: TAILQ_INSERT_BEFORE(ctlOn, control, next); michael@0: inserted = 1; michael@0: break; michael@0: } michael@0: } michael@0: if (inserted == 0) { michael@0: /* michael@0: * must be put at end, use michael@0: * prevP (all setup from michael@0: * loop) to setup nextP. michael@0: */ michael@0: TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); michael@0: } michael@0: } michael@0: } else { michael@0: sctp_queue_data_to_stream(stcb, asoc, control, abort_flag); michael@0: if (*abort_flag) { michael@0: return (0); michael@0: } michael@0: } michael@0: } michael@0: } else { michael@0: /* Into the re-assembly queue */ michael@0: sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag); michael@0: if (*abort_flag) { michael@0: /* michael@0: * the assoc is now gone and chk was put onto the michael@0: * reasm queue, which has all been freed. michael@0: */ michael@0: *m = NULL; michael@0: return (0); michael@0: } michael@0: } michael@0: finish_express_del: michael@0: if (tsn == (asoc->cumulative_tsn + 1)) { michael@0: /* Update cum-ack */ michael@0: asoc->cumulative_tsn = tsn; michael@0: } michael@0: if (last_chunk) { michael@0: *m = NULL; michael@0: } michael@0: if (ordered) { michael@0: SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); michael@0: } else { michael@0: SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); michael@0: } michael@0: SCTP_STAT_INCR(sctps_recvdata); michael@0: /* Set it present please */ michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { michael@0: sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN); michael@0: } michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { michael@0: sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, michael@0: asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); michael@0: } michael@0: /* check the special flag for stream resets */ michael@0: if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && michael@0: SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { michael@0: /* michael@0: * we have finished working through the backlogged TSN's now michael@0: * time to reset streams. 1: call reset function. 2: free michael@0: * pending_reply space 3: distribute any chunks in michael@0: * pending_reply_queue. 
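	 *
	 * (Editor's note, added: in step 3 below a queued control is handed
	 * to sctp_queue_data_to_stream() only while ctl->sinfo_tsn <= the
	 * tsn of the next pending reset -- the NOT of SCTP_TSN_GT() -- or
	 * unconditionally once resetHead has gone empty.)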
michael@0: */ michael@0: struct sctp_queued_to_read *ctl, *nctl; michael@0: michael@0: sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); michael@0: TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); michael@0: SCTP_FREE(liste, SCTP_M_STRESET); michael@0: /*sa_ignore FREED_MEMORY*/ michael@0: liste = TAILQ_FIRST(&asoc->resetHead); michael@0: if (TAILQ_EMPTY(&asoc->resetHead)) { michael@0: /* All can be removed */ michael@0: TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) { michael@0: TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); michael@0: sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); michael@0: if (*abort_flag) { michael@0: return (0); michael@0: } michael@0: } michael@0: } else { michael@0: TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) { michael@0: if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) { michael@0: break; michael@0: } michael@0: /* michael@0: * if ctl->sinfo_tsn is <= liste->tsn we can michael@0: * process it which is the NOT of michael@0: * ctl->sinfo_tsn > liste->tsn michael@0: */ michael@0: TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); michael@0: sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); michael@0: if (*abort_flag) { michael@0: return (0); michael@0: } michael@0: } michael@0: } michael@0: /* michael@0: * Now service re-assembly to pick up anything that has been michael@0: * held on reassembly queue? michael@0: */ michael@0: sctp_deliver_reasm_check(stcb, asoc); michael@0: need_reasm_check = 0; michael@0: } michael@0: michael@0: if (need_reasm_check) { michael@0: /* Another one waits ? */ michael@0: sctp_deliver_reasm_check(stcb, asoc); michael@0: } michael@0: return (1); michael@0: } michael@0: michael@0: int8_t sctp_map_lookup_tab[256] = { michael@0: 0, 1, 0, 2, 0, 1, 0, 3, michael@0: 0, 1, 0, 2, 0, 1, 0, 4, michael@0: 0, 1, 0, 2, 0, 1, 0, 3, michael@0: 0, 1, 0, 2, 0, 1, 0, 5, michael@0: 0, 1, 0, 2, 0, 1, 0, 3, michael@0: 0, 1, 0, 2, 0, 1, 0, 4, michael@0: 0, 1, 0, 2, 0, 1, 0, 3, michael@0: 0, 1, 0, 2, 0, 1, 0, 6, michael@0: 0, 1, 0, 2, 0, 1, 0, 3, michael@0: 0, 1, 0, 2, 0, 1, 0, 4, michael@0: 0, 1, 0, 2, 0, 1, 0, 3, michael@0: 0, 1, 0, 2, 0, 1, 0, 5, michael@0: 0, 1, 0, 2, 0, 1, 0, 3, michael@0: 0, 1, 0, 2, 0, 1, 0, 4, michael@0: 0, 1, 0, 2, 0, 1, 0, 3, michael@0: 0, 1, 0, 2, 0, 1, 0, 7, michael@0: 0, 1, 0, 2, 0, 1, 0, 3, michael@0: 0, 1, 0, 2, 0, 1, 0, 4, michael@0: 0, 1, 0, 2, 0, 1, 0, 3, michael@0: 0, 1, 0, 2, 0, 1, 0, 5, michael@0: 0, 1, 0, 2, 0, 1, 0, 3, michael@0: 0, 1, 0, 2, 0, 1, 0, 4, michael@0: 0, 1, 0, 2, 0, 1, 0, 3, michael@0: 0, 1, 0, 2, 0, 1, 0, 6, michael@0: 0, 1, 0, 2, 0, 1, 0, 3, michael@0: 0, 1, 0, 2, 0, 1, 0, 4, michael@0: 0, 1, 0, 2, 0, 1, 0, 3, michael@0: 0, 1, 0, 2, 0, 1, 0, 5, michael@0: 0, 1, 0, 2, 0, 1, 0, 3, michael@0: 0, 1, 0, 2, 0, 1, 0, 4, michael@0: 0, 1, 0, 2, 0, 1, 0, 3, michael@0: 0, 1, 0, 2, 0, 1, 0, 8 michael@0: }; michael@0: michael@0: michael@0: void michael@0: sctp_slide_mapping_arrays(struct sctp_tcb *stcb) michael@0: { michael@0: /* michael@0: * Now we also need to check the mapping array in a couple of ways. michael@0: * 1) Did we move the cum-ack point? michael@0: * michael@0: * When you first glance at this you might think michael@0: * that all entries that make up the postion michael@0: * of the cum-ack would be in the nr-mapping array michael@0: * only.. i.e. things up to the cum-ack are always michael@0: * deliverable. 
Thats true with one exception, when michael@0: * its a fragmented message we may not deliver the data michael@0: * until some threshold (or all of it) is in place. So michael@0: * we must OR the nr_mapping_array and mapping_array to michael@0: * get a true picture of the cum-ack. michael@0: */ michael@0: struct sctp_association *asoc; michael@0: int at; michael@0: uint8_t val; michael@0: int slide_from, slide_end, lgap, distance; michael@0: uint32_t old_cumack, old_base, old_highest, highest_tsn; michael@0: michael@0: asoc = &stcb->asoc; michael@0: michael@0: old_cumack = asoc->cumulative_tsn; michael@0: old_base = asoc->mapping_array_base_tsn; michael@0: old_highest = asoc->highest_tsn_inside_map; michael@0: /* michael@0: * We could probably improve this a small bit by calculating the michael@0: * offset of the current cum-ack as the starting point. michael@0: */ michael@0: at = 0; michael@0: for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { michael@0: val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from]; michael@0: if (val == 0xff) { michael@0: at += 8; michael@0: } else { michael@0: /* there is a 0 bit */ michael@0: at += sctp_map_lookup_tab[val]; michael@0: break; michael@0: } michael@0: } michael@0: asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1); michael@0: michael@0: if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) && michael@0: SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) { michael@0: #ifdef INVARIANTS michael@0: panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", michael@0: asoc->cumulative_tsn, asoc->highest_tsn_inside_map); michael@0: #else michael@0: SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", michael@0: asoc->cumulative_tsn, asoc->highest_tsn_inside_map); michael@0: sctp_print_mapping_array(asoc); michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { michael@0: sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); michael@0: } michael@0: asoc->highest_tsn_inside_map = asoc->cumulative_tsn; michael@0: asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; michael@0: #endif michael@0: } michael@0: if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { michael@0: highest_tsn = asoc->highest_tsn_inside_nr_map; michael@0: } else { michael@0: highest_tsn = asoc->highest_tsn_inside_map; michael@0: } michael@0: if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) { michael@0: /* The complete array was completed by a single FR */ michael@0: /* highest becomes the cum-ack */ michael@0: int clr; michael@0: #ifdef INVARIANTS michael@0: unsigned int i; michael@0: #endif michael@0: michael@0: /* clear the array */ michael@0: clr = ((at+7) >> 3); michael@0: if (clr > asoc->mapping_array_size) { michael@0: clr = asoc->mapping_array_size; michael@0: } michael@0: memset(asoc->mapping_array, 0, clr); michael@0: memset(asoc->nr_mapping_array, 0, clr); michael@0: #ifdef INVARIANTS michael@0: for (i = 0; i < asoc->mapping_array_size; i++) { michael@0: if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) { michael@0: SCTP_PRINTF("Error Mapping array's not clean at clear\n"); michael@0: sctp_print_mapping_array(asoc); michael@0: } michael@0: } michael@0: #endif michael@0: asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; michael@0: asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn; michael@0: } else if (at >= 8) { 
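		/*
		 * (Editor's worked example, not in the original file: with
		 * mapping_array_base_tsn = 1000, the first byte that is not
		 * 0xff at slide_from = 3, and highest_tsn = 1080, the code
		 * below computes lgap = 80, slide_end = 80 >> 3 = 10, and
		 * distance = (10 - 3) + 1 = 8; those 8 bytes of both arrays
		 * are copied down to offset 0, and mapping_array_base_tsn
		 * advances by slide_from << 3 = 24 TSNs to 1024.)
		 */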
michael@0: /* we can slide the mapping array down */ michael@0: /* slide_from holds where we hit the first NON 0xff byte */ michael@0: michael@0: /* michael@0: * now calculate the ceiling of the move using our highest michael@0: * TSN value michael@0: */ michael@0: SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn); michael@0: slide_end = (lgap >> 3); michael@0: if (slide_end < slide_from) { michael@0: sctp_print_mapping_array(asoc); michael@0: #ifdef INVARIANTS michael@0: panic("impossible slide"); michael@0: #else michael@0: SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n", michael@0: lgap, slide_end, slide_from, at); michael@0: return; michael@0: #endif michael@0: } michael@0: if (slide_end > asoc->mapping_array_size) { michael@0: #ifdef INVARIANTS michael@0: panic("would overrun buffer"); michael@0: #else michael@0: SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n", michael@0: asoc->mapping_array_size, slide_end); michael@0: slide_end = asoc->mapping_array_size; michael@0: #endif michael@0: } michael@0: distance = (slide_end - slide_from) + 1; michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { michael@0: sctp_log_map(old_base, old_cumack, old_highest, michael@0: SCTP_MAP_PREPARE_SLIDE); michael@0: sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end, michael@0: (uint32_t) lgap, SCTP_MAP_SLIDE_FROM); michael@0: } michael@0: if (distance + slide_from > asoc->mapping_array_size || michael@0: distance < 0) { michael@0: /* michael@0: * Here we do NOT slide forward the array so that michael@0: * hopefully when more data comes in to fill it up michael@0: * we will be able to slide it forward. Really I michael@0: * don't think this should happen :-0 michael@0: */ michael@0: michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { michael@0: sctp_log_map((uint32_t) distance, (uint32_t) slide_from, michael@0: (uint32_t) asoc->mapping_array_size, michael@0: SCTP_MAP_SLIDE_NONE); michael@0: } michael@0: } else { michael@0: int ii; michael@0: michael@0: for (ii = 0; ii < distance; ii++) { michael@0: asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii]; michael@0: asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii]; michael@0: michael@0: } michael@0: for (ii = distance; ii < asoc->mapping_array_size; ii++) { michael@0: asoc->mapping_array[ii] = 0; michael@0: asoc->nr_mapping_array[ii] = 0; michael@0: } michael@0: if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) { michael@0: asoc->highest_tsn_inside_map += (slide_from << 3); michael@0: } michael@0: if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) { michael@0: asoc->highest_tsn_inside_nr_map += (slide_from << 3); michael@0: } michael@0: asoc->mapping_array_base_tsn += (slide_from << 3); michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { michael@0: sctp_log_map(asoc->mapping_array_base_tsn, michael@0: asoc->cumulative_tsn, asoc->highest_tsn_inside_map, michael@0: SCTP_MAP_SLIDE_RESULT); michael@0: } michael@0: } michael@0: } michael@0: } michael@0: michael@0: void michael@0: sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap) michael@0: { michael@0: struct sctp_association *asoc; michael@0: uint32_t highest_tsn; michael@0: michael@0: asoc = &stcb->asoc; michael@0: if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { michael@0: highest_tsn = asoc->highest_tsn_inside_nr_map; michael@0: } else { michael@0: 
highest_tsn = asoc->highest_tsn_inside_map;
	}

	/*
	 * Now we need to see if we need to queue a sack or just start the
	 * timer (if allowed).
	 */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * Ok, special case: in the SHUTDOWN-SENT case, we make sure
		 * the SACK timer is off and instead send a SHUTDOWN and a
		 * SACK.
		 */
		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
		}
		sctp_send_shutdown(stcb,
		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
	} else {
		int is_a_gap;

		/* is there a gap now? */
		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);

		/*
		 * CMT DAC algorithm: increase number of packets
		 * received since last ack
		 */
		stcb->asoc.cmt_dac_pkts_rcvd++;

		if ((stcb->asoc.send_sack == 1) ||        /* We need to send a SACK */
		    ((was_a_gap) && (is_a_gap == 0)) ||   /* was a gap, but no longer is one */
		    (stcb->asoc.numduptsns) ||            /* we have dup's */
		    (is_a_gap) ||                         /* is still a gap */
		    (stcb->asoc.delayed_ack == 0) ||      /* delayed sack disabled */
		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) { /* hit limit of pkts */
			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
			    (stcb->asoc.send_sack == 0) &&
			    (stcb->asoc.numduptsns == 0) &&
			    (stcb->asoc.delayed_ack) &&
			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
				/*
				 * CMT DAC algorithm: with CMT, delay acks
				 * even in the face of reordering.  Acks that
				 * do not have to be sent for the reasons
				 * above will be delayed; that is, acks that
				 * would have been sent due to gap reports
				 * are delayed with DAC.  Start the delayed
				 * ack timer.
				 */
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL);
			} else {
				/*
				 * Ok, we must build a SACK since the timer
				 * is pending, we got our first packet OR
				 * there are gaps or duplicates.
				 */
				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
			}
		} else {
			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
				    stcb->sctp_ep, stcb, NULL);
			}
		}
	}
}

void
sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	uint32_t tsize, pd_point;
	uint16_t nxt_todel;

	if (asoc->fragmented_delivery_inprogress) {
		sctp_service_reassembly(stcb, asoc);
	}
	/* Can we proceed further, i.e.
the PD-API is complete */ michael@0: if (asoc->fragmented_delivery_inprogress) { michael@0: /* no */ michael@0: return; michael@0: } michael@0: /* michael@0: * Now is there some other chunk I can deliver from the reassembly michael@0: * queue. michael@0: */ michael@0: doit_again: michael@0: chk = TAILQ_FIRST(&asoc->reasmqueue); michael@0: if (chk == NULL) { michael@0: asoc->size_on_reasm_queue = 0; michael@0: asoc->cnt_on_reasm_queue = 0; michael@0: return; michael@0: } michael@0: nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1; michael@0: if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) && michael@0: ((nxt_todel == chk->rec.data.stream_seq) || michael@0: (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) { michael@0: /* michael@0: * Yep the first one is here. We setup to start reception, michael@0: * by backing down the TSN just in case we can't deliver. michael@0: */ michael@0: michael@0: /* michael@0: * Before we start though either all of the message should michael@0: * be here or the socket buffer max or nothing on the michael@0: * delivery queue and something can be delivered. michael@0: */ michael@0: if (stcb->sctp_socket) { michael@0: pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT, michael@0: stcb->sctp_ep->partial_delivery_point); michael@0: } else { michael@0: pd_point = stcb->sctp_ep->partial_delivery_point; michael@0: } michael@0: if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) { michael@0: asoc->fragmented_delivery_inprogress = 1; michael@0: asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1; michael@0: asoc->str_of_pdapi = chk->rec.data.stream_number; michael@0: asoc->ssn_of_pdapi = chk->rec.data.stream_seq; michael@0: asoc->pdapi_ppid = chk->rec.data.payloadtype; michael@0: asoc->fragment_flags = chk->rec.data.rcv_flags; michael@0: sctp_service_reassembly(stcb, asoc); michael@0: if (asoc->fragmented_delivery_inprogress == 0) { michael@0: goto doit_again; michael@0: } michael@0: } michael@0: } michael@0: } michael@0: michael@0: int michael@0: sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, michael@0: struct sockaddr *src, struct sockaddr *dst, michael@0: struct sctphdr *sh, struct sctp_inpcb *inp, michael@0: struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t *high_tsn, michael@0: #if defined(__FreeBSD__) michael@0: uint8_t use_mflowid, uint32_t mflowid, michael@0: #endif michael@0: uint32_t vrf_id, uint16_t port) michael@0: { michael@0: struct sctp_data_chunk *ch, chunk_buf; michael@0: struct sctp_association *asoc; michael@0: int num_chunks = 0; /* number of control chunks processed */ michael@0: int stop_proc = 0; michael@0: int chk_length, break_flag, last_chunk; michael@0: int abort_flag = 0, was_a_gap; michael@0: struct mbuf *m; michael@0: uint32_t highest_tsn; michael@0: michael@0: /* set the rwnd */ michael@0: sctp_set_rwnd(stcb, &stcb->asoc); michael@0: michael@0: m = *mm; michael@0: SCTP_TCB_LOCK_ASSERT(stcb); michael@0: asoc = &stcb->asoc; michael@0: if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { michael@0: highest_tsn = asoc->highest_tsn_inside_nr_map; michael@0: } else { michael@0: highest_tsn = asoc->highest_tsn_inside_map; michael@0: } michael@0: was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); michael@0: /* michael@0: * setup where we got the last DATA packet from for any SACK that michael@0: * may need to go out. Don't bump the net. This is done ONLY when a michael@0: * chunk is assigned. 
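	 *
	 * (Editor's note, added: whether a SACK actually goes out is decided
	 * by sctp_sack_check(), called at the end of this function;
	 * condensed, an immediate SACK is built when
	 *
	 *	send_sack || (was_a_gap && !is_a_gap) || numduptsns ||
	 *	is_a_gap || delayed_ack == 0 || data_pkts_seen >= sack_freq
	 *
	 * modulo the CMT DAC exception, and otherwise the delayed-ack timer
	 * is started if it is not already running.)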
	 */
	asoc->last_data_chunk_from = net;

#ifndef __Panda__
	/*-
	 * Now before we proceed we must figure out if this is a wasted
	 * cluster... i.e. it is a small packet sent in and yet the driver
	 * underneath allocated a full cluster for it.  If so we must copy it
	 * to a smaller mbuf and free up the cluster mbuf.  This will help
	 * with cluster starvation.  Note that for __Panda__ we don't do this,
	 * since it has clusters all the way down to 64 bytes.
	 */
	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
		/* we only handle mbufs that are singletons... not chains */
		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
		if (m) {
			/* ok, let's see if we can copy the data up */
			caddr_t *from, *to;

			/* get the pointers and copy */
			to = mtod(m, caddr_t *);
			from = mtod((*mm), caddr_t *);
			memcpy(to, from, SCTP_BUF_LEN((*mm)));
			/* copy the length and free up the old */
			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
			sctp_m_freem(*mm);
			/* success, back copy */
			*mm = m;
		} else {
			/* We are in trouble in the mbuf world... yikes */
			m = *mm;
		}
	}
#endif
	/* get pointer to the first chunk header */
	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
	    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
	if (ch == NULL) {
		return (1);
	}
	/*
	 * process all DATA chunks...
	 */
	*high_tsn = asoc->cumulative_tsn;
	break_flag = 0;
	asoc->data_pkts_seen++;
	while (stop_proc == 0) {
		/* validate chunk length */
		chk_length = ntohs(ch->ch.chunk_length);
		if (length - *offset < chk_length) {
			/* all done, mutilated chunk */
			stop_proc = 1;
			continue;
		}
		if (ch->ch.chunk_type == SCTP_DATA) {
			if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
				/*
				 * Need to send an abort since we had an
				 * invalid data chunk.
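				 *
				 * (Editor's note: the test above rejects any
				 * DATA chunk whose length is <=
				 * sizeof(struct sctp_data_chunk), i.e. one
				 * carrying the 16-byte DATA header but not a
				 * single byte of user payload.)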
michael@0: */ michael@0: struct mbuf *op_err; michael@0: michael@0: op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)), michael@0: 0, M_NOWAIT, 1, MT_DATA); michael@0: michael@0: if (op_err) { michael@0: struct sctp_paramhdr *ph; michael@0: uint32_t *ippp; michael@0: michael@0: SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) + michael@0: (2 * sizeof(uint32_t)); michael@0: ph = mtod(op_err, struct sctp_paramhdr *); michael@0: ph->param_type = michael@0: htons(SCTP_CAUSE_PROTOCOL_VIOLATION); michael@0: ph->param_length = htons(SCTP_BUF_LEN(op_err)); michael@0: ippp = (uint32_t *) (ph + 1); michael@0: *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_19); michael@0: ippp++; michael@0: *ippp = asoc->cumulative_tsn; michael@0: michael@0: } michael@0: stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_19; michael@0: sctp_abort_association(inp, stcb, m, iphlen, michael@0: src, dst, sh, op_err, michael@0: #if defined(__FreeBSD__) michael@0: use_mflowid, mflowid, michael@0: #endif michael@0: vrf_id, port); michael@0: return (2); michael@0: } michael@0: #ifdef SCTP_AUDITING_ENABLED michael@0: sctp_audit_log(0xB1, 0); michael@0: #endif michael@0: if (SCTP_SIZE32(chk_length) == (length - *offset)) { michael@0: last_chunk = 1; michael@0: } else { michael@0: last_chunk = 0; michael@0: } michael@0: if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch, michael@0: chk_length, net, high_tsn, &abort_flag, &break_flag, michael@0: last_chunk)) { michael@0: num_chunks++; michael@0: } michael@0: if (abort_flag) michael@0: return (2); michael@0: michael@0: if (break_flag) { michael@0: /* michael@0: * Set because of out of rwnd space and no michael@0: * drop rep space left. michael@0: */ michael@0: stop_proc = 1; michael@0: continue; michael@0: } michael@0: } else { michael@0: /* not a data chunk in the data region */ michael@0: switch (ch->ch.chunk_type) { michael@0: case SCTP_INITIATION: michael@0: case SCTP_INITIATION_ACK: michael@0: case SCTP_SELECTIVE_ACK: michael@0: case SCTP_NR_SELECTIVE_ACK: michael@0: case SCTP_HEARTBEAT_REQUEST: michael@0: case SCTP_HEARTBEAT_ACK: michael@0: case SCTP_ABORT_ASSOCIATION: michael@0: case SCTP_SHUTDOWN: michael@0: case SCTP_SHUTDOWN_ACK: michael@0: case SCTP_OPERATION_ERROR: michael@0: case SCTP_COOKIE_ECHO: michael@0: case SCTP_COOKIE_ACK: michael@0: case SCTP_ECN_ECHO: michael@0: case SCTP_ECN_CWR: michael@0: case SCTP_SHUTDOWN_COMPLETE: michael@0: case SCTP_AUTHENTICATION: michael@0: case SCTP_ASCONF_ACK: michael@0: case SCTP_PACKET_DROPPED: michael@0: case SCTP_STREAM_RESET: michael@0: case SCTP_FORWARD_CUM_TSN: michael@0: case SCTP_ASCONF: michael@0: /* michael@0: * Now, what do we do with KNOWN chunks that michael@0: * are NOT in the right place? michael@0: * michael@0: * For now, I do nothing but ignore them. We michael@0: * may later want to add sysctl stuff to michael@0: * switch out and do either an ABORT() or michael@0: * possibly process them. 
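				 *
				 * (Editor's note: contrast this with the
				 * default case below, where UNKNOWN chunk
				 * types follow the standard SCTP type-bit
				 * rules -- report the chunk in an operational
				 * error when (type & 0x40) is set, and keep
				 * processing the rest of the packet only when
				 * (type & 0x80) is set.)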
michael@0: */ michael@0: if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) { michael@0: struct mbuf *op_err; michael@0: michael@0: op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION); michael@0: sctp_abort_association(inp, stcb, michael@0: m, iphlen, michael@0: src, dst, michael@0: sh, op_err, michael@0: #if defined(__FreeBSD__) michael@0: use_mflowid, mflowid, michael@0: #endif michael@0: vrf_id, port); michael@0: return (2); michael@0: } michael@0: break; michael@0: default: michael@0: /* unknown chunk type, use bit rules */ michael@0: if (ch->ch.chunk_type & 0x40) { michael@0: /* Add a error report to the queue */ michael@0: struct mbuf *merr; michael@0: struct sctp_paramhdr *phd; michael@0: michael@0: merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA); michael@0: if (merr) { michael@0: phd = mtod(merr, struct sctp_paramhdr *); michael@0: /* michael@0: * We cheat and use param michael@0: * type since we did not michael@0: * bother to define a error michael@0: * cause struct. They are michael@0: * the same basic format michael@0: * with different names. michael@0: */ michael@0: phd->param_type = michael@0: htons(SCTP_CAUSE_UNRECOG_CHUNK); michael@0: phd->param_length = michael@0: htons(chk_length + sizeof(*phd)); michael@0: SCTP_BUF_LEN(merr) = sizeof(*phd); michael@0: SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT); michael@0: if (SCTP_BUF_NEXT(merr)) { michael@0: if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) { michael@0: sctp_m_freem(merr); michael@0: } else { michael@0: sctp_queue_op_err(stcb, merr); michael@0: } michael@0: } else { michael@0: sctp_m_freem(merr); michael@0: } michael@0: } michael@0: } michael@0: if ((ch->ch.chunk_type & 0x80) == 0) { michael@0: /* discard the rest of this packet */ michael@0: stop_proc = 1; michael@0: } /* else skip this bad chunk and michael@0: * continue... */ michael@0: break; michael@0: } /* switch of chunk type */ michael@0: } michael@0: *offset += SCTP_SIZE32(chk_length); michael@0: if ((*offset >= length) || stop_proc) { michael@0: /* no more data left in the mbuf chain */ michael@0: stop_proc = 1; michael@0: continue; michael@0: } michael@0: ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, michael@0: sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); michael@0: if (ch == NULL) { michael@0: *offset = length; michael@0: stop_proc = 1; michael@0: continue; michael@0: } michael@0: } michael@0: if (break_flag) { michael@0: /* michael@0: * we need to report rwnd overrun drops. michael@0: */ michael@0: sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0); michael@0: } michael@0: if (num_chunks) { michael@0: /* michael@0: * Did we get data, if so update the time for auto-close and michael@0: * give peer credit for being alive. 
michael@0: */ michael@0: SCTP_STAT_INCR(sctps_recvpktwithdata); michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { michael@0: sctp_misc_ints(SCTP_THRESHOLD_CLEAR, michael@0: stcb->asoc.overall_error_count, michael@0: 0, michael@0: SCTP_FROM_SCTP_INDATA, michael@0: __LINE__); michael@0: } michael@0: stcb->asoc.overall_error_count = 0; michael@0: (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); michael@0: } michael@0: /* now service all of the reassm queue if needed */ michael@0: if (!(TAILQ_EMPTY(&asoc->reasmqueue))) michael@0: sctp_service_queues(stcb, asoc); michael@0: michael@0: if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { michael@0: /* Assure that we ack right away */ michael@0: stcb->asoc.send_sack = 1; michael@0: } michael@0: /* Start a sack timer or QUEUE a SACK for sending */ michael@0: sctp_sack_check(stcb, was_a_gap); michael@0: return (0); michael@0: } michael@0: michael@0: static int michael@0: sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn, michael@0: uint16_t frag_strt, uint16_t frag_end, int nr_sacking, michael@0: int *num_frs, michael@0: uint32_t *biggest_newly_acked_tsn, michael@0: uint32_t *this_sack_lowest_newack, michael@0: int *rto_ok) michael@0: { michael@0: struct sctp_tmit_chunk *tp1; michael@0: unsigned int theTSN; michael@0: int j, wake_him = 0, circled = 0; michael@0: michael@0: /* Recover the tp1 we last saw */ michael@0: tp1 = *p_tp1; michael@0: if (tp1 == NULL) { michael@0: tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); michael@0: } michael@0: for (j = frag_strt; j <= frag_end; j++) { michael@0: theTSN = j + last_tsn; michael@0: while (tp1) { michael@0: if (tp1->rec.data.doing_fast_retransmit) michael@0: (*num_frs) += 1; michael@0: michael@0: /*- michael@0: * CMT: CUCv2 algorithm. For each TSN being michael@0: * processed from the sent queue, track the michael@0: * next expected pseudo-cumack, or michael@0: * rtx_pseudo_cumack, if required. Separate michael@0: * cumack trackers for first transmissions, michael@0: * and retransmissions. michael@0: */ michael@0: if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && michael@0: (tp1->snd_count == 1)) { michael@0: tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq; michael@0: tp1->whoTo->find_pseudo_cumack = 0; michael@0: } michael@0: if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && michael@0: (tp1->snd_count > 1)) { michael@0: tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq; michael@0: tp1->whoTo->find_rtx_pseudo_cumack = 0; michael@0: } michael@0: if (tp1->rec.data.TSN_seq == theTSN) { michael@0: if (tp1->sent != SCTP_DATAGRAM_UNSENT) { michael@0: /*- michael@0: * must be held until michael@0: * cum-ack passes michael@0: */ michael@0: if (tp1->sent < SCTP_DATAGRAM_RESEND) { michael@0: /*- michael@0: * If it is less than RESEND, it is michael@0: * now no-longer in flight. michael@0: * Higher values may already be set michael@0: * via previous Gap Ack Blocks... michael@0: * i.e. ACKED or RESEND. michael@0: */ michael@0: if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, michael@0: *biggest_newly_acked_tsn)) { michael@0: *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq; michael@0: } michael@0: /*- michael@0: * CMT: SFR algo (and HTNA) - set michael@0: * saw_newack to 1 for dest being michael@0: * newly acked. update michael@0: * this_sack_highest_newack if michael@0: * appropriate. 
michael@0: */ michael@0: if (tp1->rec.data.chunk_was_revoked == 0) michael@0: tp1->whoTo->saw_newack = 1; michael@0: michael@0: if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, michael@0: tp1->whoTo->this_sack_highest_newack)) { michael@0: tp1->whoTo->this_sack_highest_newack = michael@0: tp1->rec.data.TSN_seq; michael@0: } michael@0: /*- michael@0: * CMT DAC algo: also update michael@0: * this_sack_lowest_newack michael@0: */ michael@0: if (*this_sack_lowest_newack == 0) { michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { michael@0: sctp_log_sack(*this_sack_lowest_newack, michael@0: last_tsn, michael@0: tp1->rec.data.TSN_seq, michael@0: 0, michael@0: 0, michael@0: SCTP_LOG_TSN_ACKED); michael@0: } michael@0: *this_sack_lowest_newack = tp1->rec.data.TSN_seq; michael@0: } michael@0: /*- michael@0: * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp michael@0: * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set michael@0: * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be michael@0: * updated. Also trigger search for the next expected (rtx-)pseudo-cumack. michael@0: * Separate pseudo_cumack trackers for first transmissions and michael@0: * retransmissions. michael@0: */ michael@0: if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) { michael@0: if (tp1->rec.data.chunk_was_revoked == 0) { michael@0: tp1->whoTo->new_pseudo_cumack = 1; michael@0: } michael@0: tp1->whoTo->find_pseudo_cumack = 1; michael@0: } michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { michael@0: sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); michael@0: } michael@0: if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) { michael@0: if (tp1->rec.data.chunk_was_revoked == 0) { michael@0: tp1->whoTo->new_pseudo_cumack = 1; michael@0: } michael@0: tp1->whoTo->find_rtx_pseudo_cumack = 1; michael@0: } michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { michael@0: sctp_log_sack(*biggest_newly_acked_tsn, michael@0: last_tsn, michael@0: tp1->rec.data.TSN_seq, michael@0: frag_strt, michael@0: frag_end, michael@0: SCTP_LOG_TSN_ACKED); michael@0: } michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { michael@0: sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, michael@0: tp1->whoTo->flight_size, michael@0: tp1->book_size, michael@0: (uintptr_t)tp1->whoTo, michael@0: tp1->rec.data.TSN_seq); michael@0: } michael@0: sctp_flight_size_decrease(tp1); michael@0: if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { michael@0: (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, michael@0: tp1); michael@0: } michael@0: sctp_total_flight_decrease(stcb, tp1); michael@0: michael@0: tp1->whoTo->net_ack += tp1->send_size; michael@0: if (tp1->snd_count < 2) { michael@0: /*- michael@0: * True non-retransmited chunk michael@0: */ michael@0: tp1->whoTo->net_ack2 += tp1->send_size; michael@0: michael@0: /*- michael@0: * update RTO too ? 
michael@0: */ michael@0: if (tp1->do_rtt) { michael@0: if (*rto_ok) { michael@0: tp1->whoTo->RTO = michael@0: sctp_calculate_rto(stcb, michael@0: &stcb->asoc, michael@0: tp1->whoTo, michael@0: &tp1->sent_rcv_time, michael@0: sctp_align_safe_nocopy, michael@0: SCTP_RTT_FROM_DATA); michael@0: *rto_ok = 0; michael@0: } michael@0: if (tp1->whoTo->rto_needed == 0) { michael@0: tp1->whoTo->rto_needed = 1; michael@0: } michael@0: tp1->do_rtt = 0; michael@0: } michael@0: } michael@0: michael@0: } michael@0: if (tp1->sent <= SCTP_DATAGRAM_RESEND) { michael@0: if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, michael@0: stcb->asoc.this_sack_highest_gap)) { michael@0: stcb->asoc.this_sack_highest_gap = michael@0: tp1->rec.data.TSN_seq; michael@0: } michael@0: if (tp1->sent == SCTP_DATAGRAM_RESEND) { michael@0: sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); michael@0: #ifdef SCTP_AUDITING_ENABLED michael@0: sctp_audit_log(0xB2, michael@0: (stcb->asoc.sent_queue_retran_cnt & 0x000000ff)); michael@0: #endif michael@0: } michael@0: } michael@0: /*- michael@0: * All chunks NOT UNSENT fall through here and are marked michael@0: * (leave PR-SCTP ones that are to skip alone though) michael@0: */ michael@0: if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) && michael@0: (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { michael@0: tp1->sent = SCTP_DATAGRAM_MARKED; michael@0: } michael@0: if (tp1->rec.data.chunk_was_revoked) { michael@0: /* deflate the cwnd */ michael@0: tp1->whoTo->cwnd -= tp1->book_size; michael@0: tp1->rec.data.chunk_was_revoked = 0; michael@0: } michael@0: /* NR Sack code here */ michael@0: if (nr_sacking && michael@0: (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { michael@0: if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { michael@0: stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--; michael@0: #ifdef INVARIANTS michael@0: } else { michael@0: panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); michael@0: #endif michael@0: } michael@0: tp1->sent = SCTP_DATAGRAM_NR_ACKED; michael@0: if (tp1->data) { michael@0: /* sa_ignore NO_NULL_CHK */ michael@0: sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); michael@0: sctp_m_freem(tp1->data); michael@0: tp1->data = NULL; michael@0: } michael@0: wake_him++; michael@0: } michael@0: } michael@0: break; michael@0: } /* if (tp1->TSN_seq == theTSN) */ michael@0: if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) { michael@0: break; michael@0: } michael@0: tp1 = TAILQ_NEXT(tp1, sctp_next); michael@0: if ((tp1 == NULL) && (circled == 0)) { michael@0: circled++; michael@0: tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); michael@0: } michael@0: } /* end while (tp1) */ michael@0: if (tp1 == NULL) { michael@0: circled = 0; michael@0: tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); michael@0: } michael@0: /* In case the fragments were not in order we must reset */ michael@0: } /* end for (j = fragStart */ michael@0: *p_tp1 = tp1; michael@0: return (wake_him); /* Return value only used for nr-sack */ michael@0: } michael@0: michael@0: michael@0: static int michael@0: sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, michael@0: uint32_t last_tsn, uint32_t *biggest_tsn_acked, michael@0: uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack, michael@0: int num_seg, int num_nr_seg, int *rto_ok) michael@0: { michael@0: struct sctp_gap_ack_block *frag, block; michael@0: struct sctp_tmit_chunk *tp1; michael@0: int i; michael@0: int num_frs = 0; michael@0: int chunk_freed; michael@0: 
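michael@0: 	/*
michael@0: 	 * Each gap ack block read below is a pair of 16-bit offsets
michael@0: 	 * relative to the cumulative TSN ack carried in the SACK,
michael@0: 	 * mirroring the on-wire layout of struct sctp_gap_ack_block
michael@0: 	 * (the authoritative definition lives in the SCTP headers):
michael@0: 	 *
michael@0: 	 *   struct sctp_gap_ack_block {
michael@0: 	 *           uint16_t start;    -- offset of first TSN in block
michael@0: 	 *           uint16_t end;      -- offset of last TSN in block
michael@0: 	 *   };
michael@0: 	 *
michael@0: 	 * Worked example (values illustrative): a SACK with cum-ack 1000
michael@0: 	 * and a block of start 3, end 5 reports TSNs 1003 through 1005
michael@0: 	 * as received.
michael@0: 	 */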
int non_revocable; michael@0: uint16_t frag_strt, frag_end, prev_frag_end; michael@0: michael@0: tp1 = TAILQ_FIRST(&asoc->sent_queue); michael@0: prev_frag_end = 0; michael@0: chunk_freed = 0; michael@0: michael@0: for (i = 0; i < (num_seg + num_nr_seg); i++) { michael@0: if (i == num_seg) { michael@0: prev_frag_end = 0; michael@0: tp1 = TAILQ_FIRST(&asoc->sent_queue); michael@0: } michael@0: frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, michael@0: sizeof(struct sctp_gap_ack_block), (uint8_t *) &block); michael@0: *offset += sizeof(block); michael@0: if (frag == NULL) { michael@0: return (chunk_freed); michael@0: } michael@0: frag_strt = ntohs(frag->start); michael@0: frag_end = ntohs(frag->end); michael@0: michael@0: if (frag_strt > frag_end) { michael@0: /* This gap report is malformed, skip it. */ michael@0: continue; michael@0: } michael@0: if (frag_strt <= prev_frag_end) { michael@0: /* This gap report is not in order, so restart. */ michael@0: tp1 = TAILQ_FIRST(&asoc->sent_queue); michael@0: } michael@0: if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) { michael@0: *biggest_tsn_acked = last_tsn + frag_end; michael@0: } michael@0: if (i < num_seg) { michael@0: non_revocable = 0; michael@0: } else { michael@0: non_revocable = 1; michael@0: } michael@0: if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end, michael@0: non_revocable, &num_frs, biggest_newly_acked_tsn, michael@0: this_sack_lowest_newack, rto_ok)) { michael@0: chunk_freed = 1; michael@0: } michael@0: prev_frag_end = frag_end; michael@0: } michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { michael@0: if (num_frs) michael@0: sctp_log_fr(*biggest_tsn_acked, michael@0: *biggest_newly_acked_tsn, michael@0: last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); michael@0: } michael@0: return (chunk_freed); michael@0: } michael@0: michael@0: static void michael@0: sctp_check_for_revoked(struct sctp_tcb *stcb, michael@0: struct sctp_association *asoc, uint32_t cumack, michael@0: uint32_t biggest_tsn_acked) michael@0: { michael@0: struct sctp_tmit_chunk *tp1; michael@0: michael@0: TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { michael@0: if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) { michael@0: /* michael@0: * ok this guy is either ACK or MARKED. If it is michael@0: * ACKED it has been previously acked but not this michael@0: * time i.e. revoked. If it is MARKED it was ACK'ed michael@0: * again. michael@0: */ michael@0: if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) { michael@0: break; michael@0: } michael@0: if (tp1->sent == SCTP_DATAGRAM_ACKED) { michael@0: /* it has been revoked */ michael@0: tp1->sent = SCTP_DATAGRAM_SENT; michael@0: tp1->rec.data.chunk_was_revoked = 1; michael@0: /* We must add this stuff back in to michael@0: * assure timers and such get started. michael@0: */ michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { michael@0: sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, michael@0: tp1->whoTo->flight_size, michael@0: tp1->book_size, michael@0: (uintptr_t)tp1->whoTo, michael@0: tp1->rec.data.TSN_seq); michael@0: } michael@0: sctp_flight_size_increase(tp1); michael@0: sctp_total_flight_increase(stcb, tp1); michael@0: /* We inflate the cwnd to compensate for our michael@0: * artificial inflation of the flight_size. 
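michael@0: 				 * Example of revocation (TSNs illustrative): SACK #1
michael@0: 				 * gap-acks TSN 105, so 105 is marked ACKED and leaves
michael@0: 				 * the flight. SACK #2 then arrives with a cum-ack below
michael@0: 				 * 105 and no gap block covering it: the peer has reneged,
michael@0: 				 * so 105 is put back to SENT here, re-enters the flight,
michael@0: 				 * and the cwnd is inflated by book_size to match (it is
michael@0: 				 * deflated again when the chunk is finally acked).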
michael@0: */ michael@0: tp1->whoTo->cwnd += tp1->book_size; michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { michael@0: sctp_log_sack(asoc->last_acked_seq, michael@0: cumack, michael@0: tp1->rec.data.TSN_seq, michael@0: 0, michael@0: 0, michael@0: SCTP_LOG_TSN_REVOKED); michael@0: } michael@0: } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { michael@0: /* it has been re-acked in this SACK */ michael@0: tp1->sent = SCTP_DATAGRAM_ACKED; michael@0: } michael@0: } michael@0: if (tp1->sent == SCTP_DATAGRAM_UNSENT) michael@0: break; michael@0: } michael@0: } michael@0: michael@0: michael@0: static void michael@0: sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, michael@0: uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) michael@0: { michael@0: struct sctp_tmit_chunk *tp1; michael@0: int strike_flag = 0; michael@0: struct timeval now; michael@0: int tot_retrans = 0; michael@0: uint32_t sending_seq; michael@0: struct sctp_nets *net; michael@0: int num_dests_sacked = 0; michael@0: michael@0: /* michael@0: * select the sending_seq, this is either the next thing ready to be michael@0: * sent but not transmitted, OR, the next seq we assign. michael@0: */ michael@0: tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); michael@0: if (tp1 == NULL) { michael@0: sending_seq = asoc->sending_seq; michael@0: } else { michael@0: sending_seq = tp1->rec.data.TSN_seq; michael@0: } michael@0: michael@0: /* CMT DAC algo: finding out if SACK is a mixed SACK */ michael@0: if ((asoc->sctp_cmt_on_off > 0) && michael@0: SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { michael@0: TAILQ_FOREACH(net, &asoc->nets, sctp_next) { michael@0: if (net->saw_newack) michael@0: num_dests_sacked++; michael@0: } michael@0: } michael@0: if (stcb->asoc.peer_supports_prsctp) { michael@0: (void)SCTP_GETTIME_TIMEVAL(&now); michael@0: } michael@0: TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { michael@0: strike_flag = 0; michael@0: if (tp1->no_fr_allowed) { michael@0: /* this one had a timeout or something */ michael@0: continue; michael@0: } michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { michael@0: if (tp1->sent < SCTP_DATAGRAM_RESEND) michael@0: sctp_log_fr(biggest_tsn_newly_acked, michael@0: tp1->rec.data.TSN_seq, michael@0: tp1->sent, michael@0: SCTP_FR_LOG_CHECK_STRIKE); michael@0: } michael@0: if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) || michael@0: tp1->sent == SCTP_DATAGRAM_UNSENT) { michael@0: /* done */ michael@0: break; michael@0: } michael@0: if (stcb->asoc.peer_supports_prsctp) { michael@0: if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { michael@0: /* Is it expired? 
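michael@0: 				 * For the TTL policy, rec.data.timetodrop was filled in
michael@0: 				 * at send time with the transmit time plus the lifetime
michael@0: 				 * the user supplied, so a plain timestamp comparison
michael@0: 				 * below suffices.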
michael@0: 				 */
#ifndef __FreeBSD__
michael@0: 				if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
#else
michael@0: 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
#endif
michael@0: 					/* Yes so drop it */
michael@0: 					if (tp1->data != NULL) {
michael@0: 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
michael@0: 						                                 SCTP_SO_NOT_LOCKED);
michael@0: 					}
michael@0: 					continue;
michael@0: 				}
michael@0: 			}
michael@0: 
michael@0: 		}
michael@0: 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
michael@0: 			/* we are beyond the tsn in the sack */
michael@0: 			break;
michael@0: 		}
michael@0: 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
michael@0: 			/* either a RESEND, ACKED, or MARKED */
michael@0: 			/* skip */
michael@0: 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
michael@0: 				/* Continue striking FWD-TSN chunks */
michael@0: 				tp1->rec.data.fwd_tsn_cnt++;
michael@0: 			}
michael@0: 			continue;
michael@0: 		}
michael@0: 		/*
michael@0: 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
michael@0: 		 */
michael@0: 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
michael@0: 			/*
michael@0: 			 * No new acks were received for data sent to this
michael@0: 			 * dest. Therefore, according to the SFR algo for
michael@0: 			 * CMT, no data sent to this dest can be marked for
michael@0: 			 * FR using this SACK.
michael@0: 			 */
michael@0: 			continue;
michael@0: 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
michael@0: 		                                     tp1->whoTo->this_sack_highest_newack)) {
michael@0: 			/*
michael@0: 			 * CMT: New acks were received for data sent to
michael@0: 			 * this dest. But no new acks were seen for data
michael@0: 			 * sent after tp1. Therefore, according to the SFR
michael@0: 			 * algo for CMT, tp1 cannot be marked for FR using
michael@0: 			 * this SACK. This step covers part of the DAC algo
michael@0: 			 * and the HTNA algo as well.
michael@0: 			 */
michael@0: 			continue;
michael@0: 		}
michael@0: 		/*
michael@0: 		 * Here we check to see if we have already done a FR
michael@0: 		 * and if so we see if the biggest TSN we saw in the sack is
michael@0: 		 * smaller than the recovery point. If so we don't strike
michael@0: 		 * the tsn... otherwise we CAN strike the TSN.
michael@0: 		 */
michael@0: 		/*
michael@0: 		 * @@@ JRI: Check for CMT
michael@0: 		 * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
michael@0: 		 */
michael@0: 		if (accum_moved && asoc->fast_retran_loss_recovery) {
michael@0: 			/*
michael@0: 			 * Strike the TSN if in fast-recovery and cum-ack
michael@0: 			 * moved.
michael@0: 			 */
michael@0: 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
michael@0: 				sctp_log_fr(biggest_tsn_newly_acked,
michael@0: 				            tp1->rec.data.TSN_seq,
michael@0: 				            tp1->sent,
michael@0: 				            SCTP_FR_LOG_STRIKE_CHUNK);
michael@0: 			}
michael@0: 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
michael@0: 				tp1->sent++;
michael@0: 			}
michael@0: 			if ((asoc->sctp_cmt_on_off > 0) &&
michael@0: 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
michael@0: 				/*
michael@0: 				 * CMT DAC algorithm: If SACK flag is set to
michael@0: 				 * 0, then lowest_newack test will not pass
michael@0: 				 * because it would have been set to the
michael@0: 				 * cumack earlier. If not already to be
michael@0: 				 * rtx'd, If not a mixed sack and if tp1 is
michael@0: 				 * not between two sacked TSNs, then mark by
michael@0: 				 * one more.
michael@0: 				 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
michael@0: 				 * two packets have been received after this missing TSN.
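michael@0: 				 *
michael@0: 				 * Worked example (numbers illustrative): TSN 1004 is
michael@0: 				 * missing, this SACK gap-acks 1005 and 1006, and its
michael@0: 				 * DAC flag reports that two packets arrived since the
michael@0: 				 * last SACK. With a single destination sacked and 1004
michael@0: 				 * below this_sack_lowest_newack, 1004 takes a second
michael@0: 				 * strike from this one SACK, compensating for the peer
michael@0: 				 * sending only one SACK per two received packets.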
michael@0: 				 */
michael@0: 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
michael@0: 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
michael@0: 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
michael@0: 						sctp_log_fr(16 + num_dests_sacked,
michael@0: 						            tp1->rec.data.TSN_seq,
michael@0: 						            tp1->sent,
michael@0: 						            SCTP_FR_LOG_STRIKE_CHUNK);
michael@0: 					}
michael@0: 					tp1->sent++;
michael@0: 				}
michael@0: 			}
michael@0: 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
michael@0: 		           (asoc->sctp_cmt_on_off == 0)) {
michael@0: 			/*
michael@0: 			 * For those that have done a FR we must take
michael@0: 			 * special consideration if we strike. I.e., the
michael@0: 			 * biggest_newly_acked must be higher than the
michael@0: 			 * sending_seq at the time we did the FR.
michael@0: 			 */
michael@0: 			if (
#ifdef SCTP_FR_TO_ALTERNATE
michael@0: 				/*
michael@0: 				 * If FR's go to new networks, then we must only do
michael@0: 				 * this for singly homed asoc's. However if the FR's
michael@0: 				 * go to the same network (Armando's work) then it's
michael@0: 				 * ok to FR multiple times.
michael@0: 				 */
michael@0: 				(asoc->numnets < 2)
#else
michael@0: 				(1)
#endif
michael@0: 				) {
michael@0: 
michael@0: 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
michael@0: 				                tp1->rec.data.fast_retran_tsn)) {
michael@0: 					/*
michael@0: 					 * Strike the TSN, since this ack is
michael@0: 					 * beyond where things were when we
michael@0: 					 * did a FR.
michael@0: 					 */
michael@0: 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
michael@0: 						sctp_log_fr(biggest_tsn_newly_acked,
michael@0: 						            tp1->rec.data.TSN_seq,
michael@0: 						            tp1->sent,
michael@0: 						            SCTP_FR_LOG_STRIKE_CHUNK);
michael@0: 					}
michael@0: 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
michael@0: 						tp1->sent++;
michael@0: 					}
michael@0: 					strike_flag = 1;
michael@0: 					if ((asoc->sctp_cmt_on_off > 0) &&
michael@0: 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
michael@0: 						/*
michael@0: 						 * CMT DAC algorithm: If
michael@0: 						 * SACK flag is set to 0,
michael@0: 						 * then lowest_newack test
michael@0: 						 * will not pass because it
michael@0: 						 * would have been set to
michael@0: 						 * the cumack earlier. If
michael@0: 						 * not already to be rtx'd,
michael@0: 						 * If not a mixed sack and
michael@0: 						 * if tp1 is not between two
michael@0: 						 * sacked TSNs, then mark by
michael@0: 						 * one more.
michael@0: 						 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that
michael@0: 						 * two packets have been received after this missing TSN.
michael@0: 						 */
michael@0: 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
michael@0: 						    (num_dests_sacked == 1) &&
michael@0: 						    SCTP_TSN_GT(this_sack_lowest_newack,
michael@0: 						                tp1->rec.data.TSN_seq)) {
michael@0: 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
michael@0: 								sctp_log_fr(32 + num_dests_sacked,
michael@0: 								            tp1->rec.data.TSN_seq,
michael@0: 								            tp1->sent,
michael@0: 								            SCTP_FR_LOG_STRIKE_CHUNK);
michael@0: 							}
michael@0: 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
michael@0: 								tp1->sent++;
michael@0: 							}
michael@0: 						}
michael@0: 					}
michael@0: 				}
michael@0: 			}
michael@0: 			/*
michael@0: 			 * JRI: TODO: remove code for HTNA algo. CMT's
michael@0: 			 * SFR algo covers HTNA.
michael@0: 			 */
michael@0: 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
michael@0: 		                       biggest_tsn_newly_acked)) {
michael@0: 			/*
michael@0: 			 * We don't strike these: This is the HTNA
michael@0: 			 * algorithm, i.e. we don't strike if our TSN is
michael@0: 			 * larger than the Highest TSN Newly Acked.
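michael@0: 			 *
michael@0: 			 * Example (TSNs illustrative): if the highest TSN newly
michael@0: 			 * acked by this SACK is 1010, a pending chunk with TSN
michael@0: 			 * 1012 is not struck: there is no ack evidence for
michael@0: 			 * anything sent after it, so its absence from the SACK
michael@0: 			 * proves nothing about loss.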
michael@0: */ michael@0: ; michael@0: } else { michael@0: /* Strike the TSN */ michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { michael@0: sctp_log_fr(biggest_tsn_newly_acked, michael@0: tp1->rec.data.TSN_seq, michael@0: tp1->sent, michael@0: SCTP_FR_LOG_STRIKE_CHUNK); michael@0: } michael@0: if (tp1->sent < SCTP_DATAGRAM_RESEND) { michael@0: tp1->sent++; michael@0: } michael@0: if ((asoc->sctp_cmt_on_off > 0) && michael@0: SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { michael@0: /* michael@0: * CMT DAC algorithm: If SACK flag is set to michael@0: * 0, then lowest_newack test will not pass michael@0: * because it would have been set to the michael@0: * cumack earlier. If not already to be michael@0: * rtx'd, If not a mixed sack and if tp1 is michael@0: * not between two sacked TSNs, then mark by michael@0: * one more. michael@0: * NOTE that we are marking by one additional time since the SACK DAC flag indicates that michael@0: * two packets have been received after this missing TSN. michael@0: */ michael@0: if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && michael@0: SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) { michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { michael@0: sctp_log_fr(48 + num_dests_sacked, michael@0: tp1->rec.data.TSN_seq, michael@0: tp1->sent, michael@0: SCTP_FR_LOG_STRIKE_CHUNK); michael@0: } michael@0: tp1->sent++; michael@0: } michael@0: } michael@0: } michael@0: if (tp1->sent == SCTP_DATAGRAM_RESEND) { michael@0: struct sctp_nets *alt; michael@0: michael@0: /* fix counts and things */ michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { michael@0: sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, michael@0: (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), michael@0: tp1->book_size, michael@0: (uintptr_t)tp1->whoTo, michael@0: tp1->rec.data.TSN_seq); michael@0: } michael@0: if (tp1->whoTo) { michael@0: tp1->whoTo->net_ack++; michael@0: sctp_flight_size_decrease(tp1); michael@0: if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { michael@0: (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, michael@0: tp1); michael@0: } michael@0: } michael@0: michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { michael@0: sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, michael@0: asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); michael@0: } michael@0: /* add back to the rwnd */ michael@0: asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); michael@0: michael@0: /* remove from the total flight */ michael@0: sctp_total_flight_decrease(stcb, tp1); michael@0: michael@0: if ((stcb->asoc.peer_supports_prsctp) && michael@0: (PR_SCTP_RTX_ENABLED(tp1->flags))) { michael@0: /* Has it been retransmitted tv_sec times? - we store the retran count there. 
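michael@0: 				 *
michael@0: 				 * For the RTX policy the timetodrop field is reused as
michael@0: 				 * plain storage: tv_sec holds the maximum number of
michael@0: 				 * retransmissions the user allowed, not a time, so the
michael@0: 				 * check below is simply snd_count > limit.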
michael@0: 				 */
michael@0: 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
michael@0: 					/* Yes, so drop it */
michael@0: 					if (tp1->data != NULL) {
michael@0: 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
michael@0: 						                                 SCTP_SO_NOT_LOCKED);
michael@0: 					}
michael@0: 					/* Make sure to flag we had a FR */
michael@0: 					tp1->whoTo->net_ack++;
michael@0: 					continue;
michael@0: 				}
michael@0: 			}
michael@0: 			/* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */
michael@0: 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
michael@0: 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
michael@0: 				            0, SCTP_FR_MARKED);
michael@0: 			}
michael@0: 			if (strike_flag) {
michael@0: 				/* This is a subsequent FR */
michael@0: 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
michael@0: 			}
michael@0: 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
michael@0: 			if (asoc->sctp_cmt_on_off > 0) {
michael@0: 				/*
michael@0: 				 * CMT: Using RTX_SSTHRESH policy for CMT.
michael@0: 				 * If CMT is being used, then pick dest with
michael@0: 				 * largest ssthresh for any retransmission.
michael@0: 				 */
michael@0: 				tp1->no_fr_allowed = 1;
michael@0: 				alt = tp1->whoTo;
michael@0: 				/*sa_ignore NO_NULL_CHK*/
michael@0: 				if (asoc->sctp_cmt_pf > 0) {
michael@0: 					/* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */
michael@0: 					alt = sctp_find_alternate_net(stcb, alt, 2);
michael@0: 				} else {
michael@0: 					/* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */
michael@0: 					/*sa_ignore NO_NULL_CHK*/
michael@0: 					alt = sctp_find_alternate_net(stcb, alt, 1);
michael@0: 				}
michael@0: 				if (alt == NULL) {
michael@0: 					alt = tp1->whoTo;
michael@0: 				}
michael@0: 				/*
michael@0: 				 * CUCv2: If a different dest is picked for
michael@0: 				 * the retransmission, then new
michael@0: 				 * (rtx-)pseudo_cumack needs to be tracked
michael@0: 				 * for orig dest. Let CUCv2 track new (rtx-)
michael@0: 				 * pseudo-cumack always.
michael@0: 				 */
michael@0: 				if (tp1->whoTo) {
michael@0: 					tp1->whoTo->find_pseudo_cumack = 1;
michael@0: 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
michael@0: 				}
michael@0: 
michael@0: 			} else {/* CMT is OFF */
michael@0: 
#ifdef SCTP_FR_TO_ALTERNATE
michael@0: 				/* Can we find an alternate? */
michael@0: 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
#else
michael@0: 				/*
michael@0: 				 * default behavior is to NOT retransmit
michael@0: 				 * FR's to an alternate. Armando Caro's
michael@0: 				 * paper details why.
michael@0: 				 */
michael@0: 				alt = tp1->whoTo;
#endif
michael@0: 			}
michael@0: 
michael@0: 			tp1->rec.data.doing_fast_retransmit = 1;
michael@0: 			tot_retrans++;
michael@0: 			/* mark the sending seq for possible subsequent FR's */
michael@0: 			/*
michael@0: 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
michael@0: 			 * (uint32_t)tpi->rec.data.TSN_seq);
michael@0: 			 */
michael@0: 			if (TAILQ_EMPTY(&asoc->send_queue)) {
michael@0: 				/*
michael@0: 				 * If the send queue is empty then it's
michael@0: 				 * the next sequence number that will be
michael@0: 				 * assigned so we subtract one from this to
michael@0: 				 * get the one we last sent.
michael@0: 				 */
michael@0: 				tp1->rec.data.fast_retran_tsn = sending_seq;
michael@0: 			} else {
michael@0: 				/*
michael@0: 				 * If there are chunks on the send queue
michael@0: 				 * (unsent data that has made it from the
michael@0: 				 * stream queues but not out the door), we
michael@0: 				 * take the first one (which will have the
michael@0: 				 * lowest TSN) and subtract one to get the
michael@0: 				 * one we last sent.
michael@0: 				 */
michael@0: 				struct sctp_tmit_chunk *ttt;
michael@0: 
michael@0: 				ttt = TAILQ_FIRST(&asoc->send_queue);
michael@0: 				tp1->rec.data.fast_retran_tsn =
michael@0: 					ttt->rec.data.TSN_seq;
michael@0: 			}
michael@0: 
michael@0: 			if (tp1->do_rtt) {
michael@0: 				/*
michael@0: 				 * this guy had a RTO calculation pending on
michael@0: 				 * it, cancel it
michael@0: 				 */
michael@0: 				if ((tp1->whoTo != NULL) &&
michael@0: 				    (tp1->whoTo->rto_needed == 0)) {
michael@0: 					tp1->whoTo->rto_needed = 1;
michael@0: 				}
michael@0: 				tp1->do_rtt = 0;
michael@0: 			}
michael@0: 			if (alt != tp1->whoTo) {
michael@0: 				/* yes, there is an alternate. */
michael@0: 				sctp_free_remote_addr(tp1->whoTo);
michael@0: 				/*sa_ignore FREED_MEMORY*/
michael@0: 				tp1->whoTo = alt;
michael@0: 				atomic_add_int(&alt->ref_count, 1);
michael@0: 			}
michael@0: 		}
michael@0: 	}
michael@0: }
michael@0: 
michael@0: struct sctp_tmit_chunk *
michael@0: sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
michael@0:                                 struct sctp_association *asoc)
michael@0: {
michael@0: 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
michael@0: 	struct timeval now;
michael@0: 	int now_filled = 0;
michael@0: 
michael@0: 	if (asoc->peer_supports_prsctp == 0) {
michael@0: 		return (NULL);
michael@0: 	}
michael@0: 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
michael@0: 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
michael@0: 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
michael@0: 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
michael@0: 			/* no chance to advance, out of here */
michael@0: 			break;
michael@0: 		}
michael@0: 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
michael@0: 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
michael@0: 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
michael@0: 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
michael@0: 				               asoc->advanced_peer_ack_point,
michael@0: 				               tp1->rec.data.TSN_seq, 0, 0);
michael@0: 			}
michael@0: 		}
michael@0: 		if (!PR_SCTP_ENABLED(tp1->flags)) {
michael@0: 			/*
michael@0: 			 * We can't fwd-tsn past any that are reliable aka
michael@0: 			 * retransmitted until the asoc fails.
michael@0: 			 */
michael@0: 			break;
michael@0: 		}
michael@0: 		if (!now_filled) {
michael@0: 			(void)SCTP_GETTIME_TIMEVAL(&now);
michael@0: 			now_filled = 1;
michael@0: 		}
michael@0: 		/*
michael@0: 		 * now we got a chunk which is marked for another
michael@0: 		 * retransmission to a PR-stream but has run out its chances
michael@0: 		 * already maybe OR has been marked to skip now. Can we skip
michael@0: 		 * it if it's a resend?
michael@0: 		 */
michael@0: 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
michael@0: 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
michael@0: 			/*
michael@0: 			 * Now is this one marked for resend and its time is
michael@0: 			 * now up?
michael@0: 			 */
#ifndef __FreeBSD__
michael@0: 			if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
#else
michael@0: 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
#endif
michael@0: 				/* Yes so drop it */
michael@0: 				if (tp1->data) {
michael@0: 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
michael@0: 					                                 1, SCTP_SO_NOT_LOCKED);
michael@0: 				}
michael@0: 			} else {
michael@0: 				/*
michael@0: 				 * No, we are done when we hit one for resend
michael@0: 				 * whose time has not expired.
michael@0: 				 */
michael@0: 				break;
michael@0: 			}
michael@0: 		}
michael@0: 		/*
michael@0: 		 * Ok now if this chunk is marked to drop it we can clean up
michael@0: 		 * the chunk, advance our peer ack point and we can check
michael@0: 		 * the next chunk.
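michael@0: 		 *
michael@0: 		 * Worked example (states illustrative): with a sent queue of
michael@0: 		 * 101:SKIP, 102:NR_ACKED, 103:RESEND, the loop advances the
michael@0: 		 * peer ack point to 102 and stops at 103, since a chunk still
michael@0: 		 * marked for retransmission blocks any further advance.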
michael@0: */ michael@0: if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || michael@0: (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { michael@0: /* advance PeerAckPoint goes forward */ michael@0: if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) { michael@0: asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq; michael@0: a_adv = tp1; michael@0: } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) { michael@0: /* No update but we do save the chk */ michael@0: a_adv = tp1; michael@0: } michael@0: } else { michael@0: /* michael@0: * If it is still in RESEND we can advance no michael@0: * further michael@0: */ michael@0: break; michael@0: } michael@0: } michael@0: return (a_adv); michael@0: } michael@0: michael@0: static int michael@0: sctp_fs_audit(struct sctp_association *asoc) michael@0: { michael@0: struct sctp_tmit_chunk *chk; michael@0: int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; michael@0: int entry_flight, entry_cnt, ret; michael@0: michael@0: entry_flight = asoc->total_flight; michael@0: entry_cnt = asoc->total_flight_count; michael@0: ret = 0; michael@0: michael@0: if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt) michael@0: return (0); michael@0: michael@0: TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { michael@0: if (chk->sent < SCTP_DATAGRAM_RESEND) { michael@0: SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n", michael@0: chk->rec.data.TSN_seq, michael@0: chk->send_size, michael@0: chk->snd_count); michael@0: inflight++; michael@0: } else if (chk->sent == SCTP_DATAGRAM_RESEND) { michael@0: resend++; michael@0: } else if (chk->sent < SCTP_DATAGRAM_ACKED) { michael@0: inbetween++; michael@0: } else if (chk->sent > SCTP_DATAGRAM_ACKED) { michael@0: above++; michael@0: } else { michael@0: acked++; michael@0: } michael@0: } michael@0: michael@0: if ((inflight > 0) || (inbetween > 0)) { michael@0: #ifdef INVARIANTS michael@0: panic("Flight size-express incorrect? \n"); michael@0: #else michael@0: SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n", michael@0: entry_flight, entry_cnt); michael@0: michael@0: SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n", michael@0: inflight, inbetween, resend, above, acked); michael@0: ret = 1; michael@0: #endif michael@0: } michael@0: return (ret); michael@0: } michael@0: michael@0: michael@0: static void michael@0: sctp_window_probe_recovery(struct sctp_tcb *stcb, michael@0: struct sctp_association *asoc, michael@0: struct sctp_tmit_chunk *tp1) michael@0: { michael@0: tp1->window_probe = 0; michael@0: if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { michael@0: /* TSN's skipped we do NOT move back. 
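michael@0: 	 * The probe chunk was already acked or forward-TSN skipped, so
michael@0: 	 * there is nothing to put back in flight; we only log and leave.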
*/ michael@0: sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, michael@0: tp1->whoTo->flight_size, michael@0: tp1->book_size, michael@0: (uintptr_t)tp1->whoTo, michael@0: tp1->rec.data.TSN_seq); michael@0: return; michael@0: } michael@0: /* First setup this by shrinking flight */ michael@0: if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { michael@0: (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, michael@0: tp1); michael@0: } michael@0: sctp_flight_size_decrease(tp1); michael@0: sctp_total_flight_decrease(stcb, tp1); michael@0: /* Now mark for resend */ michael@0: tp1->sent = SCTP_DATAGRAM_RESEND; michael@0: sctp_ucount_incr(asoc->sent_queue_retran_cnt); michael@0: michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { michael@0: sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, michael@0: tp1->whoTo->flight_size, michael@0: tp1->book_size, michael@0: (uintptr_t)tp1->whoTo, michael@0: tp1->rec.data.TSN_seq); michael@0: } michael@0: } michael@0: michael@0: void michael@0: sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, michael@0: uint32_t rwnd, int *abort_now, int ecne_seen) michael@0: { michael@0: struct sctp_nets *net; michael@0: struct sctp_association *asoc; michael@0: struct sctp_tmit_chunk *tp1, *tp2; michael@0: uint32_t old_rwnd; michael@0: int win_probe_recovery = 0; michael@0: int win_probe_recovered = 0; michael@0: int j, done_once = 0; michael@0: int rto_ok = 1; michael@0: michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { michael@0: sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, michael@0: rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); michael@0: } michael@0: SCTP_TCB_LOCK_ASSERT(stcb); michael@0: #ifdef SCTP_ASOCLOG_OF_TSNS michael@0: stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; michael@0: stcb->asoc.cumack_log_at++; michael@0: if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { michael@0: stcb->asoc.cumack_log_at = 0; michael@0: } michael@0: #endif michael@0: asoc = &stcb->asoc; michael@0: old_rwnd = asoc->peers_rwnd; michael@0: if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { michael@0: /* old ack */ michael@0: return; michael@0: } else if (asoc->last_acked_seq == cumack) { michael@0: /* Window update sack */ michael@0: asoc->peers_rwnd = sctp_sbspace_sub(rwnd, michael@0: (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); michael@0: if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { michael@0: /* SWS sender side engages */ michael@0: asoc->peers_rwnd = 0; michael@0: } michael@0: if (asoc->peers_rwnd > old_rwnd) { michael@0: goto again; michael@0: } michael@0: return; michael@0: } michael@0: michael@0: /* First setup for CC stuff */ michael@0: TAILQ_FOREACH(net, &asoc->nets, sctp_next) { michael@0: if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { michael@0: /* Drag along the window_tsn for cwr's */ michael@0: net->cwr_window_tsn = cumack; michael@0: } michael@0: net->prev_cwnd = net->cwnd; michael@0: net->net_ack = 0; michael@0: net->net_ack2 = 0; michael@0: michael@0: /* michael@0: * CMT: Reset CUC and Fast recovery algo variables before michael@0: * SACK processing michael@0: */ michael@0: net->new_pseudo_cumack = 0; michael@0: net->will_exit_fast_recovery = 0; michael@0: if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { michael@0: (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net); michael@0: } michael@0: } michael@0: if 
(SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
michael@0: 		uint32_t send_s;
michael@0: 
michael@0: 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
michael@0: 			tp1 = TAILQ_LAST(&asoc->sent_queue,
michael@0: 			                 sctpchunk_listhead);
michael@0: 			send_s = tp1->rec.data.TSN_seq + 1;
michael@0: 		} else {
michael@0: 			send_s = asoc->sending_seq;
michael@0: 		}
michael@0: 		if (SCTP_TSN_GE(cumack, send_s)) {
#ifndef INVARIANTS
michael@0: 			struct mbuf *oper;
michael@0: 
#endif
#ifdef INVARIANTS
michael@0: 			panic("Impossible sack 1");
#else
michael@0: 
michael@0: 			*abort_now = 1;
michael@0: 			/* XXX */
michael@0: 			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
michael@0: 			                             0, M_NOWAIT, 1, MT_DATA);
michael@0: 			if (oper) {
michael@0: 				struct sctp_paramhdr *ph;
michael@0: 				uint32_t *ippp;
michael@0: 
michael@0: 				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
michael@0: 				                     sizeof(uint32_t);
michael@0: 				ph = mtod(oper, struct sctp_paramhdr *);
michael@0: 				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
michael@0: 				ph->param_length = htons(SCTP_BUF_LEN(oper));
michael@0: 				ippp = (uint32_t *) (ph + 1);
michael@0: 				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
michael@0: 			}
michael@0: 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
michael@0: 			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
michael@0: 			return;
#endif
michael@0: 		}
michael@0: 	}
michael@0: 	asoc->this_sack_highest_gap = cumack;
michael@0: 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
michael@0: 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
michael@0: 		               stcb->asoc.overall_error_count,
michael@0: 		               0,
michael@0: 		               SCTP_FROM_SCTP_INDATA,
michael@0: 		               __LINE__);
michael@0: 	}
michael@0: 	stcb->asoc.overall_error_count = 0;
michael@0: 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
michael@0: 		/* process the new consecutive TSN first */
michael@0: 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
michael@0: 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
michael@0: 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
michael@0: 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
michael@0: 				}
michael@0: 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
michael@0: 					/*
michael@0: 					 * If it is less than ACKED, it is
michael@0: 					 * now no longer in flight. Higher
michael@0: 					 * values may occur during marking
michael@0: 					 */
michael@0: 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
michael@0: 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
michael@0: 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
michael@0: 							               tp1->whoTo->flight_size,
michael@0: 							               tp1->book_size,
michael@0: 							               (uintptr_t)tp1->whoTo,
michael@0: 							               tp1->rec.data.TSN_seq);
michael@0: 						}
michael@0: 						sctp_flight_size_decrease(tp1);
michael@0: 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
michael@0: 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
michael@0: 							                                                             tp1);
michael@0: 						}
michael@0: 						/* sa_ignore NO_NULL_CHK */
michael@0: 						sctp_total_flight_decrease(stcb, tp1);
michael@0: 					}
michael@0: 					tp1->whoTo->net_ack += tp1->send_size;
michael@0: 					if (tp1->snd_count < 2) {
michael@0: 						/*
michael@0: 						 * True non-retransmitted
michael@0: 						 * chunk
michael@0: 						 */
michael@0: 						tp1->whoTo->net_ack2 +=
michael@0: 							tp1->send_size;
michael@0: 
michael@0: 						/* update RTO too?
*/ michael@0: if (tp1->do_rtt) { michael@0: if (rto_ok) { michael@0: tp1->whoTo->RTO = michael@0: /* michael@0: * sa_ignore michael@0: * NO_NULL_CHK michael@0: */ michael@0: sctp_calculate_rto(stcb, michael@0: asoc, tp1->whoTo, michael@0: &tp1->sent_rcv_time, michael@0: sctp_align_safe_nocopy, michael@0: SCTP_RTT_FROM_DATA); michael@0: rto_ok = 0; michael@0: } michael@0: if (tp1->whoTo->rto_needed == 0) { michael@0: tp1->whoTo->rto_needed = 1; michael@0: } michael@0: tp1->do_rtt = 0; michael@0: } michael@0: } michael@0: /* michael@0: * CMT: CUCv2 algorithm. From the michael@0: * cumack'd TSNs, for each TSN being michael@0: * acked for the first time, set the michael@0: * following variables for the michael@0: * corresp destination. michael@0: * new_pseudo_cumack will trigger a michael@0: * cwnd update. michael@0: * find_(rtx_)pseudo_cumack will michael@0: * trigger search for the next michael@0: * expected (rtx-)pseudo-cumack. michael@0: */ michael@0: tp1->whoTo->new_pseudo_cumack = 1; michael@0: tp1->whoTo->find_pseudo_cumack = 1; michael@0: tp1->whoTo->find_rtx_pseudo_cumack = 1; michael@0: michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { michael@0: /* sa_ignore NO_NULL_CHK */ michael@0: sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); michael@0: } michael@0: } michael@0: if (tp1->sent == SCTP_DATAGRAM_RESEND) { michael@0: sctp_ucount_decr(asoc->sent_queue_retran_cnt); michael@0: } michael@0: if (tp1->rec.data.chunk_was_revoked) { michael@0: /* deflate the cwnd */ michael@0: tp1->whoTo->cwnd -= tp1->book_size; michael@0: tp1->rec.data.chunk_was_revoked = 0; michael@0: } michael@0: if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { michael@0: if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { michael@0: asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--; michael@0: #ifdef INVARIANTS michael@0: } else { michael@0: panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); michael@0: #endif michael@0: } michael@0: } michael@0: TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); michael@0: if (tp1->data) { michael@0: /* sa_ignore NO_NULL_CHK */ michael@0: sctp_free_bufspace(stcb, asoc, tp1, 1); michael@0: sctp_m_freem(tp1->data); michael@0: tp1->data = NULL; michael@0: } michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { michael@0: sctp_log_sack(asoc->last_acked_seq, michael@0: cumack, michael@0: tp1->rec.data.TSN_seq, michael@0: 0, michael@0: 0, michael@0: SCTP_LOG_FREE_SENT); michael@0: } michael@0: asoc->sent_queue_cnt--; michael@0: sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); michael@0: } else { michael@0: break; michael@0: } michael@0: } michael@0: michael@0: } michael@0: #if defined(__Userspace__) michael@0: if (stcb->sctp_ep->recv_callback) { michael@0: if (stcb->sctp_socket) { michael@0: uint32_t inqueue_bytes, sb_free_now; michael@0: struct sctp_inpcb *inp; michael@0: michael@0: inp = stcb->sctp_ep; michael@0: inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); michael@0: sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv); michael@0: michael@0: /* check if the amount free in the send socket buffer crossed the threshold */ michael@0: if (inp->send_callback && michael@0: (((inp->send_sb_threshold > 0) && michael@0: (sb_free_now >= inp->send_sb_threshold) && michael@0: (stcb->asoc.chunks_on_out_queue <= 
SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) || michael@0: (inp->send_sb_threshold == 0))) { michael@0: atomic_add_int(&stcb->asoc.refcnt, 1); michael@0: SCTP_TCB_UNLOCK(stcb); michael@0: inp->send_callback(stcb->sctp_socket, sb_free_now); michael@0: SCTP_TCB_LOCK(stcb); michael@0: atomic_subtract_int(&stcb->asoc.refcnt, 1); michael@0: } michael@0: } michael@0: } else if (stcb->sctp_socket) { michael@0: #else michael@0: /* sa_ignore NO_NULL_CHK */ michael@0: if (stcb->sctp_socket) { michael@0: #endif michael@0: #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) michael@0: struct socket *so; michael@0: michael@0: #endif michael@0: SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { michael@0: /* sa_ignore NO_NULL_CHK */ michael@0: sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); michael@0: } michael@0: #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) michael@0: so = SCTP_INP_SO(stcb->sctp_ep); michael@0: atomic_add_int(&stcb->asoc.refcnt, 1); michael@0: SCTP_TCB_UNLOCK(stcb); michael@0: SCTP_SOCKET_LOCK(so, 1); michael@0: SCTP_TCB_LOCK(stcb); michael@0: atomic_subtract_int(&stcb->asoc.refcnt, 1); michael@0: if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { michael@0: /* assoc was freed while we were unlocked */ michael@0: SCTP_SOCKET_UNLOCK(so, 1); michael@0: return; michael@0: } michael@0: #endif michael@0: sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); michael@0: #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) michael@0: SCTP_SOCKET_UNLOCK(so, 1); michael@0: #endif michael@0: } else { michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { michael@0: sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); michael@0: } michael@0: } michael@0: michael@0: /* JRS - Use the congestion control given in the CC module */ michael@0: if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { michael@0: TAILQ_FOREACH(net, &asoc->nets, sctp_next) { michael@0: if (net->net_ack2 > 0) { michael@0: /* michael@0: * Karn's rule applies to clearing error count, this michael@0: * is optional. 
michael@0: */ michael@0: net->error_count = 0; michael@0: if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { michael@0: /* addr came good */ michael@0: net->dest_state |= SCTP_ADDR_REACHABLE; michael@0: sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, michael@0: 0, (void *)net, SCTP_SO_NOT_LOCKED); michael@0: } michael@0: if (net == stcb->asoc.primary_destination) { michael@0: if (stcb->asoc.alternate) { michael@0: /* release the alternate, primary is good */ michael@0: sctp_free_remote_addr(stcb->asoc.alternate); michael@0: stcb->asoc.alternate = NULL; michael@0: } michael@0: } michael@0: if (net->dest_state & SCTP_ADDR_PF) { michael@0: net->dest_state &= ~SCTP_ADDR_PF; michael@0: sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3); michael@0: sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); michael@0: asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); michael@0: /* Done with this net */ michael@0: net->net_ack = 0; michael@0: } michael@0: /* restore any doubled timers */ michael@0: net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; michael@0: if (net->RTO < stcb->asoc.minrto) { michael@0: net->RTO = stcb->asoc.minrto; michael@0: } michael@0: if (net->RTO > stcb->asoc.maxrto) { michael@0: net->RTO = stcb->asoc.maxrto; michael@0: } michael@0: } michael@0: } michael@0: asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); michael@0: } michael@0: asoc->last_acked_seq = cumack; michael@0: michael@0: if (TAILQ_EMPTY(&asoc->sent_queue)) { michael@0: /* nothing left in-flight */ michael@0: TAILQ_FOREACH(net, &asoc->nets, sctp_next) { michael@0: net->flight_size = 0; michael@0: net->partial_bytes_acked = 0; michael@0: } michael@0: asoc->total_flight = 0; michael@0: asoc->total_flight_count = 0; michael@0: } michael@0: michael@0: /* RWND update */ michael@0: asoc->peers_rwnd = sctp_sbspace_sub(rwnd, michael@0: (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); michael@0: if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { michael@0: /* SWS sender side engages */ michael@0: asoc->peers_rwnd = 0; michael@0: } michael@0: if (asoc->peers_rwnd > old_rwnd) { michael@0: win_probe_recovery = 1; michael@0: } michael@0: /* Now assure a timer where data is queued at */ michael@0: again: michael@0: j = 0; michael@0: TAILQ_FOREACH(net, &asoc->nets, sctp_next) { michael@0: int to_ticks; michael@0: if (win_probe_recovery && (net->window_probe)) { michael@0: win_probe_recovered = 1; michael@0: /* michael@0: * Find first chunk that was used with window probe michael@0: * and clear the sent michael@0: */ michael@0: /* sa_ignore FREED_MEMORY */ michael@0: TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { michael@0: if (tp1->window_probe) { michael@0: /* move back to data send queue */ michael@0: sctp_window_probe_recovery(stcb, asoc, tp1); michael@0: break; michael@0: } michael@0: } michael@0: } michael@0: if (net->RTO == 0) { michael@0: to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); michael@0: } else { michael@0: to_ticks = MSEC_TO_TICKS(net->RTO); michael@0: } michael@0: if (net->flight_size) { michael@0: j++; michael@0: (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, michael@0: sctp_timeout_handler, &net->rxt_timer); michael@0: if (net->window_probe) { michael@0: net->window_probe = 0; michael@0: } michael@0: } else { michael@0: if (net->window_probe) { michael@0: /* In window probes we must assure a timer is still running there */ 
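michael@0: 				/*
michael@0: 				 * A zero-window probe is not counted in flight_size,
michael@0: 				 * so this branch looks idle; without a running
michael@0: 				 * retransmission timer the probe could be lost and
michael@0: 				 * never retried, deadlocking the association against
michael@0: 				 * a closed receive window. Hence the timer is
michael@0: 				 * (re)started here instead of being stopped.
michael@0: 				 */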
michael@0: net->window_probe = 0; michael@0: if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { michael@0: SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, michael@0: sctp_timeout_handler, &net->rxt_timer); michael@0: } michael@0: } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { michael@0: sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, michael@0: stcb, net, michael@0: SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); michael@0: } michael@0: } michael@0: } michael@0: if ((j == 0) && michael@0: (!TAILQ_EMPTY(&asoc->sent_queue)) && michael@0: (asoc->sent_queue_retran_cnt == 0) && michael@0: (win_probe_recovered == 0) && michael@0: (done_once == 0)) { michael@0: /* huh, this should not happen unless all packets michael@0: * are PR-SCTP and marked to skip of course. michael@0: */ michael@0: if (sctp_fs_audit(asoc)) { michael@0: TAILQ_FOREACH(net, &asoc->nets, sctp_next) { michael@0: net->flight_size = 0; michael@0: } michael@0: asoc->total_flight = 0; michael@0: asoc->total_flight_count = 0; michael@0: asoc->sent_queue_retran_cnt = 0; michael@0: TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { michael@0: if (tp1->sent < SCTP_DATAGRAM_RESEND) { michael@0: sctp_flight_size_increase(tp1); michael@0: sctp_total_flight_increase(stcb, tp1); michael@0: } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { michael@0: sctp_ucount_incr(asoc->sent_queue_retran_cnt); michael@0: } michael@0: } michael@0: } michael@0: done_once = 1; michael@0: goto again; michael@0: } michael@0: /**********************************/ michael@0: /* Now what about shutdown issues */ michael@0: /**********************************/ michael@0: if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { michael@0: /* nothing left on sendqueue.. consider done */ michael@0: /* clean up */ michael@0: if ((asoc->stream_queue_cnt == 1) && michael@0: ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || michael@0: (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && michael@0: (asoc->locked_on_sending) michael@0: ) { michael@0: struct sctp_stream_queue_pending *sp; michael@0: /* I may be in a state where we got michael@0: * all across.. but cannot write more due michael@0: * to a shutdown... we abort since the michael@0: * user did not indicate EOR in this case. The michael@0: * sp will be cleaned during free of the asoc. 
michael@0: */ michael@0: sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), michael@0: sctp_streamhead); michael@0: if ((sp) && (sp->length == 0)) { michael@0: /* Let cleanup code purge it */ michael@0: if (sp->msg_is_complete) { michael@0: asoc->stream_queue_cnt--; michael@0: } else { michael@0: asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; michael@0: asoc->locked_on_sending = NULL; michael@0: asoc->stream_queue_cnt--; michael@0: } michael@0: } michael@0: } michael@0: if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && michael@0: (asoc->stream_queue_cnt == 0)) { michael@0: if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { michael@0: /* Need to abort here */ michael@0: struct mbuf *oper; michael@0: michael@0: abort_out_now: michael@0: *abort_now = 1; michael@0: /* XXX */ michael@0: oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), michael@0: 0, M_NOWAIT, 1, MT_DATA); michael@0: if (oper) { michael@0: struct sctp_paramhdr *ph; michael@0: michael@0: SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr); michael@0: ph = mtod(oper, struct sctp_paramhdr *); michael@0: ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); michael@0: ph->param_length = htons(SCTP_BUF_LEN(oper)); michael@0: } michael@0: stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; michael@0: sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); michael@0: } else { michael@0: struct sctp_nets *netp; michael@0: michael@0: if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || michael@0: (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { michael@0: SCTP_STAT_DECR_GAUGE32(sctps_currestab); michael@0: } michael@0: SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); michael@0: SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); michael@0: sctp_stop_timers_for_shutdown(stcb); michael@0: if (asoc->alternate) { michael@0: netp = asoc->alternate; michael@0: } else { michael@0: netp = asoc->primary_destination; michael@0: } michael@0: sctp_send_shutdown(stcb, netp); michael@0: sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, michael@0: stcb->sctp_ep, stcb, netp); michael@0: sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, michael@0: stcb->sctp_ep, stcb, netp); michael@0: } michael@0: } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && michael@0: (asoc->stream_queue_cnt == 0)) { michael@0: struct sctp_nets *netp; michael@0: michael@0: if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { michael@0: goto abort_out_now; michael@0: } michael@0: SCTP_STAT_DECR_GAUGE32(sctps_currestab); michael@0: SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); michael@0: SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); michael@0: sctp_stop_timers_for_shutdown(stcb); michael@0: if (asoc->alternate) { michael@0: netp = asoc->alternate; michael@0: } else { michael@0: netp = asoc->primary_destination; michael@0: } michael@0: sctp_send_shutdown_ack(stcb, netp); michael@0: sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, michael@0: stcb->sctp_ep, stcb, netp); michael@0: } michael@0: } michael@0: /*********************************************/ michael@0: /* Here we perform PR-SCTP procedures */ michael@0: /* (section 4.2) */ michael@0: /*********************************************/ michael@0: /* C1. 
update advancedPeerAckPoint */
michael@0: 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
michael@0: 		asoc->advanced_peer_ack_point = cumack;
michael@0: 	}
michael@0: 	/* PR-SCTP issues need to be addressed too */
michael@0: 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
michael@0: 		struct sctp_tmit_chunk *lchk;
michael@0: 		uint32_t old_adv_peer_ack_point;
michael@0: 
michael@0: 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
michael@0: 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
michael@0: 		/* C3. See if we need to send a Fwd-TSN */
michael@0: 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
michael@0: 			/*
michael@0: 			 * ISSUE with ECN, see FWD-TSN processing.
michael@0: 			 */
michael@0: 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
michael@0: 				send_forward_tsn(stcb, asoc);
michael@0: 			} else if (lchk) {
michael@0: 				/* try to FR fwd-tsn's that get lost too */
michael@0: 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
michael@0: 					send_forward_tsn(stcb, asoc);
michael@0: 				}
michael@0: 			}
michael@0: 		}
michael@0: 		if (lchk) {
michael@0: 			/* Assure a timer is up */
michael@0: 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
michael@0: 			                 stcb->sctp_ep, stcb, lchk->whoTo);
michael@0: 		}
michael@0: 	}
michael@0: 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
michael@0: 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
michael@0: 		               rwnd,
michael@0: 		               stcb->asoc.peers_rwnd,
michael@0: 		               stcb->asoc.total_flight,
michael@0: 		               stcb->asoc.total_output_queue_size);
michael@0: 	}
michael@0: }
michael@0: 
michael@0: void
michael@0: sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
michael@0:                  struct sctp_tcb *stcb,
michael@0:                  uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
michael@0:                  int *abort_now, uint8_t flags,
michael@0:                  uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
michael@0: {
michael@0: 	struct sctp_association *asoc;
michael@0: 	struct sctp_tmit_chunk *tp1, *tp2;
michael@0: 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
michael@0: 	uint16_t wake_him = 0;
michael@0: 	uint32_t send_s = 0;
michael@0: 	long j;
michael@0: 	int accum_moved = 0;
michael@0: 	int will_exit_fast_recovery = 0;
michael@0: 	uint32_t a_rwnd, old_rwnd;
michael@0: 	int win_probe_recovery = 0;
michael@0: 	int win_probe_recovered = 0;
michael@0: 	struct sctp_nets *net = NULL;
michael@0: 	int done_once;
michael@0: 	int rto_ok = 1;
michael@0: 	uint8_t reneged_all = 0;
michael@0: 	uint8_t cmt_dac_flag;
michael@0: 	/*
michael@0: 	 * we take any chance we can to service our queues since we cannot
michael@0: 	 * get awoken when the socket is read from :<
michael@0: 	 */
michael@0: 	/*
michael@0: 	 * Now perform the actual SACK handling: 1) Verify that it is not an
michael@0: 	 * old sack, if so discard. 2) If there is nothing left in the send
michael@0: 	 * queue (cum-ack is equal to last acked) then you have a duplicate
michael@0: 	 * too; update any rwnd change, verify no timers are running, and
michael@0: 	 * then return. 3) Process any new consecutive data, i.e. the
michael@0: 	 * cum-ack moved; process these first and note that it moved. 4) Process any
michael@0: 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
michael@0: 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
michael@0: 	 * sync up flightsizes and things, stop all timers and also check
michael@0: 	 * for shutdown_pending state. If so then go ahead and send off the
michael@0: 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
michael@0: 	 * start that timer, Ret.
9) Strike any non-acked things and do FR michael@0: * procedure if needed being sure to set the FR flag. 10) Do pr-sctp michael@0: * procedures. 11) Apply any FR penalties. 12) Assure we will SACK michael@0: * if in shutdown_recv state. michael@0: */ michael@0: SCTP_TCB_LOCK_ASSERT(stcb); michael@0: /* CMT DAC algo */ michael@0: this_sack_lowest_newack = 0; michael@0: SCTP_STAT_INCR(sctps_slowpath_sack); michael@0: last_tsn = cum_ack; michael@0: cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; michael@0: #ifdef SCTP_ASOCLOG_OF_TSNS michael@0: stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; michael@0: stcb->asoc.cumack_log_at++; michael@0: if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { michael@0: stcb->asoc.cumack_log_at = 0; michael@0: } michael@0: #endif michael@0: a_rwnd = rwnd; michael@0: michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { michael@0: sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, michael@0: rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); michael@0: } michael@0: michael@0: old_rwnd = stcb->asoc.peers_rwnd; michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { michael@0: sctp_misc_ints(SCTP_THRESHOLD_CLEAR, michael@0: stcb->asoc.overall_error_count, michael@0: 0, michael@0: SCTP_FROM_SCTP_INDATA, michael@0: __LINE__); michael@0: } michael@0: stcb->asoc.overall_error_count = 0; michael@0: asoc = &stcb->asoc; michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { michael@0: sctp_log_sack(asoc->last_acked_seq, michael@0: cum_ack, michael@0: 0, michael@0: num_seg, michael@0: num_dup, michael@0: SCTP_LOG_NEW_SACK); michael@0: } michael@0: if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { michael@0: uint16_t i; michael@0: uint32_t *dupdata, dblock; michael@0: michael@0: for (i = 0; i < num_dup; i++) { michael@0: dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), michael@0: sizeof(uint32_t), (uint8_t *)&dblock); michael@0: if (dupdata == NULL) { michael@0: break; michael@0: } michael@0: sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); michael@0: } michael@0: } michael@0: if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { michael@0: /* reality check */ michael@0: if (!TAILQ_EMPTY(&asoc->sent_queue)) { michael@0: tp1 = TAILQ_LAST(&asoc->sent_queue, michael@0: sctpchunk_listhead); michael@0: send_s = tp1->rec.data.TSN_seq + 1; michael@0: } else { michael@0: tp1 = NULL; michael@0: send_s = asoc->sending_seq; michael@0: } michael@0: if (SCTP_TSN_GE(cum_ack, send_s)) { michael@0: struct mbuf *oper; michael@0: /* michael@0: * no way, we have not even sent this TSN out yet. michael@0: * Peer is hopelessly messed up with us. 
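michael@0: 			 *
michael@0: 			 * Example (values illustrative): if sending_seq is 500,
michael@0: 			 * the highest TSN ever sent is 499; a SACK whose cum-ack
michael@0: 			 * is 500 or more claims to ack data that was never sent,
michael@0: 			 * so the association is aborted with a protocol
michael@0: 			 * violation cause.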
michael@0: */ michael@0: SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n", michael@0: cum_ack, send_s); michael@0: if (tp1) { michael@0: SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n", michael@0: tp1->rec.data.TSN_seq, (void *)tp1); michael@0: } michael@0: hopeless_peer: michael@0: *abort_now = 1; michael@0: /* XXX */ michael@0: oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), michael@0: 0, M_NOWAIT, 1, MT_DATA); michael@0: if (oper) { michael@0: struct sctp_paramhdr *ph; michael@0: uint32_t *ippp; michael@0: michael@0: SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + michael@0: sizeof(uint32_t); michael@0: ph = mtod(oper, struct sctp_paramhdr *); michael@0: ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); michael@0: ph->param_length = htons(SCTP_BUF_LEN(oper)); michael@0: ippp = (uint32_t *) (ph + 1); michael@0: *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); michael@0: } michael@0: stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; michael@0: sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); michael@0: return; michael@0: } michael@0: } michael@0: /**********************/ michael@0: /* 1) check the range */ michael@0: /**********************/ michael@0: if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) { michael@0: /* acking something behind */ michael@0: return; michael@0: } michael@0: michael@0: /* update the Rwnd of the peer */ michael@0: if (TAILQ_EMPTY(&asoc->sent_queue) && michael@0: TAILQ_EMPTY(&asoc->send_queue) && michael@0: (asoc->stream_queue_cnt == 0)) { michael@0: /* nothing left on send/sent and strmq */ michael@0: if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { michael@0: sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, michael@0: asoc->peers_rwnd, 0, 0, a_rwnd); michael@0: } michael@0: asoc->peers_rwnd = a_rwnd; michael@0: if (asoc->sent_queue_retran_cnt) { michael@0: asoc->sent_queue_retran_cnt = 0; michael@0: } michael@0: if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { michael@0: /* SWS sender side engages */ michael@0: asoc->peers_rwnd = 0; michael@0: } michael@0: /* stop any timers */ michael@0: TAILQ_FOREACH(net, &asoc->nets, sctp_next) { michael@0: sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, michael@0: stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); michael@0: net->partial_bytes_acked = 0; michael@0: net->flight_size = 0; michael@0: } michael@0: asoc->total_flight = 0; michael@0: asoc->total_flight_count = 0; michael@0: return; michael@0: } michael@0: /* michael@0: * We init netAckSz and netAckSz2 to 0. These are used to track 2 michael@0: * things. The total byte count acked is tracked in netAckSz AND michael@0: * netAck2 is used to track the total bytes acked that are un- michael@0: * amibguious and were never retransmitted. We track these on a per michael@0: * destination address basis. 
	/*
	 * We init netAckSz and netAckSz2 to 0. These are used to track 2
	 * things. The total byte count acked is tracked in netAckSz AND
	 * netAck2 is used to track the total bytes acked that are
	 * unambiguous and were never retransmitted. We track these on a
	 * per destination address basis.
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
			/* Drag along the window_tsn for cwr's */
			net->cwr_window_tsn = cum_ack;
		}
		net->prev_cwnd = net->cwnd;
		net->net_ack = 0;
		net->net_ack2 = 0;

		/*
		 * CMT: Reset CUC and Fast recovery algo variables before
		 * SACK processing
		 */
		net->new_pseudo_cumack = 0;
		net->will_exit_fast_recovery = 0;
		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
		}
	}
	/* process the new consecutive TSN first */
	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
				accum_moved = 1;
				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
					/*
					 * If it is less than ACKED, it is
					 * now no longer in flight. Higher
					 * values may occur during marking.
					 */
					if ((tp1->whoTo->dest_state & SCTP_ADDR_UNCONFIRMED) &&
					    (tp1->snd_count < 2)) {
						/*
						 * If there was no retransmission
						 * and the address is unconfirmed
						 * and we sent there and it is now
						 * sacked, it is confirmed; mark
						 * it so.
						 */
						tp1->whoTo->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
					}
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, tp1->whoTo->flight_size, tp1->book_size, (uintptr_t)tp1->whoTo, tp1->rec.data.TSN_seq);
						}
						sctp_flight_size_decrease(tp1);
						sctp_total_flight_decrease(stcb, tp1);
						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, tp1);
						}
					}
					tp1->whoTo->net_ack += tp1->send_size;

					/* CMT SFR and DAC algos */
					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
					tp1->whoTo->saw_newack = 1;

					if (tp1->snd_count < 2) {
						/* True non-retransmitted chunk */
						tp1->whoTo->net_ack2 += tp1->send_size;

						/* update RTO too? */
						if (tp1->do_rtt) {
							if (rto_ok) {
								tp1->whoTo->RTO = sctp_calculate_rto(stcb, asoc, tp1->whoTo, &tp1->sent_rcv_time, sctp_align_safe_nocopy, SCTP_RTT_FROM_DATA);
								rto_ok = 0;
							}
							if (tp1->whoTo->rto_needed == 0) {
								tp1->whoTo->rto_needed = 1;
							}
							tp1->do_rtt = 0;
						}
					}
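					/*
					 * The snd_count/do_rtt/rto_ok guards
					 * above implement Karn's rule: only a
					 * chunk that was never retransmitted
					 * gives an unambiguous RTT sample, and
					 * rto_ok limits it to one sample per
					 * SACK. A stand-alone sketch of that
					 * decision; the helper and struct
					 * names are hypothetical.
					 */
#if 0	/* illustrative sketch, not compiled */
#include <assert.h>

struct sample_budget {
	int rto_ok;	/* at most one RTT sample per SACK */
};

static int
may_sample_rtt(struct sample_budget *b, int snd_count, int do_rtt)
{
	if (snd_count >= 2) {
		return (0);	/* Karn: retransmitted, sample is ambiguous */
	}
	if (!do_rtt || !b->rto_ok) {
		return (0);
	}
	b->rto_ok = 0;		/* consume the per-SACK budget */
	return (1);
}

int
main(void)
{
	struct sample_budget b = { 1 };

	assert(may_sample_rtt(&b, 1, 1) == 1);
	assert(may_sample_rtt(&b, 1, 1) == 0);	/* budget used up */
	assert(may_sample_rtt(&b, 2, 1) == 0);	/* retransmitted chunk */
	return (0);
}
#endif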
					/*
					 * CMT: CUCv2 algorithm. From the
					 * cumack'd TSNs, for each TSN being
					 * acked for the first time, set the
					 * following variables for the
					 * corresponding destination.
					 * new_pseudo_cumack will trigger a
					 * cwnd update.
					 * find_(rtx_)pseudo_cumack will
					 * trigger search for the next
					 * expected (rtx-)pseudo-cumack.
					 */
					tp1->whoTo->new_pseudo_cumack = 1;
					tp1->whoTo->find_pseudo_cumack = 1;
					tp1->whoTo->find_rtx_pseudo_cumack = 1;

					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
						sctp_log_sack(asoc->last_acked_seq, cum_ack, tp1->rec.data.TSN_seq, 0, 0, SCTP_LOG_TSN_ACKED);
					}
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
					}
				}
				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
#ifdef SCTP_AUDITING_ENABLED
					sctp_audit_log(0xB3, (asoc->sent_queue_retran_cnt & 0x000000ff));
#endif
				}
				if (tp1->rec.data.chunk_was_revoked) {
					/* deflate the cwnd */
					tp1->whoTo->cwnd -= tp1->book_size;
					tp1->rec.data.chunk_was_revoked = 0;
				}
				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
					tp1->sent = SCTP_DATAGRAM_ACKED;
				}
			}
		} else {
			break;
		}
	}
	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
	/* always set this up to cum-ack */
	asoc->this_sack_highest_gap = last_tsn;

	if ((num_seg > 0) || (num_nr_seg > 0)) {
		/*
		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
		 * to be greater than the cumack. Also reset saw_newack to 0
		 * for all dests.
		 */
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			net->saw_newack = 0;
			net->this_sack_highest_newack = last_tsn;
		}

		/*
		 * thisSackHighestGap will increase while handling NEW
		 * segments; this_sack_highest_newack will increase while
		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
		 * used for the CMT DAC algo; saw_newack will also change.
		 */
		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
		    num_seg, num_nr_seg, &rto_ok)) {
			wake_him++;
		}
		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
			/*
			 * validate the biggest_tsn_acked in the gap acks if
			 * strict adherence is wanted.
			 */
			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
				/*
				 * peer is either confused or we are under
				 * attack. We must abort.
				 */
				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n", biggest_tsn_acked, send_s);
				goto hopeless_peer;
			}
		}
	}
	/*******************************************/
	/* cancel ALL T3-send timer if accum moved */
	/*******************************************/
	if (asoc->sctp_cmt_on_off > 0) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			if (net->new_pseudo_cumack)
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
		}
	} else {
		if (accum_moved) {
			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
			}
		}
	}
	/********************************************/
	/* drop the acked chunks from the sentqueue */
	/********************************************/
	asoc->last_acked_seq = cum_ack;

	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
			break;
		}
		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
#endif
			}
		}
		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
		if (PR_SCTP_ENABLED(tp1->flags)) {
			if (asoc->pr_sctp_cnt != 0)
				asoc->pr_sctp_cnt--;
		}
		asoc->sent_queue_cnt--;
		if (tp1->data) {
			/* sa_ignore NO_NULL_CHK */
			sctp_free_bufspace(stcb, asoc, tp1, 1);
			sctp_m_freem(tp1->data);
			tp1->data = NULL;
			if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
				asoc->sent_queue_cnt_removeable--;
			}
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
			sctp_log_sack(asoc->last_acked_seq, cum_ack, tp1->rec.data.TSN_seq, 0, 0, SCTP_LOG_FREE_SENT);
		}
		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
		wake_him++;
	}
	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
#ifdef INVARIANTS
		panic("Warning flight size is positive and should be 0");
#else
		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", asoc->total_flight);
#endif
		asoc->total_flight = 0;
	}

#if defined(__Userspace__)
	if (stcb->sctp_ep->recv_callback) {
		if (stcb->sctp_socket) {
			uint32_t inqueue_bytes, sb_free_now;
			struct sctp_inpcb *inp;

			inp = stcb->sctp_ep;
			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
			sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
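			/*
			 * The computation above derives the free send-buffer
			 * space as the socket limit minus queued bytes and
			 * reserved space; the callback just below fires when
			 * that crosses the app-chosen threshold. A worked
			 * stand-alone example with hypothetical sizes.
			 */
#if 0	/* illustrative sketch, not compiled */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t sb_limit = 256 * 1024;	/* hypothetical send buffer limit */
	uint32_t queued = 100 * 1024;	/* bytes still queued to send */
	uint32_t reserved = 4 * 1024;	/* space reserved by pending sends */
	uint32_t threshold = 64 * 1024;	/* app-chosen wakeup threshold */
	uint32_t sb_free_now = sb_limit - (queued + reserved);

	if ((threshold == 0) || (sb_free_now >= threshold)) {
		printf("notify app: %u bytes free\n", sb_free_now);
	}
	return (0);
}
#endif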
			/* check if the amount free in the send socket buffer crossed the threshold */
			if (inp->send_callback &&
			    (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
			     (inp->send_sb_threshold == 0))) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				inp->send_callback(stcb->sctp_socket, sb_free_now);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
		}
	} else if ((wake_him) && (stcb->sctp_socket)) {
#else
	/* sa_ignore NO_NULL_CHK */
	if ((wake_him) && (stcb->sctp_socket)) {
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

#endif
		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
		}
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
		}
	}

	if (asoc->fast_retran_loss_recovery && accum_moved) {
		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
			/* Setup so we will exit RFC2582 fast recovery */
			will_exit_fast_recovery = 1;
		}
	}
	/*
	 * Check for revoked fragments:
	 *
	 * - If the previous SACK had no gap reports, nothing can have
	 *   been revoked.
	 * - If the previous SACK had gap reports and this one does too
	 *   (num_seg > 0), call sctp_check_for_revoked() to find out
	 *   whether the peer revoked some of them.
	 * - Otherwise the peer revoked all ACKED fragments, since we had
	 *   some before and now we have NONE.
	 */
	if (num_seg) {
		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
		asoc->saw_sack_with_frags = 1;
	} else if (asoc->saw_sack_with_frags) {
		int cnt_revoked = 0;

		/* Peer revoked all dg's marked or acked */
		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
				tp1->sent = SCTP_DATAGRAM_SENT;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, tp1->whoTo->flight_size, tp1->book_size, (uintptr_t)tp1->whoTo, tp1->rec.data.TSN_seq);
				}
				sctp_flight_size_increase(tp1);
				sctp_total_flight_increase(stcb, tp1);
				tp1->rec.data.chunk_was_revoked = 1;
				/*
				 * To ensure that this increase in
				 * flightsize, which is artificial, does
				 * not throttle the sender, we also
				 * increase the cwnd artificially.
				 */
				tp1->whoTo->cwnd += tp1->book_size;
				cnt_revoked++;
			}
		}
		if (cnt_revoked) {
			reneged_all = 1;
		}
		asoc->saw_sack_with_frags = 0;
	}
	if (num_nr_seg > 0)
		asoc->saw_sack_with_nr_frags = 1;
	else
		asoc->saw_sack_with_nr_frags = 0;
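	/*
	 * Revocation bookkeeping in the loop above comes in matched pairs:
	 * a revoked chunk re-enters flight (an artificial increase), so
	 * cwnd is inflated by the same book_size to avoid throttling, and
	 * the earlier chunk_was_revoked branch deflates cwnd again once
	 * the chunk is finally cum-acked. A stand-alone sketch; the struct
	 * and helper names are hypothetical.
	 */
#if 0	/* illustrative sketch, not compiled */
#include <assert.h>
#include <stdint.h>

struct dest {
	uint32_t flight_size;
	uint32_t cwnd;
};

static void
revoke_chunk(struct dest *d, uint32_t book_size)
{
	d->flight_size += book_size;	/* back in flight (artificial) */
	d->cwnd += book_size;		/* compensate so we are not throttled */
}

static void
finally_acked(struct dest *d, uint32_t book_size)
{
	d->flight_size -= book_size;
	d->cwnd -= book_size;		/* deflate the earlier compensation */
}

int
main(void)
{
	struct dest d = { 0, 4380 };

	revoke_chunk(&d, 1200);
	finally_acked(&d, 1200);
	assert(d.cwnd == 4380 && d.flight_size == 0);
	return (0);
}
#endif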
	/* JRS - Use the congestion control given in the CC module */
	if (ecne_seen == 0) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			if (net->net_ack2 > 0) {
				/*
				 * Karn's rule applies to clearing the
				 * error count; this is optional.
				 */
				net->error_count = 0;
				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
					/* addr came good */
					net->dest_state |= SCTP_ADDR_REACHABLE;
					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, (void *)net, SCTP_SO_NOT_LOCKED);
				}

				if (net == stcb->asoc.primary_destination) {
					if (stcb->asoc.alternate) {
						/* release the alternate, primary is good */
						sctp_free_remote_addr(stcb->asoc.alternate);
						stcb->asoc.alternate = NULL;
					}
				}

				if (net->dest_state & SCTP_ADDR_PF) {
					net->dest_state &= ~SCTP_ADDR_PF;
					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
					/* Done with this net */
					net->net_ack = 0;
				}
				/* restore any doubled timers */
				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
				if (net->RTO < stcb->asoc.minrto) {
					net->RTO = stcb->asoc.minrto;
				}
				if (net->RTO > stcb->asoc.maxrto) {
					net->RTO = stcb->asoc.maxrto;
				}
			}
		}
		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
	}
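	/*
	 * The "restore any doubled timers" step above recomputes the RTO
	 * from the smoothed RTT state in the Van Jacobson style,
	 * (lastsa >> shift) + lastsv, then clamps it to [minrto, maxrto].
	 * A stand-alone sketch; the shift value and the millisecond
	 * numbers are assumptions for illustration.
	 */
#if 0	/* illustrative sketch, not compiled */
#include <assert.h>
#include <stdint.h>

#define RTT_SHIFT 3	/* assumed scaling of the smoothed RTT */

static uint32_t
restore_rto(uint32_t lastsa, uint32_t lastsv, uint32_t minrto, uint32_t maxrto)
{
	uint32_t rto = (lastsa >> RTT_SHIFT) + lastsv;

	if (rto < minrto) {
		rto = minrto;
	}
	if (rto > maxrto) {
		rto = maxrto;
	}
	return (rto);
}

int
main(void)
{
	/* smoothed RTT 100ms (scaled by 8), variance 50ms, bounds 1s..60s */
	assert(restore_rto(800, 50, 1000, 60000) == 1000);
	return (0);
}
#endif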
	if (TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left in-flight */
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			/* stop all timers */
			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
			net->flight_size = 0;
			net->partial_bytes_acked = 0;
		}
		asoc->total_flight = 0;
		asoc->total_flight_count = 0;
	}

	/**********************************/
	/* Now what about shutdown issues */
	/**********************************/
	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
		/* nothing left on sendqueue.. consider done */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, asoc->peers_rwnd, 0, 0, a_rwnd);
		}
		asoc->peers_rwnd = a_rwnd;
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
		/* clean up */
		if ((asoc->stream_queue_cnt == 1) &&
		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
		     (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
		    (asoc->locked_on_sending)) {
			struct sctp_stream_queue_pending *sp;

			/*
			 * I may be in a state where we got everything
			 * across.. but cannot write more due to a
			 * shutdown... we abort, since the user did not
			 * indicate EOR in this case.
			 */
			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), sctp_streamhead);
			if ((sp) && (sp->length == 0)) {
				asoc->locked_on_sending = NULL;
				if (sp->msg_is_complete) {
					asoc->stream_queue_cnt--;
				} else {
					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
					asoc->stream_queue_cnt--;
				}
			}
		}
		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
		    (asoc->stream_queue_cnt == 0)) {
			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
				/* Need to abort here */
				struct mbuf *oper;

		abort_out_now:
				*abort_now = 1;
				/* XXX */
				oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;

					SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
					ph->param_length = htons(SCTP_BUF_LEN(oper));
				}
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
				sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
				return;
			} else {
				struct sctp_nets *netp;

				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
				}
				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
				sctp_stop_timers_for_shutdown(stcb);
				if (asoc->alternate) {
					netp = asoc->alternate;
				} else {
					netp = asoc->primary_destination;
				}
				sctp_send_shutdown(stcb, netp);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, netp);
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, netp);
			}
			return;
		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (asoc->stream_queue_cnt == 0)) {
			struct sctp_nets *netp;

			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
				goto abort_out_now;
			}
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
			sctp_stop_timers_for_shutdown(stcb);
			if (asoc->alternate) {
				netp = asoc->alternate;
			} else {
				netp = asoc->primary_destination;
			}
			sctp_send_shutdown_ack(stcb, netp);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, netp);
			return;
		}
	}
	/*
	 * Now here we are going to recycle net_ack for a different use...
	 * HEADS UP.
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		net->net_ack = 0;
	}
	/*
	 * CMT DAC algorithm: If the SACK DAC flag was 0, then no extra
	 * marking is to be done. Setting this_sack_lowest_newack to the
	 * cum_ack will automatically ensure that.
	 */
	if ((asoc->sctp_cmt_on_off > 0) &&
	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
	    (cmt_dac_flag == 0)) {
		this_sack_lowest_newack = cum_ack;
	}
	if ((num_seg > 0) || (num_nr_seg > 0)) {
		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
	}
	/* JRS - Use the congestion control given in the CC module */
	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);

	/* Now are we exiting loss recovery ? */
	if (will_exit_fast_recovery) {
		/* Ok, we must exit fast recovery */
		asoc->fast_retran_loss_recovery = 0;
	}
	if ((asoc->sat_t3_loss_recovery) &&
	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
		/* end satellite t3 loss recovery */
		asoc->sat_t3_loss_recovery = 0;
	}
	/*
	 * CMT Fast recovery
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (net->will_exit_fast_recovery) {
			/* Ok, we must exit fast recovery */
			net->fast_retran_loss_recovery = 0;
		}
	}

	/* Adjust and set the new rwnd value */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
	}
	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
	    (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
		/* SWS sender side engages */
		asoc->peers_rwnd = 0;
	}
	if (asoc->peers_rwnd > old_rwnd) {
		win_probe_recovery = 1;
	}
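	/*
	 * The peers_rwnd computation above starts from the advertised
	 * a_rwnd and subtracts everything still in flight plus a
	 * per-chunk overhead allowance, saturating at zero. A worked
	 * stand-alone example; the byte counts and the overhead value are
	 * hypothetical.
	 */
#if 0	/* illustrative sketch, not compiled */
#include <assert.h>
#include <stdint.h>

/* Saturating subtraction, analogous to sctp_sbspace_sub(). */
static uint32_t
sbspace_sub(uint32_t a, uint32_t b)
{
	return ((a > b) ? (a - b) : 0);
}

int
main(void)
{
	uint32_t a_rwnd = 64000;	/* advertised by this SACK */
	uint32_t total_flight = 12000;	/* bytes outstanding */
	uint32_t flight_count = 10;	/* chunks outstanding */
	uint32_t chunk_oh = 256;	/* assumed per-chunk overhead */

	assert(sbspace_sub(a_rwnd, total_flight + flight_count * chunk_oh) == 49440);
	assert(sbspace_sub(1000, 5000) == 0);	/* saturates; SWS will engage */
	return (0);
}
#endif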
	/*
	 * Now we must setup so we have a timer up for anyone with
	 * outstanding data.
	 */
	done_once = 0;
again:
	j = 0;
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (win_probe_recovery && (net->window_probe)) {
			win_probe_recovered = 1;
			/*-
			 * Find the first chunk that was used with a window
			 * probe and clear the event. Put it back into the
			 * send queue as if it had not been sent.
			 */
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if (tp1->window_probe) {
					sctp_window_probe_recovery(stcb, asoc, tp1);
					break;
				}
			}
		}
		if (net->flight_size) {
			j++;
			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
			}
			if (net->window_probe) {
				net->window_probe = 0;
			}
		} else {
			if (net->window_probe) {
				/* In window probes we must assure a timer is still running there */
				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
				}
			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
			}
		}
	}
	if ((j == 0) &&
	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0) &&
	    (win_probe_recovered == 0) &&
	    (done_once == 0)) {
		/*
		 * huh, this should not happen unless all packets are
		 * PR-SCTP and marked to skip, of course.
		 */
		if (sctp_fs_audit(asoc)) {
			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
				net->flight_size = 0;
			}
			asoc->total_flight = 0;
			asoc->total_flight_count = 0;
			asoc->sent_queue_retran_cnt = 0;
			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
					sctp_flight_size_increase(tp1);
					sctp_total_flight_increase(stcb, tp1);
				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
				}
			}
		}
		done_once = 1;
		goto again;
	}
	/*********************************************/
	/* Here we perform PR-SCTP procedures        */
	/* (section 4.2)                             */
	/*********************************************/
	/* C1. update advancedPeerAckPoint */
	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
		asoc->advanced_peer_ack_point = cum_ack;
	}
	/* C2. try to further move advancedPeerAckPoint ahead */
	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
		struct sctp_tmit_chunk *lchk;
		uint32_t old_adv_peer_ack_point;

		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
			/*
			 * ISSUE with ECN, see FWD-TSN processing.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK, 0xee, cum_ack, asoc->advanced_peer_ack_point, old_adv_peer_ack_point);
			}
			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
				send_forward_tsn(stcb, asoc);
			} else if (lchk) {
				/* try to FR fwd-tsn's that get lost too */
				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
					send_forward_tsn(stcb, asoc);
				}
			}
		}
		if (lchk) {
			/* Assure a timer is up */
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SACK_RWND_UPDATE, a_rwnd, stcb->asoc.peers_rwnd, stcb->asoc.total_flight, stcb->asoc.total_output_queue_size);
	}
}

void
sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
{
	/* Copy cum-ack */
	uint32_t cum_ack, a_rwnd;

	cum_ack = ntohl(cp->cumulative_tsn_ack);
	/* Arrange so a_rwnd does NOT change */
	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;

	/* Now call the express sack handling */
	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
}
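/*
 * sctp_update_acked() reuses the express SACK path for the cum-ack carried
 * by a SHUTDOWN, which advertises no window. Synthesizing
 * a_rwnd = peers_rwnd + total_flight means the later "subtract what is
 * still in flight" step reproduces the old window when nothing new is
 * acked. A stand-alone restatement of that identity; the numbers are
 * hypothetical.
 */
#if 0	/* illustrative sketch, not compiled */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t peers_rwnd = 20000;	/* current view of the peer window */
	uint32_t total_flight = 3000;	/* bytes outstanding */
	uint32_t a_rwnd = peers_rwnd + total_flight;

	/*
	 * If the SHUTDOWN's cum-ack acks nothing new, flight is unchanged
	 * and the recomputed window equals the old one.
	 */
	assert(a_rwnd - total_flight == peers_rwnd);
	return (0);
}
#endif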
static void
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
    struct sctp_stream_in *strmin)
{
	struct sctp_queued_to_read *ctl, *nctl;
	struct sctp_association *asoc;
	uint16_t tt;

	asoc = &stcb->asoc;
	tt = strmin->last_sequence_delivered;
	/*
	 * First deliver anything prior to and including the stream
	 * sequence number that came in.
	 */
	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
			/* this is deliverable now */
			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
			/* subtract pending on streams */
			asoc->size_on_all_streams -= ctl->length;
			sctp_ucount_decr(asoc->cnt_on_all_streams);
			/* deliver it to at least the delivery-q */
			if (stcb->sctp_socket) {
				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb, ctl, &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
			}
		} else {
			/* no more delivery now. */
			break;
		}
	}
	/*
	 * now we must deliver things in queue the normal way if any are
	 * now ready.
	 */
	tt = strmin->last_sequence_delivered + 1;
	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
		if (tt == ctl->sinfo_ssn) {
			/* this is deliverable now */
			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
			/* subtract pending on streams */
			asoc->size_on_all_streams -= ctl->length;
			sctp_ucount_decr(asoc->cnt_on_all_streams);
			/* deliver it to at least the delivery-q */
			strmin->last_sequence_delivered = ctl->sinfo_ssn;
			if (stcb->sctp_socket) {
				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
				sctp_add_to_readq(stcb->sctp_ep, stcb, ctl, &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
			}
			tt = strmin->last_sequence_delivered + 1;
		} else {
			break;
		}
	}
}

static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    uint16_t stream, uint16_t seq)
{
	struct sctp_tmit_chunk *chk, *nchk;

	/* For each one on here see if we need to toss it */
	/*
	 * For now large messages held on the reasmqueue that are
	 * complete will be tossed too. We could in theory do more
	 * work to spin through and stop after dumping one msg, aka
	 * seeing the start of a new msg at the head, and call the
	 * delivery function... to see if it can be delivered... But
	 * for now we just dump everything on the queue.
	 */
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		/*
		 * Do not toss it if it is on a different stream or marked
		 * for unordered delivery, in which case the stream
		 * sequence number has no meaning.
		 */
		if ((chk->rec.data.stream_number != stream) ||
		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
			continue;
		}
		if (chk->rec.data.stream_seq == seq) {
			/* It needs to be tossed */
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
				asoc->str_of_pdapi = chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
			}
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
			/* Clear up any stream problem */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
				/*
				 * We must advance this stream's sequence
				 * number when the chunk being skipped is
				 * ordered. There is a chance that, if the
				 * peer does not include the last fragment
				 * in its FWD-TSN, we WILL have a problem
				 * here, since you would have a partial
				 * chunk in queue that may not be
				 * deliverable. Also, if a partial delivery
				 * API has started, the user may get a
				 * partial chunk with the next read
				 * returning a new chunk... really ugly,
				 * but I see no way around it! Maybe a
				 * notify??
				 */
				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
			}
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
			/* If the stream_seq is > than the purging one, we are done */
			break;
		}
	}
}


void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd,
    int *abort_flag, struct mbuf *m, int offset)
{
	/* The pr-sctp fwd tsn */
	/*
	 * here we will perform all the data receiver side steps for
	 * processing FwdTSN, as required by the pr-sctp draft:
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update re-ordering queue on pr-in-streams
	 * 4) clean up re-assembly queue
	 * 5) Send a sack to report where we are.
	 */
	struct sctp_association *asoc;
	uint32_t new_cum_tsn, gap;
	unsigned int i, fwd_sz, m_size;
	uint32_t str_seq;
	struct sctp_stream_in *strm;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_queued_to_read *ctl, *sv;

	asoc = &stcb->asoc;
	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
		SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size too small/big fwd-tsn\n");
		return;
	}
	m_size = (stcb->asoc.mapping_array_size << 3);
	/*************************************************************/
	/* 1. Here we update local cumTSN and shift the bitmap array */
	/*************************************************************/
	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
		/* Already got there ... */
		return;
	}
	/*
	 * now we know the new TSN is more advanced, let's find the actual
	 * gap
	 */
	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
	asoc->cumulative_tsn = new_cum_tsn;
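	/*
	 * SCTP_CALC_TSN_TO_GAP() above turns the new cumulative TSN into a
	 * bit offset relative to the mapping array base. A stand-alone
	 * sketch of that distance computation under the assumption that it
	 * amounts to plain modular subtraction, so wrap-around falls out
	 * of uint32_t arithmetic; the helper name is hypothetical.
	 */
#if 0	/* illustrative sketch, not compiled */
#include <assert.h>
#include <stdint.h>

static uint32_t
tsn_to_gap(uint32_t tsn, uint32_t base_tsn)
{
	return (tsn - base_tsn);	/* distance mod 2^32 */
}

int
main(void)
{
	assert(tsn_to_gap(105, 100) == 5);
	assert(tsn_to_gap(3, 0xfffffffeU) == 5);	/* across the wrap */
	return (0);
}
#endif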
	if (gap >= m_size) {
		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
			struct mbuf *oper;

			/*
			 * out of range (of single byte chunks in the rwnd I
			 * give out). This must be an attacker.
			 */
			*abort_flag = 1;
			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), 0, M_NOWAIT, 1, MT_DATA);
			if (oper) {
				struct sctp_paramhdr *ph;
				uint32_t *ippp;

				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + (sizeof(uint32_t) * 3);
				ph = mtod(oper, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
				ph->param_length = htons(SCTP_BUF_LEN(oper));
				ippp = (uint32_t *)(ph + 1);
				*ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
				ippp++;
				*ippp = asoc->highest_tsn_inside_map;
				ippp++;
				*ippp = new_cum_tsn;
			}
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
			sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
			return;
		}
		SCTP_STAT_INCR(sctps_fwdtsn_map_over);

		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
		asoc->highest_tsn_inside_map = new_cum_tsn;

		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->highest_tsn_inside_nr_map = new_cum_tsn;

		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
	} else {
		SCTP_TCB_LOCK_ASSERT(stcb);
		for (i = 0; i <= gap; i++) {
			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
				}
			}
		}
	}
	/*************************************************************/
	/* 2. Clear up re-assembly queue                             */
	/*************************************************************/
	/*
	 * First service it if pd-api is up, just in case we can progress it
	 * forward
	 */
	if (asoc->fragmented_delivery_inprogress) {
		sctp_service_reassembly(stcb, asoc);
	}
	/* For each one on here see if we need to toss it */
	/*
	 * For now large messages held on the reasmqueue that are
	 * complete will be tossed too. We could in theory do more
	 * work to spin through and stop after dumping one msg, aka
	 * seeing the start of a new msg at the head, and call the
	 * delivery function... to see if it can be delivered... But
	 * for now we just dump everything on the queue.
	 */
	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
			/* It needs to be tossed */
			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
				asoc->str_of_pdapi = chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
			}
			asoc->size_on_reasm_queue -= chk->send_size;
			sctp_ucount_decr(asoc->cnt_on_reasm_queue);

			/* Clear up any stream problem */
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
				/*
				 * We must advance this stream's sequence
				 * number when the chunk being skipped is
				 * ordered. There is a chance that, if the
				 * peer does not include the last fragment
				 * in its FWD-TSN, we WILL have a problem
				 * here, since you would have a partial
				 * chunk in queue that may not be
				 * deliverable. Also, if a partial delivery
				 * API has started, the user may get a
				 * partial chunk with the next read
				 * returning a new chunk... really ugly,
				 * but I see no way around it! Maybe a
				 * notify??
				 */
				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
			}
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		} else {
			/*
			 * Ok we have gone beyond the end of the
			 * fwd-tsn's mark.
			 */
			break;
		}
	}
	/*******************************************************/
	/* 3. Update the PR-stream re-ordering queues and fix  */
	/*    delivery issues as needed.                       */
	/*******************************************************/
	fwd_sz -= sizeof(*fwd);
	if (m && fwd_sz) {
		/* New method. */
		unsigned int num_str;
		struct sctp_strseq *stseq, strseqbuf;

		offset += sizeof(*fwd);

		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		num_str = fwd_sz / sizeof(struct sctp_strseq);
		for (i = 0; i < num_str; i++) {
			uint16_t st;

			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset, sizeof(struct sctp_strseq), (uint8_t *)&strseqbuf);
			offset += sizeof(struct sctp_strseq);
			if (stseq == NULL) {
				break;
			}
			/* Convert */
			st = ntohs(stseq->stream);
			stseq->stream = st;
			st = ntohs(stseq->sequence);
			stseq->sequence = st;

			/* now process */

			/*
			 * Ok, we now look for the stream/seq on the read
			 * queue where it is not all delivered. If we find
			 * it, we transmute the read entry into a
			 * PDI_ABORTED.
			 */
			if (stseq->stream >= asoc->streamincnt) {
				/* screwed up streams, stop! */
*/ michael@0: break; michael@0: } michael@0: if ((asoc->str_of_pdapi == stseq->stream) && michael@0: (asoc->ssn_of_pdapi == stseq->sequence)) { michael@0: /* If this is the one we were partially delivering michael@0: * now then we no longer are. Note this will change michael@0: * with the reassembly re-write. michael@0: */ michael@0: asoc->fragmented_delivery_inprogress = 0; michael@0: } michael@0: sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence); michael@0: TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) { michael@0: if ((ctl->sinfo_stream == stseq->stream) && michael@0: (ctl->sinfo_ssn == stseq->sequence)) { michael@0: str_seq = (stseq->stream << 16) | stseq->sequence; michael@0: ctl->end_added = 1; michael@0: ctl->pdapi_aborted = 1; michael@0: sv = stcb->asoc.control_pdapi; michael@0: stcb->asoc.control_pdapi = ctl; michael@0: sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, michael@0: stcb, michael@0: SCTP_PARTIAL_DELIVERY_ABORTED, michael@0: (void *)&str_seq, michael@0: SCTP_SO_NOT_LOCKED); michael@0: stcb->asoc.control_pdapi = sv; michael@0: break; michael@0: } else if ((ctl->sinfo_stream == stseq->stream) && michael@0: SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) { michael@0: /* We are past our victim SSN */ michael@0: break; michael@0: } michael@0: } michael@0: strm = &asoc->strmin[stseq->stream]; michael@0: if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) { michael@0: /* Update the sequence number */ michael@0: strm->last_sequence_delivered = stseq->sequence; michael@0: } michael@0: /* now kick the stream the new way */ michael@0: /*sa_ignore NO_NULL_CHK*/ michael@0: sctp_kick_prsctp_reorder_queue(stcb, strm); michael@0: } michael@0: SCTP_INP_READ_UNLOCK(stcb->sctp_ep); michael@0: } michael@0: /* michael@0: * Now slide thing forward. michael@0: */ michael@0: sctp_slide_mapping_arrays(stcb); michael@0: michael@0: if (!TAILQ_EMPTY(&asoc->reasmqueue)) { michael@0: /* now lets kick out and check for more fragmented delivery */ michael@0: /*sa_ignore NO_NULL_CHK*/ michael@0: sctp_deliver_reasm_check(stcb, &stcb->asoc); michael@0: } michael@0: }