|
1 /*- |
|
2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. |
|
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. |
|
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. |
|
5 * |
|
6 * Redistribution and use in source and binary forms, with or without |
|
7 * modification, are permitted provided that the following conditions are met: |
|
8 * |
|
9 * a) Redistributions of source code must retain the above copyright notice, |
|
10 * this list of conditions and the following disclaimer. |
|
11 * |
|
12 * b) Redistributions in binary form must reproduce the above copyright |
|
13 * notice, this list of conditions and the following disclaimer in |
|
14 * the documentation and/or other materials provided with the distribution. |
|
15 * |
|
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its |
|
17 * contributors may be used to endorse or promote products derived |
|
18 * from this software without specific prior written permission. |
|
19 * |
|
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
|
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, |
|
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
|
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
|
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
|
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
|
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
|
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
|
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
|
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
|
30 * THE POSSIBILITY OF SUCH DAMAGE. |
|
31 */ |
|
32 |
|
33 #ifdef __FreeBSD__ |
|
34 #include <sys/cdefs.h> |
|
35 __FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 258228 2013-11-16 16:09:09Z tuexen $"); |
|
36 #endif |
|
37 |
|
38 #include <netinet/sctp_os.h> |
|
39 #include <netinet/sctp_var.h> |
|
40 #include <netinet/sctp_sysctl.h> |
|
41 #include <netinet/sctp_pcb.h> |
|
42 #include <netinet/sctp_header.h> |
|
43 #include <netinet/sctputil.h> |
|
44 #include <netinet/sctp_output.h> |
|
45 #include <netinet/sctp_input.h> |
|
46 #include <netinet/sctp_indata.h> |
|
47 #include <netinet/sctp_uio.h> |
|
48 #include <netinet/sctp_timer.h> |
|
49 |
|
50 |
|
51 /* |
|
52 * NOTES: On the outbound side of things I need to check the sack timer to |
|
53 * see if I should generate a sack into the chunk queue (if I have data to |
|
54 * send, that is, and will be sending it) for bundling. |
|
55 * |
|
56 * The callback in sctp_usrreq.c will get called when the socket is read from. |
|
57 * This will cause sctp_service_queues() to get called on the top entry in |
|
58 * the list. |
|
59 */ |
|
60 |
|
61 void |
|
62 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) |
|
63 { |
|
64 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc); |
|
65 } |
|
66 |
|
67 /* Calculate what the rwnd would be */ |
|
68 uint32_t |
|
69 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) |
|
70 { |
|
71 uint32_t calc = 0; |
|
72 |
|
73 /* |
|
74 * This is really set wrong with respect to a 1-2-m socket. Since |
|
75 * the sb_cc is the count that everyone has put up. When we re-write |
|
76 * sctp_soreceive then we will fix this so that ONLY this |
|
77 * association's data is taken into account. |
|
78 */ |
|
79 if (stcb->sctp_socket == NULL) |
|
80 return (calc); |
|
81 |
|
82 if (stcb->asoc.sb_cc == 0 && |
|
83 asoc->size_on_reasm_queue == 0 && |
|
84 asoc->size_on_all_streams == 0) { |
|
85 /* Full rwnd granted */ |
|
86 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND); |
|
87 return (calc); |
|
88 } |
|
89 /* get actual space */ |
|
90 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv); |
|
91 |
|
92 /* |
|
93 * take out what has NOT been put on the socket queue and what we still |
|
94 * hold for putting up. |
|
95 */ |
|
96 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue + |
|
97 asoc->cnt_on_reasm_queue * MSIZE)); |
|
98 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams + |
|
99 asoc->cnt_on_all_streams * MSIZE)); |
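/*
 * Note: besides the queued bytes themselves, each queued chunk is charged
 * one MSIZE of overhead above (cnt * MSIZE) to account for the mbuf
 * holding it.
 */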
|
100 |
|
101 if (calc == 0) { |
|
102 /* out of space */ |
|
103 return (calc); |
|
104 } |
|
105 |
|
106 /* what is the overhead of all these rwnd's */ |
|
107 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len); |
|
108 /* If the window gets too small due to ctrl-stuff, reduce it |
|
109 * to 1, even if it is 0. SWS avoidance is engaged. |
|
110 */ |
|
111 if (calc < stcb->asoc.my_rwnd_control_len) { |
|
112 calc = 1; |
|
113 } |
|
114 return (calc); |
|
115 } |
|
116 |
|
117 |
|
118 |
|
119 /* |
|
120 * Build out our readq entry based on the incoming packet. |
|
121 */ |
|
122 struct sctp_queued_to_read * |
|
123 sctp_build_readq_entry(struct sctp_tcb *stcb, |
|
124 struct sctp_nets *net, |
|
125 uint32_t tsn, uint32_t ppid, |
|
126 uint32_t context, uint16_t stream_no, |
|
127 uint16_t stream_seq, uint8_t flags, |
|
128 struct mbuf *dm) |
|
129 { |
|
130 struct sctp_queued_to_read *read_queue_e = NULL; |
|
131 |
|
132 sctp_alloc_a_readq(stcb, read_queue_e); |
|
133 if (read_queue_e == NULL) { |
|
134 goto failed_build; |
|
135 } |
|
136 read_queue_e->sinfo_stream = stream_no; |
|
137 read_queue_e->sinfo_ssn = stream_seq; |
|
138 read_queue_e->sinfo_flags = (flags << 8); |
|
139 read_queue_e->sinfo_ppid = ppid; |
|
140 read_queue_e->sinfo_context = context; |
|
141 read_queue_e->sinfo_timetolive = 0; |
|
142 read_queue_e->sinfo_tsn = tsn; |
|
143 read_queue_e->sinfo_cumtsn = tsn; |
|
144 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb); |
|
145 read_queue_e->whoFrom = net; |
|
146 read_queue_e->length = 0; |
|
147 atomic_add_int(&net->ref_count, 1); |
|
148 read_queue_e->data = dm; |
|
149 read_queue_e->spec_flags = 0; |
|
150 read_queue_e->tail_mbuf = NULL; |
|
151 read_queue_e->aux_data = NULL; |
|
152 read_queue_e->stcb = stcb; |
|
153 read_queue_e->port_from = stcb->rport; |
|
154 read_queue_e->do_not_ref_stcb = 0; |
|
155 read_queue_e->end_added = 0; |
|
156 read_queue_e->some_taken = 0; |
|
157 read_queue_e->pdapi_aborted = 0; |
|
158 failed_build: |
|
159 return (read_queue_e); |
|
160 } |
|
161 |
|
162 |
|
163 /* |
|
164 * Build out our readq entry based on the incoming packet. |
|
165 */ |
|
166 static struct sctp_queued_to_read * |
|
167 sctp_build_readq_entry_chk(struct sctp_tcb *stcb, |
|
168 struct sctp_tmit_chunk *chk) |
|
169 { |
|
170 struct sctp_queued_to_read *read_queue_e = NULL; |
|
171 |
|
172 sctp_alloc_a_readq(stcb, read_queue_e); |
|
173 if (read_queue_e == NULL) { |
|
174 goto failed_build; |
|
175 } |
|
176 read_queue_e->sinfo_stream = chk->rec.data.stream_number; |
|
177 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq; |
|
178 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8); |
|
179 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype; |
|
180 read_queue_e->sinfo_context = stcb->asoc.context; |
|
181 read_queue_e->sinfo_timetolive = 0; |
|
182 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq; |
|
183 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq; |
|
184 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb); |
|
185 read_queue_e->whoFrom = chk->whoTo; |
|
186 read_queue_e->aux_data = NULL; |
|
187 read_queue_e->length = 0; |
|
188 atomic_add_int(&chk->whoTo->ref_count, 1); |
|
189 read_queue_e->data = chk->data; |
|
190 read_queue_e->tail_mbuf = NULL; |
|
191 read_queue_e->stcb = stcb; |
|
192 read_queue_e->port_from = stcb->rport; |
|
193 read_queue_e->spec_flags = 0; |
|
194 read_queue_e->do_not_ref_stcb = 0; |
|
195 read_queue_e->end_added = 0; |
|
196 read_queue_e->some_taken = 0; |
|
197 read_queue_e->pdapi_aborted = 0; |
|
198 failed_build: |
|
199 return (read_queue_e); |
|
200 } |
|
201 |
|
202 |
|
203 struct mbuf * |
|
204 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo) |
|
205 { |
|
206 struct sctp_extrcvinfo *seinfo; |
|
207 struct sctp_sndrcvinfo *outinfo; |
|
208 struct sctp_rcvinfo *rcvinfo; |
|
209 struct sctp_nxtinfo *nxtinfo; |
|
210 #if defined(__Userspace_os_Windows) |
|
211 WSACMSGHDR *cmh; |
|
212 #else |
|
213 struct cmsghdr *cmh; |
|
214 #endif |
|
215 struct mbuf *ret; |
|
216 int len; |
|
217 int use_extended; |
|
218 int provide_nxt; |
|
219 |
|
220 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && |
|
221 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && |
|
222 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { |
|
223 /* user does not want any ancillary data */ |
|
224 return (NULL); |
|
225 } |
|
226 |
|
227 len = 0; |
|
228 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) { |
|
229 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo)); |
|
230 } |
|
231 seinfo = (struct sctp_extrcvinfo *)sinfo; |
|
232 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) && |
|
233 (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) { |
|
234 provide_nxt = 1; |
|
235 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo)); |
|
236 } else { |
|
237 provide_nxt = 0; |
|
238 } |
|
239 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) { |
|
240 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) { |
|
241 use_extended = 1; |
|
242 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo)); |
|
243 } else { |
|
244 use_extended = 0; |
|
245 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo)); |
|
246 } |
|
247 } else { |
|
248 use_extended = 0; |
|
249 } |
|
250 |
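/*
 * len now holds the total CMSG_SPACE() of every cmsg we will provide;
 * allocate a single mbuf big enough for all of them.
 */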
|
251 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); |
|
252 if (ret == NULL) { |
|
253 /* No space */ |
|
254 return (ret); |
|
255 } |
|
256 SCTP_BUF_LEN(ret) = 0; |
|
257 |
|
258 /* We need a CMSG header followed by the struct */ |
|
259 #if defined(__Userspace_os_Windows) |
|
260 cmh = mtod(ret, WSACMSGHDR *); |
|
261 #else |
|
262 cmh = mtod(ret, struct cmsghdr *); |
|
263 #endif |
|
264 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) { |
|
265 cmh->cmsg_level = IPPROTO_SCTP; |
|
266 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo)); |
|
267 cmh->cmsg_type = SCTP_RCVINFO; |
|
268 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh); |
|
269 rcvinfo->rcv_sid = sinfo->sinfo_stream; |
|
270 rcvinfo->rcv_ssn = sinfo->sinfo_ssn; |
|
271 rcvinfo->rcv_flags = sinfo->sinfo_flags; |
|
272 rcvinfo->rcv_ppid = sinfo->sinfo_ppid; |
|
273 rcvinfo->rcv_tsn = sinfo->sinfo_tsn; |
|
274 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn; |
|
275 rcvinfo->rcv_context = sinfo->sinfo_context; |
|
276 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id; |
|
277 #if defined(__Userspace_os_Windows) |
|
278 cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo))); |
|
279 #else |
|
280 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo))); |
|
281 #endif |
|
282 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo)); |
|
283 } |
|
284 if (provide_nxt) { |
|
285 cmh->cmsg_level = IPPROTO_SCTP; |
|
286 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo)); |
|
287 cmh->cmsg_type = SCTP_NXTINFO; |
|
288 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh); |
|
289 nxtinfo->nxt_sid = seinfo->sreinfo_next_stream; |
|
290 nxtinfo->nxt_flags = 0; |
|
291 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) { |
|
292 nxtinfo->nxt_flags |= SCTP_UNORDERED; |
|
293 } |
|
294 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) { |
|
295 nxtinfo->nxt_flags |= SCTP_NOTIFICATION; |
|
296 } |
|
297 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) { |
|
298 nxtinfo->nxt_flags |= SCTP_COMPLETE; |
|
299 } |
|
300 nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid; |
|
301 nxtinfo->nxt_length = seinfo->sreinfo_next_length; |
|
302 nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid; |
|
303 #if defined(__Userspace_os_Windows) |
|
304 cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo))); |
|
305 #else |
|
306 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo))); |
|
307 #endif |
|
308 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo)); |
|
309 } |
|
310 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) { |
|
311 cmh->cmsg_level = IPPROTO_SCTP; |
|
312 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh); |
|
313 if (use_extended) { |
|
314 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo)); |
|
315 cmh->cmsg_type = SCTP_EXTRCV; |
|
316 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo)); |
|
317 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo)); |
|
318 } else { |
|
319 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); |
|
320 cmh->cmsg_type = SCTP_SNDRCV; |
|
321 *outinfo = *sinfo; |
|
322 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo)); |
|
323 } |
|
324 } |
|
325 return (ret); |
|
326 } |
|
327 |
|
328 |
|
329 static void |
|
330 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn) |
|
331 { |
|
332 uint32_t gap, i, cumackp1; |
|
333 int fnd = 0; |
|
334 |
|
335 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { |
|
336 return; |
|
337 } |
|
338 cumackp1 = asoc->cumulative_tsn + 1; |
|
339 if (SCTP_TSN_GT(cumackp1, tsn)) { |
|
340 /* this tsn is behind the cum ack and thus we don't |
|
341 * need to worry about it being moved from one to the other. |
|
342 */ |
|
343 return; |
|
344 } |
|
345 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); |
|
346 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) { |
|
347 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn); |
|
348 sctp_print_mapping_array(asoc); |
|
349 #ifdef INVARIANTS |
|
350 panic("Things are really messed up now!!"); |
|
351 #endif |
|
352 } |
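/*
 * Move the TSN from the renegable mapping array to the non-renegable (nr)
 * one, so it will not be reported as revokable.
 */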
|
353 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); |
|
354 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); |
|
355 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { |
|
356 asoc->highest_tsn_inside_nr_map = tsn; |
|
357 } |
|
358 if (tsn == asoc->highest_tsn_inside_map) { |
|
359 /* We must back down to see what the new highest is */ |
|
360 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) { |
|
361 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn); |
|
362 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) { |
|
363 asoc->highest_tsn_inside_map = i; |
|
364 fnd = 1; |
|
365 break; |
|
366 } |
|
367 } |
|
368 if (!fnd) { |
|
369 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1; |
|
370 } |
|
371 } |
|
372 } |
|
373 |
|
374 |
|
375 /* |
|
376 * We are currently delivering from the reassembly queue. We must continue to |
|
377 * deliver until we either: 1) run out of space, 2) run out of sequential |
|
378 * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag. |
|
379 */ |
|
380 static void |
|
381 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc) |
|
382 { |
|
383 struct sctp_tmit_chunk *chk, *nchk; |
|
384 uint16_t nxt_todel; |
|
385 uint16_t stream_no; |
|
386 int end = 0; |
|
387 int cntDel; |
|
388 struct sctp_queued_to_read *control, *ctl, *nctl; |
|
389 |
|
390 if (stcb == NULL) |
|
391 return; |
|
392 |
|
393 cntDel = stream_no = 0; |
|
394 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || |
|
395 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) || |
|
396 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { |
|
397 /* socket above is long gone or going.. */ |
|
398 abandon: |
|
399 asoc->fragmented_delivery_inprogress = 0; |
|
400 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) { |
|
401 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); |
|
402 asoc->size_on_reasm_queue -= chk->send_size; |
|
403 sctp_ucount_decr(asoc->cnt_on_reasm_queue); |
|
404 /* |
|
405 * Lose the data pointer, since it's in the socket |
|
406 * buffer |
|
407 */ |
|
408 if (chk->data) { |
|
409 sctp_m_freem(chk->data); |
|
410 chk->data = NULL; |
|
411 } |
|
412 /* Now free the address and data */ |
|
413 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
|
414 /*sa_ignore FREED_MEMORY*/ |
|
415 } |
|
416 return; |
|
417 } |
|
418 SCTP_TCB_LOCK_ASSERT(stcb); |
|
419 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) { |
|
420 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) { |
|
421 /* Can't deliver more :< */ |
|
422 return; |
|
423 } |
|
424 stream_no = chk->rec.data.stream_number; |
|
425 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1; |
|
426 if (nxt_todel != chk->rec.data.stream_seq && |
|
427 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) { |
|
428 /* |
|
429 * Not the next sequence to deliver in its stream and |
|
430 * not unordered, so we cannot deliver it yet |
|
431 */ |
|
432 return; |
|
433 } |
|
434 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { |
|
435 |
|
436 control = sctp_build_readq_entry_chk(stcb, chk); |
|
437 if (control == NULL) { |
|
438 /* out of memory? */ |
|
439 return; |
|
440 } |
|
441 /* save it off for our future deliveries */ |
|
442 stcb->asoc.control_pdapi = control; |
|
443 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) |
|
444 end = 1; |
|
445 else |
|
446 end = 0; |
|
447 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq); |
|
448 sctp_add_to_readq(stcb->sctp_ep, |
|
449 stcb, control, &stcb->sctp_socket->so_rcv, end, |
|
450 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); |
|
451 cntDel++; |
|
452 } else { |
|
453 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) |
|
454 end = 1; |
|
455 else |
|
456 end = 0; |
|
457 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq); |
|
458 if (sctp_append_to_readq(stcb->sctp_ep, stcb, |
|
459 stcb->asoc.control_pdapi, |
|
460 chk->data, end, chk->rec.data.TSN_seq, |
|
461 &stcb->sctp_socket->so_rcv)) { |
|
462 /* |
|
463 * something is very wrong, either |
|
464 * control_pdapi is NULL, or the tail_mbuf |
|
465 * is corrupt, or there is an EOM already on |
|
466 * the mbuf chain. |
|
467 */ |
|
468 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { |
|
469 goto abandon; |
|
470 } else { |
|
471 #ifdef INVARIANTS |
|
472 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) { |
|
473 panic("This should not happen control_pdapi NULL?"); |
|
474 } |
|
475 /* if we did not panic, it was a EOM */ |
|
476 panic("Bad chunking ??"); |
|
477 #else |
|
478 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) { |
|
479 SCTP_PRINTF("This should not happen control_pdapi NULL?\n"); |
|
480 } |
|
481 SCTP_PRINTF("Bad chunking ??\n"); |
|
482 SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n"); |
|
483 |
|
484 #endif |
|
485 goto abandon; |
|
486 } |
|
487 } |
|
488 cntDel++; |
|
489 } |
|
490 /* pull it off, we delivered it */ |
|
491 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); |
|
492 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { |
|
493 asoc->fragmented_delivery_inprogress = 0; |
|
494 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) { |
|
495 asoc->strmin[stream_no].last_sequence_delivered++; |
|
496 } |
|
497 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { |
|
498 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); |
|
499 } |
|
500 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { |
|
501 /* |
|
502 * turn the flag back on since we just delivered |
|
503 * yet another one. |
|
504 */ |
|
505 asoc->fragmented_delivery_inprogress = 1; |
|
506 } |
|
507 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq; |
|
508 asoc->last_flags_delivered = chk->rec.data.rcv_flags; |
|
509 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq; |
|
510 asoc->last_strm_no_delivered = chk->rec.data.stream_number; |
|
511 |
|
512 asoc->tsn_last_delivered = chk->rec.data.TSN_seq; |
|
513 asoc->size_on_reasm_queue -= chk->send_size; |
|
514 sctp_ucount_decr(asoc->cnt_on_reasm_queue); |
|
515 /* free up the chk */ |
|
516 chk->data = NULL; |
|
517 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
|
518 |
|
519 if (asoc->fragmented_delivery_inprogress == 0) { |
|
520 /* |
|
521 * Now lets see if we can deliver the next one on |
|
522 * the stream |
|
523 */ |
|
524 struct sctp_stream_in *strm; |
|
525 |
|
526 strm = &asoc->strmin[stream_no]; |
|
527 nxt_todel = strm->last_sequence_delivered + 1; |
|
528 TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) { |
|
529 /* Deliver more if we can. */ |
|
530 if (nxt_todel == ctl->sinfo_ssn) { |
|
531 TAILQ_REMOVE(&strm->inqueue, ctl, next); |
|
532 asoc->size_on_all_streams -= ctl->length; |
|
533 sctp_ucount_decr(asoc->cnt_on_all_streams); |
|
534 strm->last_sequence_delivered++; |
|
535 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn); |
|
536 sctp_add_to_readq(stcb->sctp_ep, stcb, |
|
537 ctl, |
|
538 &stcb->sctp_socket->so_rcv, 1, |
|
539 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); |
|
540 } else { |
|
541 break; |
|
542 } |
|
543 nxt_todel = strm->last_sequence_delivered + 1; |
|
544 } |
|
545 break; |
|
546 } |
|
547 } |
|
548 } |
|
549 |
|
550 /* |
|
551 * Queue the chunk either right into the socket buffer if it is the next one |
|
552 * to go OR put it in the correct place in the delivery queue. If we do |
|
553 * append to the so_buf, keep doing so until we hit an out-of-order entry. One big |
|
554 * question still remains: what do we do when the socket buffer is FULL? |
|
555 */ |
|
556 static void |
|
557 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc, |
|
558 struct sctp_queued_to_read *control, int *abort_flag) |
|
559 { |
|
560 /* |
|
561 * FIX-ME maybe? What happens when the ssn wraps? If we are getting |
|
562 * all the data in one stream this could happen quite rapidly. One |
|
563 * could use the TSN to keep track of things, but this scheme breaks |
|
564 * down in the other type of stream usage that could occur. Send a |
|
565 * single msg to stream 0, send 4 billion messages to stream 1, now |
|
566 * send a message to stream 0. You have a situation where the TSN |
|
567 * has wrapped but not in the stream. Is this worth worrying about, |
|
568 * or should we just change our queue sort at the bottom to be by |
|
569 * TSN? |
|
570 * |
|
571 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2 |
|
572 * with TSN 1? If the peer is doing some sort of funky TSN/SSN |
|
573 * assignment this could happen... and I don't see how this would be |
|
574 * a violation. So for now I am undecided and will leave the sort by |
|
575 * SSN alone. Maybe a hybrid approach is the answer. |
|
576 * |
|
577 */ |
|
578 struct sctp_stream_in *strm; |
|
579 struct sctp_queued_to_read *at; |
|
580 int queue_needed; |
|
581 uint16_t nxt_todel; |
|
582 struct mbuf *oper; |
|
583 |
|
584 queue_needed = 1; |
|
585 asoc->size_on_all_streams += control->length; |
|
586 sctp_ucount_incr(asoc->cnt_on_all_streams); |
|
587 strm = &asoc->strmin[control->sinfo_stream]; |
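/*
 * nxt_todel is the next stream sequence number expected for in-order
 * delivery on this stream.
 */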
|
588 nxt_todel = strm->last_sequence_delivered + 1; |
|
589 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
|
590 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD); |
|
591 } |
|
592 SCTPDBG(SCTP_DEBUG_INDATA1, |
|
593 "queue to stream called for ssn:%u lastdel:%u nxt:%u\n", |
|
594 (uint32_t) control->sinfo_stream, |
|
595 (uint32_t) strm->last_sequence_delivered, |
|
596 (uint32_t) nxt_todel); |
|
597 if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) { |
|
598 /* The incoming sseq is behind where we last delivered? */ |
|
599 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n", |
|
600 control->sinfo_ssn, strm->last_sequence_delivered); |
|
601 protocol_error: |
|
602 /* |
|
603 * throw it in the stream so it gets cleaned up in |
|
604 * association destruction |
|
605 */ |
|
606 TAILQ_INSERT_HEAD(&strm->inqueue, control, next); |
|
607 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), |
|
608 0, M_NOWAIT, 1, MT_DATA); |
|
609 if (oper) { |
|
610 struct sctp_paramhdr *ph; |
|
611 uint32_t *ippp; |
|
612 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + |
|
613 (sizeof(uint32_t) * 3); |
|
614 ph = mtod(oper, struct sctp_paramhdr *); |
|
615 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
616 ph->param_length = htons(SCTP_BUF_LEN(oper)); |
|
617 ippp = (uint32_t *) (ph + 1); |
|
618 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_1); |
|
619 ippp++; |
|
620 *ippp = control->sinfo_tsn; |
|
621 ippp++; |
|
622 *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn); |
|
623 } |
|
624 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_1; |
|
625 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
626 *abort_flag = 1; |
|
627 return; |
|
628 |
|
629 } |
|
630 if (nxt_todel == control->sinfo_ssn) { |
|
631 /* can be delivered right away? */ |
|
632 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
|
633 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL); |
|
634 } |
|
635 /* EY it won't be queued if it can be delivered directly */ |
|
636 queue_needed = 0; |
|
637 asoc->size_on_all_streams -= control->length; |
|
638 sctp_ucount_decr(asoc->cnt_on_all_streams); |
|
639 strm->last_sequence_delivered++; |
|
640 |
|
641 sctp_mark_non_revokable(asoc, control->sinfo_tsn); |
|
642 sctp_add_to_readq(stcb->sctp_ep, stcb, |
|
643 control, |
|
644 &stcb->sctp_socket->so_rcv, 1, |
|
645 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); |
|
646 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) { |
|
647 /* all delivered */ |
|
648 nxt_todel = strm->last_sequence_delivered + 1; |
|
649 if (nxt_todel == control->sinfo_ssn) { |
|
650 TAILQ_REMOVE(&strm->inqueue, control, next); |
|
651 asoc->size_on_all_streams -= control->length; |
|
652 sctp_ucount_decr(asoc->cnt_on_all_streams); |
|
653 strm->last_sequence_delivered++; |
|
654 /* |
|
655 * We ignore the return of deliver_data here |
|
656 * since we always can hold the chunk on the |
|
657 * d-queue. And we have a finite number that |
|
658 * can be delivered from the strq. |
|
659 */ |
|
660 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
|
661 sctp_log_strm_del(control, NULL, |
|
662 SCTP_STR_LOG_FROM_IMMED_DEL); |
|
663 } |
|
664 sctp_mark_non_revokable(asoc, control->sinfo_tsn); |
|
665 sctp_add_to_readq(stcb->sctp_ep, stcb, |
|
666 control, |
|
667 &stcb->sctp_socket->so_rcv, 1, |
|
668 SCTP_READ_LOCK_NOT_HELD, |
|
669 SCTP_SO_NOT_LOCKED); |
|
670 continue; |
|
671 } |
|
672 break; |
|
673 } |
|
674 } |
|
675 if (queue_needed) { |
|
676 /* |
|
677 * Ok, we did not deliver this guy, find the correct place |
|
678 * to put it on the queue. |
|
679 */ |
|
680 if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) { |
|
681 goto protocol_error; |
|
682 } |
|
683 if (TAILQ_EMPTY(&strm->inqueue)) { |
|
684 /* Empty queue */ |
|
685 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
|
686 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD); |
|
687 } |
|
688 TAILQ_INSERT_HEAD(&strm->inqueue, control, next); |
|
689 } else { |
|
690 TAILQ_FOREACH(at, &strm->inqueue, next) { |
|
691 if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) { |
|
692 /* |
|
693 * one in queue is bigger than the |
|
694 * new one, insert before this one |
|
695 */ |
|
696 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
|
697 sctp_log_strm_del(control, at, |
|
698 SCTP_STR_LOG_FROM_INSERT_MD); |
|
699 } |
|
700 TAILQ_INSERT_BEFORE(at, control, next); |
|
701 break; |
|
702 } else if (at->sinfo_ssn == control->sinfo_ssn) { |
|
703 /* |
|
704 * Gak, He sent me a duplicate str |
|
705 * seq number |
|
706 */ |
|
707 /* |
|
708 * foo bar, I guess I will just free |
|
709 * this new guy, should we abort |
|
710 * too? FIX ME MAYBE? Or it COULD be |
|
711 * that the SSN's have wrapped. |
|
712 * Maybe I should compare to TSN |
|
713 * somehow... sigh for now just blow |
|
714 * away the chunk! |
|
715 */ |
|
716 |
|
717 if (control->data) |
|
718 sctp_m_freem(control->data); |
|
719 control->data = NULL; |
|
720 asoc->size_on_all_streams -= control->length; |
|
721 sctp_ucount_decr(asoc->cnt_on_all_streams); |
|
722 if (control->whoFrom) { |
|
723 sctp_free_remote_addr(control->whoFrom); |
|
724 control->whoFrom = NULL; |
|
725 } |
|
726 sctp_free_a_readq(stcb, control); |
|
727 return; |
|
728 } else { |
|
729 if (TAILQ_NEXT(at, next) == NULL) { |
|
730 /* |
|
731 * We are at the end, insert |
|
732 * it after this one |
|
733 */ |
|
734 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
|
735 sctp_log_strm_del(control, at, |
|
736 SCTP_STR_LOG_FROM_INSERT_TL); |
|
737 } |
|
738 TAILQ_INSERT_AFTER(&strm->inqueue, |
|
739 at, control, next); |
|
740 break; |
|
741 } |
|
742 } |
|
743 } |
|
744 } |
|
745 } |
|
746 } |
|
747 |
|
748 /* |
|
749 * Returns two things: You get the total size of the deliverable parts of the |
|
750 * first fragmented message on the reassembly queue, and you get a 1 back if |
|
751 * all of the message is ready or a 0 back if the message is still incomplete. |
|
752 */ |
|
753 static int |
|
754 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t *t_size) |
|
755 { |
|
756 struct sctp_tmit_chunk *chk; |
|
757 uint32_t tsn; |
|
758 |
|
759 *t_size = 0; |
|
760 chk = TAILQ_FIRST(&asoc->reasmqueue); |
|
761 if (chk == NULL) { |
|
762 /* nothing on the queue */ |
|
763 return (0); |
|
764 } |
|
765 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { |
|
766 /* Not a first on the queue */ |
|
767 return (0); |
|
768 } |
|
769 tsn = chk->rec.data.TSN_seq; |
|
770 TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) { |
|
771 if (tsn != chk->rec.data.TSN_seq) { |
|
772 return (0); |
|
773 } |
|
774 *t_size += chk->send_size; |
|
775 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { |
|
776 return (1); |
|
777 } |
|
778 tsn++; |
|
779 } |
|
780 return (0); |
|
781 } |
|
782 |
|
783 static void |
|
784 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc) |
|
785 { |
|
786 struct sctp_tmit_chunk *chk; |
|
787 uint16_t nxt_todel; |
|
788 uint32_t tsize, pd_point; |
|
789 |
|
790 doit_again: |
|
791 chk = TAILQ_FIRST(&asoc->reasmqueue); |
|
792 if (chk == NULL) { |
|
793 /* Huh? */ |
|
794 asoc->size_on_reasm_queue = 0; |
|
795 asoc->cnt_on_reasm_queue = 0; |
|
796 return; |
|
797 } |
|
798 if (asoc->fragmented_delivery_inprogress == 0) { |
|
799 nxt_todel = |
|
800 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1; |
|
801 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) && |
|
802 (nxt_todel == chk->rec.data.stream_seq || |
|
803 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) { |
|
804 /* |
|
805 * Yep the first one is here and its ok to deliver |
|
806 * but should we? |
|
807 */ |
|
808 if (stcb->sctp_socket) { |
|
809 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT, |
|
810 stcb->sctp_ep->partial_delivery_point); |
|
811 } else { |
|
812 pd_point = stcb->sctp_ep->partial_delivery_point; |
|
813 } |
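/*
 * Deliver only if the whole message has arrived or at least pd_point
 * bytes are queued (the partial delivery threshold).
 */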
|
814 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) { |
|
815 /* |
|
816 * Yes, we set up to start reception by |
|
817 * backing down the TSN just in case we |
|
818 * can't deliver. |
|
819 */ |
|
820 asoc->fragmented_delivery_inprogress = 1; |
|
821 asoc->tsn_last_delivered = |
|
822 chk->rec.data.TSN_seq - 1; |
|
823 asoc->str_of_pdapi = |
|
824 chk->rec.data.stream_number; |
|
825 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; |
|
826 asoc->pdapi_ppid = chk->rec.data.payloadtype; |
|
827 asoc->fragment_flags = chk->rec.data.rcv_flags; |
|
828 sctp_service_reassembly(stcb, asoc); |
|
829 } |
|
830 } |
|
831 } else { |
|
832 /* Service re-assembly will deliver stream data queued |
|
833 * at the end of fragmented delivery.. but it won't know |
|
834 * to go back and call itself again... we do that here |
|
835 * with the goto doit_again |
|
836 */ |
|
837 sctp_service_reassembly(stcb, asoc); |
|
838 if (asoc->fragmented_delivery_inprogress == 0) { |
|
839 /* finished our Fragmented delivery, could be |
|
840 * more waiting? |
|
841 */ |
|
842 goto doit_again; |
|
843 } |
|
844 } |
|
845 } |
|
846 |
|
847 /* |
|
848 * Dump onto the re-assembly queue, in its proper place. After dumping on the |
|
849 * queue, see if anything can be delivered. If so pull it off (or as much as |
|
850 * we can). If we run out of space then we must dump what we can and set the |
|
851 * appropriate flag to say we queued what we could. |
|
852 */ |
|
853 static void |
|
854 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, |
|
855 struct sctp_tmit_chunk *chk, int *abort_flag) |
|
856 { |
|
857 struct mbuf *oper; |
|
858 uint32_t cum_ackp1, prev_tsn, post_tsn; |
|
859 struct sctp_tmit_chunk *at, *prev, *next; |
|
860 |
|
861 prev = next = NULL; |
|
862 cum_ackp1 = asoc->tsn_last_delivered + 1; |
|
863 if (TAILQ_EMPTY(&asoc->reasmqueue)) { |
|
864 /* This is the first one on the queue */ |
|
865 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next); |
|
866 /* |
|
867 * we do not check for delivery of anything when only one |
|
868 * fragment is here |
|
869 */ |
|
870 asoc->size_on_reasm_queue = chk->send_size; |
|
871 sctp_ucount_incr(asoc->cnt_on_reasm_queue); |
|
872 if (chk->rec.data.TSN_seq == cum_ackp1) { |
|
873 if (asoc->fragmented_delivery_inprogress == 0 && |
|
874 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) != |
|
875 SCTP_DATA_FIRST_FRAG) { |
|
876 /* |
|
877 * An empty queue, no delivery inprogress, |
|
878 * we hit the next one and it does NOT have |
|
879 * a FIRST fragment mark. |
|
880 */ |
|
881 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n"); |
|
882 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), |
|
883 0, M_NOWAIT, 1, MT_DATA); |
|
884 |
|
885 if (oper) { |
|
886 struct sctp_paramhdr *ph; |
|
887 uint32_t *ippp; |
|
888 |
|
889 SCTP_BUF_LEN(oper) = |
|
890 sizeof(struct sctp_paramhdr) + |
|
891 (sizeof(uint32_t) * 3); |
|
892 ph = mtod(oper, struct sctp_paramhdr *); |
|
893 ph->param_type = |
|
894 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
895 ph->param_length = htons(SCTP_BUF_LEN(oper)); |
|
896 ippp = (uint32_t *) (ph + 1); |
|
897 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_2); |
|
898 ippp++; |
|
899 *ippp = chk->rec.data.TSN_seq; |
|
900 ippp++; |
|
901 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); |
|
902 |
|
903 } |
|
904 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_2; |
|
905 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
906 *abort_flag = 1; |
|
907 } else if (asoc->fragmented_delivery_inprogress && |
|
908 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) { |
|
909 /* |
|
910 * We are doing a partial delivery and the |
|
911 * NEXT chunk MUST be either the LAST or |
|
912 * MIDDLE fragment NOT a FIRST |
|
913 */ |
|
914 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n"); |
|
915 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), |
|
916 0, M_NOWAIT, 1, MT_DATA); |
|
917 if (oper) { |
|
918 struct sctp_paramhdr *ph; |
|
919 uint32_t *ippp; |
|
920 |
|
921 SCTP_BUF_LEN(oper) = |
|
922 sizeof(struct sctp_paramhdr) + |
|
923 (3 *sizeof(uint32_t)); |
|
924 ph = mtod(oper, struct sctp_paramhdr *); |
|
925 ph->param_type = |
|
926 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
927 ph->param_length = htons(SCTP_BUF_LEN(oper)); |
|
928 ippp = (uint32_t *) (ph + 1); |
|
929 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_3); |
|
930 ippp++; |
|
931 *ippp = chk->rec.data.TSN_seq; |
|
932 ippp++; |
|
933 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); |
|
934 } |
|
935 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_3; |
|
936 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
937 *abort_flag = 1; |
|
938 } else if (asoc->fragmented_delivery_inprogress) { |
|
939 /* |
|
940 * Here we are ok with a MIDDLE or LAST |
|
941 * piece |
|
942 */ |
|
943 if (chk->rec.data.stream_number != |
|
944 asoc->str_of_pdapi) { |
|
945 /* Got to be the right STR No */ |
|
946 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n", |
|
947 chk->rec.data.stream_number, |
|
948 asoc->str_of_pdapi); |
|
949 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), |
|
950 0, M_NOWAIT, 1, MT_DATA); |
|
951 if (oper) { |
|
952 struct sctp_paramhdr *ph; |
|
953 uint32_t *ippp; |
|
954 |
|
955 SCTP_BUF_LEN(oper) = |
|
956 sizeof(struct sctp_paramhdr) + |
|
957 (sizeof(uint32_t) * 3); |
|
958 ph = mtod(oper, |
|
959 struct sctp_paramhdr *); |
|
960 ph->param_type = |
|
961 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
962 ph->param_length = |
|
963 htons(SCTP_BUF_LEN(oper)); |
|
964 ippp = (uint32_t *) (ph + 1); |
|
965 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_4); |
|
966 ippp++; |
|
967 *ippp = chk->rec.data.TSN_seq; |
|
968 ippp++; |
|
969 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); |
|
970 } |
|
971 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_4; |
|
972 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
973 *abort_flag = 1; |
|
974 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) != |
|
975 SCTP_DATA_UNORDERED && |
|
976 chk->rec.data.stream_seq != asoc->ssn_of_pdapi) { |
|
977 /* Got to be the right STR Seq */ |
|
978 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n", |
|
979 chk->rec.data.stream_seq, |
|
980 asoc->ssn_of_pdapi); |
|
981 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), |
|
982 0, M_NOWAIT, 1, MT_DATA); |
|
983 if (oper) { |
|
984 struct sctp_paramhdr *ph; |
|
985 uint32_t *ippp; |
|
986 |
|
987 SCTP_BUF_LEN(oper) = |
|
988 sizeof(struct sctp_paramhdr) + |
|
989 (3 * sizeof(uint32_t)); |
|
990 ph = mtod(oper, |
|
991 struct sctp_paramhdr *); |
|
992 ph->param_type = |
|
993 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
994 ph->param_length = |
|
995 htons(SCTP_BUF_LEN(oper)); |
|
996 ippp = (uint32_t *) (ph + 1); |
|
997 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_5); |
|
998 ippp++; |
|
999 *ippp = chk->rec.data.TSN_seq; |
|
1000 ippp++; |
|
1001 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); |
|
1002 |
|
1003 } |
|
1004 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_5; |
|
1005 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
1006 *abort_flag = 1; |
|
1007 } |
|
1008 } |
|
1009 } |
|
1010 return; |
|
1011 } |
|
1012 /* Find its place */ |
|
1013 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) { |
|
1014 if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) { |
|
1015 /* |
|
1016 * one in queue is bigger than the new one, insert |
|
1017 * before this one |
|
1018 */ |
|
1019 /* A check */ |
|
1020 asoc->size_on_reasm_queue += chk->send_size; |
|
1021 sctp_ucount_incr(asoc->cnt_on_reasm_queue); |
|
1022 next = at; |
|
1023 TAILQ_INSERT_BEFORE(at, chk, sctp_next); |
|
1024 break; |
|
1025 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) { |
|
1026 /* Gak, He sent me a duplicate str seq number */ |
|
1027 /* |
|
1028 * foo bar, I guess I will just free this new guy, |
|
1029 * should we abort too? FIX ME MAYBE? Or it COULD be |
|
1030 * that the SSN's have wrapped. Maybe I should |
|
1031 * compare to TSN somehow... sigh for now just blow |
|
1032 * away the chunk! |
|
1033 */ |
|
1034 if (chk->data) { |
|
1035 sctp_m_freem(chk->data); |
|
1036 chk->data = NULL; |
|
1037 } |
|
1038 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
|
1039 return; |
|
1040 } else { |
|
1041 prev = at; |
|
1042 if (TAILQ_NEXT(at, sctp_next) == NULL) { |
|
1043 /* |
|
1044 * We are at the end, insert it after this |
|
1045 * one |
|
1046 */ |
|
1047 /* check it first */ |
|
1048 asoc->size_on_reasm_queue += chk->send_size; |
|
1049 sctp_ucount_incr(asoc->cnt_on_reasm_queue); |
|
1050 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next); |
|
1051 break; |
|
1052 } |
|
1053 } |
|
1054 } |
|
1055 /* Now the audits */ |
|
1056 if (prev) { |
|
1057 prev_tsn = chk->rec.data.TSN_seq - 1; |
|
1058 if (prev_tsn == prev->rec.data.TSN_seq) { |
|
1059 /* |
|
1060 * Ok the one I am dropping onto the end is the |
|
1061 * NEXT. A bit of validation here. |
|
1062 */ |
|
1063 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == |
|
1064 SCTP_DATA_FIRST_FRAG || |
|
1065 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == |
|
1066 SCTP_DATA_MIDDLE_FRAG) { |
|
1067 /* |
|
1068 * Insert chk MUST be a MIDDLE or LAST |
|
1069 * fragment |
|
1070 */ |
|
1071 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == |
|
1072 SCTP_DATA_FIRST_FRAG) { |
|
1073 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n"); |
|
1074 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n"); |
|
1075 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), |
|
1076 0, M_NOWAIT, 1, MT_DATA); |
|
1077 if (oper) { |
|
1078 struct sctp_paramhdr *ph; |
|
1079 uint32_t *ippp; |
|
1080 |
|
1081 SCTP_BUF_LEN(oper) = |
|
1082 sizeof(struct sctp_paramhdr) + |
|
1083 (3 * sizeof(uint32_t)); |
|
1084 ph = mtod(oper, |
|
1085 struct sctp_paramhdr *); |
|
1086 ph->param_type = |
|
1087 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
1088 ph->param_length = |
|
1089 htons(SCTP_BUF_LEN(oper)); |
|
1090 ippp = (uint32_t *) (ph + 1); |
|
1091 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_6); |
|
1092 ippp++; |
|
1093 *ippp = chk->rec.data.TSN_seq; |
|
1094 ippp++; |
|
1095 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); |
|
1096 |
|
1097 } |
|
1098 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_6; |
|
1099 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
1100 *abort_flag = 1; |
|
1101 return; |
|
1102 } |
|
1103 if (chk->rec.data.stream_number != |
|
1104 prev->rec.data.stream_number) { |
|
1105 /* |
|
1106 * Huh, need the correct STR here, |
|
1107 * they must be the same. |
|
1108 */ |
|
1109 SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n", |
|
1110 chk->rec.data.stream_number, |
|
1111 prev->rec.data.stream_number); |
|
1112 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), |
|
1113 0, M_NOWAIT, 1, MT_DATA); |
|
1114 if (oper) { |
|
1115 struct sctp_paramhdr *ph; |
|
1116 uint32_t *ippp; |
|
1117 |
|
1118 SCTP_BUF_LEN(oper) = |
|
1119 sizeof(struct sctp_paramhdr) + |
|
1120 (3 * sizeof(uint32_t)); |
|
1121 ph = mtod(oper, |
|
1122 struct sctp_paramhdr *); |
|
1123 ph->param_type = |
|
1124 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
1125 ph->param_length = |
|
1126 htons(SCTP_BUF_LEN(oper)); |
|
1127 ippp = (uint32_t *) (ph + 1); |
|
1128 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_7); |
|
1129 ippp++; |
|
1130 *ippp = chk->rec.data.TSN_seq; |
|
1131 ippp++; |
|
1132 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); |
|
1133 } |
|
1134 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_7; |
|
1135 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
1136 *abort_flag = 1; |
|
1137 return; |
|
1138 } |
|
1139 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 && |
|
1140 chk->rec.data.stream_seq != |
|
1141 prev->rec.data.stream_seq) { |
|
1142 /* |
|
1143 * Huh, need the correct STR here, |
|
1144 * they must be the same. |
|
1145 */ |
|
1146 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n", |
|
1147 chk->rec.data.stream_seq, |
|
1148 prev->rec.data.stream_seq); |
|
1149 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), |
|
1150 0, M_NOWAIT, 1, MT_DATA); |
|
1151 if (oper) { |
|
1152 struct sctp_paramhdr *ph; |
|
1153 uint32_t *ippp; |
|
1154 |
|
1155 SCTP_BUF_LEN(oper) = |
|
1156 sizeof(struct sctp_paramhdr) + |
|
1157 (3 * sizeof(uint32_t)); |
|
1158 ph = mtod(oper, |
|
1159 struct sctp_paramhdr *); |
|
1160 ph->param_type = |
|
1161 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
1162 ph->param_length = |
|
1163 htons(SCTP_BUF_LEN(oper)); |
|
1164 ippp = (uint32_t *) (ph + 1); |
|
1165 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_8); |
|
1166 ippp++; |
|
1167 *ippp = chk->rec.data.TSN_seq; |
|
1168 ippp++; |
|
1169 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); |
|
1170 } |
|
1171 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_8; |
|
1172 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
1173 *abort_flag = 1; |
|
1174 return; |
|
1175 } |
|
1176 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == |
|
1177 SCTP_DATA_LAST_FRAG) { |
|
1178 /* Insert chk MUST be a FIRST */ |
|
1179 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) != |
|
1180 SCTP_DATA_FIRST_FRAG) { |
|
1181 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n"); |
|
1182 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), |
|
1183 0, M_NOWAIT, 1, MT_DATA); |
|
1184 if (oper) { |
|
1185 struct sctp_paramhdr *ph; |
|
1186 uint32_t *ippp; |
|
1187 |
|
1188 SCTP_BUF_LEN(oper) = |
|
1189 sizeof(struct sctp_paramhdr) + |
|
1190 (3 * sizeof(uint32_t)); |
|
1191 ph = mtod(oper, |
|
1192 struct sctp_paramhdr *); |
|
1193 ph->param_type = |
|
1194 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
1195 ph->param_length = |
|
1196 htons(SCTP_BUF_LEN(oper)); |
|
1197 ippp = (uint32_t *) (ph + 1); |
|
1198 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_9); |
|
1199 ippp++; |
|
1200 *ippp = chk->rec.data.TSN_seq; |
|
1201 ippp++; |
|
1202 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); |
|
1203 |
|
1204 } |
|
1205 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_9; |
|
1206 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
1207 *abort_flag = 1; |
|
1208 return; |
|
1209 } |
|
1210 } |
|
1211 } |
|
1212 } |
|
1213 if (next) { |
|
1214 post_tsn = chk->rec.data.TSN_seq + 1; |
|
1215 if (post_tsn == next->rec.data.TSN_seq) { |
|
1216 /* |
|
1217 * Ok the one I am inserting ahead of is my NEXT |
|
1218 * one. A bit of validation here. |
|
1219 */ |
|
1220 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { |
|
1221 /* Insert chk MUST be a last fragment */ |
|
1222 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) |
|
1223 != SCTP_DATA_LAST_FRAG) { |
|
1224 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n"); |
|
1225 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n"); |
|
1226 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), |
|
1227 0, M_NOWAIT, 1, MT_DATA); |
|
1228 if (oper) { |
|
1229 struct sctp_paramhdr *ph; |
|
1230 uint32_t *ippp; |
|
1231 |
|
1232 SCTP_BUF_LEN(oper) = |
|
1233 sizeof(struct sctp_paramhdr) + |
|
1234 ( 3 * sizeof(uint32_t)); |
|
1235 ph = mtod(oper, |
|
1236 struct sctp_paramhdr *); |
|
1237 ph->param_type = |
|
1238 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
1239 ph->param_length = |
|
1240 htons(SCTP_BUF_LEN(oper)); |
|
1241 ippp = (uint32_t *) (ph + 1); |
|
1242 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_10); |
|
1243 ippp++; |
|
1244 *ippp = chk->rec.data.TSN_seq; |
|
1245 ippp++; |
|
1246 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); |
|
1247 } |
|
1248 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_10; |
|
1249 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
1250 *abort_flag = 1; |
|
1251 return; |
|
1252 } |
|
1253 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == |
|
1254 SCTP_DATA_MIDDLE_FRAG || |
|
1255 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == |
|
1256 SCTP_DATA_LAST_FRAG) { |
|
1257 /* |
|
1258 * Insert chk CAN be MIDDLE or FIRST NOT |
|
1259 * LAST |
|
1260 */ |
|
1261 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) == |
|
1262 SCTP_DATA_LAST_FRAG) { |
|
1263 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n"); |
|
1264 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n"); |
|
1265 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), |
|
1266 0, M_NOWAIT, 1, MT_DATA); |
|
1267 if (oper) { |
|
1268 struct sctp_paramhdr *ph; |
|
1269 uint32_t *ippp; |
|
1270 |
|
1271 SCTP_BUF_LEN(oper) = |
|
1272 sizeof(struct sctp_paramhdr) + |
|
1273 (3 * sizeof(uint32_t)); |
|
1274 ph = mtod(oper, |
|
1275 struct sctp_paramhdr *); |
|
1276 ph->param_type = |
|
1277 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
1278 ph->param_length = |
|
1279 htons(SCTP_BUF_LEN(oper)); |
|
1280 ippp = (uint32_t *) (ph + 1); |
|
1281 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_11); |
|
1282 ippp++; |
|
1283 *ippp = chk->rec.data.TSN_seq; |
|
1284 ippp++; |
|
1285 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); |
|
1286 |
|
1287 } |
|
1288 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_11; |
|
1289 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
1290 *abort_flag = 1; |
|
1291 return; |
|
1292 } |
|
1293 if (chk->rec.data.stream_number != |
|
1294 next->rec.data.stream_number) { |
|
1295 /* |
|
1296 * Huh, need the correct STR here, |
|
1297 * they must be the same. |
|
1298 */ |
|
1299 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n", |
|
1300 chk->rec.data.stream_number, |
|
1301 next->rec.data.stream_number); |
|
1302 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), |
|
1303 0, M_NOWAIT, 1, MT_DATA); |
|
1304 if (oper) { |
|
1305 struct sctp_paramhdr *ph; |
|
1306 uint32_t *ippp; |
|
1307 |
|
1308 SCTP_BUF_LEN(oper) = |
|
1309 sizeof(struct sctp_paramhdr) + |
|
1310 (3 * sizeof(uint32_t)); |
|
1311 ph = mtod(oper, |
|
1312 struct sctp_paramhdr *); |
|
1313 ph->param_type = |
|
1314 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
1315 ph->param_length = |
|
1316 htons(SCTP_BUF_LEN(oper)); |
|
1317 ippp = (uint32_t *) (ph + 1); |
|
1318 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_12); |
|
1319 ippp++; |
|
1320 *ippp = chk->rec.data.TSN_seq; |
|
1321 ippp++; |
|
1322 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); |
|
1323 |
|
1324 } |
|
1325 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_12; |
|
1326 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
1327 *abort_flag = 1; |
|
1328 return; |
|
1329 } |
|
1330 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 && |
|
1331 chk->rec.data.stream_seq != |
|
1332 next->rec.data.stream_seq) { |
|
1333 /* |
|
1334 * Huh, need the correct STR here, |
|
1335 * they must be the same. |
|
1336 */ |
|
1337 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n", |
|
1338 chk->rec.data.stream_seq, |
|
1339 next->rec.data.stream_seq); |
|
1340 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), |
|
1341 0, M_NOWAIT, 1, MT_DATA); |
|
1342 if (oper) { |
|
1343 struct sctp_paramhdr *ph; |
|
1344 uint32_t *ippp; |
|
1345 |
|
1346 SCTP_BUF_LEN(oper) = |
|
1347 sizeof(struct sctp_paramhdr) + |
|
1348 (3 * sizeof(uint32_t)); |
|
1349 ph = mtod(oper, |
|
1350 struct sctp_paramhdr *); |
|
1351 ph->param_type = |
|
1352 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
1353 ph->param_length = |
|
1354 htons(SCTP_BUF_LEN(oper)); |
|
1355 ippp = (uint32_t *) (ph + 1); |
|
1356 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_13); |
|
1357 ippp++; |
|
1358 *ippp = chk->rec.data.TSN_seq; |
|
1359 ippp++; |
|
1360 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq); |
|
1361 } |
|
1362 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_13; |
|
1363 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
1364 *abort_flag = 1; |
|
1365 return; |
|
1366 } |
|
1367 } |
|
1368 } |
|
1369 } |
|
1370 /* Do we need to do some delivery? check */ |
|
1371 sctp_deliver_reasm_check(stcb, asoc); |
|
1372 } |
|
1373 |
|
1374 /* |
|
1375 * This is an unfortunate routine. It checks to make sure an evil guy is not |
|
1376 * stuffing us full of bad packet fragments. A broken peer could also do this |
|
1377 * but this is doubtful. It is too bad I must worry about evil crackers; sigh, |
|
1378 * :< more cycles. |
|
1379 */ |
|
1380 static int |
|
1381 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc, |
|
1382 uint32_t TSN_seq) |
|
1383 { |
|
1384 struct sctp_tmit_chunk *at; |
|
1385 uint32_t tsn_est; |
|
1386 |
|
1387 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) { |
|
1388 if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) { |
|
1389 /* is it one bigger? */ |
|
1390 tsn_est = at->rec.data.TSN_seq + 1; |
|
1391 if (tsn_est == TSN_seq) { |
|
1392 /* yep. It better be a last then */ |
|
1393 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) != |
|
1394 SCTP_DATA_LAST_FRAG) { |
|
1395 /* |
|
1396 * Ok this guy belongs next to a guy |
|
1397 * that is NOT last, it should be a |
|
1398 * middle/last, not a complete |
|
1399 * chunk. |
|
1400 */ |
|
1401 return (1); |
|
1402 } else { |
|
1403 /* |
|
1404 * This guy is ok since it's a LAST |
|
1405 * and the new chunk is a fully |
|
1406 * self-contained one. |
|
1407 */ |
|
1408 return (0); |
|
1409 } |
|
1410 } |
|
1411 } else if (TSN_seq == at->rec.data.TSN_seq) { |
|
1412 /* Software error since I have a dup? */ |
|
1413 return (1); |
|
1414 } else { |
|
1415 /* |
|
1416 * Ok, 'at' is larger than the new chunk, but does it |
|
1417 * need to be right before it? |
|
1418 */ |
|
1419 tsn_est = TSN_seq + 1; |
|
1420 if (tsn_est == at->rec.data.TSN_seq) { |
|
1421 /* Yep, It better be a first */ |
|
1422 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) != |
|
1423 SCTP_DATA_FIRST_FRAG) { |
|
1424 return (1); |
|
1425 } else { |
|
1426 return (0); |
|
1427 } |
|
1428 } |
|
1429 } |
|
1430 } |
|
1431 return (0); |
|
1432 } |
|
1433 |
|
1434 static int |
|
1435 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, |
|
1436 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length, |
|
1437 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag, |
|
1438 int *break_flag, int last_chunk) |
|
1439 { |
|
1440 /* Process a data chunk */ |
|
1441 /* struct sctp_tmit_chunk *chk; */ |
|
1442 struct sctp_tmit_chunk *chk; |
|
1443 uint32_t tsn, gap; |
|
1444 struct mbuf *dmbuf; |
|
1445 int the_len; |
|
1446 int need_reasm_check = 0; |
|
1447 uint16_t strmno, strmseq; |
|
1448 struct mbuf *oper; |
|
1449 struct sctp_queued_to_read *control; |
|
1450 int ordered; |
|
1451 uint32_t protocol_id; |
|
1452 uint8_t chunk_flags; |
|
1453 struct sctp_stream_reset_list *liste; |
|
1454 |
|
1455 chk = NULL; |
|
1456 tsn = ntohl(ch->dp.tsn); |
|
1457 chunk_flags = ch->ch.chunk_flags; |
|
1458 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) { |
|
1459 asoc->send_sack = 1; |
|
1460 } |
|
1461 protocol_id = ch->dp.protocol_id; |
|
1462 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0); |
|
1463 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
|
1464 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS); |
|
1465 } |
|
1466 if (stcb == NULL) { |
|
1467 return (0); |
|
1468 } |
|
1469 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn); |
|
1470 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) { |
|
1471 /* It is a duplicate */ |
|
1472 SCTP_STAT_INCR(sctps_recvdupdata); |
|
1473 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { |
|
1474 /* Record a dup for the next outbound sack */ |
|
1475 asoc->dup_tsns[asoc->numduptsns] = tsn; |
|
1476 asoc->numduptsns++; |
|
1477 } |
|
1478 asoc->send_sack = 1; |
|
1479 return (0); |
|
1480 } |
|
1481 /* Calculate the number of TSN's between the base and this TSN */ |
|
1482 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); |
|
1483 if (gap >= (SCTP_MAPPING_ARRAY << 3)) { |
|
1484 /* Can't hold the bit in the mapping at max array, toss it */ |
|
1485 return (0); |
|
1486 } |
|
1487 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) { |
|
1488 SCTP_TCB_LOCK_ASSERT(stcb); |
|
1489 if (sctp_expand_mapping_array(asoc, gap)) { |
|
1490 /* Can't expand, drop it */ |
|
1491 return (0); |
|
1492 } |
|
1493 } |
|
1494 if (SCTP_TSN_GT(tsn, *high_tsn)) { |
|
1495 *high_tsn = tsn; |
|
1496 } |
|
1497 /* See if we have received this one already */ |
|
1498 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) || |
|
1499 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) { |
|
1500 SCTP_STAT_INCR(sctps_recvdupdata); |
|
1501 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { |
|
1502 /* Record a dup for the next outbound sack */ |
|
1503 asoc->dup_tsns[asoc->numduptsns] = tsn; |
|
1504 asoc->numduptsns++; |
|
1505 } |
|
1506 asoc->send_sack = 1; |
|
1507 return (0); |
|
1508 } |
|
1509 /* |
|
1510 * Check to see about the GONE flag, duplicates would cause a sack |
|
1511 * to be sent up above |
|
1512 */ |
|
1513 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || |
|
1514 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || |
|
1515 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) |
|
1516 ) { |
|
1517 /* |
|
1518 * wait a minute, this guy is gone, there is no longer a |
|
1519 * receiver. Send peer an ABORT! |
|
1520 */ |
|
1521 struct mbuf *op_err; |
|
1522 |
|
1523 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); |
|
1524 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); |
|
1525 *abort_flag = 1; |
|
1526 return (0); |
|
1527 } |
|
1528 /* |
|
1529 * Now before going further we see if there is room. If NOT then we |
|
1530 * MAY let one through only IF this TSN is the one we are waiting |
|
1531 * for on a partial delivery API. |
|
1532 */ |
|
1533 |
|
1534 /* now do the tests */ |
|
1535 if (((asoc->cnt_on_all_streams + |
|
1536 asoc->cnt_on_reasm_queue + |
|
1537 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) || |
|
1538 (((int)asoc->my_rwnd) <= 0)) { |
|
1539 /* |
|
1540 * When we have NO room in the rwnd we check to make sure |
|
1541 * the reader is doing its job... |
|
1542 */ |
|
1543 if (stcb->sctp_socket->so_rcv.sb_cc) { |
|
1544 /* some to read, wake-up */ |
|
1545 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) |
|
1546 struct socket *so; |
|
1547 |
|
1548 so = SCTP_INP_SO(stcb->sctp_ep); |
|
1549 atomic_add_int(&stcb->asoc.refcnt, 1); |
|
1550 SCTP_TCB_UNLOCK(stcb); |
|
1551 SCTP_SOCKET_LOCK(so, 1); |
|
1552 SCTP_TCB_LOCK(stcb); |
|
1553 atomic_subtract_int(&stcb->asoc.refcnt, 1); |
|
1554 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { |
|
1555 /* assoc was freed while we were unlocked */ |
|
1556 SCTP_SOCKET_UNLOCK(so, 1); |
|
1557 return (0); |
|
1558 } |
|
1559 #endif |
|
1560 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); |
|
1561 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) |
|
1562 SCTP_SOCKET_UNLOCK(so, 1); |
|
1563 #endif |
|
1564 } |
|
1565 /* now is it in the mapping array of what we have accepted? */ |
|
1566 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) && |
|
1567 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { |
|
1568 /* Nope not in the valid range dump it */ |
|
1569 sctp_set_rwnd(stcb, asoc); |
|
1570 if ((asoc->cnt_on_all_streams + |
|
1571 asoc->cnt_on_reasm_queue + |
|
1572 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) { |
|
1573 SCTP_STAT_INCR(sctps_datadropchklmt); |
|
1574 } else { |
|
1575 SCTP_STAT_INCR(sctps_datadroprwnd); |
|
1576 } |
|
1577 *break_flag = 1; |
|
1578 return (0); |
|
1579 } |
|
1580 } |
|
1581 strmno = ntohs(ch->dp.stream_id); |
|
1582 if (strmno >= asoc->streamincnt) { |
|
1583 struct sctp_paramhdr *phdr; |
|
1584 struct mbuf *mb; |
|
1585 |
|
1586 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2), |
|
1587 0, M_NOWAIT, 1, MT_DATA); |
|
1588 if (mb != NULL) { |
|
1589 /* add some space up front so prepend will work well */ |
|
1590 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr)); |
|
1591 phdr = mtod(mb, struct sctp_paramhdr *); |
|
1592 /* |
|
1593 * Error causes are just params, and this one has

1594 * two back-to-back phdrs: one with the error type

1595 * and size, the other with the stream id and a reserved field
|
1596 */ |
|
1597 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2); |
|
1598 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM); |
|
1599 phdr->param_length = |
|
1600 htons(sizeof(struct sctp_paramhdr) * 2); |
|
1601 phdr++; |
|
1602 /* We insert the stream in the type field */ |
|
1603 phdr->param_type = ch->dp.stream_id; |
|
1604 /* And set the length to 0 for the rsvd field */ |
|
1605 phdr->param_length = 0; |
|
1606 sctp_queue_op_err(stcb, mb); |
|
1607 } |
|
1608 SCTP_STAT_INCR(sctps_badsid); |
|
1609 SCTP_TCB_LOCK_ASSERT(stcb); |
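/*
 * Even though the chunk is dropped for an invalid stream id, its TSN is
 * still marked as received in the non-renegable map (and the cum-ack is
 * advanced if it is next in sequence) so the peer sees it acknowledged
 * instead of retransmitting it.
 */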
|
1610 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); |
|
1611 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { |
|
1612 asoc->highest_tsn_inside_nr_map = tsn; |
|
1613 } |
|
1614 if (tsn == (asoc->cumulative_tsn + 1)) { |
|
1615 /* Update cum-ack */ |
|
1616 asoc->cumulative_tsn = tsn; |
|
1617 } |
|
1618 return (0); |
|
1619 } |
|
1620 /* |
|
1621 * Before we continue let's validate that we are not being fooled by
|
1622 * an evil attacker. We can only have 4k chunks based on our TSN |
|
1623 * spread allowed by the mapping array 512 * 8 bits, so there is no |
|
1624 * way our stream sequence numbers could have wrapped. We of course |
|
1625 * only validate the FIRST fragment so the bit must be set. |
|
1626 */ |
|
1627 strmseq = ntohs(ch->dp.stream_sequence); |
|
1628 #ifdef SCTP_ASOCLOG_OF_TSNS |
|
1629 SCTP_TCB_LOCK_ASSERT(stcb); |
|
1630 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { |
|
1631 asoc->tsn_in_at = 0; |
|
1632 asoc->tsn_in_wrapped = 1; |
|
1633 } |
|
1634 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; |
|
1635 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno; |
|
1636 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq; |
|
1637 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length; |
|
1638 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags; |
|
1639 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb; |
|
1640 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at; |
|
1641 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1; |
|
1642 asoc->tsn_in_at++; |
|
1643 #endif |
|
1644 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) && |
|
1645 (TAILQ_EMPTY(&asoc->resetHead)) && |
|
1646 (chunk_flags & SCTP_DATA_UNORDERED) == 0 && |
|
1647 SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) { |
|
1648 /* The incoming sseq is behind where we last delivered? */ |
|
1649 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n", |
|
1650 strmseq, asoc->strmin[strmno].last_sequence_delivered); |
|
1651 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), |
|
1652 0, M_NOWAIT, 1, MT_DATA); |
|
1653 if (oper) { |
|
1654 struct sctp_paramhdr *ph; |
|
1655 uint32_t *ippp; |
|
1656 |
|
1657 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + |
|
1658 (3 * sizeof(uint32_t)); |
|
1659 ph = mtod(oper, struct sctp_paramhdr *); |
|
1660 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
1661 ph->param_length = htons(SCTP_BUF_LEN(oper)); |
|
1662 ippp = (uint32_t *) (ph + 1); |
|
1663 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_14); |
|
1664 ippp++; |
|
1665 *ippp = tsn; |
|
1666 ippp++; |
|
1667 *ippp = ((strmno << 16) | strmseq); |
|
1668 |
|
1669 } |
|
1670 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_14; |
|
1671 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
1672 *abort_flag = 1; |
|
1673 return (0); |
|
1674 } |
|
1675 /************************************ |
|
1676 * From here down we may find ch-> invalid |
|
1677 * so it's a good idea NOT to use it.
|
1678 *************************************/ |
|
1679 |
|
1680 the_len = (chk_length - sizeof(struct sctp_data_chunk)); |
|
1681 if (last_chunk == 0) { |
|
1682 dmbuf = SCTP_M_COPYM(*m, |
|
1683 (offset + sizeof(struct sctp_data_chunk)), |
|
1684 the_len, M_NOWAIT); |
|
1685 #ifdef SCTP_MBUF_LOGGING |
|
1686 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { |
|
1687 struct mbuf *mat; |
|
1688 |
|
1689 for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) { |
|
1690 if (SCTP_BUF_IS_EXTENDED(mat)) { |
|
1691 sctp_log_mb(mat, SCTP_MBUF_ICOPY); |
|
1692 } |
|
1693 } |
|
1694 } |
|
1695 #endif |
|
1696 } else { |
|
1697 /* We can steal the last chunk */ |
|
1698 int l_len; |
|
1699 dmbuf = *m; |
|
1700 /* lop off the top part */ |
|
1701 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); |
|
1702 if (SCTP_BUF_NEXT(dmbuf) == NULL) { |
|
1703 l_len = SCTP_BUF_LEN(dmbuf); |
|
1704 } else { |
|
1705 /* need to count up the size; hopefully

1706 * we do not hit this too often :-0
|
1707 */ |
|
1708 struct mbuf *lat; |
|
1709 |
|
1710 l_len = 0; |
|
1711 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) { |
|
1712 l_len += SCTP_BUF_LEN(lat); |
|
1713 } |
|
1714 } |
|
1715 if (l_len > the_len) { |
|
1716 /* Trim the excess bytes off the end too */
|
1717 m_adj(dmbuf, -(l_len - the_len)); |
|
1718 } |
|
1719 } |
|
1720 if (dmbuf == NULL) { |
|
1721 SCTP_STAT_INCR(sctps_nomem); |
|
1722 return (0); |
|
1723 } |
|
1724 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && |
|
1725 asoc->fragmented_delivery_inprogress == 0 && |
|
1726 TAILQ_EMPTY(&asoc->resetHead) && |
|
1727 ((ordered == 0) || |
|
1728 ((uint16_t)(asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq && |
|
1729 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) { |
|
1730 /* Candidate for express delivery */ |
|
1731 /* |
|
1732 * It's not fragmented, no PD-API is up, nothing is in the

1733 * delivery queue, it's un-ordered OR ordered and the next to

1734 * deliver AND nothing else is stuck on the stream queue,

1735 * AND there is room for it in the socket buffer. Let's just

1736 * stuff it up the buffer....
|
1737 */ |
|
1738 |
|
1739 /* It would be nice to avoid this copy if we could :< */ |
|
1740 sctp_alloc_a_readq(stcb, control); |
|
1741 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, |
|
1742 protocol_id, |
|
1743 strmno, strmseq, |
|
1744 chunk_flags, |
|
1745 dmbuf); |
|
1746 if (control == NULL) { |
|
1747 goto failed_express_del; |
|
1748 } |
|
1749 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); |
|
1750 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { |
|
1751 asoc->highest_tsn_inside_nr_map = tsn; |
|
1752 } |
|
1753 sctp_add_to_readq(stcb->sctp_ep, stcb, |
|
1754 control, &stcb->sctp_socket->so_rcv, |
|
1755 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); |
|
1756 |
|
1757 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { |
|
1758 /* for ordered, bump what we delivered */ |
|
1759 asoc->strmin[strmno].last_sequence_delivered++; |
|
1760 } |
|
1761 SCTP_STAT_INCR(sctps_recvexpress); |
|
1762 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
|
1763 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, |
|
1764 SCTP_STR_LOG_FROM_EXPRS_DEL); |
|
1765 } |
|
1766 control = NULL; |
|
1767 |
|
1768 goto finish_express_del; |
|
1769 } |
|
1770 failed_express_del: |
|
1771 /* If we reach here this is a new chunk */ |
|
1772 chk = NULL; |
|
1773 control = NULL; |
|
1774 /* Express for fragmented delivery? */ |
|
1775 if ((asoc->fragmented_delivery_inprogress) && |
|
1776 (stcb->asoc.control_pdapi) && |
|
1777 (asoc->str_of_pdapi == strmno) && |
|
1778 (asoc->ssn_of_pdapi == strmseq) |
|
1779 ) { |
|
1780 control = stcb->asoc.control_pdapi; |
|
1781 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) { |
|
1782 /* Can't be another first? */ |
|
1783 goto failed_pdapi_express_del; |
|
1784 } |
|
1785 if (tsn == (control->sinfo_tsn + 1)) { |
|
1786 /* Yep, we can add it on */ |
|
1787 int end = 0; |
|
1788 |
|
1789 if (chunk_flags & SCTP_DATA_LAST_FRAG) { |
|
1790 end = 1; |
|
1791 } |
|
1792 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end, |
|
1793 tsn, |
|
1794 &stcb->sctp_socket->so_rcv)) { |
|
1795 SCTP_PRINTF("Append fails end:%d\n", end); |
|
1796 goto failed_pdapi_express_del; |
|
1797 } |
|
1798 |
|
1799 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); |
|
1800 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { |
|
1801 asoc->highest_tsn_inside_nr_map = tsn; |
|
1802 } |
|
1803 SCTP_STAT_INCR(sctps_recvexpressm); |
|
1804 asoc->tsn_last_delivered = tsn; |
|
1805 asoc->fragment_flags = chunk_flags; |
|
1806 asoc->tsn_of_pdapi_last_delivered = tsn; |
|
1807 asoc->last_flags_delivered = chunk_flags; |
|
1808 asoc->last_strm_seq_delivered = strmseq; |
|
1809 asoc->last_strm_no_delivered = strmno; |
|
1810 if (end) { |
|
1811 /* clean up the flags and such */ |
|
1812 asoc->fragmented_delivery_inprogress = 0; |
|
1813 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) { |
|
1814 asoc->strmin[strmno].last_sequence_delivered++; |
|
1815 } |
|
1816 stcb->asoc.control_pdapi = NULL; |
|
1817 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) { |
|
1818 /* There could be another message ready */ |
|
1819 need_reasm_check = 1; |
|
1820 } |
|
1821 } |
|
1822 control = NULL; |
|
1823 goto finish_express_del; |
|
1824 } |
|
1825 } |
|
1826 failed_pdapi_express_del: |
|
1827 control = NULL; |
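/*
 * Record the TSN as received: when draining/renege is disabled
 * (sctp_do_drain == 0) it goes straight into the non-renegable map,
 * otherwise into the regular map so it could still be renegged later.
 */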
|
1828 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { |
|
1829 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); |
|
1830 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { |
|
1831 asoc->highest_tsn_inside_nr_map = tsn; |
|
1832 } |
|
1833 } else { |
|
1834 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); |
|
1835 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { |
|
1836 asoc->highest_tsn_inside_map = tsn; |
|
1837 } |
|
1838 } |
|
1839 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { |
|
1840 sctp_alloc_a_chunk(stcb, chk); |
|
1841 if (chk == NULL) { |
|
1842 /* No memory so we drop the chunk */ |
|
1843 SCTP_STAT_INCR(sctps_nomem); |
|
1844 if (last_chunk == 0) { |
|
1845 /* we copied it, free the copy */ |
|
1846 sctp_m_freem(dmbuf); |
|
1847 } |
|
1848 return (0); |
|
1849 } |
|
1850 chk->rec.data.TSN_seq = tsn; |
|
1851 chk->no_fr_allowed = 0; |
|
1852 chk->rec.data.stream_seq = strmseq; |
|
1853 chk->rec.data.stream_number = strmno; |
|
1854 chk->rec.data.payloadtype = protocol_id; |
|
1855 chk->rec.data.context = stcb->asoc.context; |
|
1856 chk->rec.data.doing_fast_retransmit = 0; |
|
1857 chk->rec.data.rcv_flags = chunk_flags; |
|
1858 chk->asoc = asoc; |
|
1859 chk->send_size = the_len; |
|
1860 chk->whoTo = net; |
|
1861 atomic_add_int(&net->ref_count, 1); |
|
1862 chk->data = dmbuf; |
|
1863 } else { |
|
1864 sctp_alloc_a_readq(stcb, control); |
|
1865 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, |
|
1866 protocol_id, |
|
1867 strmno, strmseq, |
|
1868 chunk_flags, |
|
1869 dmbuf); |
|
1870 if (control == NULL) { |
|
1871 /* No memory so we drop the chunk */ |
|
1872 SCTP_STAT_INCR(sctps_nomem); |
|
1873 if (last_chunk == 0) { |
|
1874 /* we copied it, free the copy */ |
|
1875 sctp_m_freem(dmbuf); |
|
1876 } |
|
1877 return (0); |
|
1878 } |
|
1879 control->length = the_len; |
|
1880 } |
|
1881 |
|
1882 /* Mark it as received */ |
|
1883 /* Now queue it where it belongs */ |
|
1884 if (control != NULL) { |
|
1885 /* First a sanity check */ |
|
1886 if (asoc->fragmented_delivery_inprogress) { |
|
1887 /* |
|
1888 * Ok, we have a fragmented delivery in progress if |
|
1889 * this chunk is next to deliver OR belongs in our |
|
1890 * view to the reassembly, the peer is evil or |
|
1891 * broken. |
|
1892 */ |
|
1893 uint32_t estimate_tsn; |
|
1894 |
|
1895 estimate_tsn = asoc->tsn_last_delivered + 1; |
|
1896 if (TAILQ_EMPTY(&asoc->reasmqueue) && |
|
1897 (estimate_tsn == control->sinfo_tsn)) { |
|
1898 /* Evil/Broken peer */
|
1899 sctp_m_freem(control->data); |
|
1900 control->data = NULL; |
|
1901 if (control->whoFrom) { |
|
1902 sctp_free_remote_addr(control->whoFrom); |
|
1903 control->whoFrom = NULL; |
|
1904 } |
|
1905 sctp_free_a_readq(stcb, control); |
|
1906 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), |
|
1907 0, M_NOWAIT, 1, MT_DATA); |
|
1908 if (oper) { |
|
1909 struct sctp_paramhdr *ph; |
|
1910 uint32_t *ippp; |
|
1911 |
|
1912 SCTP_BUF_LEN(oper) = |
|
1913 sizeof(struct sctp_paramhdr) + |
|
1914 (3 * sizeof(uint32_t)); |
|
1915 ph = mtod(oper, struct sctp_paramhdr *); |
|
1916 ph->param_type = |
|
1917 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
1918 ph->param_length = htons(SCTP_BUF_LEN(oper)); |
|
1919 ippp = (uint32_t *) (ph + 1); |
|
1920 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_15); |
|
1921 ippp++; |
|
1922 *ippp = tsn; |
|
1923 ippp++; |
|
1924 *ippp = ((strmno << 16) | strmseq); |
|
1925 } |
|
1926 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_15; |
|
1927 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
1928 *abort_flag = 1; |
|
1929 return (0); |
|
1930 } else { |
|
1931 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { |
|
1932 sctp_m_freem(control->data); |
|
1933 control->data = NULL; |
|
1934 if (control->whoFrom) { |
|
1935 sctp_free_remote_addr(control->whoFrom); |
|
1936 control->whoFrom = NULL; |
|
1937 } |
|
1938 sctp_free_a_readq(stcb, control); |
|
1939 |
|
1940 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), |
|
1941 0, M_NOWAIT, 1, MT_DATA); |
|
1942 if (oper) { |
|
1943 struct sctp_paramhdr *ph; |
|
1944 uint32_t *ippp; |
|
1945 |
|
1946 SCTP_BUF_LEN(oper) = |
|
1947 sizeof(struct sctp_paramhdr) + |
|
1948 ( 3 * sizeof(uint32_t)); |
|
1949 ph = mtod(oper, |
|
1950 struct sctp_paramhdr *); |
|
1951 ph->param_type = |
|
1952 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
1953 ph->param_length = |
|
1954 htons(SCTP_BUF_LEN(oper)); |
|
1955 ippp = (uint32_t *) (ph + 1); |
|
1956 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_16); |
|
1957 ippp++; |
|
1958 *ippp = tsn; |
|
1959 ippp++; |
|
1960 *ippp = ((strmno << 16) | strmseq); |
|
1961 } |
|
1962 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_16; |
|
1963 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
1964 *abort_flag = 1; |
|
1965 return (0); |
|
1966 } |
|
1967 } |
|
1968 } else { |
|
1969 /* No PDAPI running */ |
|
1970 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { |
|
1971 /* |
|
1972 * Reassembly queue is NOT empty, so validate

1973 * that this tsn does not need to be in the

1974 * reassembly queue. If it does then our peer
|
1975 * is broken or evil. |
|
1976 */ |
|
1977 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) { |
|
1978 sctp_m_freem(control->data); |
|
1979 control->data = NULL; |
|
1980 if (control->whoFrom) { |
|
1981 sctp_free_remote_addr(control->whoFrom); |
|
1982 control->whoFrom = NULL; |
|
1983 } |
|
1984 sctp_free_a_readq(stcb, control); |
|
1985 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), |
|
1986 0, M_NOWAIT, 1, MT_DATA); |
|
1987 if (oper) { |
|
1988 struct sctp_paramhdr *ph; |
|
1989 uint32_t *ippp; |
|
1990 |
|
1991 SCTP_BUF_LEN(oper) = |
|
1992 sizeof(struct sctp_paramhdr) + |
|
1993 (3 * sizeof(uint32_t)); |
|
1994 ph = mtod(oper, |
|
1995 struct sctp_paramhdr *); |
|
1996 ph->param_type = |
|
1997 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
1998 ph->param_length = |
|
1999 htons(SCTP_BUF_LEN(oper)); |
|
2000 ippp = (uint32_t *) (ph + 1); |
|
2001 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_17); |
|
2002 ippp++; |
|
2003 *ippp = tsn; |
|
2004 ippp++; |
|
2005 *ippp = ((strmno << 16) | strmseq); |
|
2006 } |
|
2007 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_17; |
|
2008 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
2009 *abort_flag = 1; |
|
2010 return (0); |
|
2011 } |
|
2012 } |
|
2013 } |
|
2014 /* ok, if we reach here we have passed the sanity checks */ |
|
2015 if (chunk_flags & SCTP_DATA_UNORDERED) { |
|
2016 /* queue directly into socket buffer */ |
|
2017 sctp_mark_non_revokable(asoc, control->sinfo_tsn); |
|
2018 sctp_add_to_readq(stcb->sctp_ep, stcb, |
|
2019 control, |
|
2020 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); |
|
2021 } else { |
|
2022 /* |
|
2023 * Special check for when streams are resetting. We |
|
2024 * could be smarter about this and check the
|
2025 * actual stream to see if it is not being reset.. |
|
2026 * that way we would not create a HOLB when amongst |
|
2027 * streams being reset and those not being reset. |
|
2028 * |
|
2029 * We take complete messages that have a stream reset |
|
2030 * intervening (aka the TSN is after where our |
|
2031 * cum-ack needs to be) off and put them on a |
|
2032 * pending_reply_queue. The reassembly ones we do |
|
2033 * not have to worry about since they are all sorted |
|
2034 * and processed in TSN order. It is only the
|
2035 * singletons I must worry about. |
|
2036 */ |
|
2037 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && |
|
2038 SCTP_TSN_GT(tsn, liste->tsn)) { |
|
2039 /* |
|
2040 * yep, it's past where we need to reset... go
|
2041 * ahead and queue it. |
|
2042 */ |
|
2043 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { |
|
2044 /* first one on */ |
|
2045 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); |
|
2046 } else { |
|
2047 struct sctp_queued_to_read *ctlOn, *nctlOn; |
|
2048 unsigned char inserted = 0; |
|
2049 |
|
2050 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) { |
|
2051 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) { |
|
2052 continue; |
|
2053 } else { |
|
2054 /* found it */ |
|
2055 TAILQ_INSERT_BEFORE(ctlOn, control, next); |
|
2056 inserted = 1; |
|
2057 break; |
|
2058 } |
|
2059 } |
|
2060 if (inserted == 0) { |
|
2061 /* |
|
2062 * not inserted before any

2063 * existing entry, so append

2064 * it at the end of the queue.
|
2065 */ |
|
2066 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); |
|
2067 } |
|
2068 } |
|
2069 } else { |
|
2070 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag); |
|
2071 if (*abort_flag) { |
|
2072 return (0); |
|
2073 } |
|
2074 } |
|
2075 } |
|
2076 } else { |
|
2077 /* Into the re-assembly queue */ |
|
2078 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag); |
|
2079 if (*abort_flag) { |
|
2080 /* |
|
2081 * the assoc is now gone and chk was put onto the |
|
2082 * reasm queue, which has all been freed. |
|
2083 */ |
|
2084 *m = NULL; |
|
2085 return (0); |
|
2086 } |
|
2087 } |
|
2088 finish_express_del: |
|
2089 if (tsn == (asoc->cumulative_tsn + 1)) { |
|
2090 /* Update cum-ack */ |
|
2091 asoc->cumulative_tsn = tsn; |
|
2092 } |
|
2093 if (last_chunk) { |
|
2094 *m = NULL; |
|
2095 } |
|
2096 if (ordered) { |
|
2097 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); |
|
2098 } else { |
|
2099 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); |
|
2100 } |
|
2101 SCTP_STAT_INCR(sctps_recvdata); |
|
2102 /* Set it present please */ |
|
2103 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { |
|
2104 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN); |
|
2105 } |
|
2106 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
|
2107 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, |
|
2108 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); |
|
2109 } |
|
2110 /* check the special flag for stream resets */ |
|
2111 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && |
|
2112 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { |
|
2113 /* |
|
2114 * we have finished working through the backlogged TSNs, now it is

2115 * time to reset streams. 1: call reset function. 2: free

2116 * pending_reply space. 3: distribute any chunks in the
|
2117 * pending_reply_queue. |
|
2118 */ |
|
2119 struct sctp_queued_to_read *ctl, *nctl; |
|
2120 |
|
2121 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); |
|
2122 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); |
|
2123 SCTP_FREE(liste, SCTP_M_STRESET); |
|
2124 /*sa_ignore FREED_MEMORY*/ |
|
2125 liste = TAILQ_FIRST(&asoc->resetHead); |
|
2126 if (TAILQ_EMPTY(&asoc->resetHead)) { |
|
2127 /* All can be removed */ |
|
2128 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) { |
|
2129 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); |
|
2130 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); |
|
2131 if (*abort_flag) { |
|
2132 return (0); |
|
2133 } |
|
2134 } |
|
2135 } else { |
|
2136 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) { |
|
2137 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) { |
|
2138 break; |
|
2139 } |
|
2140 /* |
|
2141 * if ctl->sinfo_tsn is <= liste->tsn we can |
|
2142 * process it, which is the negation of
|
2143 * ctl->sinfo_tsn > liste->tsn |
|
2144 */ |
|
2145 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next); |
|
2146 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag); |
|
2147 if (*abort_flag) { |
|
2148 return (0); |
|
2149 } |
|
2150 } |
|
2151 } |
|
2152 /* |
|
2153 * Now service re-assembly to pick up anything that has been |
|
2154 * held on the reassembly queue.
|
2155 */ |
|
2156 sctp_deliver_reasm_check(stcb, asoc); |
|
2157 need_reasm_check = 0; |
|
2158 } |
|
2159 |
|
2160 if (need_reasm_check) { |
|
2161 /* Another one waits ? */ |
|
2162 sctp_deliver_reasm_check(stcb, asoc); |
|
2163 } |
|
2164 return (1); |
|
2165 } |
|
2166 |
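/*
 * Lookup helper used when sliding the mapping arrays: for each possible
 * byte value, the table holds the number of consecutive 1 bits starting
 * at the least significant bit (e.g. 0x00 -> 0, 0x01 -> 1, 0x03 -> 2,
 * 0xff -> 8). That count is how many TSNs within a partially filled byte
 * are contiguously present, so the cum-ack point can be advanced past them.
 */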
|
2167 int8_t sctp_map_lookup_tab[256] = { |
|
2168 0, 1, 0, 2, 0, 1, 0, 3, |
|
2169 0, 1, 0, 2, 0, 1, 0, 4, |
|
2170 0, 1, 0, 2, 0, 1, 0, 3, |
|
2171 0, 1, 0, 2, 0, 1, 0, 5, |
|
2172 0, 1, 0, 2, 0, 1, 0, 3, |
|
2173 0, 1, 0, 2, 0, 1, 0, 4, |
|
2174 0, 1, 0, 2, 0, 1, 0, 3, |
|
2175 0, 1, 0, 2, 0, 1, 0, 6, |
|
2176 0, 1, 0, 2, 0, 1, 0, 3, |
|
2177 0, 1, 0, 2, 0, 1, 0, 4, |
|
2178 0, 1, 0, 2, 0, 1, 0, 3, |
|
2179 0, 1, 0, 2, 0, 1, 0, 5, |
|
2180 0, 1, 0, 2, 0, 1, 0, 3, |
|
2181 0, 1, 0, 2, 0, 1, 0, 4, |
|
2182 0, 1, 0, 2, 0, 1, 0, 3, |
|
2183 0, 1, 0, 2, 0, 1, 0, 7, |
|
2184 0, 1, 0, 2, 0, 1, 0, 3, |
|
2185 0, 1, 0, 2, 0, 1, 0, 4, |
|
2186 0, 1, 0, 2, 0, 1, 0, 3, |
|
2187 0, 1, 0, 2, 0, 1, 0, 5, |
|
2188 0, 1, 0, 2, 0, 1, 0, 3, |
|
2189 0, 1, 0, 2, 0, 1, 0, 4, |
|
2190 0, 1, 0, 2, 0, 1, 0, 3, |
|
2191 0, 1, 0, 2, 0, 1, 0, 6, |
|
2192 0, 1, 0, 2, 0, 1, 0, 3, |
|
2193 0, 1, 0, 2, 0, 1, 0, 4, |
|
2194 0, 1, 0, 2, 0, 1, 0, 3, |
|
2195 0, 1, 0, 2, 0, 1, 0, 5, |
|
2196 0, 1, 0, 2, 0, 1, 0, 3, |
|
2197 0, 1, 0, 2, 0, 1, 0, 4, |
|
2198 0, 1, 0, 2, 0, 1, 0, 3, |
|
2199 0, 1, 0, 2, 0, 1, 0, 8 |
|
2200 }; |
|
2201 |
|
2202 |
|
2203 void |
|
2204 sctp_slide_mapping_arrays(struct sctp_tcb *stcb) |
|
2205 { |
|
2206 /* |
|
2207 * Now we also need to check the mapping array in a couple of ways. |
|
2208 * 1) Did we move the cum-ack point? |
|
2209 * |
|
2210 * When you first glance at this you might think |
|
2211 * that all entries that make up the position

2212 * of the cum-ack would be in the nr-mapping array

2213 * only.. i.e. things up to the cum-ack are always

2214 * deliverable. That's true with one exception: when

2215 * it's a fragmented message we may not deliver the data
|
2216 * until some threshold (or all of it) is in place. So |
|
2217 * we must OR the nr_mapping_array and mapping_array to |
|
2218 * get a true picture of the cum-ack. |
|
2219 */ |
|
2220 struct sctp_association *asoc; |
|
2221 int at; |
|
2222 uint8_t val; |
|
2223 int slide_from, slide_end, lgap, distance; |
|
2224 uint32_t old_cumack, old_base, old_highest, highest_tsn; |
|
2225 |
|
2226 asoc = &stcb->asoc; |
|
2227 |
|
2228 old_cumack = asoc->cumulative_tsn; |
|
2229 old_base = asoc->mapping_array_base_tsn; |
|
2230 old_highest = asoc->highest_tsn_inside_map; |
|
2231 /* |
|
2232 * We could probably improve this a small bit by calculating the |
|
2233 * offset of the current cum-ack as the starting point. |
|
2234 */ |
|
2235 at = 0; |
|
2236 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { |
|
2237 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from]; |
|
2238 if (val == 0xff) { |
|
2239 at += 8; |
|
2240 } else { |
|
2241 /* there is a 0 bit */ |
|
2242 at += sctp_map_lookup_tab[val]; |
|
2243 break; |
|
2244 } |
|
2245 } |
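/*
 * 'at' counts the TSNs received contiguously starting at
 * mapping_array_base_tsn (taking the OR of both maps), so the last of
 * those becomes the new cumulative TSN.
 */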
|
2246 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1); |
|
2247 |
|
2248 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) && |
|
2249 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) { |
|
2250 #ifdef INVARIANTS |
|
2251 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", |
|
2252 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); |
|
2253 #else |
|
2254 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", |
|
2255 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); |
|
2256 sctp_print_mapping_array(asoc); |
|
2257 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
|
2258 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); |
|
2259 } |
|
2260 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; |
|
2261 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; |
|
2262 #endif |
|
2263 } |
|
2264 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { |
|
2265 highest_tsn = asoc->highest_tsn_inside_nr_map; |
|
2266 } else { |
|
2267 highest_tsn = asoc->highest_tsn_inside_map; |
|
2268 } |
|
2269 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) { |
|
2270 /* The complete array was completed by a single FR */ |
|
2271 /* highest becomes the cum-ack */ |
|
2272 int clr; |
|
2273 #ifdef INVARIANTS |
|
2274 unsigned int i; |
|
2275 #endif |
|
2276 |
|
2277 /* clear the array */ |
|
2278 clr = ((at+7) >> 3); |
|
2279 if (clr > asoc->mapping_array_size) { |
|
2280 clr = asoc->mapping_array_size; |
|
2281 } |
|
2282 memset(asoc->mapping_array, 0, clr); |
|
2283 memset(asoc->nr_mapping_array, 0, clr); |
|
2284 #ifdef INVARIANTS |
|
2285 for (i = 0; i < asoc->mapping_array_size; i++) { |
|
2286 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) { |
|
2287 SCTP_PRINTF("Error Mapping array's not clean at clear\n"); |
|
2288 sctp_print_mapping_array(asoc); |
|
2289 } |
|
2290 } |
|
2291 #endif |
|
2292 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; |
|
2293 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn; |
|
2294 } else if (at >= 8) { |
|
2295 /* we can slide the mapping array down */ |
|
2296 /* slide_from holds where we hit the first NON 0xff byte */ |
|
2297 |
|
2298 /* |
|
2299 * now calculate the ceiling of the move using our highest |
|
2300 * TSN value |
|
2301 */ |
|
2302 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn); |
|
2303 slide_end = (lgap >> 3); |
|
2304 if (slide_end < slide_from) { |
|
2305 sctp_print_mapping_array(asoc); |
|
2306 #ifdef INVARIANTS |
|
2307 panic("impossible slide"); |
|
2308 #else |
|
2309 SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n", |
|
2310 lgap, slide_end, slide_from, at); |
|
2311 return; |
|
2312 #endif |
|
2313 } |
|
2314 if (slide_end > asoc->mapping_array_size) { |
|
2315 #ifdef INVARIANTS |
|
2316 panic("would overrun buffer"); |
|
2317 #else |
|
2318 SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n", |
|
2319 asoc->mapping_array_size, slide_end); |
|
2320 slide_end = asoc->mapping_array_size; |
|
2321 #endif |
|
2322 } |
|
2323 distance = (slide_end - slide_from) + 1; |
|
2324 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
|
2325 sctp_log_map(old_base, old_cumack, old_highest, |
|
2326 SCTP_MAP_PREPARE_SLIDE); |
|
2327 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end, |
|
2328 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM); |
|
2329 } |
|
2330 if (distance + slide_from > asoc->mapping_array_size || |
|
2331 distance < 0) { |
|
2332 /* |
|
2333 * Here we do NOT slide forward the array so that |
|
2334 * hopefully when more data comes in to fill it up |
|
2335 * we will be able to slide it forward. Really I |
|
2336 * don't think this should happen :-0 |
|
2337 */ |
|
2338 |
|
2339 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
|
2340 sctp_log_map((uint32_t) distance, (uint32_t) slide_from, |
|
2341 (uint32_t) asoc->mapping_array_size, |
|
2342 SCTP_MAP_SLIDE_NONE); |
|
2343 } |
|
2344 } else { |
|
2345 int ii; |
|
2346 |
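/*
 * Slide both maps down by 'slide_from' bytes: copy the still-needed
 * bytes to the front, zero out the tail, and advance the array base
 * TSN by the corresponding number of TSNs (slide_from * 8).
 */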
|
2347 for (ii = 0; ii < distance; ii++) { |
|
2348 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii]; |
|
2349 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii]; |
|
2350 |
|
2351 } |
|
2352 for (ii = distance; ii < asoc->mapping_array_size; ii++) { |
|
2353 asoc->mapping_array[ii] = 0; |
|
2354 asoc->nr_mapping_array[ii] = 0; |
|
2355 } |
|
2356 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) { |
|
2357 asoc->highest_tsn_inside_map += (slide_from << 3); |
|
2358 } |
|
2359 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) { |
|
2360 asoc->highest_tsn_inside_nr_map += (slide_from << 3); |
|
2361 } |
|
2362 asoc->mapping_array_base_tsn += (slide_from << 3); |
|
2363 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
|
2364 sctp_log_map(asoc->mapping_array_base_tsn, |
|
2365 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, |
|
2366 SCTP_MAP_SLIDE_RESULT); |
|
2367 } |
|
2368 } |
|
2369 } |
|
2370 } |
|
2371 |
|
2372 void |
|
2373 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap) |
|
2374 { |
|
2375 struct sctp_association *asoc; |
|
2376 uint32_t highest_tsn; |
|
2377 |
|
2378 asoc = &stcb->asoc; |
|
2379 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { |
|
2380 highest_tsn = asoc->highest_tsn_inside_nr_map; |
|
2381 } else { |
|
2382 highest_tsn = asoc->highest_tsn_inside_map; |
|
2383 } |
|
2384 |
|
2385 /* |
|
2386 * Now we need to see if we need to queue a sack or just start the |
|
2387 * timer (if allowed). |
|
2388 */ |
|
2389 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { |
|
2390 /* |
|
2391 * Ok special case, in SHUTDOWN-SENT case. Here we

2392 * make sure the SACK timer is off and instead send a
|
2393 * SHUTDOWN and a SACK |
|
2394 */ |
|
2395 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { |
|
2396 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, |
|
2397 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA+SCTP_LOC_18); |
|
2398 } |
|
2399 sctp_send_shutdown(stcb, |
|
2400 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination)); |
|
2401 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); |
|
2402 } else { |
|
2403 int is_a_gap; |
|
2404 |
|
2405 /* is there a gap now ? */ |
|
2406 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); |
|
2407 |
|
2408 /* |
|
2409 * CMT DAC algorithm: increase number of packets |
|
2410 * received since last ack |
|
2411 */ |
|
2412 stcb->asoc.cmt_dac_pkts_rcvd++; |
|
2413 |
|
2414 if ((stcb->asoc.send_sack == 1) || /* We need to send a SACK */ |
|
2415 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no |
|
2416 * longer is one */ |
|
2417 (stcb->asoc.numduptsns) || /* we have dup's */ |
|
2418 (is_a_gap) || /* is still a gap */ |
|
2419 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ |
|
2420 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ |
|
2421 ) { |
|
2422 |
|
2423 if ((stcb->asoc.sctp_cmt_on_off > 0) && |
|
2424 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) && |
|
2425 (stcb->asoc.send_sack == 0) && |
|
2426 (stcb->asoc.numduptsns == 0) && |
|
2427 (stcb->asoc.delayed_ack) && |
|
2428 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { |
|
2429 |
|
2430 /* |
|
2431 * CMT DAC algorithm: With CMT, |
|
2432 * delay acks even in the face of |
|
2433 *

2434 * reordering. Therefore, acks

2435 * that do not have to be sent

2436 * because of the above reasons

2437 * will be delayed. That is, acks
|
2438 * that would have been sent due to |
|
2439 * gap reports will be delayed with |
|
2440 * DAC. Start the delayed ack timer. |
|
2441 */ |
|
2442 sctp_timer_start(SCTP_TIMER_TYPE_RECV, |
|
2443 stcb->sctp_ep, stcb, NULL); |
|
2444 } else { |
|
2445 /* |
|
2446 * Ok we must build a SACK since either the

2447 * timer is already pending, we got our

2448 * first packet, OR there are gaps or
|
2449 * duplicates. |
|
2450 */ |
|
2451 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); |
|
2452 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); |
|
2453 } |
|
2454 } else { |
|
2455 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { |
|
2456 sctp_timer_start(SCTP_TIMER_TYPE_RECV, |
|
2457 stcb->sctp_ep, stcb, NULL); |
|
2458 } |
|
2459 } |
|
2460 } |
|
2461 } |
|
2462 |
|
2463 void |
|
2464 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc) |
|
2465 { |
|
2466 struct sctp_tmit_chunk *chk; |
|
2467 uint32_t tsize, pd_point; |
|
2468 uint16_t nxt_todel; |
|
2469 |
|
2470 if (asoc->fragmented_delivery_inprogress) { |
|
2471 sctp_service_reassembly(stcb, asoc); |
|
2472 } |
|
2473 /* Can we proceed further, i.e. the PD-API is complete */ |
|
2474 if (asoc->fragmented_delivery_inprogress) { |
|
2475 /* no */ |
|
2476 return; |
|
2477 } |
|
2478 /* |
|
2479 * Now is there some other chunk I can deliver from the reassembly |
|
2480 * queue. |
|
2481 */ |
|
2482 doit_again: |
|
2483 chk = TAILQ_FIRST(&asoc->reasmqueue); |
|
2484 if (chk == NULL) { |
|
2485 asoc->size_on_reasm_queue = 0; |
|
2486 asoc->cnt_on_reasm_queue = 0; |
|
2487 return; |
|
2488 } |
|
2489 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1; |
|
2490 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) && |
|
2491 ((nxt_todel == chk->rec.data.stream_seq) || |
|
2492 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) { |
|
2493 /* |
|
2494 * Yep the first one is here. We set up to start reception

2495 * by backing down the TSN just in case we can't deliver.
|
2496 */ |
|
2497 |
|
2498 /* |
|
2499 * Before we start though, either all of the message should

2500 * be here or enough of it to reach the partial delivery

2501 * point, so that something can actually be delivered.
|
2502 */ |
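/*
 * The partial delivery point is the smaller of the endpoint's
 * configured partial_delivery_point and a fraction of the socket
 * receive buffer limit (SB limit >> SCTP_PARTIAL_DELIVERY_SHIFT).
 */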
|
2503 if (stcb->sctp_socket) { |
|
2504 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT, |
|
2505 stcb->sctp_ep->partial_delivery_point); |
|
2506 } else { |
|
2507 pd_point = stcb->sctp_ep->partial_delivery_point; |
|
2508 } |
|
2509 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) { |
|
2510 asoc->fragmented_delivery_inprogress = 1; |
|
2511 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1; |
|
2512 asoc->str_of_pdapi = chk->rec.data.stream_number; |
|
2513 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; |
|
2514 asoc->pdapi_ppid = chk->rec.data.payloadtype; |
|
2515 asoc->fragment_flags = chk->rec.data.rcv_flags; |
|
2516 sctp_service_reassembly(stcb, asoc); |
|
2517 if (asoc->fragmented_delivery_inprogress == 0) { |
|
2518 goto doit_again; |
|
2519 } |
|
2520 } |
|
2521 } |
|
2522 } |
|
2523 |
|
2524 int |
|
2525 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, |
|
2526 struct sockaddr *src, struct sockaddr *dst, |
|
2527 struct sctphdr *sh, struct sctp_inpcb *inp, |
|
2528 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t *high_tsn, |
|
2529 #if defined(__FreeBSD__) |
|
2530 uint8_t use_mflowid, uint32_t mflowid, |
|
2531 #endif |
|
2532 uint32_t vrf_id, uint16_t port) |
|
2533 { |
|
2534 struct sctp_data_chunk *ch, chunk_buf; |
|
2535 struct sctp_association *asoc; |
|
2536 int num_chunks = 0; /* number of control chunks processed */ |
|
2537 int stop_proc = 0; |
|
2538 int chk_length, break_flag, last_chunk; |
|
2539 int abort_flag = 0, was_a_gap; |
|
2540 struct mbuf *m; |
|
2541 uint32_t highest_tsn; |
|
2542 |
|
2543 /* set the rwnd */ |
|
2544 sctp_set_rwnd(stcb, &stcb->asoc); |
|
2545 |
|
2546 m = *mm; |
|
2547 SCTP_TCB_LOCK_ASSERT(stcb); |
|
2548 asoc = &stcb->asoc; |
|
2549 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { |
|
2550 highest_tsn = asoc->highest_tsn_inside_nr_map; |
|
2551 } else { |
|
2552 highest_tsn = asoc->highest_tsn_inside_map; |
|
2553 } |
|
2554 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); |
|
2555 /* |
|
2556 * setup where we got the last DATA packet from for any SACK that |
|
2557 * may need to go out. Don't bump the net. This is done ONLY when a |
|
2558 * chunk is assigned. |
|
2559 */ |
|
2560 asoc->last_data_chunk_from = net; |
|
2561 |
|
2562 #ifndef __Panda__ |
|
2563 /*- |
|
2564 * Now before we proceed we must figure out if this is a wasted |
|
2565 * cluster... i.e. it is a small packet sent in and yet the driver |
|
2566 * underneath allocated a full cluster for it. If so we must copy it |
|
2567 * to a smaller mbuf and free up the cluster mbuf. This will help |
|
2568 * with cluster starvation. Note for __Panda__ we don't do this |
|
2569 * since it has clusters all the way down to 64 bytes. |
|
2570 */ |
|
2571 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { |
|
2572 /* we only handle mbufs that are singletons.. not chains */ |
|
2573 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA); |
|
2574 if (m) { |
|
2575 /* ok lets see if we can copy the data up */ |
|
2576 caddr_t *from, *to; |
|
2577 /* get the pointers and copy */ |
|
2578 to = mtod(m, caddr_t *); |
|
2579 from = mtod((*mm), caddr_t *); |
|
2580 memcpy(to, from, SCTP_BUF_LEN((*mm))); |
|
2581 /* copy the length and free up the old */ |
|
2582 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm)); |
|
2583 sctp_m_freem(*mm); |
|
2584 /* success, back copy */
|
2585 *mm = m; |
|
2586 } else { |
|
2587 /* We are in trouble in the mbuf world .. yikes */ |
|
2588 m = *mm; |
|
2589 } |
|
2590 } |
|
2591 #endif |
|
2592 /* get pointer to the first chunk header */ |
|
2593 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, |
|
2594 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); |
|
2595 if (ch == NULL) { |
|
2596 return (1); |
|
2597 } |
|
2598 /* |
|
2599 * process all DATA chunks... |
|
2600 */ |
|
2601 *high_tsn = asoc->cumulative_tsn; |
|
2602 break_flag = 0; |
|
2603 asoc->data_pkts_seen++; |
|
2604 while (stop_proc == 0) { |
|
2605 /* validate chunk length */ |
|
2606 chk_length = ntohs(ch->ch.chunk_length); |
|
2607 if (length - *offset < chk_length) { |
|
2608 /* all done, mutilated chunk */
|
2609 stop_proc = 1; |
|
2610 continue; |
|
2611 } |
|
2612 if (ch->ch.chunk_type == SCTP_DATA) { |
|
2613 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) { |
|
2614 /* |
|
2615 * Need to send an abort since we had an

2616 * invalid data chunk.
|
2617 */ |
|
2618 struct mbuf *op_err; |
|
2619 |
|
2620 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)), |
|
2621 0, M_NOWAIT, 1, MT_DATA); |
|
2622 |
|
2623 if (op_err) { |
|
2624 struct sctp_paramhdr *ph; |
|
2625 uint32_t *ippp; |
|
2626 |
|
2627 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) + |
|
2628 (2 * sizeof(uint32_t)); |
|
2629 ph = mtod(op_err, struct sctp_paramhdr *); |
|
2630 ph->param_type = |
|
2631 htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
2632 ph->param_length = htons(SCTP_BUF_LEN(op_err)); |
|
2633 ippp = (uint32_t *) (ph + 1); |
|
2634 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_19); |
|
2635 ippp++; |
|
2636 *ippp = asoc->cumulative_tsn; |
|
2637 |
|
2638 } |
|
2639 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_19; |
|
2640 sctp_abort_association(inp, stcb, m, iphlen, |
|
2641 src, dst, sh, op_err, |
|
2642 #if defined(__FreeBSD__) |
|
2643 use_mflowid, mflowid, |
|
2644 #endif |
|
2645 vrf_id, port); |
|
2646 return (2); |
|
2647 } |
|
2648 #ifdef SCTP_AUDITING_ENABLED |
|
2649 sctp_audit_log(0xB1, 0); |
|
2650 #endif |
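/*
 * This DATA chunk is the last one in the packet when its padded
 * length consumes the remainder of it; in that case the mbuf chain
 * can be handed off to sctp_process_a_data_chunk() instead of copied.
 */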
|
2651 if (SCTP_SIZE32(chk_length) == (length - *offset)) { |
|
2652 last_chunk = 1; |
|
2653 } else { |
|
2654 last_chunk = 0; |
|
2655 } |
|
2656 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch, |
|
2657 chk_length, net, high_tsn, &abort_flag, &break_flag, |
|
2658 last_chunk)) { |
|
2659 num_chunks++; |
|
2660 } |
|
2661 if (abort_flag) |
|
2662 return (2); |
|
2663 |
|
2664 if (break_flag) { |
|
2665 /* |
|
2666 * Set because of out of rwnd space and no |
|
2667 * drop rep space left. |
|
2668 */ |
|
2669 stop_proc = 1; |
|
2670 continue; |
|
2671 } |
|
2672 } else { |
|
2673 /* not a data chunk in the data region */ |
|
2674 switch (ch->ch.chunk_type) { |
|
2675 case SCTP_INITIATION: |
|
2676 case SCTP_INITIATION_ACK: |
|
2677 case SCTP_SELECTIVE_ACK: |
|
2678 case SCTP_NR_SELECTIVE_ACK: |
|
2679 case SCTP_HEARTBEAT_REQUEST: |
|
2680 case SCTP_HEARTBEAT_ACK: |
|
2681 case SCTP_ABORT_ASSOCIATION: |
|
2682 case SCTP_SHUTDOWN: |
|
2683 case SCTP_SHUTDOWN_ACK: |
|
2684 case SCTP_OPERATION_ERROR: |
|
2685 case SCTP_COOKIE_ECHO: |
|
2686 case SCTP_COOKIE_ACK: |
|
2687 case SCTP_ECN_ECHO: |
|
2688 case SCTP_ECN_CWR: |
|
2689 case SCTP_SHUTDOWN_COMPLETE: |
|
2690 case SCTP_AUTHENTICATION: |
|
2691 case SCTP_ASCONF_ACK: |
|
2692 case SCTP_PACKET_DROPPED: |
|
2693 case SCTP_STREAM_RESET: |
|
2694 case SCTP_FORWARD_CUM_TSN: |
|
2695 case SCTP_ASCONF: |
|
2696 /* |
|
2697 * Now, what do we do with KNOWN chunks that |
|
2698 * are NOT in the right place? |
|
2699 * |
|
2700 * For now, I do nothing but ignore them. We |
|
2701 * may later want to add sysctl stuff to |
|
2702 * switch out and do either an ABORT() or |
|
2703 * possibly process them. |
|
2704 */ |
|
2705 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) { |
|
2706 struct mbuf *op_err; |
|
2707 |
|
2708 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
2709 sctp_abort_association(inp, stcb, |
|
2710 m, iphlen, |
|
2711 src, dst, |
|
2712 sh, op_err, |
|
2713 #if defined(__FreeBSD__) |
|
2714 use_mflowid, mflowid, |
|
2715 #endif |
|
2716 vrf_id, port); |
|
2717 return (2); |
|
2718 } |
|
2719 break; |
|
2720 default: |
|
2721 /* unknown chunk type, use bit rules */ |
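/*
 * The two high-order bits of the chunk type encode the rules: if
 * 0x40 is set the unrecognized chunk is reported back in an ERROR,
 * and if 0x80 is clear processing of the rest of the packet stops.
 */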
|
2722 if (ch->ch.chunk_type & 0x40) { |
|
2723 /* Add a error report to the queue */ |
|
2724 struct mbuf *merr; |
|
2725 struct sctp_paramhdr *phd; |
|
2726 |
|
2727 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA); |
|
2728 if (merr) { |
|
2729 phd = mtod(merr, struct sctp_paramhdr *); |
|
2730 /* |
|
2731 * We cheat and use param |
|
2732 * type since we did not |
|
2733 * bother to define an error
|
2734 * cause struct. They are |
|
2735 * the same basic format |
|
2736 * with different names. |
|
2737 */ |
|
2738 phd->param_type = |
|
2739 htons(SCTP_CAUSE_UNRECOG_CHUNK); |
|
2740 phd->param_length = |
|
2741 htons(chk_length + sizeof(*phd)); |
|
2742 SCTP_BUF_LEN(merr) = sizeof(*phd); |
|
2743 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT); |
|
2744 if (SCTP_BUF_NEXT(merr)) { |
|
2745 if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) { |
|
2746 sctp_m_freem(merr); |
|
2747 } else { |
|
2748 sctp_queue_op_err(stcb, merr); |
|
2749 } |
|
2750 } else { |
|
2751 sctp_m_freem(merr); |
|
2752 } |
|
2753 } |
|
2754 } |
|
2755 if ((ch->ch.chunk_type & 0x80) == 0) { |
|
2756 /* discard the rest of this packet */ |
|
2757 stop_proc = 1; |
|
2758 } /* else skip this bad chunk and |
|
2759 * continue... */ |
|
2760 break; |
|
2761 } /* switch of chunk type */ |
|
2762 } |
|
2763 *offset += SCTP_SIZE32(chk_length); |
|
2764 if ((*offset >= length) || stop_proc) { |
|
2765 /* no more data left in the mbuf chain */ |
|
2766 stop_proc = 1; |
|
2767 continue; |
|
2768 } |
|
2769 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset, |
|
2770 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf); |
|
2771 if (ch == NULL) { |
|
2772 *offset = length; |
|
2773 stop_proc = 1; |
|
2774 continue; |
|
2775 } |
|
2776 } |
|
2777 if (break_flag) { |
|
2778 /* |
|
2779 * we need to report rwnd overrun drops. |
|
2780 */ |
|
2781 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0); |
|
2782 } |
|
2783 if (num_chunks) { |
|
2784 /* |
|
2785 * Did we get data, if so update the time for auto-close and |
|
2786 * give peer credit for being alive. |
|
2787 */ |
|
2788 SCTP_STAT_INCR(sctps_recvpktwithdata); |
|
2789 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { |
|
2790 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, |
|
2791 stcb->asoc.overall_error_count, |
|
2792 0, |
|
2793 SCTP_FROM_SCTP_INDATA, |
|
2794 __LINE__); |
|
2795 } |
|
2796 stcb->asoc.overall_error_count = 0; |
|
2797 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); |
|
2798 } |
|
2799 /* now service all of the reassm queue if needed */ |
|
2800 if (!(TAILQ_EMPTY(&asoc->reasmqueue))) |
|
2801 sctp_service_queues(stcb, asoc); |
|
2802 |
|
2803 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { |
|
2804 /* Assure that we ack right away */ |
|
2805 stcb->asoc.send_sack = 1; |
|
2806 } |
|
2807 /* Start a sack timer or QUEUE a SACK for sending */ |
|
2808 sctp_sack_check(stcb, was_a_gap); |
|
2809 return (0); |
|
2810 } |
|
2811 |
|
2812 static int |
|
2813 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn, |
|
2814 uint16_t frag_strt, uint16_t frag_end, int nr_sacking, |
|
2815 int *num_frs, |
|
2816 uint32_t *biggest_newly_acked_tsn, |
|
2817 uint32_t *this_sack_lowest_newack, |
|
2818 int *rto_ok) |
|
2819 { |
|
2820 struct sctp_tmit_chunk *tp1; |
|
2821 unsigned int theTSN; |
|
2822 int j, wake_him = 0, circled = 0; |
|
2823 |
|
2824 /* Recover the tp1 we last saw */ |
|
2825 tp1 = *p_tp1; |
|
2826 if (tp1 == NULL) { |
|
2827 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); |
|
2828 } |
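/*
 * Gap ack block boundaries are offsets relative to the SACK's
 * cumulative TSN (last_tsn), so this walks TSNs last_tsn + frag_strt
 * through last_tsn + frag_end, marking matching chunks on the sent queue.
 */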
|
2829 for (j = frag_strt; j <= frag_end; j++) { |
|
2830 theTSN = j + last_tsn; |
|
2831 while (tp1) { |
|
2832 if (tp1->rec.data.doing_fast_retransmit) |
|
2833 (*num_frs) += 1; |
|
2834 |
|
2835 /*- |
|
2836 * CMT: CUCv2 algorithm. For each TSN being |
|
2837 * processed from the sent queue, track the |
|
2838 * next expected pseudo-cumack, or |
|
2839 * rtx_pseudo_cumack, if required. Separate |
|
2840 * cumack trackers for first transmissions, |
|
2841 * and retransmissions. |
|
2842 */ |
|
2843 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && |
|
2844 (tp1->snd_count == 1)) { |
|
2845 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq; |
|
2846 tp1->whoTo->find_pseudo_cumack = 0; |
|
2847 } |
|
2848 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) && |
|
2849 (tp1->snd_count > 1)) { |
|
2850 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq; |
|
2851 tp1->whoTo->find_rtx_pseudo_cumack = 0; |
|
2852 } |
|
2853 if (tp1->rec.data.TSN_seq == theTSN) { |
|
2854 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { |
|
2855 /*- |
|
2856 * must be held until |
|
2857 * cum-ack passes |
|
2858 */ |
|
2859 if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
|
2860 /*- |
|
2861 * If it is less than RESEND, it is |
|
2862 * now no longer in flight.
|
2863 * Higher values may already be set |
|
2864 * via previous Gap Ack Blocks... |
|
2865 * i.e. ACKED or RESEND. |
|
2866 */ |
|
2867 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, |
|
2868 *biggest_newly_acked_tsn)) { |
|
2869 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq; |
|
2870 } |
|
2871 /*- |
|
2872 * CMT: SFR algo (and HTNA) - set |
|
2873 * saw_newack to 1 for dest being |
|
2874 * newly acked. update |
|
2875 * this_sack_highest_newack if |
|
2876 * appropriate. |
|
2877 */ |
|
2878 if (tp1->rec.data.chunk_was_revoked == 0) |
|
2879 tp1->whoTo->saw_newack = 1; |
|
2880 |
|
2881 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, |
|
2882 tp1->whoTo->this_sack_highest_newack)) { |
|
2883 tp1->whoTo->this_sack_highest_newack = |
|
2884 tp1->rec.data.TSN_seq; |
|
2885 } |
|
2886 /*- |
|
2887 * CMT DAC algo: also update |
|
2888 * this_sack_lowest_newack |
|
2889 */ |
|
2890 if (*this_sack_lowest_newack == 0) { |
|
2891 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
|
2892 sctp_log_sack(*this_sack_lowest_newack, |
|
2893 last_tsn, |
|
2894 tp1->rec.data.TSN_seq, |
|
2895 0, |
|
2896 0, |
|
2897 SCTP_LOG_TSN_ACKED); |
|
2898 } |
|
2899 *this_sack_lowest_newack = tp1->rec.data.TSN_seq; |
|
2900 } |
|
2901 /*- |
|
2902 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp |
|
2903 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set |
|
2904 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be |
|
2905 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack. |
|
2906 * Separate pseudo_cumack trackers for first transmissions and |
|
2907 * retransmissions. |
|
2908 */ |
|
2909 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) { |
|
2910 if (tp1->rec.data.chunk_was_revoked == 0) { |
|
2911 tp1->whoTo->new_pseudo_cumack = 1; |
|
2912 } |
|
2913 tp1->whoTo->find_pseudo_cumack = 1; |
|
2914 } |
|
2915 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { |
|
2916 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); |
|
2917 } |
|
2918 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) { |
|
2919 if (tp1->rec.data.chunk_was_revoked == 0) { |
|
2920 tp1->whoTo->new_pseudo_cumack = 1; |
|
2921 } |
|
2922 tp1->whoTo->find_rtx_pseudo_cumack = 1; |
|
2923 } |
|
2924 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
|
2925 sctp_log_sack(*biggest_newly_acked_tsn, |
|
2926 last_tsn, |
|
2927 tp1->rec.data.TSN_seq, |
|
2928 frag_strt, |
|
2929 frag_end, |
|
2930 SCTP_LOG_TSN_ACKED); |
|
2931 } |
|
2932 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
|
2933 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, |
|
2934 tp1->whoTo->flight_size, |
|
2935 tp1->book_size, |
|
2936 (uintptr_t)tp1->whoTo, |
|
2937 tp1->rec.data.TSN_seq); |
|
2938 } |
|
2939 sctp_flight_size_decrease(tp1); |
|
2940 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { |
|
2941 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, |
|
2942 tp1); |
|
2943 } |
|
2944 sctp_total_flight_decrease(stcb, tp1); |
|
2945 |
|
2946 tp1->whoTo->net_ack += tp1->send_size; |
|
2947 if (tp1->snd_count < 2) { |
|
2948 /*- |
|
2949 * True non-retransmitted chunk
|
2950 */ |
|
2951 tp1->whoTo->net_ack2 += tp1->send_size; |
|
2952 |
|
2953 /*- |
|
2954 * update RTO too ? |
|
2955 */ |
|
2956 if (tp1->do_rtt) { |
|
2957 if (*rto_ok) { |
|
2958 tp1->whoTo->RTO = |
|
2959 sctp_calculate_rto(stcb, |
|
2960 &stcb->asoc, |
|
2961 tp1->whoTo, |
|
2962 &tp1->sent_rcv_time, |
|
2963 sctp_align_safe_nocopy, |
|
2964 SCTP_RTT_FROM_DATA); |
|
2965 *rto_ok = 0; |
|
2966 } |
|
2967 if (tp1->whoTo->rto_needed == 0) { |
|
2968 tp1->whoTo->rto_needed = 1; |
|
2969 } |
|
2970 tp1->do_rtt = 0; |
|
2971 } |
|
2972 } |
|
2973 |
|
2974 } |
|
2975 if (tp1->sent <= SCTP_DATAGRAM_RESEND) { |
|
2976 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, |
|
2977 stcb->asoc.this_sack_highest_gap)) { |
|
2978 stcb->asoc.this_sack_highest_gap = |
|
2979 tp1->rec.data.TSN_seq; |
|
2980 } |
|
2981 if (tp1->sent == SCTP_DATAGRAM_RESEND) { |
|
2982 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); |
|
2983 #ifdef SCTP_AUDITING_ENABLED |
|
2984 sctp_audit_log(0xB2, |
|
2985 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff)); |
|
2986 #endif |
|
2987 } |
|
2988 } |
|
2989 /*- |
|
2990 * All chunks NOT UNSENT fall through here and are marked |
|
2991 * (leave PR-SCTP ones that are to skip alone though) |
|
2992 */ |
|
2993 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) && |
|
2994 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { |
|
2995 tp1->sent = SCTP_DATAGRAM_MARKED; |
|
2996 } |
|
2997 if (tp1->rec.data.chunk_was_revoked) { |
|
2998 /* deflate the cwnd */ |
|
2999 tp1->whoTo->cwnd -= tp1->book_size; |
|
3000 tp1->rec.data.chunk_was_revoked = 0; |
|
3001 } |
|
3002 /* NR Sack code here */ |
|
3003 if (nr_sacking && |
|
3004 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { |
|
3005 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { |
|
3006 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--; |
|
3007 #ifdef INVARIANTS |
|
3008 } else { |
|
3009 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); |
|
3010 #endif |
|
3011 } |
|
3012 tp1->sent = SCTP_DATAGRAM_NR_ACKED; |
|
3013 if (tp1->data) { |
|
3014 /* sa_ignore NO_NULL_CHK */ |
|
3015 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); |
|
3016 sctp_m_freem(tp1->data); |
|
3017 tp1->data = NULL; |
|
3018 } |
|
3019 wake_him++; |
|
3020 } |
|
3021 } |
|
3022 break; |
|
3023 } /* if (tp1->TSN_seq == theTSN) */ |
|
3024 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) { |
|
3025 break; |
|
3026 } |
|
3027 tp1 = TAILQ_NEXT(tp1, sctp_next); |
|
3028 if ((tp1 == NULL) && (circled == 0)) { |
|
3029 circled++; |
|
3030 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); |
|
3031 } |
|
3032 } /* end while (tp1) */ |
|
3033 if (tp1 == NULL) { |
|
3034 circled = 0; |
|
3035 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); |
|
3036 } |
|
3037 /* In case the fragments were not in order we must reset */ |
|
3038 } /* end for (j = fragStart */ |
|
3039 *p_tp1 = tp1; |
|
3040 return (wake_him); /* Return value only used for nr-sack */ |
|
3041 } |
|
3042 |
|
3043 |
|
3044 static int |
|
3045 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, |
|
3046 uint32_t last_tsn, uint32_t *biggest_tsn_acked, |
|
3047 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack, |
|
3048 int num_seg, int num_nr_seg, int *rto_ok) |
|
3049 { |
|
3050 struct sctp_gap_ack_block *frag, block; |
|
3051 struct sctp_tmit_chunk *tp1; |
|
3052 int i; |
|
3053 int num_frs = 0; |
|
3054 int chunk_freed; |
|
3055 int non_revocable; |
|
3056 uint16_t frag_strt, frag_end, prev_frag_end; |
|
3057 |
|
3058 tp1 = TAILQ_FIRST(&asoc->sent_queue); |
|
3059 prev_frag_end = 0; |
|
3060 chunk_freed = 0; |
|
3061 |
|
3062 for (i = 0; i < (num_seg + num_nr_seg); i++) { |
|
3063 if (i == num_seg) { |
|
3064 prev_frag_end = 0; |
|
3065 tp1 = TAILQ_FIRST(&asoc->sent_queue); |
|
3066 } |
|
3067 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, |
|
3068 sizeof(struct sctp_gap_ack_block), (uint8_t *) &block); |
|
3069 *offset += sizeof(block); |
|
3070 if (frag == NULL) { |
|
3071 return (chunk_freed); |
|
3072 } |
|
3073 frag_strt = ntohs(frag->start); |
|
3074 frag_end = ntohs(frag->end); |
|
3075 |
|
3076 if (frag_strt > frag_end) { |
|
3077 /* This gap report is malformed, skip it. */ |
|
3078 continue; |
|
3079 } |
|
3080 if (frag_strt <= prev_frag_end) { |
|
3081 /* This gap report is not in order, so restart. */ |
|
3082 tp1 = TAILQ_FIRST(&asoc->sent_queue); |
|
3083 } |
|
3084 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) { |
|
3085 *biggest_tsn_acked = last_tsn + frag_end; |
|
3086 } |
|
3087 if (i < num_seg) { |
|
3088 non_revocable = 0; |
|
3089 } else { |
|
3090 non_revocable = 1; |
|
3091 } |
|
3092 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end, |
|
3093 non_revocable, &num_frs, biggest_newly_acked_tsn, |
|
3094 this_sack_lowest_newack, rto_ok)) { |
|
3095 chunk_freed = 1; |
|
3096 } |
|
3097 prev_frag_end = frag_end; |
|
3098 } |
|
3099 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
|
3100 if (num_frs) |
|
3101 sctp_log_fr(*biggest_tsn_acked, |
|
3102 *biggest_newly_acked_tsn, |
|
3103 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); |
|
3104 } |
|
3105 return (chunk_freed); |
|
3106 } |
|
3107 |
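/*
 * Scan the sent queue between the cumulative ack and the biggest TSN
 * covered by this SACK.  A chunk still marked ACKED that is no longer
 * reported has been revoked by the peer: it goes back to SENT, its
 * flight size is added back and the cwnd is inflated to compensate,
 * and it is flagged so that inflation can be undone later.  A chunk
 * marked MARKED was acked again by this SACK and simply becomes ACKED.
 */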
|
3108 static void |
|
3109 sctp_check_for_revoked(struct sctp_tcb *stcb, |
|
3110 struct sctp_association *asoc, uint32_t cumack, |
|
3111 uint32_t biggest_tsn_acked) |
|
3112 { |
|
3113 struct sctp_tmit_chunk *tp1; |
|
3114 |
|
3115 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
|
3116 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) { |
|
3117 /* |
|
3118 * ok, this guy is either ACKED or MARKED. If it is |
|
3119 * ACKED it has been previously acked but not this |
|
3120 * time i.e. revoked. If it is MARKED it was ACK'ed |
|
3121 * again. |
|
3122 */ |
|
3123 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) { |
|
3124 break; |
|
3125 } |
|
3126 if (tp1->sent == SCTP_DATAGRAM_ACKED) { |
|
3127 /* it has been revoked */ |
|
3128 tp1->sent = SCTP_DATAGRAM_SENT; |
|
3129 tp1->rec.data.chunk_was_revoked = 1; |
|
3130 /* We must add this stuff back in to |
|
3131 * assure timers and such get started. |
|
3132 */ |
|
3133 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
|
3134 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, |
|
3135 tp1->whoTo->flight_size, |
|
3136 tp1->book_size, |
|
3137 (uintptr_t)tp1->whoTo, |
|
3138 tp1->rec.data.TSN_seq); |
|
3139 } |
|
3140 sctp_flight_size_increase(tp1); |
|
3141 sctp_total_flight_increase(stcb, tp1); |
|
3142 /* We inflate the cwnd to compensate for our |
|
3143 * artificial inflation of the flight_size. |
|
3144 */ |
|
3145 tp1->whoTo->cwnd += tp1->book_size; |
|
3146 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
|
3147 sctp_log_sack(asoc->last_acked_seq, |
|
3148 cumack, |
|
3149 tp1->rec.data.TSN_seq, |
|
3150 0, |
|
3151 0, |
|
3152 SCTP_LOG_TSN_REVOKED); |
|
3153 } |
|
3154 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { |
|
3155 /* it has been re-acked in this SACK */ |
|
3156 tp1->sent = SCTP_DATAGRAM_ACKED; |
|
3157 } |
|
3158 } |
|
3159 if (tp1->sent == SCTP_DATAGRAM_UNSENT) |
|
3160 break; |
|
3161 } |
|
3162 } |
|
3163 |
|
3164 |
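/*
 * Strike (increment the miss count of) every chunk on the sent queue that
 * this SACK should have covered but did not.  The CMT checks (SFR, DAC)
 * and the fast-recovery/HTNA rules below decide whether a given chunk may
 * be struck.  A chunk whose count reaches SCTP_DATAGRAM_RESEND is marked
 * for fast retransmit: it leaves the flight, its size is credited back to
 * the peer's rwnd, and an alternate destination may be selected for it.
 */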
|
3165 static void |
|
3166 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, |
|
3167 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) |
|
3168 { |
|
3169 struct sctp_tmit_chunk *tp1; |
|
3170 int strike_flag = 0; |
|
3171 struct timeval now; |
|
3172 int tot_retrans = 0; |
|
3173 uint32_t sending_seq; |
|
3174 struct sctp_nets *net; |
|
3175 int num_dests_sacked = 0; |
|
3176 |
|
3177 /* |
|
3178 * select the sending_seq, this is either the next thing ready to be |
|
3179 * sent but not transmitted, OR, the next seq we assign. |
|
3180 */ |
|
3181 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); |
|
3182 if (tp1 == NULL) { |
|
3183 sending_seq = asoc->sending_seq; |
|
3184 } else { |
|
3185 sending_seq = tp1->rec.data.TSN_seq; |
|
3186 } |
|
3187 |
|
3188 /* CMT DAC algo: finding out if SACK is a mixed SACK */ |
|
3189 if ((asoc->sctp_cmt_on_off > 0) && |
|
3190 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { |
|
3191 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
3192 if (net->saw_newack) |
|
3193 num_dests_sacked++; |
|
3194 } |
|
3195 } |
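/*
 * num_dests_sacked counts the destinations that saw a new ack in this
 * SACK.  The DAC checks below only apply their extra strike when exactly
 * one destination was sacked (num_dests_sacked == 1).
 */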
|
3196 if (stcb->asoc.peer_supports_prsctp) { |
|
3197 (void)SCTP_GETTIME_TIMEVAL(&now); |
|
3198 } |
|
3199 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
|
3200 strike_flag = 0; |
|
3201 if (tp1->no_fr_allowed) { |
|
3202 /* this one had a timeout or something */ |
|
3203 continue; |
|
3204 } |
|
3205 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
|
3206 if (tp1->sent < SCTP_DATAGRAM_RESEND) |
|
3207 sctp_log_fr(biggest_tsn_newly_acked, |
|
3208 tp1->rec.data.TSN_seq, |
|
3209 tp1->sent, |
|
3210 SCTP_FR_LOG_CHECK_STRIKE); |
|
3211 } |
|
3212 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) || |
|
3213 tp1->sent == SCTP_DATAGRAM_UNSENT) { |
|
3214 /* done */ |
|
3215 break; |
|
3216 } |
|
3217 if (stcb->asoc.peer_supports_prsctp) { |
|
3218 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { |
|
3219 /* Is it expired? */ |
|
3220 #ifndef __FreeBSD__ |
|
3221 if (timercmp(&now, &tp1->rec.data.timetodrop, >)) { |
|
3222 #else |
|
3223 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { |
|
3224 #endif |
|
3225 /* Yes so drop it */ |
|
3226 if (tp1->data != NULL) { |
|
3227 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, |
|
3228 SCTP_SO_NOT_LOCKED); |
|
3229 } |
|
3230 continue; |
|
3231 } |
|
3232 } |
|
3233 |
|
3234 } |
|
3235 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) { |
|
3236 /* we are beyond the tsn in the sack */ |
|
3237 break; |
|
3238 } |
|
3239 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { |
|
3240 /* either a RESEND, ACKED, or MARKED */ |
|
3241 /* skip */ |
|
3242 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { |
|
3243 /* Continue striking FWD-TSN chunks */ |
|
3244 tp1->rec.data.fwd_tsn_cnt++; |
|
3245 } |
|
3246 continue; |
|
3247 } |
|
3248 /* |
|
3249 * CMT : SFR algo (covers part of DAC and HTNA as well) |
|
3250 */ |
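/*
 * These abbreviations come from the CMT (Concurrent Multipath Transfer)
 * work: SFR is the split fast retransmit change, DAC the delayed-ack
 * change, and HTNA the "Highest TSN Newly Acked" rule used further below.
 * The two checks that follow apply SFR on a per-destination basis.
 */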
|
3251 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { |
|
3252 /* |
|
3253 * No new acks were received for data sent to this |
|
3254 * dest. Therefore, according to the SFR algo for |
|
3255 * CMT, no data sent to this dest can be marked for |
|
3256 * FR using this SACK. |
|
3257 */ |
|
3258 continue; |
|
3259 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq, |
|
3260 tp1->whoTo->this_sack_highest_newack)) { |
|
3261 /* |
|
3262 * CMT: New acks were received for data sent to |
|
3263 * this dest. But no new acks were seen for data |
|
3264 * sent after tp1. Therefore, according to the SFR |
|
3265 * algo for CMT, tp1 cannot be marked for FR using |
|
3266 * this SACK. This step covers part of the DAC algo |
|
3267 * and the HTNA algo as well. |
|
3268 */ |
|
3269 continue; |
|
3270 } |
|
3271 /* |
|
3272 * Here we check to see if we have already done a FR |
|
3273 * and if so we see if the biggest TSN we saw in the sack is |
|
3274 * smaller than the recovery point. If so we don't strike |
|
3275 * the tsn... otherwise we CAN strike the TSN. |
|
3276 */ |
|
3277 /* |
|
3278 * @@@ JRI: Check for CMT |
|
3279 * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) { |
|
3280 */ |
|
3281 if (accum_moved && asoc->fast_retran_loss_recovery) { |
|
3282 /* |
|
3283 * Strike the TSN if in fast-recovery and cum-ack |
|
3284 * moved. |
|
3285 */ |
|
3286 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
|
3287 sctp_log_fr(biggest_tsn_newly_acked, |
|
3288 tp1->rec.data.TSN_seq, |
|
3289 tp1->sent, |
|
3290 SCTP_FR_LOG_STRIKE_CHUNK); |
|
3291 } |
|
3292 if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
|
3293 tp1->sent++; |
|
3294 } |
|
3295 if ((asoc->sctp_cmt_on_off > 0) && |
|
3296 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { |
|
3297 /* |
|
3298 * CMT DAC algorithm: If SACK flag is set to |
|
3299 * 0, then lowest_newack test will not pass |
|
3300 * because it would have been set to the |
|
3301 * cumack earlier. If not already to be |
|
3302 * rtx'd, If not a mixed sack and if tp1 is |
|
3303 * not between two sacked TSNs, then mark by |
|
3304 * one more. |
|
3305 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that |
|
3306 * two packets have been received after this missing TSN. |
|
3307 */ |
|
3308 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && |
|
3309 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) { |
|
3310 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
|
3311 sctp_log_fr(16 + num_dests_sacked, |
|
3312 tp1->rec.data.TSN_seq, |
|
3313 tp1->sent, |
|
3314 SCTP_FR_LOG_STRIKE_CHUNK); |
|
3315 } |
|
3316 tp1->sent++; |
|
3317 } |
|
3318 } |
|
3319 } else if ((tp1->rec.data.doing_fast_retransmit) && |
|
3320 (asoc->sctp_cmt_on_off == 0)) { |
|
3321 /* |
|
3322 * For those that have done a FR we must take |
|
3323 * special consideration if we strike. I.e the |
|
3324 * biggest_newly_acked must be higher than the |
|
3325 * sending_seq at the time we did the FR. |
|
3326 */ |
|
3327 if ( |
|
3328 #ifdef SCTP_FR_TO_ALTERNATE |
|
3329 /* |
|
3330 * If FR's go to new networks, then we must only do |
|
3331 * this for singly homed asoc's. However if the FR's |
|
3332 * go to the same network (Armando's work) then its |
|
3333 * ok to FR multiple times. |
|
3334 */ |
|
3335 (asoc->numnets < 2) |
|
3336 #else |
|
3337 (1) |
|
3338 #endif |
|
3339 ) { |
|
3340 |
|
3341 if (SCTP_TSN_GE(biggest_tsn_newly_acked, |
|
3342 tp1->rec.data.fast_retran_tsn)) { |
|
3343 /* |
|
3344 * Strike the TSN, since this ack is |
|
3345 * beyond where things were when we |
|
3346 * did a FR. |
|
3347 */ |
|
3348 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
|
3349 sctp_log_fr(biggest_tsn_newly_acked, |
|
3350 tp1->rec.data.TSN_seq, |
|
3351 tp1->sent, |
|
3352 SCTP_FR_LOG_STRIKE_CHUNK); |
|
3353 } |
|
3354 if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
|
3355 tp1->sent++; |
|
3356 } |
|
3357 strike_flag = 1; |
|
3358 if ((asoc->sctp_cmt_on_off > 0) && |
|
3359 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { |
|
3360 /* |
|
3361 * CMT DAC algorithm: If |
|
3362 * SACK flag is set to 0, |
|
3363 * then lowest_newack test |
|
3364 * will not pass because it |
|
3365 * would have been set to |
|
3366 * the cumack earlier. If |
|
3367 * not already to be rtx'd, |
|
3368 * If not a mixed sack and |
|
3369 * if tp1 is not between two |
|
3370 * sacked TSNs, then mark by |
|
3371 * one more. |
|
3372 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that |
|
3373 * two packets have been received after this missing TSN. |
|
3374 */ |
|
3375 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && |
|
3376 (num_dests_sacked == 1) && |
|
3377 SCTP_TSN_GT(this_sack_lowest_newack, |
|
3378 tp1->rec.data.TSN_seq)) { |
|
3379 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
|
3380 sctp_log_fr(32 + num_dests_sacked, |
|
3381 tp1->rec.data.TSN_seq, |
|
3382 tp1->sent, |
|
3383 SCTP_FR_LOG_STRIKE_CHUNK); |
|
3384 } |
|
3385 if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
|
3386 tp1->sent++; |
|
3387 } |
|
3388 } |
|
3389 } |
|
3390 } |
|
3391 } |
|
3392 /* |
|
3393 * JRI: TODO: remove code for HTNA algo. CMT's |
|
3394 * SFR algo covers HTNA. |
|
3395 */ |
|
3396 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, |
|
3397 biggest_tsn_newly_acked)) { |
|
3398 /* |
|
3399 * We don't strike these: This is the HTNA |
|
3400 * algorithm i.e. we don't strike If our TSN is |
|
3401 * larger than the Highest TSN Newly Acked. |
|
3402 */ |
|
3403 ; |
|
3404 } else { |
|
3405 /* Strike the TSN */ |
|
3406 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
|
3407 sctp_log_fr(biggest_tsn_newly_acked, |
|
3408 tp1->rec.data.TSN_seq, |
|
3409 tp1->sent, |
|
3410 SCTP_FR_LOG_STRIKE_CHUNK); |
|
3411 } |
|
3412 if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
|
3413 tp1->sent++; |
|
3414 } |
|
3415 if ((asoc->sctp_cmt_on_off > 0) && |
|
3416 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { |
|
3417 /* |
|
3418 * CMT DAC algorithm: If SACK flag is set to |
|
3419 * 0, then lowest_newack test will not pass |
|
3420 * because it would have been set to the |
|
3421 * cumack earlier. If not already to be |
|
3422 * rtx'd, If not a mixed sack and if tp1 is |
|
3423 * not between two sacked TSNs, then mark by |
|
3424 * one more. |
|
3425 * NOTE that we are marking by one additional time since the SACK DAC flag indicates that |
|
3426 * two packets have been received after this missing TSN. |
|
3427 */ |
|
3428 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && |
|
3429 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) { |
|
3430 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
|
3431 sctp_log_fr(48 + num_dests_sacked, |
|
3432 tp1->rec.data.TSN_seq, |
|
3433 tp1->sent, |
|
3434 SCTP_FR_LOG_STRIKE_CHUNK); |
|
3435 } |
|
3436 tp1->sent++; |
|
3437 } |
|
3438 } |
|
3439 } |
|
3440 if (tp1->sent == SCTP_DATAGRAM_RESEND) { |
|
3441 struct sctp_nets *alt; |
|
3442 |
|
3443 /* fix counts and things */ |
|
3444 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
|
3445 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, |
|
3446 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), |
|
3447 tp1->book_size, |
|
3448 (uintptr_t)tp1->whoTo, |
|
3449 tp1->rec.data.TSN_seq); |
|
3450 } |
|
3451 if (tp1->whoTo) { |
|
3452 tp1->whoTo->net_ack++; |
|
3453 sctp_flight_size_decrease(tp1); |
|
3454 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { |
|
3455 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, |
|
3456 tp1); |
|
3457 } |
|
3458 } |
|
3459 |
|
3460 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { |
|
3461 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, |
|
3462 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); |
|
3463 } |
|
3464 /* add back to the rwnd */ |
|
3465 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); |
|
3466 |
|
3467 /* remove from the total flight */ |
|
3468 sctp_total_flight_decrease(stcb, tp1); |
|
3469 |
|
3470 if ((stcb->asoc.peer_supports_prsctp) && |
|
3471 (PR_SCTP_RTX_ENABLED(tp1->flags))) { |
|
3472 /* Has it been retransmitted tv_sec times? - we store the retran count there. */ |
|
3473 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { |
|
3474 /* Yes, so drop it */ |
|
3475 if (tp1->data != NULL) { |
|
3476 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, |
|
3477 SCTP_SO_NOT_LOCKED); |
|
3478 } |
|
3479 /* Make sure to flag we had a FR */ |
|
3480 tp1->whoTo->net_ack++; |
|
3481 continue; |
|
3482 } |
|
3483 } |
|
3484 /* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */ |
|
3485 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
|
3486 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count, |
|
3487 0, SCTP_FR_MARKED); |
|
3488 } |
|
3489 if (strike_flag) { |
|
3490 /* This is a subsequent FR */ |
|
3491 SCTP_STAT_INCR(sctps_sendmultfastretrans); |
|
3492 } |
|
3493 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); |
|
3494 if (asoc->sctp_cmt_on_off > 0) { |
|
3495 /* |
|
3496 * CMT: Using RTX_SSTHRESH policy for CMT. |
|
3497 * If CMT is being used, then pick dest with |
|
3498 * largest ssthresh for any retransmission. |
|
3499 */ |
|
3500 tp1->no_fr_allowed = 1; |
|
3501 alt = tp1->whoTo; |
|
3502 /*sa_ignore NO_NULL_CHK*/ |
|
3503 if (asoc->sctp_cmt_pf > 0) { |
|
3504 /* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */ |
|
3505 alt = sctp_find_alternate_net(stcb, alt, 2); |
|
3506 } else { |
|
3507 /* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */ |
|
3508 /*sa_ignore NO_NULL_CHK*/ |
|
3509 alt = sctp_find_alternate_net(stcb, alt, 1); |
|
3510 } |
|
3511 if (alt == NULL) { |
|
3512 alt = tp1->whoTo; |
|
3513 } |
|
3514 /* |
|
3515 * CUCv2: If a different dest is picked for |
|
3516 * the retransmission, then new |
|
3517 * (rtx-)pseudo_cumack needs to be tracked |
|
3518 * for orig dest. Let CUCv2 track new (rtx-) |
|
3519 * pseudo-cumack always. |
|
3520 */ |
|
3521 if (tp1->whoTo) { |
|
3522 tp1->whoTo->find_pseudo_cumack = 1; |
|
3523 tp1->whoTo->find_rtx_pseudo_cumack = 1; |
|
3524 } |
|
3525 |
|
3526 } else { /* CMT is OFF */ |
|
3527 |
|
3528 #ifdef SCTP_FR_TO_ALTERNATE |
|
3529 /* Can we find an alternate? */ |
|
3530 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); |
|
3531 #else |
|
3532 /* |
|
3533 * default behavior is to NOT retransmit |
|
3534 * FR's to an alternate. Armando Caro's |
|
3535 * paper details why. |
|
3536 */ |
|
3537 alt = tp1->whoTo; |
|
3538 #endif |
|
3539 } |
|
3540 |
|
3541 tp1->rec.data.doing_fast_retransmit = 1; |
|
3542 tot_retrans++; |
|
3543 /* mark the sending seq for possible subsequent FR's */ |
|
3544 /* |
|
3545 * SCTP_PRINTF("Marking TSN for FR new value %x\n", |
|
3546 * (uint32_t)tpi->rec.data.TSN_seq); |
|
3547 */ |
|
3548 if (TAILQ_EMPTY(&asoc->send_queue)) { |
|
3549 /* |
|
3550 * If the send queue is empty then it's |
|
3551 * the next sequence number that will be |
|
3552 * assigned so we subtract one from this to |
|
3553 * get the one we last sent. |
|
3554 */ |
|
3555 tp1->rec.data.fast_retran_tsn = sending_seq; |
|
3556 } else { |
|
3557 /* |
|
3558 * If there are chunks on the send queue |
|
3559 * (unsent data that has made it from the |
|
3560 * stream queues but not out the door), we |
|
3561 * take the first one (which will have the |
|
3562 * lowest TSN) and subtract one to get the |
|
3563 * one we last sent. |
|
3564 */ |
|
3565 struct sctp_tmit_chunk *ttt; |
|
3566 |
|
3567 ttt = TAILQ_FIRST(&asoc->send_queue); |
|
3568 tp1->rec.data.fast_retran_tsn = |
|
3569 ttt->rec.data.TSN_seq; |
|
3570 } |
|
3571 |
|
3572 if (tp1->do_rtt) { |
|
3573 /* |
|
3574 * this guy had a RTO calculation pending on |
|
3575 * it, cancel it |
|
3576 */ |
|
3577 if ((tp1->whoTo != NULL) && |
|
3578 (tp1->whoTo->rto_needed == 0)) { |
|
3579 tp1->whoTo->rto_needed = 1; |
|
3580 } |
|
3581 tp1->do_rtt = 0; |
|
3582 } |
|
3583 if (alt != tp1->whoTo) { |
|
3584 /* yes, there is an alternate. */ |
|
3585 sctp_free_remote_addr(tp1->whoTo); |
|
3586 /*sa_ignore FREED_MEMORY*/ |
|
3587 tp1->whoTo = alt; |
|
3588 atomic_add_int(&alt->ref_count, 1); |
|
3589 } |
|
3590 } |
|
3591 } |
|
3592 } |
|
3593 |
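/*
 * PR-SCTP: try to advance the Advanced.Peer.Ack.Point.  Walk the sent
 * queue from the front; abandoned chunks (FORWARD_TSN_SKIP or NR_ACKED)
 * move the ack point forward, while the first reliable chunk or a PR
 * chunk still waiting on an unexpired retransmit stops the scan.  The
 * last chunk that advanced the point is returned so the caller can keep
 * a timer running and decide whether a FORWARD-TSN must be (re)sent.
 */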
|
3594 struct sctp_tmit_chunk * |
|
3595 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, |
|
3596 struct sctp_association *asoc) |
|
3597 { |
|
3598 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; |
|
3599 struct timeval now; |
|
3600 int now_filled = 0; |
|
3601 |
|
3602 if (asoc->peer_supports_prsctp == 0) { |
|
3603 return (NULL); |
|
3604 } |
|
3605 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { |
|
3606 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && |
|
3607 tp1->sent != SCTP_DATAGRAM_RESEND && |
|
3608 tp1->sent != SCTP_DATAGRAM_NR_ACKED) { |
|
3609 /* no chance to advance, out of here */ |
|
3610 break; |
|
3611 } |
|
3612 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { |
|
3613 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || |
|
3614 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { |
|
3615 sctp_misc_ints(SCTP_FWD_TSN_CHECK, |
|
3616 asoc->advanced_peer_ack_point, |
|
3617 tp1->rec.data.TSN_seq, 0, 0); |
|
3618 } |
|
3619 } |
|
3620 if (!PR_SCTP_ENABLED(tp1->flags)) { |
|
3621 /* |
|
3622 * We can't fwd-tsn past any that are reliable aka |
|
3623 * retransmitted until the asoc fails. |
|
3624 */ |
|
3625 break; |
|
3626 } |
|
3627 if (!now_filled) { |
|
3628 (void)SCTP_GETTIME_TIMEVAL(&now); |
|
3629 now_filled = 1; |
|
3630 } |
|
3631 /* |
|
3632 * now we got a chunk which is marked for another |
|
3633 * retransmission to a PR-stream but has run out its chances |
|
3634 * already maybe OR has been marked to skip now. Can we skip |
|
3635 * it if its a resend? |
|
3636 */ |
|
3637 if (tp1->sent == SCTP_DATAGRAM_RESEND && |
|
3638 (PR_SCTP_TTL_ENABLED(tp1->flags))) { |
|
3639 /* |
|
3640 * Now is this one marked for resend and its time is |
|
3641 * now up? |
|
3642 */ |
|
3643 #ifndef __FreeBSD__ |
|
3644 if (timercmp(&now, &tp1->rec.data.timetodrop, >)) { |
|
3645 #else |
|
3646 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { |
|
3647 #endif |
|
3648 /* Yes so drop it */ |
|
3649 if (tp1->data) { |
|
3650 (void)sctp_release_pr_sctp_chunk(stcb, tp1, |
|
3651 1, SCTP_SO_NOT_LOCKED); |
|
3652 } |
|
3653 } else { |
|
3654 /* |
|
3655 * No, we are done when we hit one marked for resend |
|
3656 * whose time has not expired. |
|
3657 */ |
|
3658 break; |
|
3659 } |
|
3660 } |
|
3661 /* |
|
3662 * Ok now if this chunk is marked to drop it we can clean up |
|
3663 * the chunk, advance our peer ack point and we can check |
|
3664 * the next chunk. |
|
3665 */ |
|
3666 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || |
|
3667 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { |
|
3668 /* advance PeerAckPoint goes forward */ |
|
3669 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) { |
|
3670 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq; |
|
3671 a_adv = tp1; |
|
3672 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) { |
|
3673 /* No update but we do save the chk */ |
|
3674 a_adv = tp1; |
|
3675 } |
|
3676 } else { |
|
3677 /* |
|
3678 * If it is still in RESEND we can advance no |
|
3679 * further |
|
3680 */ |
|
3681 break; |
|
3682 } |
|
3683 } |
|
3684 return (a_adv); |
|
3685 } |
|
3686 |
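/*
 * Consistency check of the flight-size accounting: classify every chunk on
 * the sent queue by its sent state and complain if anything is still
 * counted as in flight or sits between RESEND and ACKED.  Under INVARIANTS
 * this panics; otherwise it returns 1 so the caller can rebuild the
 * counters from scratch.
 */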
|
3687 static int |
|
3688 sctp_fs_audit(struct sctp_association *asoc) |
|
3689 { |
|
3690 struct sctp_tmit_chunk *chk; |
|
3691 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; |
|
3692 int entry_flight, entry_cnt, ret; |
|
3693 |
|
3694 entry_flight = asoc->total_flight; |
|
3695 entry_cnt = asoc->total_flight_count; |
|
3696 ret = 0; |
|
3697 |
|
3698 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt) |
|
3699 return (0); |
|
3700 |
|
3701 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { |
|
3702 if (chk->sent < SCTP_DATAGRAM_RESEND) { |
|
3703 SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n", |
|
3704 chk->rec.data.TSN_seq, |
|
3705 chk->send_size, |
|
3706 chk->snd_count); |
|
3707 inflight++; |
|
3708 } else if (chk->sent == SCTP_DATAGRAM_RESEND) { |
|
3709 resend++; |
|
3710 } else if (chk->sent < SCTP_DATAGRAM_ACKED) { |
|
3711 inbetween++; |
|
3712 } else if (chk->sent > SCTP_DATAGRAM_ACKED) { |
|
3713 above++; |
|
3714 } else { |
|
3715 acked++; |
|
3716 } |
|
3717 } |
|
3718 |
|
3719 if ((inflight > 0) || (inbetween > 0)) { |
|
3720 #ifdef INVARIANTS |
|
3721 panic("Flight size-express incorrect? \n"); |
|
3722 #else |
|
3723 SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n", |
|
3724 entry_flight, entry_cnt); |
|
3725 |
|
3726 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n", |
|
3727 inflight, inbetween, resend, above, acked); |
|
3728 ret = 1; |
|
3729 #endif |
|
3730 } |
|
3731 return (ret); |
|
3732 } |
|
3733 |
|
3734 |
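/*
 * A chunk that was sent as a window probe is pulled back out of the
 * flight and marked for retransmission once the peer's window opens up
 * again.  Chunks that were already acked or abandoned while probing are
 * left alone.
 */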
|
3735 static void |
|
3736 sctp_window_probe_recovery(struct sctp_tcb *stcb, |
|
3737 struct sctp_association *asoc, |
|
3738 struct sctp_tmit_chunk *tp1) |
|
3739 { |
|
3740 tp1->window_probe = 0; |
|
3741 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { |
|
3742 /* TSN's skipped we do NOT move back. */ |
|
3743 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, |
|
3744 tp1->whoTo->flight_size, |
|
3745 tp1->book_size, |
|
3746 (uintptr_t)tp1->whoTo, |
|
3747 tp1->rec.data.TSN_seq); |
|
3748 return; |
|
3749 } |
|
3750 /* First setup this by shrinking flight */ |
|
3751 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { |
|
3752 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, |
|
3753 tp1); |
|
3754 } |
|
3755 sctp_flight_size_decrease(tp1); |
|
3756 sctp_total_flight_decrease(stcb, tp1); |
|
3757 /* Now mark for resend */ |
|
3758 tp1->sent = SCTP_DATAGRAM_RESEND; |
|
3759 sctp_ucount_incr(asoc->sent_queue_retran_cnt); |
|
3760 |
|
3761 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
|
3762 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, |
|
3763 tp1->whoTo->flight_size, |
|
3764 tp1->book_size, |
|
3765 (uintptr_t)tp1->whoTo, |
|
3766 tp1->rec.data.TSN_seq); |
|
3767 } |
|
3768 } |
|
3769 |
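/*
 * Fast-path SACK processing for the case where only the cumulative TSN
 * and the advertised rwnd need handling (no gap-ack blocks are passed
 * in).  Chunks up to the cumack are removed from the sent queue,
 * congestion control and RTO bookkeeping are updated, and the usual
 * shutdown and PR-SCTP follow-ups are performed.
 */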
|
3770 void |
|
3771 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, |
|
3772 uint32_t rwnd, int *abort_now, int ecne_seen) |
|
3773 { |
|
3774 struct sctp_nets *net; |
|
3775 struct sctp_association *asoc; |
|
3776 struct sctp_tmit_chunk *tp1, *tp2; |
|
3777 uint32_t old_rwnd; |
|
3778 int win_probe_recovery = 0; |
|
3779 int win_probe_recovered = 0; |
|
3780 int j, done_once = 0; |
|
3781 int rto_ok = 1; |
|
3782 |
|
3783 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { |
|
3784 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, |
|
3785 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); |
|
3786 } |
|
3787 SCTP_TCB_LOCK_ASSERT(stcb); |
|
3788 #ifdef SCTP_ASOCLOG_OF_TSNS |
|
3789 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; |
|
3790 stcb->asoc.cumack_log_at++; |
|
3791 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { |
|
3792 stcb->asoc.cumack_log_at = 0; |
|
3793 } |
|
3794 #endif |
|
3795 asoc = &stcb->asoc; |
|
3796 old_rwnd = asoc->peers_rwnd; |
|
3797 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { |
|
3798 /* old ack */ |
|
3799 return; |
|
3800 } else if (asoc->last_acked_seq == cumack) { |
|
3801 /* Window update sack */ |
|
3802 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, |
|
3803 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); |
|
3804 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { |
|
3805 /* SWS sender side engages */ |
|
3806 asoc->peers_rwnd = 0; |
|
3807 } |
|
3808 if (asoc->peers_rwnd > old_rwnd) { |
|
3809 goto again; |
|
3810 } |
|
3811 return; |
|
3812 } |
|
3813 |
|
3814 /* First setup for CC stuff */ |
|
3815 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
3816 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { |
|
3817 /* Drag along the window_tsn for cwr's */ |
|
3818 net->cwr_window_tsn = cumack; |
|
3819 } |
|
3820 net->prev_cwnd = net->cwnd; |
|
3821 net->net_ack = 0; |
|
3822 net->net_ack2 = 0; |
|
3823 |
|
3824 /* |
|
3825 * CMT: Reset CUC and Fast recovery algo variables before |
|
3826 * SACK processing |
|
3827 */ |
|
3828 net->new_pseudo_cumack = 0; |
|
3829 net->will_exit_fast_recovery = 0; |
|
3830 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { |
|
3831 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net); |
|
3832 } |
|
3833 } |
|
3834 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { |
|
3835 uint32_t send_s; |
|
3836 |
|
3837 if (!TAILQ_EMPTY(&asoc->sent_queue)) { |
|
3838 tp1 = TAILQ_LAST(&asoc->sent_queue, |
|
3839 sctpchunk_listhead); |
|
3840 send_s = tp1->rec.data.TSN_seq + 1; |
|
3841 } else { |
|
3842 send_s = asoc->sending_seq; |
|
3843 } |
|
3844 if (SCTP_TSN_GE(cumack, send_s)) { |
|
3845 #ifndef INVARIANTS |
|
3846 struct mbuf *oper; |
|
3847 |
|
3848 #endif |
|
3849 #ifdef INVARIANTS |
|
3850 panic("Impossible sack 1"); |
|
3851 #else |
|
3852 |
|
3853 *abort_now = 1; |
|
3854 /* XXX */ |
|
3855 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), |
|
3856 0, M_NOWAIT, 1, MT_DATA); |
|
3857 if (oper) { |
|
3858 struct sctp_paramhdr *ph; |
|
3859 uint32_t *ippp; |
|
3860 |
|
3861 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + |
|
3862 sizeof(uint32_t); |
|
3863 ph = mtod(oper, struct sctp_paramhdr *); |
|
3864 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
3865 ph->param_length = htons(SCTP_BUF_LEN(oper)); |
|
3866 ippp = (uint32_t *) (ph + 1); |
|
3867 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); |
|
3868 } |
|
3869 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; |
|
3870 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
3871 return; |
|
3872 #endif |
|
3873 } |
|
3874 } |
|
3875 asoc->this_sack_highest_gap = cumack; |
|
3876 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { |
|
3877 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, |
|
3878 stcb->asoc.overall_error_count, |
|
3879 0, |
|
3880 SCTP_FROM_SCTP_INDATA, |
|
3881 __LINE__); |
|
3882 } |
|
3883 stcb->asoc.overall_error_count = 0; |
|
3884 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) { |
|
3885 /* process the new consecutive TSN first */ |
|
3886 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { |
|
3887 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) { |
|
3888 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { |
|
3889 SCTP_PRINTF("Warning, an unsent is now acked?\n"); |
|
3890 } |
|
3891 if (tp1->sent < SCTP_DATAGRAM_ACKED) { |
|
3892 /* |
|
3893 * If it is less than ACKED, it is |
|
3894 * now no-longer in flight. Higher |
|
3895 * values may occur during marking |
|
3896 */ |
|
3897 if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
|
3898 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
|
3899 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, |
|
3900 tp1->whoTo->flight_size, |
|
3901 tp1->book_size, |
|
3902 (uintptr_t)tp1->whoTo, |
|
3903 tp1->rec.data.TSN_seq); |
|
3904 } |
|
3905 sctp_flight_size_decrease(tp1); |
|
3906 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { |
|
3907 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, |
|
3908 tp1); |
|
3909 } |
|
3910 /* sa_ignore NO_NULL_CHK */ |
|
3911 sctp_total_flight_decrease(stcb, tp1); |
|
3912 } |
|
3913 tp1->whoTo->net_ack += tp1->send_size; |
|
3914 if (tp1->snd_count < 2) { |
|
3915 /* |
|
3916 * True non-retransmitted |
|
3917 * chunk |
|
3918 */ |
|
3919 tp1->whoTo->net_ack2 += |
|
3920 tp1->send_size; |
|
3921 |
|
3922 /* update RTO too? */ |
|
3923 if (tp1->do_rtt) { |
|
3924 if (rto_ok) { |
|
3925 tp1->whoTo->RTO = |
|
3926 /* |
|
3927 * sa_ignore |
|
3928 * NO_NULL_CHK |
|
3929 */ |
|
3930 sctp_calculate_rto(stcb, |
|
3931 asoc, tp1->whoTo, |
|
3932 &tp1->sent_rcv_time, |
|
3933 sctp_align_safe_nocopy, |
|
3934 SCTP_RTT_FROM_DATA); |
|
3935 rto_ok = 0; |
|
3936 } |
|
3937 if (tp1->whoTo->rto_needed == 0) { |
|
3938 tp1->whoTo->rto_needed = 1; |
|
3939 } |
|
3940 tp1->do_rtt = 0; |
|
3941 } |
|
3942 } |
|
3943 /* |
|
3944 * CMT: CUCv2 algorithm. From the |
|
3945 * cumack'd TSNs, for each TSN being |
|
3946 * acked for the first time, set the |
|
3947 * following variables for the |
|
3948 * corresp destination. |
|
3949 * new_pseudo_cumack will trigger a |
|
3950 * cwnd update. |
|
3951 * find_(rtx_)pseudo_cumack will |
|
3952 * trigger search for the next |
|
3953 * expected (rtx-)pseudo-cumack. |
|
3954 */ |
|
3955 tp1->whoTo->new_pseudo_cumack = 1; |
|
3956 tp1->whoTo->find_pseudo_cumack = 1; |
|
3957 tp1->whoTo->find_rtx_pseudo_cumack = 1; |
|
3958 |
|
3959 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { |
|
3960 /* sa_ignore NO_NULL_CHK */ |
|
3961 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); |
|
3962 } |
|
3963 } |
|
3964 if (tp1->sent == SCTP_DATAGRAM_RESEND) { |
|
3965 sctp_ucount_decr(asoc->sent_queue_retran_cnt); |
|
3966 } |
|
3967 if (tp1->rec.data.chunk_was_revoked) { |
|
3968 /* deflate the cwnd */ |
|
3969 tp1->whoTo->cwnd -= tp1->book_size; |
|
3970 tp1->rec.data.chunk_was_revoked = 0; |
|
3971 } |
|
3972 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { |
|
3973 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { |
|
3974 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--; |
|
3975 #ifdef INVARIANTS |
|
3976 } else { |
|
3977 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); |
|
3978 #endif |
|
3979 } |
|
3980 } |
|
3981 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); |
|
3982 if (tp1->data) { |
|
3983 /* sa_ignore NO_NULL_CHK */ |
|
3984 sctp_free_bufspace(stcb, asoc, tp1, 1); |
|
3985 sctp_m_freem(tp1->data); |
|
3986 tp1->data = NULL; |
|
3987 } |
|
3988 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
|
3989 sctp_log_sack(asoc->last_acked_seq, |
|
3990 cumack, |
|
3991 tp1->rec.data.TSN_seq, |
|
3992 0, |
|
3993 0, |
|
3994 SCTP_LOG_FREE_SENT); |
|
3995 } |
|
3996 asoc->sent_queue_cnt--; |
|
3997 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); |
|
3998 } else { |
|
3999 break; |
|
4000 } |
|
4001 } |
|
4002 |
|
4003 } |
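/*
 * __Userspace__ builds: when the application registered callbacks
 * (recv_callback is set), a send-space notification replaces the normal
 * socket wakeup below.  The newly acked data may have freed enough room
 * in the send buffer to cross the configured threshold, so the callback
 * is invoked with the TCB lock temporarily dropped.
 */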
|
4004 #if defined(__Userspace__) |
|
4005 if (stcb->sctp_ep->recv_callback) { |
|
4006 if (stcb->sctp_socket) { |
|
4007 uint32_t inqueue_bytes, sb_free_now; |
|
4008 struct sctp_inpcb *inp; |
|
4009 |
|
4010 inp = stcb->sctp_ep; |
|
4011 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); |
|
4012 sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv); |
|
4013 |
|
4014 /* check if the amount free in the send socket buffer crossed the threshold */ |
|
4015 if (inp->send_callback && |
|
4016 (((inp->send_sb_threshold > 0) && |
|
4017 (sb_free_now >= inp->send_sb_threshold) && |
|
4018 (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) || |
|
4019 (inp->send_sb_threshold == 0))) { |
|
4020 atomic_add_int(&stcb->asoc.refcnt, 1); |
|
4021 SCTP_TCB_UNLOCK(stcb); |
|
4022 inp->send_callback(stcb->sctp_socket, sb_free_now); |
|
4023 SCTP_TCB_LOCK(stcb); |
|
4024 atomic_subtract_int(&stcb->asoc.refcnt, 1); |
|
4025 } |
|
4026 } |
|
4027 } else if (stcb->sctp_socket) { |
|
4028 #else |
|
4029 /* sa_ignore NO_NULL_CHK */ |
|
4030 if (stcb->sctp_socket) { |
|
4031 #endif |
|
4032 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) |
|
4033 struct socket *so; |
|
4034 |
|
4035 #endif |
|
4036 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); |
|
4037 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { |
|
4038 /* sa_ignore NO_NULL_CHK */ |
|
4039 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); |
|
4040 } |
|
4041 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) |
|
4042 so = SCTP_INP_SO(stcb->sctp_ep); |
|
4043 atomic_add_int(&stcb->asoc.refcnt, 1); |
|
4044 SCTP_TCB_UNLOCK(stcb); |
|
4045 SCTP_SOCKET_LOCK(so, 1); |
|
4046 SCTP_TCB_LOCK(stcb); |
|
4047 atomic_subtract_int(&stcb->asoc.refcnt, 1); |
|
4048 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { |
|
4049 /* assoc was freed while we were unlocked */ |
|
4050 SCTP_SOCKET_UNLOCK(so, 1); |
|
4051 return; |
|
4052 } |
|
4053 #endif |
|
4054 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); |
|
4055 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) |
|
4056 SCTP_SOCKET_UNLOCK(so, 1); |
|
4057 #endif |
|
4058 } else { |
|
4059 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { |
|
4060 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); |
|
4061 } |
|
4062 } |
|
4063 |
|
4064 /* JRS - Use the congestion control given in the CC module */ |
|
4065 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { |
|
4066 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
4067 if (net->net_ack2 > 0) { |
|
4068 /* |
|
4069 * Karn's rule applies to clearing error count, this |
|
4070 * is optional. |
|
4071 */ |
|
4072 net->error_count = 0; |
|
4073 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { |
|
4074 /* addr came good */ |
|
4075 net->dest_state |= SCTP_ADDR_REACHABLE; |
|
4076 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, |
|
4077 0, (void *)net, SCTP_SO_NOT_LOCKED); |
|
4078 } |
|
4079 if (net == stcb->asoc.primary_destination) { |
|
4080 if (stcb->asoc.alternate) { |
|
4081 /* release the alternate, primary is good */ |
|
4082 sctp_free_remote_addr(stcb->asoc.alternate); |
|
4083 stcb->asoc.alternate = NULL; |
|
4084 } |
|
4085 } |
|
4086 if (net->dest_state & SCTP_ADDR_PF) { |
|
4087 net->dest_state &= ~SCTP_ADDR_PF; |
|
4088 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3); |
|
4089 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); |
|
4090 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); |
|
4091 /* Done with this net */ |
|
4092 net->net_ack = 0; |
|
4093 } |
|
4094 /* restore any doubled timers */ |
|
4095 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; |
|
4096 if (net->RTO < stcb->asoc.minrto) { |
|
4097 net->RTO = stcb->asoc.minrto; |
|
4098 } |
|
4099 if (net->RTO > stcb->asoc.maxrto) { |
|
4100 net->RTO = stcb->asoc.maxrto; |
|
4101 } |
|
4102 } |
|
4103 } |
|
4104 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); |
|
4105 } |
|
4106 asoc->last_acked_seq = cumack; |
|
4107 |
|
4108 if (TAILQ_EMPTY(&asoc->sent_queue)) { |
|
4109 /* nothing left in-flight */ |
|
4110 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
4111 net->flight_size = 0; |
|
4112 net->partial_bytes_acked = 0; |
|
4113 } |
|
4114 asoc->total_flight = 0; |
|
4115 asoc->total_flight_count = 0; |
|
4116 } |
|
4117 |
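/*
 * Recompute our view of the peer's receive window: the advertised rwnd
 * minus everything still in flight, with sctp_peer_chunk_oh bytes of
 * per-chunk overhead charged for each outstanding chunk.  Anything below
 * the sender-side SWS threshold is treated as a closed window.
 */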
|
4118 /* RWND update */ |
|
4119 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, |
|
4120 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); |
|
4121 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { |
|
4122 /* SWS sender side engages */ |
|
4123 asoc->peers_rwnd = 0; |
|
4124 } |
|
4125 if (asoc->peers_rwnd > old_rwnd) { |
|
4126 win_probe_recovery = 1; |
|
4127 } |
|
4128 /* Now assure a timer where data is queued at */ |
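/*
 * Make sure a retransmission timer is running on every destination that
 * still has data in flight, and stop the timer on idle destinations.  A
 * destination that just sent a window probe keeps a timer running even
 * with nothing in flight so the probe can be retried.
 */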
|
4129 again: |
|
4130 j = 0; |
|
4131 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
4132 int to_ticks; |
|
4133 if (win_probe_recovery && (net->window_probe)) { |
|
4134 win_probe_recovered = 1; |
|
4135 /* |
|
4136 * Find first chunk that was used with window probe |
|
4137 * and clear the sent |
|
4138 */ |
|
4139 /* sa_ignore FREED_MEMORY */ |
|
4140 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
|
4141 if (tp1->window_probe) { |
|
4142 /* move back to data send queue */ |
|
4143 sctp_window_probe_recovery(stcb, asoc, tp1); |
|
4144 break; |
|
4145 } |
|
4146 } |
|
4147 } |
|
4148 if (net->RTO == 0) { |
|
4149 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto); |
|
4150 } else { |
|
4151 to_ticks = MSEC_TO_TICKS(net->RTO); |
|
4152 } |
|
4153 if (net->flight_size) { |
|
4154 j++; |
|
4155 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, |
|
4156 sctp_timeout_handler, &net->rxt_timer); |
|
4157 if (net->window_probe) { |
|
4158 net->window_probe = 0; |
|
4159 } |
|
4160 } else { |
|
4161 if (net->window_probe) { |
|
4162 /* In window probes we must assure a timer is still running there */ |
|
4163 net->window_probe = 0; |
|
4164 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
|
4165 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks, |
|
4166 sctp_timeout_handler, &net->rxt_timer); |
|
4167 } |
|
4168 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
|
4169 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, |
|
4170 stcb, net, |
|
4171 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); |
|
4172 } |
|
4173 } |
|
4174 } |
|
4175 if ((j == 0) && |
|
4176 (!TAILQ_EMPTY(&asoc->sent_queue)) && |
|
4177 (asoc->sent_queue_retran_cnt == 0) && |
|
4178 (win_probe_recovered == 0) && |
|
4179 (done_once == 0)) { |
|
4180 /* huh, this should not happen unless all packets |
|
4181 * are PR-SCTP and marked to skip of course. |
|
4182 */ |
|
4183 if (sctp_fs_audit(asoc)) { |
|
4184 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
4185 net->flight_size = 0; |
|
4186 } |
|
4187 asoc->total_flight = 0; |
|
4188 asoc->total_flight_count = 0; |
|
4189 asoc->sent_queue_retran_cnt = 0; |
|
4190 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
|
4191 if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
|
4192 sctp_flight_size_increase(tp1); |
|
4193 sctp_total_flight_increase(stcb, tp1); |
|
4194 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { |
|
4195 sctp_ucount_incr(asoc->sent_queue_retran_cnt); |
|
4196 } |
|
4197 } |
|
4198 } |
|
4199 done_once = 1; |
|
4200 goto again; |
|
4201 } |
|
4202 /**********************************/ |
|
4203 /* Now what about shutdown issues */ |
|
4204 /**********************************/ |
|
4205 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { |
|
4206 /* nothing left on sendqueue.. consider done */ |
|
4207 /* clean up */ |
|
4208 if ((asoc->stream_queue_cnt == 1) && |
|
4209 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || |
|
4210 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && |
|
4211 (asoc->locked_on_sending) |
|
4212 ) { |
|
4213 struct sctp_stream_queue_pending *sp; |
|
4214 /* I may be in a state where we got |
|
4215 * all across, but cannot write more due |
|
4216 * to a shutdown... we abort since the |
|
4217 * user did not indicate EOR in this case. The |
|
4218 * sp will be cleaned during free of the asoc. |
|
4219 */ |
|
4220 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), |
|
4221 sctp_streamhead); |
|
4222 if ((sp) && (sp->length == 0)) { |
|
4223 /* Let cleanup code purge it */ |
|
4224 if (sp->msg_is_complete) { |
|
4225 asoc->stream_queue_cnt--; |
|
4226 } else { |
|
4227 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; |
|
4228 asoc->locked_on_sending = NULL; |
|
4229 asoc->stream_queue_cnt--; |
|
4230 } |
|
4231 } |
|
4232 } |
|
4233 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && |
|
4234 (asoc->stream_queue_cnt == 0)) { |
|
4235 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { |
|
4236 /* Need to abort here */ |
|
4237 struct mbuf *oper; |
|
4238 |
|
4239 abort_out_now: |
|
4240 *abort_now = 1; |
|
4241 /* XXX */ |
|
4242 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), |
|
4243 0, M_NOWAIT, 1, MT_DATA); |
|
4244 if (oper) { |
|
4245 struct sctp_paramhdr *ph; |
|
4246 |
|
4247 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr); |
|
4248 ph = mtod(oper, struct sctp_paramhdr *); |
|
4249 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); |
|
4250 ph->param_length = htons(SCTP_BUF_LEN(oper)); |
|
4251 } |
|
4252 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; |
|
4253 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
4254 } else { |
|
4255 struct sctp_nets *netp; |
|
4256 |
|
4257 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || |
|
4258 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { |
|
4259 SCTP_STAT_DECR_GAUGE32(sctps_currestab); |
|
4260 } |
|
4261 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); |
|
4262 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); |
|
4263 sctp_stop_timers_for_shutdown(stcb); |
|
4264 if (asoc->alternate) { |
|
4265 netp = asoc->alternate; |
|
4266 } else { |
|
4267 netp = asoc->primary_destination; |
|
4268 } |
|
4269 sctp_send_shutdown(stcb, netp); |
|
4270 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, |
|
4271 stcb->sctp_ep, stcb, netp); |
|
4272 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, |
|
4273 stcb->sctp_ep, stcb, netp); |
|
4274 } |
|
4275 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && |
|
4276 (asoc->stream_queue_cnt == 0)) { |
|
4277 struct sctp_nets *netp; |
|
4278 |
|
4279 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { |
|
4280 goto abort_out_now; |
|
4281 } |
|
4282 SCTP_STAT_DECR_GAUGE32(sctps_currestab); |
|
4283 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); |
|
4284 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); |
|
4285 sctp_stop_timers_for_shutdown(stcb); |
|
4286 if (asoc->alternate) { |
|
4287 netp = asoc->alternate; |
|
4288 } else { |
|
4289 netp = asoc->primary_destination; |
|
4290 } |
|
4291 sctp_send_shutdown_ack(stcb, netp); |
|
4292 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, |
|
4293 stcb->sctp_ep, stcb, netp); |
|
4294 } |
|
4295 } |
|
4296 /*********************************************/ |
|
4297 /* Here we perform PR-SCTP procedures */ |
|
4298 /* (section 4.2) */ |
|
4299 /*********************************************/ |
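/*
 * "C1" and "C3" refer to the sender-side rules of the PR-SCTP
 * specification: C1 pulls the Advanced.Peer.Ack.Point up to the new
 * cumulative ack, and C3 considers sending a FORWARD-TSN when the ack
 * point sits ahead of what the peer has cumulatively acked (and re-sends
 * it if an earlier one appears to have been lost).
 */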
|
4300 /* C1. update advancedPeerAckPoint */ |
|
4301 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) { |
|
4302 asoc->advanced_peer_ack_point = cumack; |
|
4303 } |
|
4304 /* PR-Sctp issues need to be addressed too */ |
|
4305 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) { |
|
4306 struct sctp_tmit_chunk *lchk; |
|
4307 uint32_t old_adv_peer_ack_point; |
|
4308 |
|
4309 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; |
|
4310 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); |
|
4311 /* C3. See if we need to send a Fwd-TSN */ |
|
4312 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) { |
|
4313 /* |
|
4314 * ISSUE with ECN, see FWD-TSN processing. |
|
4315 */ |
|
4316 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { |
|
4317 send_forward_tsn(stcb, asoc); |
|
4318 } else if (lchk) { |
|
4319 /* try to FR fwd-tsn's that get lost too */ |
|
4320 if (lchk->rec.data.fwd_tsn_cnt >= 3) { |
|
4321 send_forward_tsn(stcb, asoc); |
|
4322 } |
|
4323 } |
|
4324 } |
|
4325 if (lchk) { |
|
4326 /* Assure a timer is up */ |
|
4327 sctp_timer_start(SCTP_TIMER_TYPE_SEND, |
|
4328 stcb->sctp_ep, stcb, lchk->whoTo); |
|
4329 } |
|
4330 } |
|
4331 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { |
|
4332 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, |
|
4333 rwnd, |
|
4334 stcb->asoc.peers_rwnd, |
|
4335 stcb->asoc.total_flight, |
|
4336 stcb->asoc.total_output_queue_size); |
|
4337 } |
|
4338 } |
|
4339 |
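/*
 * Slow-path SACK handler: unlike sctp_express_handle_sack() this one also
 * walks the gap-ack and nr-gap-ack blocks, optionally logs duplicate TSN
 * reports, checks for revoked chunks and runs the fast-retransmit strike
 * logic.  The step-by-step outline is in the comment at the top of the
 * function body.
 */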
|
4340 void |
|
4341 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, |
|
4342 struct sctp_tcb *stcb, |
|
4343 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup, |
|
4344 int *abort_now, uint8_t flags, |
|
4345 uint32_t cum_ack, uint32_t rwnd, int ecne_seen) |
|
4346 { |
|
4347 struct sctp_association *asoc; |
|
4348 struct sctp_tmit_chunk *tp1, *tp2; |
|
4349 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack; |
|
4350 uint16_t wake_him = 0; |
|
4351 uint32_t send_s = 0; |
|
4352 long j; |
|
4353 int accum_moved = 0; |
|
4354 int will_exit_fast_recovery = 0; |
|
4355 uint32_t a_rwnd, old_rwnd; |
|
4356 int win_probe_recovery = 0; |
|
4357 int win_probe_recovered = 0; |
|
4358 struct sctp_nets *net = NULL; |
|
4359 int done_once; |
|
4360 int rto_ok = 1; |
|
4361 uint8_t reneged_all = 0; |
|
4362 uint8_t cmt_dac_flag; |
|
4363 /* |
|
4364 * we take any chance we can to service our queues since we cannot |
|
4365 * get awoken when the socket is read from :< |
|
4366 */ |
|
4367 /* |
|
4368 * Now perform the actual SACK handling: 1) Verify that it is not an |
|
4369 * old sack, if so discard. 2) If there is nothing left in the send |
|
4370 * queue (cum-ack is equal to last acked) then you have a duplicate |
|
4371 * too, update any rwnd change and verify no timers are running. |
|
4372 * then return. 3) Process any new consecutive data i.e. cum-ack |
|
4373 * moved process these first and note that it moved. 4) Process any |
|
4374 * sack blocks. 5) Drop any acked from the queue. 6) Check for any |
|
4375 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, |
|
4376 * sync up flightsizes and things, stop all timers and also check |
|
4377 * for shutdown_pending state. If so then go ahead and send off the |
|
4378 * shutdown. If in shutdown recv, send off the shutdown-ack and |
|
4379 * start that timer, Ret. 9) Strike any non-acked things and do FR |
|
4380 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp |
|
4381 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK |
|
4382 * if in shutdown_recv state. |
|
4383 */ |
|
4384 SCTP_TCB_LOCK_ASSERT(stcb); |
|
4385 /* CMT DAC algo */ |
|
4386 this_sack_lowest_newack = 0; |
|
4387 SCTP_STAT_INCR(sctps_slowpath_sack); |
|
4388 last_tsn = cum_ack; |
|
4389 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; |
|
4390 #ifdef SCTP_ASOCLOG_OF_TSNS |
|
4391 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; |
|
4392 stcb->asoc.cumack_log_at++; |
|
4393 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { |
|
4394 stcb->asoc.cumack_log_at = 0; |
|
4395 } |
|
4396 #endif |
|
4397 a_rwnd = rwnd; |
|
4398 |
|
4399 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { |
|
4400 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, |
|
4401 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); |
|
4402 } |
|
4403 |
|
4404 old_rwnd = stcb->asoc.peers_rwnd; |
|
4405 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { |
|
4406 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, |
|
4407 stcb->asoc.overall_error_count, |
|
4408 0, |
|
4409 SCTP_FROM_SCTP_INDATA, |
|
4410 __LINE__); |
|
4411 } |
|
4412 stcb->asoc.overall_error_count = 0; |
|
4413 asoc = &stcb->asoc; |
|
4414 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
|
4415 sctp_log_sack(asoc->last_acked_seq, |
|
4416 cum_ack, |
|
4417 0, |
|
4418 num_seg, |
|
4419 num_dup, |
|
4420 SCTP_LOG_NEW_SACK); |
|
4421 } |
|
4422 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { |
|
4423 uint16_t i; |
|
4424 uint32_t *dupdata, dblock; |
|
4425 |
|
4426 for (i = 0; i < num_dup; i++) { |
|
4427 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), |
|
4428 sizeof(uint32_t), (uint8_t *)&dblock); |
|
4429 if (dupdata == NULL) { |
|
4430 break; |
|
4431 } |
|
4432 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); |
|
4433 } |
|
4434 } |
|
4435 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { |
|
4436 /* reality check */ |
|
4437 if (!TAILQ_EMPTY(&asoc->sent_queue)) { |
|
4438 tp1 = TAILQ_LAST(&asoc->sent_queue, |
|
4439 sctpchunk_listhead); |
|
4440 send_s = tp1->rec.data.TSN_seq + 1; |
|
4441 } else { |
|
4442 tp1 = NULL; |
|
4443 send_s = asoc->sending_seq; |
|
4444 } |
|
4445 if (SCTP_TSN_GE(cum_ack, send_s)) { |
|
4446 struct mbuf *oper; |
|
4447 /* |
|
4448 * no way, we have not even sent this TSN out yet. |
|
4449 * Peer is hopelessly messed up with us. |
|
4450 */ |
|
4451 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n", |
|
4452 cum_ack, send_s); |
|
4453 if (tp1) { |
|
4454 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n", |
|
4455 tp1->rec.data.TSN_seq, (void *)tp1); |
|
4456 } |
|
4457 hopeless_peer: |
|
4458 *abort_now = 1; |
|
4459 /* XXX */ |
|
4460 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), |
|
4461 0, M_NOWAIT, 1, MT_DATA); |
|
4462 if (oper) { |
|
4463 struct sctp_paramhdr *ph; |
|
4464 uint32_t *ippp; |
|
4465 |
|
4466 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + |
|
4467 sizeof(uint32_t); |
|
4468 ph = mtod(oper, struct sctp_paramhdr *); |
|
4469 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
4470 ph->param_length = htons(SCTP_BUF_LEN(oper)); |
|
4471 ippp = (uint32_t *) (ph + 1); |
|
4472 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25); |
|
4473 } |
|
4474 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; |
|
4475 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
4476 return; |
|
4477 } |
|
4478 } |
|
4479 /**********************/ |
|
4480 /* 1) check the range */ |
|
4481 /**********************/ |
|
4482 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) { |
|
4483 /* acking something behind */ |
|
4484 return; |
|
4485 } |
|
4486 |
|
4487 /* update the Rwnd of the peer */ |
|
4488 if (TAILQ_EMPTY(&asoc->sent_queue) && |
|
4489 TAILQ_EMPTY(&asoc->send_queue) && |
|
4490 (asoc->stream_queue_cnt == 0)) { |
|
4491 /* nothing left on send/sent and strmq */ |
|
4492 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { |
|
4493 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, |
|
4494 asoc->peers_rwnd, 0, 0, a_rwnd); |
|
4495 } |
|
4496 asoc->peers_rwnd = a_rwnd; |
|
4497 if (asoc->sent_queue_retran_cnt) { |
|
4498 asoc->sent_queue_retran_cnt = 0; |
|
4499 } |
|
4500 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { |
|
4501 /* SWS sender side engages */ |
|
4502 asoc->peers_rwnd = 0; |
|
4503 } |
|
4504 /* stop any timers */ |
|
4505 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
4506 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, |
|
4507 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); |
|
4508 net->partial_bytes_acked = 0; |
|
4509 net->flight_size = 0; |
|
4510 } |
|
4511 asoc->total_flight = 0; |
|
4512 asoc->total_flight_count = 0; |
|
4513 return; |
|
4514 } |
|
4515 /* |
|
4516 * We init netAckSz and netAckSz2 to 0. These are used to track 2 |
|
4517 * things. The total byte count acked is tracked in netAckSz AND |
|
4518 * netAck2 is used to track the total bytes acked that are |
|
4519 * unambiguous and were never retransmitted. We track these on a per |
|
4520 * destination address basis. |
|
4521 */ |
|
4522 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
4523 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) { |
|
4524 /* Drag along the window_tsn for cwr's */ |
|
4525 net->cwr_window_tsn = cum_ack; |
|
4526 } |
|
4527 net->prev_cwnd = net->cwnd; |
|
4528 net->net_ack = 0; |
|
4529 net->net_ack2 = 0; |
|
4530 |
|
4531 /* |
|
4532 * CMT: Reset CUC and Fast recovery algo variables before |
|
4533 * SACK processing |
|
4534 */ |
|
4535 net->new_pseudo_cumack = 0; |
|
4536 net->will_exit_fast_recovery = 0; |
|
4537 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { |
|
4538 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net); |
|
4539 } |
|
4540 } |
|
4541 /* process the new consecutive TSN first */ |
|
4542 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
|
4543 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) { |
|
4544 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { |
|
4545 accum_moved = 1; |
|
4546 if (tp1->sent < SCTP_DATAGRAM_ACKED) { |
|
4547 /* |
|
4548 * If it is less than ACKED, it is |
|
4549 * now no-longer in flight. Higher |
|
4550 * values may occur during marking |
|
4551 */ |
|
4552 if ((tp1->whoTo->dest_state & |
|
4553 SCTP_ADDR_UNCONFIRMED) && |
|
4554 (tp1->snd_count < 2)) { |
|
4555 /* |
|
4556 * If there was no retran |
|
4557 * and the address is |
|
4558 * un-confirmed and we sent |
|
4559 * there and are now |
|
4560 * sacked, it's confirmed, |
|
4561 * mark it so. |
|
4562 */ |
|
4563 tp1->whoTo->dest_state &= |
|
4564 ~SCTP_ADDR_UNCONFIRMED; |
|
4565 } |
|
4566 if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
|
4567 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
|
4568 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, |
|
4569 tp1->whoTo->flight_size, |
|
4570 tp1->book_size, |
|
4571 (uintptr_t)tp1->whoTo, |
|
4572 tp1->rec.data.TSN_seq); |
|
4573 } |
|
4574 sctp_flight_size_decrease(tp1); |
|
4575 sctp_total_flight_decrease(stcb, tp1); |
|
4576 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { |
|
4577 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo, |
|
4578 tp1); |
|
4579 } |
|
4580 } |
|
4581 tp1->whoTo->net_ack += tp1->send_size; |
|
4582 |
|
4583 /* CMT SFR and DAC algos */ |
|
4584 this_sack_lowest_newack = tp1->rec.data.TSN_seq; |
|
4585 tp1->whoTo->saw_newack = 1; |
|
4586 |
|
4587 if (tp1->snd_count < 2) { |
|
4588 /* |
|
4589 * True non-retransmitted |
|
4590 * chunk |
|
4591 */ |
|
4592 tp1->whoTo->net_ack2 += |
|
4593 tp1->send_size; |
|
4594 |
|
4595 /* update RTO too? */ |
|
4596 if (tp1->do_rtt) { |
|
4597 if (rto_ok) { |
|
4598 tp1->whoTo->RTO = |
|
4599 sctp_calculate_rto(stcb, |
|
4600 asoc, tp1->whoTo, |
|
4601 &tp1->sent_rcv_time, |
|
4602 sctp_align_safe_nocopy, |
|
4603 SCTP_RTT_FROM_DATA); |
|
4604 rto_ok = 0; |
|
4605 } |
|
4606 if (tp1->whoTo->rto_needed == 0) { |
|
4607 tp1->whoTo->rto_needed = 1; |
|
4608 } |
|
4609 tp1->do_rtt = 0; |
|
4610 } |
|
4611 } |
|
4612 /* |
|
4613 * CMT: CUCv2 algorithm. From the |
|
4614 * cumack'd TSNs, for each TSN being |
|
4615 * acked for the first time, set the |
|
4616 * following variables for the |
|
4617 * corresp destination. |
|
4618 * new_pseudo_cumack will trigger a |
|
4619 * cwnd update. |
|
4620 * find_(rtx_)pseudo_cumack will |
|
4621 * trigger search for the next |
|
4622 * expected (rtx-)pseudo-cumack. |
|
4623 */ |
|
4624 tp1->whoTo->new_pseudo_cumack = 1; |
|
4625 tp1->whoTo->find_pseudo_cumack = 1; |
|
4626 tp1->whoTo->find_rtx_pseudo_cumack = 1; |
|
4627 |
|
4628 |
|
4629 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
|
4630 sctp_log_sack(asoc->last_acked_seq, |
|
4631 cum_ack, |
|
4632 tp1->rec.data.TSN_seq, |
|
4633 0, |
|
4634 0, |
|
4635 SCTP_LOG_TSN_ACKED); |
|
4636 } |
|
4637 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { |
|
4638 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK); |
|
4639 } |
|
4640 } |
|
4641 if (tp1->sent == SCTP_DATAGRAM_RESEND) { |
|
4642 sctp_ucount_decr(asoc->sent_queue_retran_cnt); |
|
4643 #ifdef SCTP_AUDITING_ENABLED |
|
4644 sctp_audit_log(0xB3, |
|
4645 (asoc->sent_queue_retran_cnt & 0x000000ff)); |
|
4646 #endif |
|
4647 } |
|
4648 if (tp1->rec.data.chunk_was_revoked) { |
|
4649 /* deflate the cwnd */ |
|
4650 tp1->whoTo->cwnd -= tp1->book_size; |
|
4651 tp1->rec.data.chunk_was_revoked = 0; |
|
4652 } |
|
4653 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { |
|
4654 tp1->sent = SCTP_DATAGRAM_ACKED; |
|
4655 } |
|
4656 } |
|
4657 } else { |
|
4658 break; |
|
4659 } |
|
4660 } |
|
4661 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; |
|
4662 /* always set this up to cum-ack */ |
|
4663 asoc->this_sack_highest_gap = last_tsn; |
|
4664 |
|
4665 if ((num_seg > 0) || (num_nr_seg > 0)) { |
|
4666 |
|
4667 /* |
|
4668 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has |
|
4669 * to be greater than the cumack. Also reset saw_newack to 0 |
|
4670 * for all dests. |
|
4671 */ |
|
4672 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
4673 net->saw_newack = 0; |
|
4674 net->this_sack_highest_newack = last_tsn; |
|
4675 } |
|
4676 |
|
4677 /* |
|
4678 * this_sack_highest_gap will increase while handling NEW |
|
4679 * segments; this_sack_highest_newack will increase while |
|
4680 * handling NEWLY ACKED chunks. this_sack_lowest_newack is |
|
4681 * used for the CMT DAC algorithm. saw_newack will also change. |
|
4682 */ |
|
4683 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, |
|
4684 &biggest_tsn_newly_acked, &this_sack_lowest_newack, |
|
4685 num_seg, num_nr_seg, &rto_ok)) { |
|
4686 wake_him++; |
|
4687 } |
|
4688 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) { |
|
4689 /* |
|
4690 * validate the biggest_tsn_acked in the gap acks if |
|
4691 * strict adherence is wanted. |
|
4692 */ |
|
4693 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) { |
|
4694 /* |
|
4695 * peer is either confused or we are under |
|
4696 * attack. We must abort. |
|
4697 */ |
|
4698 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n", |
|
4699 biggest_tsn_acked, send_s); |
|
4700 goto hopeless_peer; |
|
4701 } |
|
4702 } |
|
4703 } |
|
4704 /*******************************************/ |
|
4705 /* cancel ALL T3-send timer if accum moved */ |
|
4706 /*******************************************/ |
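/* |
 * With CMT, a destination's T3-rtx timer is stopped only when that |
 * destination saw a new pseudo-cumack; without CMT, all T3 timers are |
 * stopped once the cumulative ack has moved forward. |
 */ |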
|
4707 if (asoc->sctp_cmt_on_off > 0) { |
|
4708 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
4709 if (net->new_pseudo_cumack) |
|
4710 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, |
|
4711 stcb, net, |
|
4712 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); |
|
4713 |
|
4714 } |
|
4715 } else { |
|
4716 if (accum_moved) { |
|
4717 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
4718 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, |
|
4719 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); |
|
4720 } |
|
4721 } |
|
4722 } |
|
4723 /********************************************/ |
|
4724 /* drop the acked chunks from the sentqueue */ |
|
4725 /********************************************/ |
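/* |
 * Everything at or below the cumulative ack is removed from the sent |
 * queue; its mbufs are freed and the send socket-buffer space is |
 * returned so the sending application can be woken below. |
 */ |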
|
4726 asoc->last_acked_seq = cum_ack; |
|
4727 |
|
4728 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { |
|
4729 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) { |
|
4730 break; |
|
4731 } |
|
4732 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { |
|
4733 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) { |
|
4734 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--; |
|
4735 #ifdef INVARIANTS |
|
4736 } else { |
|
4737 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number); |
|
4738 #endif |
|
4739 } |
|
4740 } |
|
4741 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); |
|
4742 if (PR_SCTP_ENABLED(tp1->flags)) { |
|
4743 if (asoc->pr_sctp_cnt != 0) |
|
4744 asoc->pr_sctp_cnt--; |
|
4745 } |
|
4746 asoc->sent_queue_cnt--; |
|
4747 if (tp1->data) { |
|
4748 /* sa_ignore NO_NULL_CHK */ |
|
4749 sctp_free_bufspace(stcb, asoc, tp1, 1); |
|
4750 sctp_m_freem(tp1->data); |
|
4751 tp1->data = NULL; |
|
4752 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) { |
|
4753 asoc->sent_queue_cnt_removeable--; |
|
4754 } |
|
4755 } |
|
4756 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { |
|
4757 sctp_log_sack(asoc->last_acked_seq, |
|
4758 cum_ack, |
|
4759 tp1->rec.data.TSN_seq, |
|
4760 0, |
|
4761 0, |
|
4762 SCTP_LOG_FREE_SENT); |
|
4763 } |
|
4764 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); |
|
4765 wake_him++; |
|
4766 } |
|
4767 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) { |
|
4768 #ifdef INVARIANTS |
|
4769 panic("Warning flight size is positive and should be 0"); |
|
4770 #else |
|
4771 SCTP_PRINTF("Warning: flight size should be 0 but is %d\n", |
|
4772 asoc->total_flight); |
|
4773 #endif |
|
4774 asoc->total_flight = 0; |
|
4775 } |
|
4776 |
|
4777 #if defined(__Userspace__) |
|
4778 if (stcb->sctp_ep->recv_callback) { |
|
4779 if (stcb->sctp_socket) { |
|
4780 uint32_t inqueue_bytes, sb_free_now; |
|
4781 struct sctp_inpcb *inp; |
|
4782 |
|
4783 inp = stcb->sctp_ep; |
|
4784 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); |
|
4785 sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv); |
|
4786 |
|
4787 /* check if the amount free in the send socket buffer crossed the threshold */ |
|
4788 if (inp->send_callback && |
|
4789 (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) || |
|
4790 (inp->send_sb_threshold == 0))) { |
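/* |
 * Invoke the user's send callback without holding the TCB lock; the |
 * reference count keeps the association alive while unlocked. |
 */ |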
|
4791 atomic_add_int(&stcb->asoc.refcnt, 1); |
|
4792 SCTP_TCB_UNLOCK(stcb); |
|
4793 inp->send_callback(stcb->sctp_socket, sb_free_now); |
|
4794 SCTP_TCB_LOCK(stcb); |
|
4795 atomic_subtract_int(&stcb->asoc.refcnt, 1); |
|
4796 } |
|
4797 } |
|
4798 } else if ((wake_him) && (stcb->sctp_socket)) { |
|
4799 #else |
|
4800 /* sa_ignore NO_NULL_CHK */ |
|
4801 if ((wake_him) && (stcb->sctp_socket)) { |
|
4802 #endif |
|
4803 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) |
|
4804 struct socket *so; |
|
4805 |
|
4806 #endif |
|
4807 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); |
|
4808 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { |
|
4809 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK); |
|
4810 } |
|
4811 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) |
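/* |
 * Drop the TCB lock before taking the socket lock (lock ordering) and |
 * re-take it afterwards; the refcount keeps the TCB from being freed |
 * while unlocked. |
 */ |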
|
4812 so = SCTP_INP_SO(stcb->sctp_ep); |
|
4813 atomic_add_int(&stcb->asoc.refcnt, 1); |
|
4814 SCTP_TCB_UNLOCK(stcb); |
|
4815 SCTP_SOCKET_LOCK(so, 1); |
|
4816 SCTP_TCB_LOCK(stcb); |
|
4817 atomic_subtract_int(&stcb->asoc.refcnt, 1); |
|
4818 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { |
|
4819 /* assoc was freed while we were unlocked */ |
|
4820 SCTP_SOCKET_UNLOCK(so, 1); |
|
4821 return; |
|
4822 } |
|
4823 #endif |
|
4824 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); |
|
4825 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) |
|
4826 SCTP_SOCKET_UNLOCK(so, 1); |
|
4827 #endif |
|
4828 } else { |
|
4829 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { |
|
4830 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK); |
|
4831 } |
|
4832 } |
|
4833 |
|
4834 if (asoc->fast_retran_loss_recovery && accum_moved) { |
|
4835 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) { |
|
4836 /* Setup so we will exit RFC2582 fast recovery */ |
|
4837 will_exit_fast_recovery = 1; |
|
4838 } |
|
4839 } |
|
4840 /* |
|
4841 * Check for revoked fragments: |
|
4842 * |
|
4843 * If the previous SACK had no gap reports, nothing can have been revoked. |
|
4844 * If it had gap reports, then: if this SACK also has gap reports (num_seg > 0), |
|
4845 * call sctp_check_for_revoked() to tell whether the peer revoked some of them; |
|
4846 * otherwise the peer revoked all ACKED fragments, since we had some |
|
4847 * before and now have none. |
|
4848 */ |
|
4849 |
|
4850 if (num_seg) { |
|
4851 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); |
|
4852 asoc->saw_sack_with_frags = 1; |
|
4853 } else if (asoc->saw_sack_with_frags) { |
|
4854 int cnt_revoked = 0; |
|
4855 |
|
4856 /* Peer revoked all datagrams that were marked or acked */ |
|
4857 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
|
4858 if (tp1->sent == SCTP_DATAGRAM_ACKED) { |
|
4859 tp1->sent = SCTP_DATAGRAM_SENT; |
|
4860 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
|
4861 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, |
|
4862 tp1->whoTo->flight_size, |
|
4863 tp1->book_size, |
|
4864 (uintptr_t)tp1->whoTo, |
|
4865 tp1->rec.data.TSN_seq); |
|
4866 } |
|
4867 sctp_flight_size_increase(tp1); |
|
4868 sctp_total_flight_increase(stcb, tp1); |
|
4869 tp1->rec.data.chunk_was_revoked = 1; |
|
4870 /* |
|
4871 * To ensure that this increase in |
|
4872 * flightsize, which is artificial, |
|
4873 * does not throttle the sender, we |
|
4874 * also increase the cwnd |
|
4875 * artificially. |
|
4876 */ |
|
4877 tp1->whoTo->cwnd += tp1->book_size; |
|
4878 cnt_revoked++; |
|
4879 } |
|
4880 } |
|
4881 if (cnt_revoked) { |
|
4882 reneged_all = 1; |
|
4883 } |
|
4884 asoc->saw_sack_with_frags = 0; |
|
4885 } |
|
4886 if (num_nr_seg > 0) |
|
4887 asoc->saw_sack_with_nr_frags = 1; |
|
4888 else |
|
4889 asoc->saw_sack_with_nr_frags = 0; |
|
4890 |
|
4891 /* JRS - Use the congestion control given in the CC module */ |
|
4892 if (ecne_seen == 0) { |
|
4893 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
4894 if (net->net_ack2 > 0) { |
|
4895 /* |
|
4896 * Karn's rule applies to clearing the error count; this |
|
4897 * is optional. |
|
4898 */ |
|
4899 net->error_count = 0; |
|
4900 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { |
|
4901 /* addr came good */ |
|
4902 net->dest_state |= SCTP_ADDR_REACHABLE; |
|
4903 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, |
|
4904 0, (void *)net, SCTP_SO_NOT_LOCKED); |
|
4905 } |
|
4906 |
|
4907 if (net == stcb->asoc.primary_destination) { |
|
4908 if (stcb->asoc.alternate) { |
|
4909 /* release the alternate, primary is good */ |
|
4910 sctp_free_remote_addr(stcb->asoc.alternate); |
|
4911 stcb->asoc.alternate = NULL; |
|
4912 } |
|
4913 } |
|
4914 |
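/* |
 * If this destination was potentially failed (PF), data being acked on |
 * it brings it back: restart its heartbeat timer and let the CC module |
 * adjust the cwnd on PF exit. |
 */ |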
|
4915 if (net->dest_state & SCTP_ADDR_PF) { |
|
4916 net->dest_state &= ~SCTP_ADDR_PF; |
|
4917 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3); |
|
4918 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); |
|
4919 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); |
|
4920 /* Done with this net */ |
|
4921 net->net_ack = 0; |
|
4922 } |
|
4923 /* restore any doubled timers: recompute RTO from srtt/rttvar and clamp to [minrto, maxrto] */ |
|
4924 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; |
|
4925 if (net->RTO < stcb->asoc.minrto) { |
|
4926 net->RTO = stcb->asoc.minrto; |
|
4927 } |
|
4928 if (net->RTO > stcb->asoc.maxrto) { |
|
4929 net->RTO = stcb->asoc.maxrto; |
|
4930 } |
|
4931 } |
|
4932 } |
|
4933 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); |
|
4934 } |
|
4935 |
|
4936 if (TAILQ_EMPTY(&asoc->sent_queue)) { |
|
4937 /* nothing left in-flight */ |
|
4938 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
4939 /* stop all timers */ |
|
4940 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, |
|
4941 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); |
|
4942 net->flight_size = 0; |
|
4943 net->partial_bytes_acked = 0; |
|
4944 } |
|
4945 asoc->total_flight = 0; |
|
4946 asoc->total_flight_count = 0; |
|
4947 } |
|
4948 |
|
4949 /**********************************/ |
|
4950 /* Now what about shutdown issues */ |
|
4951 /**********************************/ |
|
4952 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { |
|
4953 /* nothing left on the send queue, consider done */ |
|
4954 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { |
|
4955 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, |
|
4956 asoc->peers_rwnd, 0, 0, a_rwnd); |
|
4957 } |
|
4958 asoc->peers_rwnd = a_rwnd; |
|
4959 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { |
|
4960 /* SWS sender side engages: treat a tiny peer window as zero */ |
|
4961 asoc->peers_rwnd = 0; |
|
4962 } |
|
4963 /* clean up */ |
|
4964 if ((asoc->stream_queue_cnt == 1) && |
|
4965 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || |
|
4966 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && |
|
4967 (asoc->locked_on_sending) |
|
4968 ) { |
|
4969 struct sctp_stream_queue_pending *sp; |
|
4970 /* We may be in a state where all data got |
|
4971 * across but no more can be written due |
|
4972 * to a shutdown; we abort since the |
|
4973 * user did not indicate EOR in this case. |
|
4974 */ |
|
4975 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue), |
|
4976 sctp_streamhead); |
|
4977 if ((sp) && (sp->length == 0)) { |
|
4978 asoc->locked_on_sending = NULL; |
|
4979 if (sp->msg_is_complete) { |
|
4980 asoc->stream_queue_cnt--; |
|
4981 } else { |
|
4982 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; |
|
4983 asoc->stream_queue_cnt--; |
|
4984 } |
|
4985 } |
|
4986 } |
|
4987 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && |
|
4988 (asoc->stream_queue_cnt == 0)) { |
|
4989 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { |
|
4990 /* Need to abort here */ |
|
4991 struct mbuf *oper; |
|
4992 abort_out_now: |
|
4993 *abort_now = 1; |
|
4994 /* XXX */ |
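/* Build an operational error cause (user-initiated abort) to carry in the ABORT chunk. */ |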
|
4995 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), |
|
4996 0, M_NOWAIT, 1, MT_DATA); |
|
4997 if (oper) { |
|
4998 struct sctp_paramhdr *ph; |
|
4999 |
|
5000 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr); |
|
5001 ph = mtod(oper, struct sctp_paramhdr *); |
|
5002 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); |
|
5003 ph->param_length = htons(SCTP_BUF_LEN(oper)); |
|
5004 } |
|
5005 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31; |
|
5006 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
5007 return; |
|
5008 } else { |
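/* |
 * Everything is acked and no stream has queued data: move to |
 * SHUTDOWN-SENT, send a SHUTDOWN to the alternate (or primary) |
 * destination, and start the shutdown and shutdown-guard timers. |
 */ |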
|
5009 struct sctp_nets *netp; |
|
5010 |
|
5011 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || |
|
5012 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { |
|
5013 SCTP_STAT_DECR_GAUGE32(sctps_currestab); |
|
5014 } |
|
5015 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); |
|
5016 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); |
|
5017 sctp_stop_timers_for_shutdown(stcb); |
|
5018 if (asoc->alternate) { |
|
5019 netp = asoc->alternate; |
|
5020 } else { |
|
5021 netp = asoc->primary_destination; |
|
5022 } |
|
5023 sctp_send_shutdown(stcb, netp); |
|
5024 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, |
|
5025 stcb->sctp_ep, stcb, netp); |
|
5026 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, |
|
5027 stcb->sctp_ep, stcb, netp); |
|
5028 } |
|
5029 return; |
|
5030 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && |
|
5031 (asoc->stream_queue_cnt == 0)) { |
|
5032 struct sctp_nets *netp; |
|
5033 |
|
5034 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) { |
|
5035 goto abort_out_now; |
|
5036 } |
|
5037 SCTP_STAT_DECR_GAUGE32(sctps_currestab); |
|
5038 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); |
|
5039 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); |
|
5040 sctp_stop_timers_for_shutdown(stcb); |
|
5041 if (asoc->alternate) { |
|
5042 netp = asoc->alternate; |
|
5043 } else { |
|
5044 netp = asoc->primary_destination; |
|
5045 } |
|
5046 sctp_send_shutdown_ack(stcb, netp); |
|
5047 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, |
|
5048 stcb->sctp_ep, stcb, netp); |
|
5049 return; |
|
5050 } |
|
5051 } |
|
5052 /* |
|
5053 * Now here we are going to recycle net_ack for a different use... |
|
5054 * HEADS UP. |
|
5055 */ |
|
5056 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
5057 net->net_ack = 0; |
|
5058 } |
|
5059 |
|
5060 /* |
|
5061 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking |
|
5062 * to be done. Setting this_sack_lowest_newack to the cum_ack will |
|
5063 * automatically ensure that. |
|
5064 */ |
|
5065 if ((asoc->sctp_cmt_on_off > 0) && |
|
5066 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && |
|
5067 (cmt_dac_flag == 0)) { |
|
5068 this_sack_lowest_newack = cum_ack; |
|
5069 } |
|
5070 if ((num_seg > 0) || (num_nr_seg > 0)) { |
|
5071 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, |
|
5072 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); |
|
5073 } |
|
5074 /* JRS - Use the congestion control given in the CC module */ |
|
5075 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); |
|
5076 |
|
5077 /* Now are we exiting loss recovery ? */ |
|
5078 if (will_exit_fast_recovery) { |
|
5079 /* Ok, we must exit fast recovery */ |
|
5080 asoc->fast_retran_loss_recovery = 0; |
|
5081 } |
|
5082 if ((asoc->sat_t3_loss_recovery) && |
|
5083 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) { |
|
5084 /* end satellite t3 loss recovery */ |
|
5085 asoc->sat_t3_loss_recovery = 0; |
|
5086 } |
|
5087 /* |
|
5088 * CMT Fast recovery |
|
5089 */ |
|
5090 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
5091 if (net->will_exit_fast_recovery) { |
|
5092 /* Ok, we must exit fast recovery */ |
|
5093 net->fast_retran_loss_recovery = 0; |
|
5094 } |
|
5095 } |
|
5096 |
|
5097 /* Adjust and set the new rwnd value */ |
|
5098 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { |
|
5099 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, |
|
5100 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd); |
|
5101 } |
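/* |
 * The peer's usable window is its advertised rwnd minus the data still |
 * in flight and a per-chunk overhead allowance (sctp_peer_chunk_oh). |
 */ |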
|
5102 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, |
|
5103 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); |
|
5104 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { |
|
5105 /* SWS sender side engages: treat a tiny peer window as zero */ |
|
5106 asoc->peers_rwnd = 0; |
|
5107 } |
|
5108 if (asoc->peers_rwnd > old_rwnd) { |
|
5109 win_probe_recovery = 1; |
|
5110 } |
|
5111 |
|
5112 /* |
|
5113 * Now we must setup so we have a timer up for anyone with |
|
5114 * outstanding data. |
|
5115 */ |
|
5116 done_once = 0; |
|
5117 again: |
|
5118 j = 0; |
|
5119 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
5120 if (win_probe_recovery && (net->window_probe)) { |
|
5121 win_probe_recovered = 1; |
|
5122 /*- |
|
5123 * Find first chunk that was used with |
|
5124 * window probe and clear the event. Put |
|
5125 * it back into the send queue as if it had |
|
5126 * not been sent. |
|
5127 */ |
|
5128 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
|
5129 if (tp1->window_probe) { |
|
5130 sctp_window_probe_recovery(stcb, asoc, tp1); |
|
5131 break; |
|
5132 } |
|
5133 } |
|
5134 } |
|
5135 if (net->flight_size) { |
|
5136 j++; |
|
5137 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
|
5138 sctp_timer_start(SCTP_TIMER_TYPE_SEND, |
|
5139 stcb->sctp_ep, stcb, net); |
|
5140 } |
|
5141 if (net->window_probe) { |
|
5142 net->window_probe = 0; |
|
5143 } |
|
5144 } else { |
|
5145 if (net->window_probe) { |
|
5146 /* For window probes we must ensure that a timer is still running there */ |
|
5147 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
|
5148 sctp_timer_start(SCTP_TIMER_TYPE_SEND, |
|
5149 stcb->sctp_ep, stcb, net); |
|
5150 |
|
5151 } |
|
5152 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
|
5153 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, |
|
5154 stcb, net, |
|
5155 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); |
|
5156 } |
|
5157 } |
|
5158 } |
|
5159 if ((j == 0) && |
|
5160 (!TAILQ_EMPTY(&asoc->sent_queue)) && |
|
5161 (asoc->sent_queue_retran_cnt == 0) && |
|
5162 (win_probe_recovered == 0) && |
|
5163 (done_once == 0)) { |
|
5164 /* This should not happen unless all packets |
|
5165 * are PR-SCTP and marked to be skipped. |
|
5166 */ |
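/* |
 * If the flight accounting audit reports a problem, zero the counters |
 * and rebuild flight size and the retransmit count from the sent queue. |
 */ |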
|
5167 if (sctp_fs_audit(asoc)) { |
|
5168 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
5169 net->flight_size = 0; |
|
5170 } |
|
5171 asoc->total_flight = 0; |
|
5172 asoc->total_flight_count = 0; |
|
5173 asoc->sent_queue_retran_cnt = 0; |
|
5174 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { |
|
5175 if (tp1->sent < SCTP_DATAGRAM_RESEND) { |
|
5176 sctp_flight_size_increase(tp1); |
|
5177 sctp_total_flight_increase(stcb, tp1); |
|
5178 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { |
|
5179 sctp_ucount_incr(asoc->sent_queue_retran_cnt); |
|
5180 } |
|
5181 } |
|
5182 } |
|
5183 done_once = 1; |
|
5184 goto again; |
|
5185 } |
|
5186 /*********************************************/ |
|
5187 /* Here we perform PR-SCTP procedures */ |
|
5188 /* (section 4.2) */ |
|
5189 /*********************************************/ |
|
5190 /* C1. update advancedPeerAckPoint */ |
|
5191 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) { |
|
5192 asoc->advanced_peer_ack_point = cum_ack; |
|
5193 } |
|
5194 /* C2. try to further move advancedPeerAckPoint ahead */ |
|
5195 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) { |
|
5196 struct sctp_tmit_chunk *lchk; |
|
5197 uint32_t old_adv_peer_ack_point; |
|
5198 |
|
5199 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; |
|
5200 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); |
|
5201 /* C3. See if we need to send a Fwd-TSN */ |
|
5202 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) { |
|
5203 /* |
|
5204 * ISSUE with ECN, see FWD-TSN processing. |
|
5205 */ |
|
5206 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { |
|
5207 sctp_misc_ints(SCTP_FWD_TSN_CHECK, |
|
5208 0xee, cum_ack, asoc->advanced_peer_ack_point, |
|
5209 old_adv_peer_ack_point); |
|
5210 } |
|
5211 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { |
|
5212 send_forward_tsn(stcb, asoc); |
|
5213 } else if (lchk) { |
|
5214 /* try to FR fwd-tsn's that get lost too */ |
|
5215 if (lchk->rec.data.fwd_tsn_cnt >= 3) { |
|
5216 send_forward_tsn(stcb, asoc); |
|
5217 } |
|
5218 } |
|
5219 } |
|
5220 if (lchk) { |
|
5221 /* Assure a timer is up */ |
|
5222 sctp_timer_start(SCTP_TIMER_TYPE_SEND, |
|
5223 stcb->sctp_ep, stcb, lchk->whoTo); |
|
5224 } |
|
5225 } |
|
5226 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { |
|
5227 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, |
|
5228 a_rwnd, |
|
5229 stcb->asoc.peers_rwnd, |
|
5230 stcb->asoc.total_flight, |
|
5231 stcb->asoc.total_output_queue_size); |
|
5232 } |
|
5233 } |
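/* |
 * A SHUTDOWN chunk carries only a cumulative TSN ack; process it as a |
 * SACK with no gap reports, choosing a_rwnd so that the peer's window |
 * appears unchanged. |
 */ |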
|
5234 |
|
5235 void |
|
5236 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag) |
|
5237 { |
|
5238 /* Copy cum-ack */ |
|
5239 uint32_t cum_ack, a_rwnd; |
|
5240 |
|
5241 cum_ack = ntohl(cp->cumulative_tsn_ack); |
|
5242 /* Arrange so a_rwnd does NOT change */ |
|
5243 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; |
|
5244 |
|
5245 /* Now call the express sack handling */ |
|
5246 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0); |
|
5247 } |
|
5248 |
|
5249 static void |
|
5250 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, |
|
5251 struct sctp_stream_in *strmin) |
|
5252 { |
|
5253 struct sctp_queued_to_read *ctl, *nctl; |
|
5254 struct sctp_association *asoc; |
|
5255 uint16_t tt; |
|
5256 |
|
5257 asoc = &stcb->asoc; |
|
5258 tt = strmin->last_sequence_delivered; |
|
5259 /* |
|
5260 * First deliver anything prior to and including the stream sequence number that |
|
5261 * came in |
|
5262 */ |
|
5263 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) { |
|
5264 if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) { |
|
5265 /* this is deliverable now */ |
|
5266 TAILQ_REMOVE(&strmin->inqueue, ctl, next); |
|
5267 /* subtract pending on streams */ |
|
5268 asoc->size_on_all_streams -= ctl->length; |
|
5269 sctp_ucount_decr(asoc->cnt_on_all_streams); |
|
5270 /* deliver it to at least the delivery-q */ |
|
5271 if (stcb->sctp_socket) { |
|
5272 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn); |
|
5273 sctp_add_to_readq(stcb->sctp_ep, stcb, |
|
5274 ctl, |
|
5275 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); |
|
5276 } |
|
5277 } else { |
|
5278 /* no more delivery now. */ |
|
5279 break; |
|
5280 } |
|
5281 } |
|
5282 /* |
|
5283 * now we must deliver things in queue the normal way if any are |
|
5284 * now ready. |
|
5285 */ |
|
5286 tt = strmin->last_sequence_delivered + 1; |
|
5287 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) { |
|
5288 if (tt == ctl->sinfo_ssn) { |
|
5289 /* this is deliverable now */ |
|
5290 TAILQ_REMOVE(&strmin->inqueue, ctl, next); |
|
5291 /* subtract pending on streams */ |
|
5292 asoc->size_on_all_streams -= ctl->length; |
|
5293 sctp_ucount_decr(asoc->cnt_on_all_streams); |
|
5294 /* deliver it to at least the delivery-q */ |
|
5295 strmin->last_sequence_delivered = ctl->sinfo_ssn; |
|
5296 if (stcb->sctp_socket) { |
|
5297 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn); |
|
5298 sctp_add_to_readq(stcb->sctp_ep, stcb, |
|
5299 ctl, |
|
5300 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED); |
|
5301 |
|
5302 } |
|
5303 tt = strmin->last_sequence_delivered + 1; |
|
5304 } else { |
|
5305 break; |
|
5306 } |
|
5307 } |
|
5308 } |
|
5309 |
|
5310 static void |
|
5311 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb, |
|
5312 struct sctp_association *asoc, |
|
5313 uint16_t stream, uint16_t seq) |
|
5314 { |
|
5315 struct sctp_tmit_chunk *chk, *nchk; |
|
5316 |
|
5317 /* For each one on here see if we need to toss it */ |
|
5318 /* |
|
5319 * For now large messages held on the reasmqueue that are |
|
5320 * complete will be tossed too. We could in theory do more |
|
5321 * work to spin through and stop after dumping one msg aka |
|
5322 * seeing the start of a new msg at the head, and call the |
|
5323 * delivery function... to see if it can be delivered... But |
|
5324 * for now we just dump everything on the queue. |
|
5325 */ |
|
5326 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) { |
|
5327 /* Do not toss it if on a different stream or |
|
5328 * marked for unordered delivery in which case |
|
5329 * the stream sequence number has no meaning. |
|
5330 */ |
|
5331 if ((chk->rec.data.stream_number != stream) || |
|
5332 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) { |
|
5333 continue; |
|
5334 } |
|
5335 if (chk->rec.data.stream_seq == seq) { |
|
5336 /* It needs to be tossed */ |
|
5337 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); |
|
5338 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) { |
|
5339 asoc->tsn_last_delivered = chk->rec.data.TSN_seq; |
|
5340 asoc->str_of_pdapi = chk->rec.data.stream_number; |
|
5341 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; |
|
5342 asoc->fragment_flags = chk->rec.data.rcv_flags; |
|
5343 } |
|
5344 asoc->size_on_reasm_queue -= chk->send_size; |
|
5345 sctp_ucount_decr(asoc->cnt_on_reasm_queue); |
|
5346 |
|
5347 /* Clear up any stream problem */ |
|
5348 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED && |
|
5349 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) { |
|
5350 /* |
|
5351 * We must move this stream's |
|
5352 * sequence number forward if the |
|
5353 * chunk being skipped is not |
|
5354 * unordered. There is a chance |
|
5355 * that if the peer does not include |
|
5356 * the last fragment in its FWD-TSN |
|
5357 * we WILL have a problem here, |
|
5358 * since a partial chunk would be |
|
5359 * left in the queue that may never |
|
5360 * become deliverable. Also, if a |
|
5361 * partial delivery API has started, |
|
5362 * the user may get a partial chunk |
|
5363 * and the next read would return a |
|
5364 * new chunk... really ugly, but I |
|
5365 * see no way around it! Maybe a notify?? |
|
5366 */ |
|
5367 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq; |
|
5368 } |
|
5369 if (chk->data) { |
|
5370 sctp_m_freem(chk->data); |
|
5371 chk->data = NULL; |
|
5372 } |
|
5373 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
|
5374 } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) { |
|
5375 /* If the stream_seq is > than the purging one, we are done */ |
|
5376 break; |
|
5377 } |
|
5378 } |
|
5379 } |
|
5380 |
|
5381 |
|
5382 void |
|
5383 sctp_handle_forward_tsn(struct sctp_tcb *stcb, |
|
5384 struct sctp_forward_tsn_chunk *fwd, |
|
5385 int *abort_flag, struct mbuf *m ,int offset) |
|
5386 { |
|
5387 /* The pr-sctp fwd tsn */ |
|
5388 /* |
|
5389 * Here we perform all the data receiver side steps for |
|
5390 * processing a FWD-TSN, as required by the PR-SCTP draft: |
|
5391 * |
|
5392 * Assume we get FwdTSN(x): |
|
5393 * |
|
5394 * 1) update the local cumTSN to x, 2) try to further advance the |
|
5395 * cumTSN with other TSNs we hold, 3) examine and update the |
|
5396 * re-ordering queues on the PR-in-streams, 4) clean up the |
|
5397 * re-assembly queue, 5) send a SACK to report where we are. |
|
5398 */ |
|
5399 struct sctp_association *asoc; |
|
5400 uint32_t new_cum_tsn, gap; |
|
5401 unsigned int i, fwd_sz, m_size; |
|
5402 uint32_t str_seq; |
|
5403 struct sctp_stream_in *strm; |
|
5404 struct sctp_tmit_chunk *chk, *nchk; |
|
5405 struct sctp_queued_to_read *ctl, *sv; |
|
5406 |
|
5407 asoc = &stcb->asoc; |
|
5408 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) { |
|
5409 SCTPDBG(SCTP_DEBUG_INDATA1, |
|
5410 "Bad size too small/big fwd-tsn\n"); |
|
5411 return; |
|
5412 } |
|
5413 m_size = (stcb->asoc.mapping_array_size << 3); |
|
5414 /*************************************************************/ |
|
5415 /* 1. Here we update local cumTSN and shift the bitmap array */ |
|
5416 /*************************************************************/ |
|
5417 new_cum_tsn = ntohl(fwd->new_cumulative_tsn); |
|
5418 |
|
5419 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) { |
|
5420 /* Already got there ... */ |
|
5421 return; |
|
5422 } |
|
5423 /* |
|
5424 * now we know the new TSN is more advanced, let's find the actual |
|
5425 * gap |
|
5426 */ |
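/* |
 * gap is the offset of the new cumulative TSN from the mapping array |
 * base; if it falls outside the array, the maps are reset below (or the |
 * association is aborted if the jump exceeds the window we advertised). |
 */ |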
|
5427 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn); |
|
5428 asoc->cumulative_tsn = new_cum_tsn; |
|
5429 if (gap >= m_size) { |
|
5430 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) { |
|
5431 struct mbuf *oper; |
|
5432 /* |
|
5433 * Out of range: even with single-byte chunks this exceeds the rwnd |
|
5434 * we give out. This must be an attacker. |
|
5435 */ |
|
5436 *abort_flag = 1; |
|
5437 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)), |
|
5438 0, M_NOWAIT, 1, MT_DATA); |
|
5439 if (oper) { |
|
5440 struct sctp_paramhdr *ph; |
|
5441 uint32_t *ippp; |
|
5442 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + |
|
5443 (sizeof(uint32_t) * 3); |
|
5444 ph = mtod(oper, struct sctp_paramhdr *); |
|
5445 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
5446 ph->param_length = htons(SCTP_BUF_LEN(oper)); |
|
5447 ippp = (uint32_t *) (ph + 1); |
|
5448 *ippp = htonl(SCTP_FROM_SCTP_INDATA+SCTP_LOC_33); |
|
5449 ippp++; |
|
5450 *ippp = asoc->highest_tsn_inside_map; |
|
5451 ippp++; |
|
5452 *ippp = new_cum_tsn; |
|
5453 } |
|
5454 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA+SCTP_LOC_33; |
|
5455 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); |
|
5456 return; |
|
5457 } |
|
5458 SCTP_STAT_INCR(sctps_fwdtsn_map_over); |
|
5459 |
|
5460 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); |
|
5461 asoc->mapping_array_base_tsn = new_cum_tsn + 1; |
|
5462 asoc->highest_tsn_inside_map = new_cum_tsn; |
|
5463 |
|
5464 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size); |
|
5465 asoc->highest_tsn_inside_nr_map = new_cum_tsn; |
|
5466 |
|
5467 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { |
|
5468 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); |
|
5469 } |
|
5470 } else { |
|
5471 SCTP_TCB_LOCK_ASSERT(stcb); |
|
5472 for (i = 0; i <= gap; i++) { |
|
5473 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) && |
|
5474 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) { |
|
5475 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i); |
|
5476 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) { |
|
5477 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i; |
|
5478 } |
|
5479 } |
|
5480 } |
|
5481 } |
|
5482 /*************************************************************/ |
|
5483 /* 2. Clear up re-assembly queue */ |
|
5484 /*************************************************************/ |
|
5485 /* |
|
5486 * First service it if pd-api is up, just in case we can progress it |
|
5487 * forward |
|
5488 */ |
|
5489 if (asoc->fragmented_delivery_inprogress) { |
|
5490 sctp_service_reassembly(stcb, asoc); |
|
5491 } |
|
5492 /* For each one on here see if we need to toss it */ |
|
5493 /* |
|
5494 * For now large messages held on the reasmqueue that are |
|
5495 * complete will be tossed too. We could in theory do more |
|
5496 * work to spin through and stop after dumping one msg aka |
|
5497 * seeing the start of a new msg at the head, and call the |
|
5498 * delivery function... to see if it can be delivered... But |
|
5499 * for now we just dump everything on the queue. |
|
5500 */ |
|
5501 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) { |
|
5502 if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) { |
|
5503 /* It needs to be tossed */ |
|
5504 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); |
|
5505 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) { |
|
5506 asoc->tsn_last_delivered = chk->rec.data.TSN_seq; |
|
5507 asoc->str_of_pdapi = chk->rec.data.stream_number; |
|
5508 asoc->ssn_of_pdapi = chk->rec.data.stream_seq; |
|
5509 asoc->fragment_flags = chk->rec.data.rcv_flags; |
|
5510 } |
|
5511 asoc->size_on_reasm_queue -= chk->send_size; |
|
5512 sctp_ucount_decr(asoc->cnt_on_reasm_queue); |
|
5513 |
|
5514 /* Clear up any stream problem */ |
|
5515 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED && |
|
5516 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) { |
|
5517 /* |
|
5518 * We must move this stream's |
|
5519 * sequence number forward if the |
|
5520 * chunk being skipped is not |
|
5521 * unordered. There is a chance |
|
5522 * that if the peer does not include |
|
5523 * the last fragment in its FWD-TSN |
|
5524 * we WILL have a problem here, |
|
5525 * since a partial chunk would be |
|
5526 * left in the queue that may never |
|
5527 * become deliverable. Also, if a |
|
5528 * partial delivery API has started, |
|
5529 * the user may get a partial chunk |
|
5530 * and the next read would return a |
|
5531 * new chunk... really ugly, but I |
|
5532 * see no way around it! Maybe a notify?? |
|
5533 */ |
|
5534 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq; |
|
5535 } |
|
5536 if (chk->data) { |
|
5537 sctp_m_freem(chk->data); |
|
5538 chk->data = NULL; |
|
5539 } |
|
5540 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
|
5541 } else { |
|
5542 /* |
|
5543 * Ok we have gone beyond the end of the |
|
5544 * fwd-tsn's mark. |
|
5545 */ |
|
5546 break; |
|
5547 } |
|
5548 } |
|
5549 /*******************************************************/ |
|
5550 /* 3. Update the PR-stream re-ordering queues and fix */ |
|
5551 /* delivery issues as needed. */ |
|
5552 /*******************************************************/ |
|
5553 fwd_sz -= sizeof(*fwd); |
|
5554 if (m && fwd_sz) { |
|
5555 /* New method. */ |
|
5556 unsigned int num_str; |
|
5557 struct sctp_strseq *stseq, strseqbuf; |
|
5558 offset += sizeof(*fwd); |
|
5559 |
|
5560 SCTP_INP_READ_LOCK(stcb->sctp_ep); |
|
5561 num_str = fwd_sz / sizeof(struct sctp_strseq); |
|
5562 for (i = 0; i < num_str; i++) { |
|
5563 uint16_t st; |
|
5564 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset, |
|
5565 sizeof(struct sctp_strseq), |
|
5566 (uint8_t *)&strseqbuf); |
|
5567 offset += sizeof(struct sctp_strseq); |
|
5568 if (stseq == NULL) { |
|
5569 break; |
|
5570 } |
|
5571 /* Convert stream number and sequence to host byte order */ |
|
5572 st = ntohs(stseq->stream); |
|
5573 stseq->stream = st; |
|
5574 st = ntohs(stseq->sequence); |
|
5575 stseq->sequence = st; |
|
5576 |
|
5577 /* now process */ |
|
5578 |
|
5579 /* |
|
5580 * Ok we now look for the stream/seq on the read queue |
|
5581 * where it is not all delivered. If we find it we transmute the |
|
5582 * read entry into a PDI_ABORTED. |
|
5583 */ |
|
5584 if (stseq->stream >= asoc->streamincnt) { |
|
5585 /* screwed up streams, stop! */ |
|
5586 break; |
|
5587 } |
|
5588 if ((asoc->str_of_pdapi == stseq->stream) && |
|
5589 (asoc->ssn_of_pdapi == stseq->sequence)) { |
|
5590 /* If this is the one we were partially delivering |
|
5591 * now then we no longer are. Note this will change |
|
5592 * with the reassembly re-write. |
|
5593 */ |
|
5594 asoc->fragmented_delivery_inprogress = 0; |
|
5595 } |
|
5596 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence); |
|
5597 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) { |
|
5598 if ((ctl->sinfo_stream == stseq->stream) && |
|
5599 (ctl->sinfo_ssn == stseq->sequence)) { |
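/* Pack the stream number (high 16 bits) and SSN (low 16 bits) into one word for the notification. */ |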
|
5600 str_seq = (stseq->stream << 16) | stseq->sequence; |
|
5601 ctl->end_added = 1; |
|
5602 ctl->pdapi_aborted = 1; |
|
5603 sv = stcb->asoc.control_pdapi; |
|
5604 stcb->asoc.control_pdapi = ctl; |
|
5605 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, |
|
5606 stcb, |
|
5607 SCTP_PARTIAL_DELIVERY_ABORTED, |
|
5608 (void *)&str_seq, |
|
5609 SCTP_SO_NOT_LOCKED); |
|
5610 stcb->asoc.control_pdapi = sv; |
|
5611 break; |
|
5612 } else if ((ctl->sinfo_stream == stseq->stream) && |
|
5613 SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) { |
|
5614 /* We are past our victim SSN */ |
|
5615 break; |
|
5616 } |
|
5617 } |
|
5618 strm = &asoc->strmin[stseq->stream]; |
|
5619 if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) { |
|
5620 /* Update the sequence number */ |
|
5621 strm->last_sequence_delivered = stseq->sequence; |
|
5622 } |
|
5623 /* now kick the stream the new way */ |
|
5624 /*sa_ignore NO_NULL_CHK*/ |
|
5625 sctp_kick_prsctp_reorder_queue(stcb, strm); |
|
5626 } |
|
5627 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); |
|
5628 } |
|
5629 /* |
|
5630 * Now slide thing forward. |
|
5631 */ |
|
5632 sctp_slide_mapping_arrays(stcb); |
|
5633 |
|
5634 if (!TAILQ_EMPTY(&asoc->reasmqueue)) { |
|
5635 /* now lets kick out and check for more fragmented delivery */ |
|
5636 /*sa_ignore NO_NULL_CHK*/ |
|
5637 sctp_deliver_reasm_check(stcb, &stcb->asoc); |
|
5638 } |
|
5639 } |