|
1 /*- |
|
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. |
|
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. |
|
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. |
|
5 * |
|
6 * Redistribution and use in source and binary forms, with or without |
|
7 * modification, are permitted provided that the following conditions are met: |
|
8 * |
|
9 * a) Redistributions of source code must retain the above copyright notice, |
|
10 * this list of conditions and the following disclaimer. |
|
11 * |
|
12 * b) Redistributions in binary form must reproduce the above copyright |
|
13 * notice, this list of conditions and the following disclaimer in |
|
14 * the documentation and/or other materials provided with the distribution. |
|
15 * |
|
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its |
|
17 * contributors may be used to endorse or promote products derived |
|
18 * from this software without specific prior written permission. |
|
19 * |
|
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
|
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, |
|
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
|
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
|
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
|
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
|
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
|
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
|
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
|
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
|
30 * THE POSSIBILITY OF SUCH DAMAGE. |
|
31 */ |
|
32 |
|
33 #ifdef __FreeBSD__ |
|
34 #include <sys/cdefs.h> |
|
35 __FBSDID("$FreeBSD: head/sys/netinet/sctp_output.c 262252 2014-02-20 20:14:43Z tuexen $"); |
|
36 #endif |
|
37 |
|
38 #include <netinet/sctp_os.h> |
|
39 #ifdef __FreeBSD__ |
|
40 #include <sys/proc.h> |
|
41 #endif |
|
42 #include <netinet/sctp_var.h> |
|
43 #include <netinet/sctp_sysctl.h> |
|
44 #include <netinet/sctp_header.h> |
|
45 #include <netinet/sctp_pcb.h> |
|
46 #include <netinet/sctputil.h> |
|
47 #include <netinet/sctp_output.h> |
|
48 #include <netinet/sctp_uio.h> |
|
49 #include <netinet/sctputil.h> |
|
50 #include <netinet/sctp_auth.h> |
|
51 #include <netinet/sctp_timer.h> |
|
52 #include <netinet/sctp_asconf.h> |
|
53 #include <netinet/sctp_indata.h> |
|
54 #include <netinet/sctp_bsd_addr.h> |
|
55 #include <netinet/sctp_input.h> |
|
56 #include <netinet/sctp_crc32.h> |
|
57 #if defined(__Userspace_os_Linux) |
|
58 #define __FAVOR_BSD /* (on Ubuntu at least) enables UDP header field names like BSD in RFC 768 */ |
|
59 #endif |
|
60 #if !defined(__Userspace_os_Windows) |
|
61 #include <netinet/udp.h> |
|
62 #endif |
|
63 #if defined(__APPLE__) |
|
64 #include <netinet/in.h> |
|
65 #endif |
|
66 #if defined(__FreeBSD__) |
|
67 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000 |
|
68 #include <netinet/udp_var.h> |
|
69 #endif |
|
70 #include <machine/in_cksum.h> |
|
71 #endif |
|
72 #if defined(__Userspace__) && defined(INET6) |
|
73 #include <netinet6/sctp6_var.h> |
|
74 #endif |
|
75 |
|
/*
 * Platform-specific tunables for the Apple (macOS kernel extension) build.
 */
#if defined(__APPLE__)
/* Per-source-file identifier; presumably used by SCTP logging/debug macros
 * to tag records with the originating file — TODO confirm against sctp_os_macosx.h. */
#define APPLE_FILE_NO 3
#endif

#if defined(__APPLE__)
#if !(defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD))
/* NOTE(review): link-layer header reservation used on newer Darwin releases
 * (not defined for Leopard/Snow Leopard builds) — verify against the mbuf
 * allocation paths that consume it. */
#define SCTP_MAX_LINKHDR 16
#endif
#endif
|
85 |
|
/* Maximum number of gap-ack blocks describable for one 8-bit mapping byte
 * (an 8-bit value can contain at most 4 disjoint runs of set bits). */
#define SCTP_MAX_GAPS_INARRAY 4

/*
 * One precomputed lookup entry for a single byte value of the SACK mapping
 * array.  The table below (sack_array[256]) is indexed by the byte value;
 * each entry enumerates the runs of set bits in that byte as gap-ack blocks,
 * so SACK generation can translate mapping-array bytes into blocks without
 * scanning bits at runtime.
 */
struct sack_track {
	uint8_t right_edge;	/* mergable on the right edge */
	uint8_t left_edge;	/* mergable on the left edge */
	uint8_t num_entries;	/* number of valid runs in gaps[] */
	uint8_t spare;		/* padding; always 0 in the table */
	/* Bit-offset {start, end} pairs (0..7) for each run of set bits;
	 * unused slots are {0, 0}. */
	struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
};
|
94 |
|
95 struct sack_track sack_array[256] = { |
|
96 {0, 0, 0, 0, /* 0x00 */ |
|
97 {{0, 0}, |
|
98 {0, 0}, |
|
99 {0, 0}, |
|
100 {0, 0} |
|
101 } |
|
102 }, |
|
103 {1, 0, 1, 0, /* 0x01 */ |
|
104 {{0, 0}, |
|
105 {0, 0}, |
|
106 {0, 0}, |
|
107 {0, 0} |
|
108 } |
|
109 }, |
|
110 {0, 0, 1, 0, /* 0x02 */ |
|
111 {{1, 1}, |
|
112 {0, 0}, |
|
113 {0, 0}, |
|
114 {0, 0} |
|
115 } |
|
116 }, |
|
117 {1, 0, 1, 0, /* 0x03 */ |
|
118 {{0, 1}, |
|
119 {0, 0}, |
|
120 {0, 0}, |
|
121 {0, 0} |
|
122 } |
|
123 }, |
|
124 {0, 0, 1, 0, /* 0x04 */ |
|
125 {{2, 2}, |
|
126 {0, 0}, |
|
127 {0, 0}, |
|
128 {0, 0} |
|
129 } |
|
130 }, |
|
131 {1, 0, 2, 0, /* 0x05 */ |
|
132 {{0, 0}, |
|
133 {2, 2}, |
|
134 {0, 0}, |
|
135 {0, 0} |
|
136 } |
|
137 }, |
|
138 {0, 0, 1, 0, /* 0x06 */ |
|
139 {{1, 2}, |
|
140 {0, 0}, |
|
141 {0, 0}, |
|
142 {0, 0} |
|
143 } |
|
144 }, |
|
145 {1, 0, 1, 0, /* 0x07 */ |
|
146 {{0, 2}, |
|
147 {0, 0}, |
|
148 {0, 0}, |
|
149 {0, 0} |
|
150 } |
|
151 }, |
|
152 {0, 0, 1, 0, /* 0x08 */ |
|
153 {{3, 3}, |
|
154 {0, 0}, |
|
155 {0, 0}, |
|
156 {0, 0} |
|
157 } |
|
158 }, |
|
159 {1, 0, 2, 0, /* 0x09 */ |
|
160 {{0, 0}, |
|
161 {3, 3}, |
|
162 {0, 0}, |
|
163 {0, 0} |
|
164 } |
|
165 }, |
|
166 {0, 0, 2, 0, /* 0x0a */ |
|
167 {{1, 1}, |
|
168 {3, 3}, |
|
169 {0, 0}, |
|
170 {0, 0} |
|
171 } |
|
172 }, |
|
173 {1, 0, 2, 0, /* 0x0b */ |
|
174 {{0, 1}, |
|
175 {3, 3}, |
|
176 {0, 0}, |
|
177 {0, 0} |
|
178 } |
|
179 }, |
|
180 {0, 0, 1, 0, /* 0x0c */ |
|
181 {{2, 3}, |
|
182 {0, 0}, |
|
183 {0, 0}, |
|
184 {0, 0} |
|
185 } |
|
186 }, |
|
187 {1, 0, 2, 0, /* 0x0d */ |
|
188 {{0, 0}, |
|
189 {2, 3}, |
|
190 {0, 0}, |
|
191 {0, 0} |
|
192 } |
|
193 }, |
|
194 {0, 0, 1, 0, /* 0x0e */ |
|
195 {{1, 3}, |
|
196 {0, 0}, |
|
197 {0, 0}, |
|
198 {0, 0} |
|
199 } |
|
200 }, |
|
201 {1, 0, 1, 0, /* 0x0f */ |
|
202 {{0, 3}, |
|
203 {0, 0}, |
|
204 {0, 0}, |
|
205 {0, 0} |
|
206 } |
|
207 }, |
|
208 {0, 0, 1, 0, /* 0x10 */ |
|
209 {{4, 4}, |
|
210 {0, 0}, |
|
211 {0, 0}, |
|
212 {0, 0} |
|
213 } |
|
214 }, |
|
215 {1, 0, 2, 0, /* 0x11 */ |
|
216 {{0, 0}, |
|
217 {4, 4}, |
|
218 {0, 0}, |
|
219 {0, 0} |
|
220 } |
|
221 }, |
|
222 {0, 0, 2, 0, /* 0x12 */ |
|
223 {{1, 1}, |
|
224 {4, 4}, |
|
225 {0, 0}, |
|
226 {0, 0} |
|
227 } |
|
228 }, |
|
229 {1, 0, 2, 0, /* 0x13 */ |
|
230 {{0, 1}, |
|
231 {4, 4}, |
|
232 {0, 0}, |
|
233 {0, 0} |
|
234 } |
|
235 }, |
|
236 {0, 0, 2, 0, /* 0x14 */ |
|
237 {{2, 2}, |
|
238 {4, 4}, |
|
239 {0, 0}, |
|
240 {0, 0} |
|
241 } |
|
242 }, |
|
243 {1, 0, 3, 0, /* 0x15 */ |
|
244 {{0, 0}, |
|
245 {2, 2}, |
|
246 {4, 4}, |
|
247 {0, 0} |
|
248 } |
|
249 }, |
|
250 {0, 0, 2, 0, /* 0x16 */ |
|
251 {{1, 2}, |
|
252 {4, 4}, |
|
253 {0, 0}, |
|
254 {0, 0} |
|
255 } |
|
256 }, |
|
257 {1, 0, 2, 0, /* 0x17 */ |
|
258 {{0, 2}, |
|
259 {4, 4}, |
|
260 {0, 0}, |
|
261 {0, 0} |
|
262 } |
|
263 }, |
|
264 {0, 0, 1, 0, /* 0x18 */ |
|
265 {{3, 4}, |
|
266 {0, 0}, |
|
267 {0, 0}, |
|
268 {0, 0} |
|
269 } |
|
270 }, |
|
271 {1, 0, 2, 0, /* 0x19 */ |
|
272 {{0, 0}, |
|
273 {3, 4}, |
|
274 {0, 0}, |
|
275 {0, 0} |
|
276 } |
|
277 }, |
|
278 {0, 0, 2, 0, /* 0x1a */ |
|
279 {{1, 1}, |
|
280 {3, 4}, |
|
281 {0, 0}, |
|
282 {0, 0} |
|
283 } |
|
284 }, |
|
285 {1, 0, 2, 0, /* 0x1b */ |
|
286 {{0, 1}, |
|
287 {3, 4}, |
|
288 {0, 0}, |
|
289 {0, 0} |
|
290 } |
|
291 }, |
|
292 {0, 0, 1, 0, /* 0x1c */ |
|
293 {{2, 4}, |
|
294 {0, 0}, |
|
295 {0, 0}, |
|
296 {0, 0} |
|
297 } |
|
298 }, |
|
299 {1, 0, 2, 0, /* 0x1d */ |
|
300 {{0, 0}, |
|
301 {2, 4}, |
|
302 {0, 0}, |
|
303 {0, 0} |
|
304 } |
|
305 }, |
|
306 {0, 0, 1, 0, /* 0x1e */ |
|
307 {{1, 4}, |
|
308 {0, 0}, |
|
309 {0, 0}, |
|
310 {0, 0} |
|
311 } |
|
312 }, |
|
313 {1, 0, 1, 0, /* 0x1f */ |
|
314 {{0, 4}, |
|
315 {0, 0}, |
|
316 {0, 0}, |
|
317 {0, 0} |
|
318 } |
|
319 }, |
|
320 {0, 0, 1, 0, /* 0x20 */ |
|
321 {{5, 5}, |
|
322 {0, 0}, |
|
323 {0, 0}, |
|
324 {0, 0} |
|
325 } |
|
326 }, |
|
327 {1, 0, 2, 0, /* 0x21 */ |
|
328 {{0, 0}, |
|
329 {5, 5}, |
|
330 {0, 0}, |
|
331 {0, 0} |
|
332 } |
|
333 }, |
|
334 {0, 0, 2, 0, /* 0x22 */ |
|
335 {{1, 1}, |
|
336 {5, 5}, |
|
337 {0, 0}, |
|
338 {0, 0} |
|
339 } |
|
340 }, |
|
341 {1, 0, 2, 0, /* 0x23 */ |
|
342 {{0, 1}, |
|
343 {5, 5}, |
|
344 {0, 0}, |
|
345 {0, 0} |
|
346 } |
|
347 }, |
|
348 {0, 0, 2, 0, /* 0x24 */ |
|
349 {{2, 2}, |
|
350 {5, 5}, |
|
351 {0, 0}, |
|
352 {0, 0} |
|
353 } |
|
354 }, |
|
355 {1, 0, 3, 0, /* 0x25 */ |
|
356 {{0, 0}, |
|
357 {2, 2}, |
|
358 {5, 5}, |
|
359 {0, 0} |
|
360 } |
|
361 }, |
|
362 {0, 0, 2, 0, /* 0x26 */ |
|
363 {{1, 2}, |
|
364 {5, 5}, |
|
365 {0, 0}, |
|
366 {0, 0} |
|
367 } |
|
368 }, |
|
369 {1, 0, 2, 0, /* 0x27 */ |
|
370 {{0, 2}, |
|
371 {5, 5}, |
|
372 {0, 0}, |
|
373 {0, 0} |
|
374 } |
|
375 }, |
|
376 {0, 0, 2, 0, /* 0x28 */ |
|
377 {{3, 3}, |
|
378 {5, 5}, |
|
379 {0, 0}, |
|
380 {0, 0} |
|
381 } |
|
382 }, |
|
383 {1, 0, 3, 0, /* 0x29 */ |
|
384 {{0, 0}, |
|
385 {3, 3}, |
|
386 {5, 5}, |
|
387 {0, 0} |
|
388 } |
|
389 }, |
|
390 {0, 0, 3, 0, /* 0x2a */ |
|
391 {{1, 1}, |
|
392 {3, 3}, |
|
393 {5, 5}, |
|
394 {0, 0} |
|
395 } |
|
396 }, |
|
397 {1, 0, 3, 0, /* 0x2b */ |
|
398 {{0, 1}, |
|
399 {3, 3}, |
|
400 {5, 5}, |
|
401 {0, 0} |
|
402 } |
|
403 }, |
|
404 {0, 0, 2, 0, /* 0x2c */ |
|
405 {{2, 3}, |
|
406 {5, 5}, |
|
407 {0, 0}, |
|
408 {0, 0} |
|
409 } |
|
410 }, |
|
411 {1, 0, 3, 0, /* 0x2d */ |
|
412 {{0, 0}, |
|
413 {2, 3}, |
|
414 {5, 5}, |
|
415 {0, 0} |
|
416 } |
|
417 }, |
|
418 {0, 0, 2, 0, /* 0x2e */ |
|
419 {{1, 3}, |
|
420 {5, 5}, |
|
421 {0, 0}, |
|
422 {0, 0} |
|
423 } |
|
424 }, |
|
425 {1, 0, 2, 0, /* 0x2f */ |
|
426 {{0, 3}, |
|
427 {5, 5}, |
|
428 {0, 0}, |
|
429 {0, 0} |
|
430 } |
|
431 }, |
|
432 {0, 0, 1, 0, /* 0x30 */ |
|
433 {{4, 5}, |
|
434 {0, 0}, |
|
435 {0, 0}, |
|
436 {0, 0} |
|
437 } |
|
438 }, |
|
439 {1, 0, 2, 0, /* 0x31 */ |
|
440 {{0, 0}, |
|
441 {4, 5}, |
|
442 {0, 0}, |
|
443 {0, 0} |
|
444 } |
|
445 }, |
|
446 {0, 0, 2, 0, /* 0x32 */ |
|
447 {{1, 1}, |
|
448 {4, 5}, |
|
449 {0, 0}, |
|
450 {0, 0} |
|
451 } |
|
452 }, |
|
453 {1, 0, 2, 0, /* 0x33 */ |
|
454 {{0, 1}, |
|
455 {4, 5}, |
|
456 {0, 0}, |
|
457 {0, 0} |
|
458 } |
|
459 }, |
|
460 {0, 0, 2, 0, /* 0x34 */ |
|
461 {{2, 2}, |
|
462 {4, 5}, |
|
463 {0, 0}, |
|
464 {0, 0} |
|
465 } |
|
466 }, |
|
467 {1, 0, 3, 0, /* 0x35 */ |
|
468 {{0, 0}, |
|
469 {2, 2}, |
|
470 {4, 5}, |
|
471 {0, 0} |
|
472 } |
|
473 }, |
|
474 {0, 0, 2, 0, /* 0x36 */ |
|
475 {{1, 2}, |
|
476 {4, 5}, |
|
477 {0, 0}, |
|
478 {0, 0} |
|
479 } |
|
480 }, |
|
481 {1, 0, 2, 0, /* 0x37 */ |
|
482 {{0, 2}, |
|
483 {4, 5}, |
|
484 {0, 0}, |
|
485 {0, 0} |
|
486 } |
|
487 }, |
|
488 {0, 0, 1, 0, /* 0x38 */ |
|
489 {{3, 5}, |
|
490 {0, 0}, |
|
491 {0, 0}, |
|
492 {0, 0} |
|
493 } |
|
494 }, |
|
495 {1, 0, 2, 0, /* 0x39 */ |
|
496 {{0, 0}, |
|
497 {3, 5}, |
|
498 {0, 0}, |
|
499 {0, 0} |
|
500 } |
|
501 }, |
|
502 {0, 0, 2, 0, /* 0x3a */ |
|
503 {{1, 1}, |
|
504 {3, 5}, |
|
505 {0, 0}, |
|
506 {0, 0} |
|
507 } |
|
508 }, |
|
509 {1, 0, 2, 0, /* 0x3b */ |
|
510 {{0, 1}, |
|
511 {3, 5}, |
|
512 {0, 0}, |
|
513 {0, 0} |
|
514 } |
|
515 }, |
|
516 {0, 0, 1, 0, /* 0x3c */ |
|
517 {{2, 5}, |
|
518 {0, 0}, |
|
519 {0, 0}, |
|
520 {0, 0} |
|
521 } |
|
522 }, |
|
523 {1, 0, 2, 0, /* 0x3d */ |
|
524 {{0, 0}, |
|
525 {2, 5}, |
|
526 {0, 0}, |
|
527 {0, 0} |
|
528 } |
|
529 }, |
|
530 {0, 0, 1, 0, /* 0x3e */ |
|
531 {{1, 5}, |
|
532 {0, 0}, |
|
533 {0, 0}, |
|
534 {0, 0} |
|
535 } |
|
536 }, |
|
537 {1, 0, 1, 0, /* 0x3f */ |
|
538 {{0, 5}, |
|
539 {0, 0}, |
|
540 {0, 0}, |
|
541 {0, 0} |
|
542 } |
|
543 }, |
|
544 {0, 0, 1, 0, /* 0x40 */ |
|
545 {{6, 6}, |
|
546 {0, 0}, |
|
547 {0, 0}, |
|
548 {0, 0} |
|
549 } |
|
550 }, |
|
551 {1, 0, 2, 0, /* 0x41 */ |
|
552 {{0, 0}, |
|
553 {6, 6}, |
|
554 {0, 0}, |
|
555 {0, 0} |
|
556 } |
|
557 }, |
|
558 {0, 0, 2, 0, /* 0x42 */ |
|
559 {{1, 1}, |
|
560 {6, 6}, |
|
561 {0, 0}, |
|
562 {0, 0} |
|
563 } |
|
564 }, |
|
565 {1, 0, 2, 0, /* 0x43 */ |
|
566 {{0, 1}, |
|
567 {6, 6}, |
|
568 {0, 0}, |
|
569 {0, 0} |
|
570 } |
|
571 }, |
|
572 {0, 0, 2, 0, /* 0x44 */ |
|
573 {{2, 2}, |
|
574 {6, 6}, |
|
575 {0, 0}, |
|
576 {0, 0} |
|
577 } |
|
578 }, |
|
579 {1, 0, 3, 0, /* 0x45 */ |
|
580 {{0, 0}, |
|
581 {2, 2}, |
|
582 {6, 6}, |
|
583 {0, 0} |
|
584 } |
|
585 }, |
|
586 {0, 0, 2, 0, /* 0x46 */ |
|
587 {{1, 2}, |
|
588 {6, 6}, |
|
589 {0, 0}, |
|
590 {0, 0} |
|
591 } |
|
592 }, |
|
593 {1, 0, 2, 0, /* 0x47 */ |
|
594 {{0, 2}, |
|
595 {6, 6}, |
|
596 {0, 0}, |
|
597 {0, 0} |
|
598 } |
|
599 }, |
|
600 {0, 0, 2, 0, /* 0x48 */ |
|
601 {{3, 3}, |
|
602 {6, 6}, |
|
603 {0, 0}, |
|
604 {0, 0} |
|
605 } |
|
606 }, |
|
607 {1, 0, 3, 0, /* 0x49 */ |
|
608 {{0, 0}, |
|
609 {3, 3}, |
|
610 {6, 6}, |
|
611 {0, 0} |
|
612 } |
|
613 }, |
|
614 {0, 0, 3, 0, /* 0x4a */ |
|
615 {{1, 1}, |
|
616 {3, 3}, |
|
617 {6, 6}, |
|
618 {0, 0} |
|
619 } |
|
620 }, |
|
621 {1, 0, 3, 0, /* 0x4b */ |
|
622 {{0, 1}, |
|
623 {3, 3}, |
|
624 {6, 6}, |
|
625 {0, 0} |
|
626 } |
|
627 }, |
|
628 {0, 0, 2, 0, /* 0x4c */ |
|
629 {{2, 3}, |
|
630 {6, 6}, |
|
631 {0, 0}, |
|
632 {0, 0} |
|
633 } |
|
634 }, |
|
635 {1, 0, 3, 0, /* 0x4d */ |
|
636 {{0, 0}, |
|
637 {2, 3}, |
|
638 {6, 6}, |
|
639 {0, 0} |
|
640 } |
|
641 }, |
|
642 {0, 0, 2, 0, /* 0x4e */ |
|
643 {{1, 3}, |
|
644 {6, 6}, |
|
645 {0, 0}, |
|
646 {0, 0} |
|
647 } |
|
648 }, |
|
649 {1, 0, 2, 0, /* 0x4f */ |
|
650 {{0, 3}, |
|
651 {6, 6}, |
|
652 {0, 0}, |
|
653 {0, 0} |
|
654 } |
|
655 }, |
|
656 {0, 0, 2, 0, /* 0x50 */ |
|
657 {{4, 4}, |
|
658 {6, 6}, |
|
659 {0, 0}, |
|
660 {0, 0} |
|
661 } |
|
662 }, |
|
663 {1, 0, 3, 0, /* 0x51 */ |
|
664 {{0, 0}, |
|
665 {4, 4}, |
|
666 {6, 6}, |
|
667 {0, 0} |
|
668 } |
|
669 }, |
|
670 {0, 0, 3, 0, /* 0x52 */ |
|
671 {{1, 1}, |
|
672 {4, 4}, |
|
673 {6, 6}, |
|
674 {0, 0} |
|
675 } |
|
676 }, |
|
677 {1, 0, 3, 0, /* 0x53 */ |
|
678 {{0, 1}, |
|
679 {4, 4}, |
|
680 {6, 6}, |
|
681 {0, 0} |
|
682 } |
|
683 }, |
|
684 {0, 0, 3, 0, /* 0x54 */ |
|
685 {{2, 2}, |
|
686 {4, 4}, |
|
687 {6, 6}, |
|
688 {0, 0} |
|
689 } |
|
690 }, |
|
691 {1, 0, 4, 0, /* 0x55 */ |
|
692 {{0, 0}, |
|
693 {2, 2}, |
|
694 {4, 4}, |
|
695 {6, 6} |
|
696 } |
|
697 }, |
|
698 {0, 0, 3, 0, /* 0x56 */ |
|
699 {{1, 2}, |
|
700 {4, 4}, |
|
701 {6, 6}, |
|
702 {0, 0} |
|
703 } |
|
704 }, |
|
705 {1, 0, 3, 0, /* 0x57 */ |
|
706 {{0, 2}, |
|
707 {4, 4}, |
|
708 {6, 6}, |
|
709 {0, 0} |
|
710 } |
|
711 }, |
|
712 {0, 0, 2, 0, /* 0x58 */ |
|
713 {{3, 4}, |
|
714 {6, 6}, |
|
715 {0, 0}, |
|
716 {0, 0} |
|
717 } |
|
718 }, |
|
719 {1, 0, 3, 0, /* 0x59 */ |
|
720 {{0, 0}, |
|
721 {3, 4}, |
|
722 {6, 6}, |
|
723 {0, 0} |
|
724 } |
|
725 }, |
|
726 {0, 0, 3, 0, /* 0x5a */ |
|
727 {{1, 1}, |
|
728 {3, 4}, |
|
729 {6, 6}, |
|
730 {0, 0} |
|
731 } |
|
732 }, |
|
733 {1, 0, 3, 0, /* 0x5b */ |
|
734 {{0, 1}, |
|
735 {3, 4}, |
|
736 {6, 6}, |
|
737 {0, 0} |
|
738 } |
|
739 }, |
|
740 {0, 0, 2, 0, /* 0x5c */ |
|
741 {{2, 4}, |
|
742 {6, 6}, |
|
743 {0, 0}, |
|
744 {0, 0} |
|
745 } |
|
746 }, |
|
747 {1, 0, 3, 0, /* 0x5d */ |
|
748 {{0, 0}, |
|
749 {2, 4}, |
|
750 {6, 6}, |
|
751 {0, 0} |
|
752 } |
|
753 }, |
|
754 {0, 0, 2, 0, /* 0x5e */ |
|
755 {{1, 4}, |
|
756 {6, 6}, |
|
757 {0, 0}, |
|
758 {0, 0} |
|
759 } |
|
760 }, |
|
761 {1, 0, 2, 0, /* 0x5f */ |
|
762 {{0, 4}, |
|
763 {6, 6}, |
|
764 {0, 0}, |
|
765 {0, 0} |
|
766 } |
|
767 }, |
|
768 {0, 0, 1, 0, /* 0x60 */ |
|
769 {{5, 6}, |
|
770 {0, 0}, |
|
771 {0, 0}, |
|
772 {0, 0} |
|
773 } |
|
774 }, |
|
775 {1, 0, 2, 0, /* 0x61 */ |
|
776 {{0, 0}, |
|
777 {5, 6}, |
|
778 {0, 0}, |
|
779 {0, 0} |
|
780 } |
|
781 }, |
|
782 {0, 0, 2, 0, /* 0x62 */ |
|
783 {{1, 1}, |
|
784 {5, 6}, |
|
785 {0, 0}, |
|
786 {0, 0} |
|
787 } |
|
788 }, |
|
789 {1, 0, 2, 0, /* 0x63 */ |
|
790 {{0, 1}, |
|
791 {5, 6}, |
|
792 {0, 0}, |
|
793 {0, 0} |
|
794 } |
|
795 }, |
|
796 {0, 0, 2, 0, /* 0x64 */ |
|
797 {{2, 2}, |
|
798 {5, 6}, |
|
799 {0, 0}, |
|
800 {0, 0} |
|
801 } |
|
802 }, |
|
803 {1, 0, 3, 0, /* 0x65 */ |
|
804 {{0, 0}, |
|
805 {2, 2}, |
|
806 {5, 6}, |
|
807 {0, 0} |
|
808 } |
|
809 }, |
|
810 {0, 0, 2, 0, /* 0x66 */ |
|
811 {{1, 2}, |
|
812 {5, 6}, |
|
813 {0, 0}, |
|
814 {0, 0} |
|
815 } |
|
816 }, |
|
817 {1, 0, 2, 0, /* 0x67 */ |
|
818 {{0, 2}, |
|
819 {5, 6}, |
|
820 {0, 0}, |
|
821 {0, 0} |
|
822 } |
|
823 }, |
|
824 {0, 0, 2, 0, /* 0x68 */ |
|
825 {{3, 3}, |
|
826 {5, 6}, |
|
827 {0, 0}, |
|
828 {0, 0} |
|
829 } |
|
830 }, |
|
831 {1, 0, 3, 0, /* 0x69 */ |
|
832 {{0, 0}, |
|
833 {3, 3}, |
|
834 {5, 6}, |
|
835 {0, 0} |
|
836 } |
|
837 }, |
|
838 {0, 0, 3, 0, /* 0x6a */ |
|
839 {{1, 1}, |
|
840 {3, 3}, |
|
841 {5, 6}, |
|
842 {0, 0} |
|
843 } |
|
844 }, |
|
845 {1, 0, 3, 0, /* 0x6b */ |
|
846 {{0, 1}, |
|
847 {3, 3}, |
|
848 {5, 6}, |
|
849 {0, 0} |
|
850 } |
|
851 }, |
|
852 {0, 0, 2, 0, /* 0x6c */ |
|
853 {{2, 3}, |
|
854 {5, 6}, |
|
855 {0, 0}, |
|
856 {0, 0} |
|
857 } |
|
858 }, |
|
859 {1, 0, 3, 0, /* 0x6d */ |
|
860 {{0, 0}, |
|
861 {2, 3}, |
|
862 {5, 6}, |
|
863 {0, 0} |
|
864 } |
|
865 }, |
|
866 {0, 0, 2, 0, /* 0x6e */ |
|
867 {{1, 3}, |
|
868 {5, 6}, |
|
869 {0, 0}, |
|
870 {0, 0} |
|
871 } |
|
872 }, |
|
873 {1, 0, 2, 0, /* 0x6f */ |
|
874 {{0, 3}, |
|
875 {5, 6}, |
|
876 {0, 0}, |
|
877 {0, 0} |
|
878 } |
|
879 }, |
|
880 {0, 0, 1, 0, /* 0x70 */ |
|
881 {{4, 6}, |
|
882 {0, 0}, |
|
883 {0, 0}, |
|
884 {0, 0} |
|
885 } |
|
886 }, |
|
887 {1, 0, 2, 0, /* 0x71 */ |
|
888 {{0, 0}, |
|
889 {4, 6}, |
|
890 {0, 0}, |
|
891 {0, 0} |
|
892 } |
|
893 }, |
|
894 {0, 0, 2, 0, /* 0x72 */ |
|
895 {{1, 1}, |
|
896 {4, 6}, |
|
897 {0, 0}, |
|
898 {0, 0} |
|
899 } |
|
900 }, |
|
901 {1, 0, 2, 0, /* 0x73 */ |
|
902 {{0, 1}, |
|
903 {4, 6}, |
|
904 {0, 0}, |
|
905 {0, 0} |
|
906 } |
|
907 }, |
|
908 {0, 0, 2, 0, /* 0x74 */ |
|
909 {{2, 2}, |
|
910 {4, 6}, |
|
911 {0, 0}, |
|
912 {0, 0} |
|
913 } |
|
914 }, |
|
915 {1, 0, 3, 0, /* 0x75 */ |
|
916 {{0, 0}, |
|
917 {2, 2}, |
|
918 {4, 6}, |
|
919 {0, 0} |
|
920 } |
|
921 }, |
|
922 {0, 0, 2, 0, /* 0x76 */ |
|
923 {{1, 2}, |
|
924 {4, 6}, |
|
925 {0, 0}, |
|
926 {0, 0} |
|
927 } |
|
928 }, |
|
929 {1, 0, 2, 0, /* 0x77 */ |
|
930 {{0, 2}, |
|
931 {4, 6}, |
|
932 {0, 0}, |
|
933 {0, 0} |
|
934 } |
|
935 }, |
|
936 {0, 0, 1, 0, /* 0x78 */ |
|
937 {{3, 6}, |
|
938 {0, 0}, |
|
939 {0, 0}, |
|
940 {0, 0} |
|
941 } |
|
942 }, |
|
943 {1, 0, 2, 0, /* 0x79 */ |
|
944 {{0, 0}, |
|
945 {3, 6}, |
|
946 {0, 0}, |
|
947 {0, 0} |
|
948 } |
|
949 }, |
|
950 {0, 0, 2, 0, /* 0x7a */ |
|
951 {{1, 1}, |
|
952 {3, 6}, |
|
953 {0, 0}, |
|
954 {0, 0} |
|
955 } |
|
956 }, |
|
957 {1, 0, 2, 0, /* 0x7b */ |
|
958 {{0, 1}, |
|
959 {3, 6}, |
|
960 {0, 0}, |
|
961 {0, 0} |
|
962 } |
|
963 }, |
|
964 {0, 0, 1, 0, /* 0x7c */ |
|
965 {{2, 6}, |
|
966 {0, 0}, |
|
967 {0, 0}, |
|
968 {0, 0} |
|
969 } |
|
970 }, |
|
971 {1, 0, 2, 0, /* 0x7d */ |
|
972 {{0, 0}, |
|
973 {2, 6}, |
|
974 {0, 0}, |
|
975 {0, 0} |
|
976 } |
|
977 }, |
|
978 {0, 0, 1, 0, /* 0x7e */ |
|
979 {{1, 6}, |
|
980 {0, 0}, |
|
981 {0, 0}, |
|
982 {0, 0} |
|
983 } |
|
984 }, |
|
985 {1, 0, 1, 0, /* 0x7f */ |
|
986 {{0, 6}, |
|
987 {0, 0}, |
|
988 {0, 0}, |
|
989 {0, 0} |
|
990 } |
|
991 }, |
|
992 {0, 1, 1, 0, /* 0x80 */ |
|
993 {{7, 7}, |
|
994 {0, 0}, |
|
995 {0, 0}, |
|
996 {0, 0} |
|
997 } |
|
998 }, |
|
999 {1, 1, 2, 0, /* 0x81 */ |
|
1000 {{0, 0}, |
|
1001 {7, 7}, |
|
1002 {0, 0}, |
|
1003 {0, 0} |
|
1004 } |
|
1005 }, |
|
1006 {0, 1, 2, 0, /* 0x82 */ |
|
1007 {{1, 1}, |
|
1008 {7, 7}, |
|
1009 {0, 0}, |
|
1010 {0, 0} |
|
1011 } |
|
1012 }, |
|
1013 {1, 1, 2, 0, /* 0x83 */ |
|
1014 {{0, 1}, |
|
1015 {7, 7}, |
|
1016 {0, 0}, |
|
1017 {0, 0} |
|
1018 } |
|
1019 }, |
|
1020 {0, 1, 2, 0, /* 0x84 */ |
|
1021 {{2, 2}, |
|
1022 {7, 7}, |
|
1023 {0, 0}, |
|
1024 {0, 0} |
|
1025 } |
|
1026 }, |
|
1027 {1, 1, 3, 0, /* 0x85 */ |
|
1028 {{0, 0}, |
|
1029 {2, 2}, |
|
1030 {7, 7}, |
|
1031 {0, 0} |
|
1032 } |
|
1033 }, |
|
1034 {0, 1, 2, 0, /* 0x86 */ |
|
1035 {{1, 2}, |
|
1036 {7, 7}, |
|
1037 {0, 0}, |
|
1038 {0, 0} |
|
1039 } |
|
1040 }, |
|
1041 {1, 1, 2, 0, /* 0x87 */ |
|
1042 {{0, 2}, |
|
1043 {7, 7}, |
|
1044 {0, 0}, |
|
1045 {0, 0} |
|
1046 } |
|
1047 }, |
|
1048 {0, 1, 2, 0, /* 0x88 */ |
|
1049 {{3, 3}, |
|
1050 {7, 7}, |
|
1051 {0, 0}, |
|
1052 {0, 0} |
|
1053 } |
|
1054 }, |
|
1055 {1, 1, 3, 0, /* 0x89 */ |
|
1056 {{0, 0}, |
|
1057 {3, 3}, |
|
1058 {7, 7}, |
|
1059 {0, 0} |
|
1060 } |
|
1061 }, |
|
1062 {0, 1, 3, 0, /* 0x8a */ |
|
1063 {{1, 1}, |
|
1064 {3, 3}, |
|
1065 {7, 7}, |
|
1066 {0, 0} |
|
1067 } |
|
1068 }, |
|
1069 {1, 1, 3, 0, /* 0x8b */ |
|
1070 {{0, 1}, |
|
1071 {3, 3}, |
|
1072 {7, 7}, |
|
1073 {0, 0} |
|
1074 } |
|
1075 }, |
|
1076 {0, 1, 2, 0, /* 0x8c */ |
|
1077 {{2, 3}, |
|
1078 {7, 7}, |
|
1079 {0, 0}, |
|
1080 {0, 0} |
|
1081 } |
|
1082 }, |
|
1083 {1, 1, 3, 0, /* 0x8d */ |
|
1084 {{0, 0}, |
|
1085 {2, 3}, |
|
1086 {7, 7}, |
|
1087 {0, 0} |
|
1088 } |
|
1089 }, |
|
1090 {0, 1, 2, 0, /* 0x8e */ |
|
1091 {{1, 3}, |
|
1092 {7, 7}, |
|
1093 {0, 0}, |
|
1094 {0, 0} |
|
1095 } |
|
1096 }, |
|
1097 {1, 1, 2, 0, /* 0x8f */ |
|
1098 {{0, 3}, |
|
1099 {7, 7}, |
|
1100 {0, 0}, |
|
1101 {0, 0} |
|
1102 } |
|
1103 }, |
|
1104 {0, 1, 2, 0, /* 0x90 */ |
|
1105 {{4, 4}, |
|
1106 {7, 7}, |
|
1107 {0, 0}, |
|
1108 {0, 0} |
|
1109 } |
|
1110 }, |
|
1111 {1, 1, 3, 0, /* 0x91 */ |
|
1112 {{0, 0}, |
|
1113 {4, 4}, |
|
1114 {7, 7}, |
|
1115 {0, 0} |
|
1116 } |
|
1117 }, |
|
1118 {0, 1, 3, 0, /* 0x92 */ |
|
1119 {{1, 1}, |
|
1120 {4, 4}, |
|
1121 {7, 7}, |
|
1122 {0, 0} |
|
1123 } |
|
1124 }, |
|
1125 {1, 1, 3, 0, /* 0x93 */ |
|
1126 {{0, 1}, |
|
1127 {4, 4}, |
|
1128 {7, 7}, |
|
1129 {0, 0} |
|
1130 } |
|
1131 }, |
|
1132 {0, 1, 3, 0, /* 0x94 */ |
|
1133 {{2, 2}, |
|
1134 {4, 4}, |
|
1135 {7, 7}, |
|
1136 {0, 0} |
|
1137 } |
|
1138 }, |
|
1139 {1, 1, 4, 0, /* 0x95 */ |
|
1140 {{0, 0}, |
|
1141 {2, 2}, |
|
1142 {4, 4}, |
|
1143 {7, 7} |
|
1144 } |
|
1145 }, |
|
1146 {0, 1, 3, 0, /* 0x96 */ |
|
1147 {{1, 2}, |
|
1148 {4, 4}, |
|
1149 {7, 7}, |
|
1150 {0, 0} |
|
1151 } |
|
1152 }, |
|
1153 {1, 1, 3, 0, /* 0x97 */ |
|
1154 {{0, 2}, |
|
1155 {4, 4}, |
|
1156 {7, 7}, |
|
1157 {0, 0} |
|
1158 } |
|
1159 }, |
|
1160 {0, 1, 2, 0, /* 0x98 */ |
|
1161 {{3, 4}, |
|
1162 {7, 7}, |
|
1163 {0, 0}, |
|
1164 {0, 0} |
|
1165 } |
|
1166 }, |
|
1167 {1, 1, 3, 0, /* 0x99 */ |
|
1168 {{0, 0}, |
|
1169 {3, 4}, |
|
1170 {7, 7}, |
|
1171 {0, 0} |
|
1172 } |
|
1173 }, |
|
1174 {0, 1, 3, 0, /* 0x9a */ |
|
1175 {{1, 1}, |
|
1176 {3, 4}, |
|
1177 {7, 7}, |
|
1178 {0, 0} |
|
1179 } |
|
1180 }, |
|
1181 {1, 1, 3, 0, /* 0x9b */ |
|
1182 {{0, 1}, |
|
1183 {3, 4}, |
|
1184 {7, 7}, |
|
1185 {0, 0} |
|
1186 } |
|
1187 }, |
|
1188 {0, 1, 2, 0, /* 0x9c */ |
|
1189 {{2, 4}, |
|
1190 {7, 7}, |
|
1191 {0, 0}, |
|
1192 {0, 0} |
|
1193 } |
|
1194 }, |
|
1195 {1, 1, 3, 0, /* 0x9d */ |
|
1196 {{0, 0}, |
|
1197 {2, 4}, |
|
1198 {7, 7}, |
|
1199 {0, 0} |
|
1200 } |
|
1201 }, |
|
1202 {0, 1, 2, 0, /* 0x9e */ |
|
1203 {{1, 4}, |
|
1204 {7, 7}, |
|
1205 {0, 0}, |
|
1206 {0, 0} |
|
1207 } |
|
1208 }, |
|
1209 {1, 1, 2, 0, /* 0x9f */ |
|
1210 {{0, 4}, |
|
1211 {7, 7}, |
|
1212 {0, 0}, |
|
1213 {0, 0} |
|
1214 } |
|
1215 }, |
|
1216 {0, 1, 2, 0, /* 0xa0 */ |
|
1217 {{5, 5}, |
|
1218 {7, 7}, |
|
1219 {0, 0}, |
|
1220 {0, 0} |
|
1221 } |
|
1222 }, |
|
1223 {1, 1, 3, 0, /* 0xa1 */ |
|
1224 {{0, 0}, |
|
1225 {5, 5}, |
|
1226 {7, 7}, |
|
1227 {0, 0} |
|
1228 } |
|
1229 }, |
|
1230 {0, 1, 3, 0, /* 0xa2 */ |
|
1231 {{1, 1}, |
|
1232 {5, 5}, |
|
1233 {7, 7}, |
|
1234 {0, 0} |
|
1235 } |
|
1236 }, |
|
1237 {1, 1, 3, 0, /* 0xa3 */ |
|
1238 {{0, 1}, |
|
1239 {5, 5}, |
|
1240 {7, 7}, |
|
1241 {0, 0} |
|
1242 } |
|
1243 }, |
|
1244 {0, 1, 3, 0, /* 0xa4 */ |
|
1245 {{2, 2}, |
|
1246 {5, 5}, |
|
1247 {7, 7}, |
|
1248 {0, 0} |
|
1249 } |
|
1250 }, |
|
1251 {1, 1, 4, 0, /* 0xa5 */ |
|
1252 {{0, 0}, |
|
1253 {2, 2}, |
|
1254 {5, 5}, |
|
1255 {7, 7} |
|
1256 } |
|
1257 }, |
|
1258 {0, 1, 3, 0, /* 0xa6 */ |
|
1259 {{1, 2}, |
|
1260 {5, 5}, |
|
1261 {7, 7}, |
|
1262 {0, 0} |
|
1263 } |
|
1264 }, |
|
1265 {1, 1, 3, 0, /* 0xa7 */ |
|
1266 {{0, 2}, |
|
1267 {5, 5}, |
|
1268 {7, 7}, |
|
1269 {0, 0} |
|
1270 } |
|
1271 }, |
|
1272 {0, 1, 3, 0, /* 0xa8 */ |
|
1273 {{3, 3}, |
|
1274 {5, 5}, |
|
1275 {7, 7}, |
|
1276 {0, 0} |
|
1277 } |
|
1278 }, |
|
1279 {1, 1, 4, 0, /* 0xa9 */ |
|
1280 {{0, 0}, |
|
1281 {3, 3}, |
|
1282 {5, 5}, |
|
1283 {7, 7} |
|
1284 } |
|
1285 }, |
|
1286 {0, 1, 4, 0, /* 0xaa */ |
|
1287 {{1, 1}, |
|
1288 {3, 3}, |
|
1289 {5, 5}, |
|
1290 {7, 7} |
|
1291 } |
|
1292 }, |
|
1293 {1, 1, 4, 0, /* 0xab */ |
|
1294 {{0, 1}, |
|
1295 {3, 3}, |
|
1296 {5, 5}, |
|
1297 {7, 7} |
|
1298 } |
|
1299 }, |
|
1300 {0, 1, 3, 0, /* 0xac */ |
|
1301 {{2, 3}, |
|
1302 {5, 5}, |
|
1303 {7, 7}, |
|
1304 {0, 0} |
|
1305 } |
|
1306 }, |
|
1307 {1, 1, 4, 0, /* 0xad */ |
|
1308 {{0, 0}, |
|
1309 {2, 3}, |
|
1310 {5, 5}, |
|
1311 {7, 7} |
|
1312 } |
|
1313 }, |
|
1314 {0, 1, 3, 0, /* 0xae */ |
|
1315 {{1, 3}, |
|
1316 {5, 5}, |
|
1317 {7, 7}, |
|
1318 {0, 0} |
|
1319 } |
|
1320 }, |
|
1321 {1, 1, 3, 0, /* 0xaf */ |
|
1322 {{0, 3}, |
|
1323 {5, 5}, |
|
1324 {7, 7}, |
|
1325 {0, 0} |
|
1326 } |
|
1327 }, |
|
1328 {0, 1, 2, 0, /* 0xb0 */ |
|
1329 {{4, 5}, |
|
1330 {7, 7}, |
|
1331 {0, 0}, |
|
1332 {0, 0} |
|
1333 } |
|
1334 }, |
|
1335 {1, 1, 3, 0, /* 0xb1 */ |
|
1336 {{0, 0}, |
|
1337 {4, 5}, |
|
1338 {7, 7}, |
|
1339 {0, 0} |
|
1340 } |
|
1341 }, |
|
1342 {0, 1, 3, 0, /* 0xb2 */ |
|
1343 {{1, 1}, |
|
1344 {4, 5}, |
|
1345 {7, 7}, |
|
1346 {0, 0} |
|
1347 } |
|
1348 }, |
|
1349 {1, 1, 3, 0, /* 0xb3 */ |
|
1350 {{0, 1}, |
|
1351 {4, 5}, |
|
1352 {7, 7}, |
|
1353 {0, 0} |
|
1354 } |
|
1355 }, |
|
1356 {0, 1, 3, 0, /* 0xb4 */ |
|
1357 {{2, 2}, |
|
1358 {4, 5}, |
|
1359 {7, 7}, |
|
1360 {0, 0} |
|
1361 } |
|
1362 }, |
|
1363 {1, 1, 4, 0, /* 0xb5 */ |
|
1364 {{0, 0}, |
|
1365 {2, 2}, |
|
1366 {4, 5}, |
|
1367 {7, 7} |
|
1368 } |
|
1369 }, |
|
1370 {0, 1, 3, 0, /* 0xb6 */ |
|
1371 {{1, 2}, |
|
1372 {4, 5}, |
|
1373 {7, 7}, |
|
1374 {0, 0} |
|
1375 } |
|
1376 }, |
|
1377 {1, 1, 3, 0, /* 0xb7 */ |
|
1378 {{0, 2}, |
|
1379 {4, 5}, |
|
1380 {7, 7}, |
|
1381 {0, 0} |
|
1382 } |
|
1383 }, |
|
1384 {0, 1, 2, 0, /* 0xb8 */ |
|
1385 {{3, 5}, |
|
1386 {7, 7}, |
|
1387 {0, 0}, |
|
1388 {0, 0} |
|
1389 } |
|
1390 }, |
|
1391 {1, 1, 3, 0, /* 0xb9 */ |
|
1392 {{0, 0}, |
|
1393 {3, 5}, |
|
1394 {7, 7}, |
|
1395 {0, 0} |
|
1396 } |
|
1397 }, |
|
1398 {0, 1, 3, 0, /* 0xba */ |
|
1399 {{1, 1}, |
|
1400 {3, 5}, |
|
1401 {7, 7}, |
|
1402 {0, 0} |
|
1403 } |
|
1404 }, |
|
1405 {1, 1, 3, 0, /* 0xbb */ |
|
1406 {{0, 1}, |
|
1407 {3, 5}, |
|
1408 {7, 7}, |
|
1409 {0, 0} |
|
1410 } |
|
1411 }, |
|
1412 {0, 1, 2, 0, /* 0xbc */ |
|
1413 {{2, 5}, |
|
1414 {7, 7}, |
|
1415 {0, 0}, |
|
1416 {0, 0} |
|
1417 } |
|
1418 }, |
|
1419 {1, 1, 3, 0, /* 0xbd */ |
|
1420 {{0, 0}, |
|
1421 {2, 5}, |
|
1422 {7, 7}, |
|
1423 {0, 0} |
|
1424 } |
|
1425 }, |
|
1426 {0, 1, 2, 0, /* 0xbe */ |
|
1427 {{1, 5}, |
|
1428 {7, 7}, |
|
1429 {0, 0}, |
|
1430 {0, 0} |
|
1431 } |
|
1432 }, |
|
1433 {1, 1, 2, 0, /* 0xbf */ |
|
1434 {{0, 5}, |
|
1435 {7, 7}, |
|
1436 {0, 0}, |
|
1437 {0, 0} |
|
1438 } |
|
1439 }, |
|
1440 {0, 1, 1, 0, /* 0xc0 */ |
|
1441 {{6, 7}, |
|
1442 {0, 0}, |
|
1443 {0, 0}, |
|
1444 {0, 0} |
|
1445 } |
|
1446 }, |
|
1447 {1, 1, 2, 0, /* 0xc1 */ |
|
1448 {{0, 0}, |
|
1449 {6, 7}, |
|
1450 {0, 0}, |
|
1451 {0, 0} |
|
1452 } |
|
1453 }, |
|
1454 {0, 1, 2, 0, /* 0xc2 */ |
|
1455 {{1, 1}, |
|
1456 {6, 7}, |
|
1457 {0, 0}, |
|
1458 {0, 0} |
|
1459 } |
|
1460 }, |
|
1461 {1, 1, 2, 0, /* 0xc3 */ |
|
1462 {{0, 1}, |
|
1463 {6, 7}, |
|
1464 {0, 0}, |
|
1465 {0, 0} |
|
1466 } |
|
1467 }, |
|
1468 {0, 1, 2, 0, /* 0xc4 */ |
|
1469 {{2, 2}, |
|
1470 {6, 7}, |
|
1471 {0, 0}, |
|
1472 {0, 0} |
|
1473 } |
|
1474 }, |
|
1475 {1, 1, 3, 0, /* 0xc5 */ |
|
1476 {{0, 0}, |
|
1477 {2, 2}, |
|
1478 {6, 7}, |
|
1479 {0, 0} |
|
1480 } |
|
1481 }, |
|
1482 {0, 1, 2, 0, /* 0xc6 */ |
|
1483 {{1, 2}, |
|
1484 {6, 7}, |
|
1485 {0, 0}, |
|
1486 {0, 0} |
|
1487 } |
|
1488 }, |
|
1489 {1, 1, 2, 0, /* 0xc7 */ |
|
1490 {{0, 2}, |
|
1491 {6, 7}, |
|
1492 {0, 0}, |
|
1493 {0, 0} |
|
1494 } |
|
1495 }, |
|
1496 {0, 1, 2, 0, /* 0xc8 */ |
|
1497 {{3, 3}, |
|
1498 {6, 7}, |
|
1499 {0, 0}, |
|
1500 {0, 0} |
|
1501 } |
|
1502 }, |
|
1503 {1, 1, 3, 0, /* 0xc9 */ |
|
1504 {{0, 0}, |
|
1505 {3, 3}, |
|
1506 {6, 7}, |
|
1507 {0, 0} |
|
1508 } |
|
1509 }, |
|
1510 {0, 1, 3, 0, /* 0xca */ |
|
1511 {{1, 1}, |
|
1512 {3, 3}, |
|
1513 {6, 7}, |
|
1514 {0, 0} |
|
1515 } |
|
1516 }, |
|
1517 {1, 1, 3, 0, /* 0xcb */ |
|
1518 {{0, 1}, |
|
1519 {3, 3}, |
|
1520 {6, 7}, |
|
1521 {0, 0} |
|
1522 } |
|
1523 }, |
|
1524 {0, 1, 2, 0, /* 0xcc */ |
|
1525 {{2, 3}, |
|
1526 {6, 7}, |
|
1527 {0, 0}, |
|
1528 {0, 0} |
|
1529 } |
|
1530 }, |
|
1531 {1, 1, 3, 0, /* 0xcd */ |
|
1532 {{0, 0}, |
|
1533 {2, 3}, |
|
1534 {6, 7}, |
|
1535 {0, 0} |
|
1536 } |
|
1537 }, |
|
1538 {0, 1, 2, 0, /* 0xce */ |
|
1539 {{1, 3}, |
|
1540 {6, 7}, |
|
1541 {0, 0}, |
|
1542 {0, 0} |
|
1543 } |
|
1544 }, |
|
1545 {1, 1, 2, 0, /* 0xcf */ |
|
1546 {{0, 3}, |
|
1547 {6, 7}, |
|
1548 {0, 0}, |
|
1549 {0, 0} |
|
1550 } |
|
1551 }, |
|
1552 {0, 1, 2, 0, /* 0xd0 */ |
|
1553 {{4, 4}, |
|
1554 {6, 7}, |
|
1555 {0, 0}, |
|
1556 {0, 0} |
|
1557 } |
|
1558 }, |
|
1559 {1, 1, 3, 0, /* 0xd1 */ |
|
1560 {{0, 0}, |
|
1561 {4, 4}, |
|
1562 {6, 7}, |
|
1563 {0, 0} |
|
1564 } |
|
1565 }, |
|
1566 {0, 1, 3, 0, /* 0xd2 */ |
|
1567 {{1, 1}, |
|
1568 {4, 4}, |
|
1569 {6, 7}, |
|
1570 {0, 0} |
|
1571 } |
|
1572 }, |
|
1573 {1, 1, 3, 0, /* 0xd3 */ |
|
1574 {{0, 1}, |
|
1575 {4, 4}, |
|
1576 {6, 7}, |
|
1577 {0, 0} |
|
1578 } |
|
1579 }, |
|
1580 {0, 1, 3, 0, /* 0xd4 */ |
|
1581 {{2, 2}, |
|
1582 {4, 4}, |
|
1583 {6, 7}, |
|
1584 {0, 0} |
|
1585 } |
|
1586 }, |
|
1587 {1, 1, 4, 0, /* 0xd5 */ |
|
1588 {{0, 0}, |
|
1589 {2, 2}, |
|
1590 {4, 4}, |
|
1591 {6, 7} |
|
1592 } |
|
1593 }, |
|
1594 {0, 1, 3, 0, /* 0xd6 */ |
|
1595 {{1, 2}, |
|
1596 {4, 4}, |
|
1597 {6, 7}, |
|
1598 {0, 0} |
|
1599 } |
|
1600 }, |
|
1601 {1, 1, 3, 0, /* 0xd7 */ |
|
1602 {{0, 2}, |
|
1603 {4, 4}, |
|
1604 {6, 7}, |
|
1605 {0, 0} |
|
1606 } |
|
1607 }, |
|
1608 {0, 1, 2, 0, /* 0xd8 */ |
|
1609 {{3, 4}, |
|
1610 {6, 7}, |
|
1611 {0, 0}, |
|
1612 {0, 0} |
|
1613 } |
|
1614 }, |
|
1615 {1, 1, 3, 0, /* 0xd9 */ |
|
1616 {{0, 0}, |
|
1617 {3, 4}, |
|
1618 {6, 7}, |
|
1619 {0, 0} |
|
1620 } |
|
1621 }, |
|
1622 {0, 1, 3, 0, /* 0xda */ |
|
1623 {{1, 1}, |
|
1624 {3, 4}, |
|
1625 {6, 7}, |
|
1626 {0, 0} |
|
1627 } |
|
1628 }, |
|
1629 {1, 1, 3, 0, /* 0xdb */ |
|
1630 {{0, 1}, |
|
1631 {3, 4}, |
|
1632 {6, 7}, |
|
1633 {0, 0} |
|
1634 } |
|
1635 }, |
|
1636 {0, 1, 2, 0, /* 0xdc */ |
|
1637 {{2, 4}, |
|
1638 {6, 7}, |
|
1639 {0, 0}, |
|
1640 {0, 0} |
|
1641 } |
|
1642 }, |
|
1643 {1, 1, 3, 0, /* 0xdd */ |
|
1644 {{0, 0}, |
|
1645 {2, 4}, |
|
1646 {6, 7}, |
|
1647 {0, 0} |
|
1648 } |
|
1649 }, |
|
1650 {0, 1, 2, 0, /* 0xde */ |
|
1651 {{1, 4}, |
|
1652 {6, 7}, |
|
1653 {0, 0}, |
|
1654 {0, 0} |
|
1655 } |
|
1656 }, |
|
1657 {1, 1, 2, 0, /* 0xdf */ |
|
1658 {{0, 4}, |
|
1659 {6, 7}, |
|
1660 {0, 0}, |
|
1661 {0, 0} |
|
1662 } |
|
1663 }, |
|
1664 {0, 1, 1, 0, /* 0xe0 */ |
|
1665 {{5, 7}, |
|
1666 {0, 0}, |
|
1667 {0, 0}, |
|
1668 {0, 0} |
|
1669 } |
|
1670 }, |
|
1671 {1, 1, 2, 0, /* 0xe1 */ |
|
1672 {{0, 0}, |
|
1673 {5, 7}, |
|
1674 {0, 0}, |
|
1675 {0, 0} |
|
1676 } |
|
1677 }, |
|
1678 {0, 1, 2, 0, /* 0xe2 */ |
|
1679 {{1, 1}, |
|
1680 {5, 7}, |
|
1681 {0, 0}, |
|
1682 {0, 0} |
|
1683 } |
|
1684 }, |
|
1685 {1, 1, 2, 0, /* 0xe3 */ |
|
1686 {{0, 1}, |
|
1687 {5, 7}, |
|
1688 {0, 0}, |
|
1689 {0, 0} |
|
1690 } |
|
1691 }, |
|
1692 {0, 1, 2, 0, /* 0xe4 */ |
|
1693 {{2, 2}, |
|
1694 {5, 7}, |
|
1695 {0, 0}, |
|
1696 {0, 0} |
|
1697 } |
|
1698 }, |
|
1699 {1, 1, 3, 0, /* 0xe5 */ |
|
1700 {{0, 0}, |
|
1701 {2, 2}, |
|
1702 {5, 7}, |
|
1703 {0, 0} |
|
1704 } |
|
1705 }, |
|
1706 {0, 1, 2, 0, /* 0xe6 */ |
|
1707 {{1, 2}, |
|
1708 {5, 7}, |
|
1709 {0, 0}, |
|
1710 {0, 0} |
|
1711 } |
|
1712 }, |
|
1713 {1, 1, 2, 0, /* 0xe7 */ |
|
1714 {{0, 2}, |
|
1715 {5, 7}, |
|
1716 {0, 0}, |
|
1717 {0, 0} |
|
1718 } |
|
1719 }, |
|
1720 {0, 1, 2, 0, /* 0xe8 */ |
|
1721 {{3, 3}, |
|
1722 {5, 7}, |
|
1723 {0, 0}, |
|
1724 {0, 0} |
|
1725 } |
|
1726 }, |
|
1727 {1, 1, 3, 0, /* 0xe9 */ |
|
1728 {{0, 0}, |
|
1729 {3, 3}, |
|
1730 {5, 7}, |
|
1731 {0, 0} |
|
1732 } |
|
1733 }, |
|
1734 {0, 1, 3, 0, /* 0xea */ |
|
1735 {{1, 1}, |
|
1736 {3, 3}, |
|
1737 {5, 7}, |
|
1738 {0, 0} |
|
1739 } |
|
1740 }, |
|
1741 {1, 1, 3, 0, /* 0xeb */ |
|
1742 {{0, 1}, |
|
1743 {3, 3}, |
|
1744 {5, 7}, |
|
1745 {0, 0} |
|
1746 } |
|
1747 }, |
|
1748 {0, 1, 2, 0, /* 0xec */ |
|
1749 {{2, 3}, |
|
1750 {5, 7}, |
|
1751 {0, 0}, |
|
1752 {0, 0} |
|
1753 } |
|
1754 }, |
|
1755 {1, 1, 3, 0, /* 0xed */ |
|
1756 {{0, 0}, |
|
1757 {2, 3}, |
|
1758 {5, 7}, |
|
1759 {0, 0} |
|
1760 } |
|
1761 }, |
|
1762 {0, 1, 2, 0, /* 0xee */ |
|
1763 {{1, 3}, |
|
1764 {5, 7}, |
|
1765 {0, 0}, |
|
1766 {0, 0} |
|
1767 } |
|
1768 }, |
|
1769 {1, 1, 2, 0, /* 0xef */ |
|
1770 {{0, 3}, |
|
1771 {5, 7}, |
|
1772 {0, 0}, |
|
1773 {0, 0} |
|
1774 } |
|
1775 }, |
|
1776 {0, 1, 1, 0, /* 0xf0 */ |
|
1777 {{4, 7}, |
|
1778 {0, 0}, |
|
1779 {0, 0}, |
|
1780 {0, 0} |
|
1781 } |
|
1782 }, |
|
1783 {1, 1, 2, 0, /* 0xf1 */ |
|
1784 {{0, 0}, |
|
1785 {4, 7}, |
|
1786 {0, 0}, |
|
1787 {0, 0} |
|
1788 } |
|
1789 }, |
|
1790 {0, 1, 2, 0, /* 0xf2 */ |
|
1791 {{1, 1}, |
|
1792 {4, 7}, |
|
1793 {0, 0}, |
|
1794 {0, 0} |
|
1795 } |
|
1796 }, |
|
1797 {1, 1, 2, 0, /* 0xf3 */ |
|
1798 {{0, 1}, |
|
1799 {4, 7}, |
|
1800 {0, 0}, |
|
1801 {0, 0} |
|
1802 } |
|
1803 }, |
|
1804 {0, 1, 2, 0, /* 0xf4 */ |
|
1805 {{2, 2}, |
|
1806 {4, 7}, |
|
1807 {0, 0}, |
|
1808 {0, 0} |
|
1809 } |
|
1810 }, |
|
1811 {1, 1, 3, 0, /* 0xf5 */ |
|
1812 {{0, 0}, |
|
1813 {2, 2}, |
|
1814 {4, 7}, |
|
1815 {0, 0} |
|
1816 } |
|
1817 }, |
|
1818 {0, 1, 2, 0, /* 0xf6 */ |
|
1819 {{1, 2}, |
|
1820 {4, 7}, |
|
1821 {0, 0}, |
|
1822 {0, 0} |
|
1823 } |
|
1824 }, |
|
1825 {1, 1, 2, 0, /* 0xf7 */ |
|
1826 {{0, 2}, |
|
1827 {4, 7}, |
|
1828 {0, 0}, |
|
1829 {0, 0} |
|
1830 } |
|
1831 }, |
|
1832 {0, 1, 1, 0, /* 0xf8 */ |
|
1833 {{3, 7}, |
|
1834 {0, 0}, |
|
1835 {0, 0}, |
|
1836 {0, 0} |
|
1837 } |
|
1838 }, |
|
1839 {1, 1, 2, 0, /* 0xf9 */ |
|
1840 {{0, 0}, |
|
1841 {3, 7}, |
|
1842 {0, 0}, |
|
1843 {0, 0} |
|
1844 } |
|
1845 }, |
|
1846 {0, 1, 2, 0, /* 0xfa */ |
|
1847 {{1, 1}, |
|
1848 {3, 7}, |
|
1849 {0, 0}, |
|
1850 {0, 0} |
|
1851 } |
|
1852 }, |
|
1853 {1, 1, 2, 0, /* 0xfb */ |
|
1854 {{0, 1}, |
|
1855 {3, 7}, |
|
1856 {0, 0}, |
|
1857 {0, 0} |
|
1858 } |
|
1859 }, |
|
1860 {0, 1, 1, 0, /* 0xfc */ |
|
1861 {{2, 7}, |
|
1862 {0, 0}, |
|
1863 {0, 0}, |
|
1864 {0, 0} |
|
1865 } |
|
1866 }, |
|
1867 {1, 1, 2, 0, /* 0xfd */ |
|
1868 {{0, 0}, |
|
1869 {2, 7}, |
|
1870 {0, 0}, |
|
1871 {0, 0} |
|
1872 } |
|
1873 }, |
|
1874 {0, 1, 1, 0, /* 0xfe */ |
|
1875 {{1, 7}, |
|
1876 {0, 0}, |
|
1877 {0, 0}, |
|
1878 {0, 0} |
|
1879 } |
|
1880 }, |
|
1881 {1, 1, 1, 0, /* 0xff */ |
|
1882 {{0, 7}, |
|
1883 {0, 0}, |
|
1884 {0, 0}, |
|
1885 {0, 0} |
|
1886 } |
|
1887 } |
|
1888 }; |
|
1889 |
|
1890 |
|
/*
 * Decide whether the interface address 'ifa' is usable under the scoping
 * rules carried in 'scope'.  Returns 1 if the address passes every check,
 * 0 if it must be skipped.  When 'do_update' is non-zero, the cached IPv6
 * interface-address flags are refreshed (via
 * sctp_gather_internal_ifa_flags()) before they are consulted.
 */
int
sctp_is_address_in_scope(struct sctp_ifa *ifa,
                         struct sctp_scoping *scope,
                         int do_update)
{
	if ((scope->loopback_scope == 0) &&
	    (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
		/*
		 * skip loopback if not in scope *
		 */
		return (0);
	}
	switch (ifa->address.sa.sa_family) {
#ifdef INET
	case AF_INET:
		if (scope->ipv4_addr_legal) {
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)&ifa->address.sin;
			if (sin->sin_addr.s_addr == 0) {
				/* not in scope , unspecified (INADDR_ANY) */
				return (0);
			}
			if ((scope->ipv4_local_scope == 0) &&
			    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
				/* private address not in scope */
				return (0);
			}
		} else {
			/* IPv4 addresses not allowed at all by this scope */
			return (0);
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		if (scope->ipv6_addr_legal) {
			struct sockaddr_in6 *sin6;

#if !defined(__Panda__)
			/* Must update the flags,  bummer, which
			 * means any IFA locks must now be applied HERE <->
			 */
			if (do_update) {
				sctp_gather_internal_ifa_flags(ifa);
			}
#endif
			if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
				/* e.g. deprecated/detached address - reject */
				return (0);
			}
			/* ok to use deprecated addresses? */
			sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
				/* skip unspecifed addresses (::) */
				return (0);
			}
			if ( /* (local_scope == 0) && */
			    (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
				/* link-local always rejected here */
				return (0);
			}
			if ((scope->site_scope == 0) &&
			    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
				return (0);
			}
		} else {
			/* IPv6 addresses not allowed at all by this scope */
			return (0);
		}
		break;
#endif
#if defined(__Userspace__)
	case AF_CONN:
		if (!scope->conn_addr_legal) {
			return (0);
		}
		break;
#endif
	default:
		/* unknown/unsupported address family */
		return (0);
	}
	return (1);
}
|
1971 |
|
/*
 * Append an address parameter (SCTP_IPV4_ADDRESS or SCTP_IPV6_ADDRESS) for
 * 'ifa' to the mbuf chain ending at 'm'.  If 'm' has enough trailing space
 * the parameter is placed there; otherwise a new mbuf is allocated and
 * linked at the end of the chain.  Returns the mbuf the parameter was
 * written into (the new tail), or 'm' unchanged on an unsupported family
 * or allocation failure.  When 'len' is non-NULL the parameter length is
 * added to it.
 */
static struct mbuf *
sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
{
#if defined(INET) || defined(INET6)
	struct sctp_paramhdr *parmh;
	struct mbuf *mret;
	uint16_t plen;
#endif

	/* first pass: determine the wire size of the parameter */
	switch (ifa->address.sa.sa_family) {
#ifdef INET
	case AF_INET:
		plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
		break;
#endif
	default:
		/* unsupported family: chain unchanged */
		return (m);
	}
#if defined(INET) || defined(INET6)
	if (M_TRAILINGSPACE(m) >= plen) {
		/* easy side we just drop it on the end */
		parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
		mret = m;
	} else {
		/* Need more space */
		mret = m;
		/* walk to the current tail of the chain */
		while (SCTP_BUF_NEXT(mret) != NULL) {
			mret = SCTP_BUF_NEXT(mret);
		}
		SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
		if (SCTP_BUF_NEXT(mret) == NULL) {
			/* We are hosed, can't add more addresses */
			return (m);
		}
		mret = SCTP_BUF_NEXT(mret);
		parmh = mtod(mret, struct sctp_paramhdr *);
	}
	/* now add the parameter */
	switch (ifa->address.sa.sa_family) {
#ifdef INET
	case AF_INET:
	{
		struct sctp_ipv4addr_param *ipv4p;
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)&ifa->address.sin;
		ipv4p = (struct sctp_ipv4addr_param *)parmh;
		parmh->param_type = htons(SCTP_IPV4_ADDRESS);
		parmh->param_length = htons(plen);
		/* s_addr is already in network byte order */
		ipv4p->addr = sin->sin_addr.s_addr;
		SCTP_BUF_LEN(mret) += plen;
		break;
	}
#endif
#ifdef INET6
	case AF_INET6:
	{
		struct sctp_ipv6addr_param *ipv6p;
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
		ipv6p = (struct sctp_ipv6addr_param *)parmh;
		parmh->param_type = htons(SCTP_IPV6_ADDRESS);
		parmh->param_length = htons(plen);
		memcpy(ipv6p->addr, &sin6->sin6_addr,
		       sizeof(ipv6p->addr));
#if defined(SCTP_EMBEDDED_V6_SCOPE)
		/* clear embedded scope in the address */
		in6_clearscope((struct in6_addr *)ipv6p->addr);
#endif
		SCTP_BUF_LEN(mret) += plen;
		break;
	}
#endif
	default:
		/* unreachable: family already validated above */
		return (m);
	}
	if (len != NULL) {
		/* report the bytes we consumed to the caller */
		*len += plen;
	}
	return (mret);
#endif
}
|
2060 |
|
2061 |
|
/*
 * Append this endpoint's usable addresses to an INIT/INIT-ACK chunk being
 * built in the mbuf chain 'm_at'.  'cnt_inits_to' seeds the address count.
 * For a bound-all endpoint the VRF's interface/address lists are walked
 * (first a counting pass, then an emitting pass); for a bound-specific
 * endpoint the inp's bound-address list is used instead.  Any pending
 * chunk padding (*padding_len) is flushed into the mbuf before the first
 * address is emitted.  Returns the (possibly new) tail mbuf of the chain.
 * Takes and releases SCTP_IPI_ADDR_RLOCK around the whole walk.
 */
struct mbuf *
sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
                           struct sctp_scoping *scope,
                           struct mbuf *m_at, int cnt_inits_to,
                           uint16_t *padding_len, uint16_t *chunk_len)
{
	struct sctp_vrf *vrf = NULL;
	int cnt, limit_out = 0, total_count;
	uint32_t vrf_id;

	vrf_id = inp->def_vrf_id;
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		SCTP_IPI_ADDR_RUNLOCK();
		return (m_at);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		struct sctp_ifa *sctp_ifap;
		struct sctp_ifn *sctp_ifnp;

		cnt = cnt_inits_to;
		if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
			/* too many to count precisely; cap and limit output */
			limit_out = 1;
			cnt = SCTP_ADDRESS_LIMIT;
			goto skip_count;
		}
		/* counting pass: how many addresses would we list? */
		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
			if ((scope->loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
				/*
				 * Skip loopback devices if loopback_scope
				 * not set
				 */
				continue;
			}
			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
					continue;
				}
#if defined(__Userspace__)
				if (sctp_ifap->address.sa.sa_family == AF_CONN) {
					continue;
				}
#endif
				/* do_update=1: refresh IPv6 flags on this pass */
				if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
					continue;
				}
				cnt++;
				if (cnt > SCTP_ADDRESS_LIMIT) {
					break;
				}
			}
			if (cnt > SCTP_ADDRESS_LIMIT) {
				break;
			}
		}
	skip_count:
		if (cnt > 1) {
			/* emitting pass */
			total_count = 0;
			LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
				cnt = 0;
				if ((scope->loopback_scope == 0) &&
				    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
					/*
					 * Skip loopback devices if
					 * loopback_scope not set
					 */
					continue;
				}
				LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
					if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
						continue;
					}
#if defined(__Userspace__)
					if (sctp_ifap->address.sa.sa_family == AF_CONN) {
						continue;
					}
#endif
					/* do_update=0: flags refreshed in first pass */
					if (sctp_is_address_in_scope(sctp_ifap,
					                             scope, 0) == 0) {
						continue;
					}
					if ((chunk_len != NULL) &&
					    (padding_len != NULL) &&
					    (*padding_len > 0)) {
						/* flush pending chunk padding first */
						memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
						SCTP_BUF_LEN(m_at) += *padding_len;
						*chunk_len += *padding_len;
						*padding_len = 0;
					}
					m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
					if (limit_out) {
						cnt++;
						total_count++;
						if (cnt >= 2) {
							/* two from each address */
							break;
						}
						if (total_count > SCTP_ADDRESS_LIMIT) {
							/* No more addresses */
							break;
						}
					}
				}
			}
		}
	} else {
		/* bound-specific endpoint: walk the inp's address list */
		struct sctp_laddr *laddr;

		cnt = cnt_inits_to;
		/* First, how many ? */
		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (laddr->ifa == NULL) {
				continue;
			}
			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
				/* Address being deleted by the system, dont
				 * list.
				 */
				continue;
			if (laddr->action == SCTP_DEL_IP_ADDRESS) {
				/* Address being deleted on this ep
				 * don't list.
				 */
				continue;
			}
#if defined(__Userspace__)
			if (laddr->ifa->address.sa.sa_family == AF_CONN) {
				continue;
			}
#endif
			if (sctp_is_address_in_scope(laddr->ifa,
			                             scope, 1) == 0) {
				continue;
			}
			cnt++;
		}
		/*
		 * To get through a NAT we only list addresses if we have
		 * more than one. That way if you just bind a single address
		 * we let the source of the init dictate our address.
		 */
		if (cnt > 1) {
			cnt = cnt_inits_to;
			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
				if (laddr->ifa == NULL) {
					continue;
				}
				if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
					continue;
				}
#if defined(__Userspace__)
				if (laddr->ifa->address.sa.sa_family == AF_CONN) {
					continue;
				}
#endif
				if (sctp_is_address_in_scope(laddr->ifa,
				                             scope, 0) == 0) {
					continue;
				}
				if ((chunk_len != NULL) &&
				    (padding_len != NULL) &&
				    (*padding_len > 0)) {
					/* flush pending chunk padding first */
					memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
					SCTP_BUF_LEN(m_at) += *padding_len;
					*chunk_len += *padding_len;
					*padding_len = 0;
				}
				m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
				cnt++;
				if (cnt >= SCTP_ADDRESS_LIMIT) {
					break;
				}
			}
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (m_at);
}
|
2242 |
|
/*
 * Return 'ifa' if it is a *preferred* source address for a destination
 * with the given scope attributes, NULL otherwise.  "Preferred" means the
 * source scope is the same as or wider than the destination scope (see
 * the table below).  'fam' filters by address family; mismatches are
 * rejected immediately.
 */
static struct sctp_ifa *
sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
                           uint8_t dest_is_loop,
                           uint8_t dest_is_priv,
                           sa_family_t fam)
{
	uint8_t dest_is_global = 0;
	/* dest_is_priv is true if destination is a private address */
	/* dest_is_loop is true if destination is a loopback addresses */

	/**
	 * Here we determine if its a preferred address. A preferred address
	 * means it is the same scope or higher scope then the destination.
	 * L = loopback, P = private, G = global
	 * -----------------------------------------
	 *    src    |  dest | result
	 * ----------------------------------------
	 *     L     |    L  |    yes
	 * -----------------------------------------
	 *     P     |    L  |    yes-v4 no-v6
	 * -----------------------------------------
	 *     G     |    L  |    yes-v4 no-v6
	 * -----------------------------------------
	 *     L     |    P  |    no
	 * -----------------------------------------
	 *     P     |    P  |    yes
	 * -----------------------------------------
	 *     G     |    P  |    no
	 * -----------------------------------------
	 *     L     |    G  |    no
	 * -----------------------------------------
	 *     P     |    G  |    no
	 * -----------------------------------------
	 *     G     |    G  |    yes
	 * -----------------------------------------
	 */

	if (ifa->address.sa.sa_family != fam) {
		/* forget mis-matched family */
		return (NULL);
	}
	if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
		/* destination is neither private nor loopback => global */
		dest_is_global = 1;
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
	/* Ok the address may be ok */
#ifdef INET6
	if (fam == AF_INET6) {
		/* ok to use deprecated addresses? no lets not! */
		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
			SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
			return (NULL);
		}
		/* v6 private (link-local) src cannot serve a loop dest */
		if (ifa->src_is_priv && !ifa->src_is_loop) {
			if (dest_is_loop) {
				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
				return (NULL);
			}
		}
		/* v6 global src cannot serve a loop dest either */
		if (ifa->src_is_glob) {
			if (dest_is_loop) {
				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
				return (NULL);
			}
		}
	}
#endif
	/* Now that we know what is what, implement or table
	 * this could in theory be done slicker (it used to be), but this
	 * is straightforward and easier to validate :-)
	 */
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
		ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
		dest_is_loop, dest_is_priv, dest_is_global);

	if ((ifa->src_is_loop) && (dest_is_priv)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
		return (NULL);
	}
	if ((ifa->src_is_glob) && (dest_is_priv)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
		return (NULL);
	}
	if ((ifa->src_is_loop) && (dest_is_global)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
		return (NULL);
	}
	if ((ifa->src_is_priv) && (dest_is_global)) {
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
		return (NULL);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
	/* its a preferred address */
	return (ifa);
}
|
2340 |
|
/*
 * Return 'ifa' if it is an *acceptable* source address for a destination
 * with the given scope attributes, NULL otherwise.  Acceptable is a weaker
 * test than preferred: a private source may serve a global destination
 * (NAT case) — see the table below.  'fam' filters by address family.
 */
static struct sctp_ifa *
sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
                            uint8_t dest_is_loop,
                            uint8_t dest_is_priv,
                            sa_family_t fam)
{
	uint8_t dest_is_global = 0;

	/**
	 * Here we determine if its a acceptable address. A acceptable
	 * address means it is the same scope or higher scope but we can
	 * allow for NAT which means its ok to have a global dest and a
	 * private src.
	 *
	 * L = loopback, P = private, G = global
	 * -----------------------------------------
	 *  src   |  dest | result
	 * -----------------------------------------
	 *   L    |   L   |    yes
	 *  -----------------------------------------
	 *   P    |   L   |    yes-v4 no-v6
	 *  -----------------------------------------
	 *   G    |   L   |    yes
	 * -----------------------------------------
	 *   L    |   P   |    no
	 * -----------------------------------------
	 *   P    |   P   |    yes
	 * -----------------------------------------
	 *   G    |   P   |    yes - May not work
	 * -----------------------------------------
	 *   L    |   G   |    no
	 * -----------------------------------------
	 *   P    |   G   |    yes - May not work
	 * -----------------------------------------
	 *   G    |   G   |    yes
	 * -----------------------------------------
	 */

	if (ifa->address.sa.sa_family != fam) {
		/* forget non matching family */
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
			ifa->address.sa.sa_family, fam);
		return (NULL);
	}
	/* Ok the address may be ok */
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
		dest_is_loop, dest_is_priv);
	if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
		/* destination is neither loopback nor private => global */
		dest_is_global = 1;
	}
#ifdef INET6
	if (fam == AF_INET6) {
		/* ok to use deprecated addresses? */
		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
			return (NULL);
		}
		if (ifa->src_is_priv) {
			/* Special case, linklocal to loop */
			if (dest_is_loop)
				return (NULL);
		}
	}
#endif
	/*
	 * Now that we know what is what, implement our table.
	 * This could in theory be done slicker (it used to be), but this
	 * is straightforward and easier to validate :-)
	 */
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
		ifa->src_is_loop,
		dest_is_priv);
	if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
		/* loopback src cannot reach a private dest */
		return (NULL);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
		ifa->src_is_loop,
		dest_is_global);
	if ((ifa->src_is_loop == 1) && (dest_is_global)) {
		/* loopback src cannot reach a global dest */
		return (NULL);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
	/* its an acceptable address */
	return (ifa);
}
|
2426 |
|
2427 int |
|
2428 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa) |
|
2429 { |
|
2430 struct sctp_laddr *laddr; |
|
2431 |
|
2432 if (stcb == NULL) { |
|
2433 /* There are no restrictions, no TCB :-) */ |
|
2434 return (0); |
|
2435 } |
|
2436 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) { |
|
2437 if (laddr->ifa == NULL) { |
|
2438 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", |
|
2439 __FUNCTION__); |
|
2440 continue; |
|
2441 } |
|
2442 if (laddr->ifa == ifa) { |
|
2443 /* Yes it is on the list */ |
|
2444 return (1); |
|
2445 } |
|
2446 } |
|
2447 return (0); |
|
2448 } |
|
2449 |
|
2450 |
|
2451 int |
|
2452 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa) |
|
2453 { |
|
2454 struct sctp_laddr *laddr; |
|
2455 |
|
2456 if (ifa == NULL) |
|
2457 return (0); |
|
2458 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { |
|
2459 if (laddr->ifa == NULL) { |
|
2460 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", |
|
2461 __FUNCTION__); |
|
2462 continue; |
|
2463 } |
|
2464 if ((laddr->ifa == ifa) && laddr->action == 0) |
|
2465 /* same pointer */ |
|
2466 return (1); |
|
2467 } |
|
2468 return (0); |
|
2469 } |
|
2470 |
|
2471 |
|
2472 |
|
/*
 * Source-address selection for a bound-specific endpoint (no association).
 * Strategy, in order:
 *   1) a preferred address bound to the endpoint on the interface the
 *      route will emit on;
 *   2) a preferred address anywhere on the endpoint's bound list, rotating
 *      from inp->next_addr_touse (with wrap-around to the list head);
 *   3) an acceptable address from the same rotating scan.
 * On success the chosen ifa's refcount is bumped and it is returned;
 * returns NULL when no bound address can serve the destination.
 */
static struct sctp_ifa *
sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
                              sctp_route_t *ro,
                              uint32_t vrf_id,
                              int non_asoc_addr_ok,
                              uint8_t dest_is_priv,
                              uint8_t dest_is_loop,
                              sa_family_t fam)
{
	struct sctp_laddr *laddr, *starting_point;
	void *ifn;
	int resettotop = 0;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa, *sifa;
	struct sctp_vrf *vrf;
	uint32_t ifn_index;

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
	/*
	 * first question, is the ifn we will emit on in our list, if so, we
	 * want such an address. Note that we first looked for a
	 * preferred address.
	 */
	if (sctp_ifn) {
		/* is a preferred one on the interface we route out? */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
			    (non_asoc_addr_ok == 0))
				continue;
			sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
							  dest_is_loop,
							  dest_is_priv, fam);
			if (sifa == NULL)
				continue;
			if (sctp_is_addr_in_ep(inp, sifa)) {
				/* caller owns the reference we take here */
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}
	}
	/*
	 * ok, now we now need to find one on the list of the addresses.
	 * We can't get one on the emitting interface so let's find first
	 * a preferred one. If not that an acceptable one otherwise...
	 * we return NULL.
	 */
	starting_point = inp->next_addr_touse;
once_again:
	if (inp->next_addr_touse == NULL) {
		/* rotate back to the head of the bound list */
		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
		resettotop = 1;
	}
	for (laddr = inp->next_addr_touse; laddr;
	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
						  dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (resettotop == 0) {
		/* scanned only the tail; wrap and scan from the head once */
		inp->next_addr_touse = NULL;
		goto once_again;
	}

	inp->next_addr_touse = starting_point;
	resettotop = 0;
once_again_too:
	if (inp->next_addr_touse == NULL) {
		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
		resettotop = 1;
	}

	/* ok, what about an acceptable address in the inp */
	for (laddr = inp->next_addr_touse; laddr;
	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
						   dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (resettotop == 0) {
		inp->next_addr_touse = NULL;
		goto once_again_too;
	}

	/*
	 * no address bound can be a source for the destination we are in
	 * trouble
	 */
	return (NULL);
}
|
2590 |
|
2591 |
|
2592 |
|
/*
 * Source-address selection for a bound-specific endpoint WITH an
 * association.  Same overall strategy as sctp_choose_boundspecific_inp()
 * but every candidate is additionally filtered against the association's
 * restricted/pending lists, and the rotation state lives in
 * stcb->asoc.last_used_address.  Order:
 *   1) preferred address bound to the ep on the emitting interface;
 *   2) acceptable address bound to the ep on the emitting interface;
 *   3) preferred address from the ep's bound list (rotating, with wrap);
 *   4) acceptable address from the same rotating scan.
 * On success bumps the chosen ifa's refcount and returns it; NULL if
 * nothing qualifies.
 */
static struct sctp_ifa *
sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
                               struct sctp_tcb *stcb,
                               sctp_route_t *ro,
                               uint32_t vrf_id,
                               uint8_t dest_is_priv,
                               uint8_t dest_is_loop,
                               int non_asoc_addr_ok,
                               sa_family_t fam)
{
	struct sctp_laddr *laddr, *starting_point;
	void *ifn;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa, *sifa;
	uint8_t start_at_beginning = 0;
	struct sctp_vrf *vrf;
	uint32_t ifn_index;

	/*
	 * first question, is the ifn we will emit on in our list, if so, we
	 * want that one.
	 */
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	sctp_ifn = sctp_find_ifn( ifn, ifn_index);

	/*
	 * first question, is the ifn we will emit on in our list?  If so,
	 * we want that one.  First we look for a preferred.  Second, we go
	 * for an acceptable.
	 */
	if (sctp_ifn) {
		/* first try for a preferred address on the ep */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
				continue;
			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
				sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
				if (sifa == NULL)
					continue;
				/*
				 * Reject restricted addresses; with
				 * non_asoc_addr_ok a restricted address is
				 * still usable if it is pending (being added).
				 */
				if (((non_asoc_addr_ok == 0) &&
				     (sctp_is_addr_restricted(stcb, sifa))) ||
				    (non_asoc_addr_ok &&
				     (sctp_is_addr_restricted(stcb, sifa)) &&
				     (!sctp_is_addr_pending(stcb, sifa)))) {
					/* on the no-no list */
					continue;
				}
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}
		/* next try for an acceptable address on the ep */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
				continue;
			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
				sifa= sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv,fam);
				if (sifa == NULL)
					continue;
				if (((non_asoc_addr_ok == 0) &&
				     (sctp_is_addr_restricted(stcb, sifa))) ||
				    (non_asoc_addr_ok &&
				     (sctp_is_addr_restricted(stcb, sifa)) &&
				     (!sctp_is_addr_pending(stcb, sifa)))) {
					/* on the no-no list */
					continue;
				}
				atomic_add_int(&sifa->refcount, 1);
				return (sifa);
			}
		}

	}
	/*
	 * if we can't find one like that then we must look at all
	 * addresses bound to pick one at first preferable then
	 * secondly acceptable.
	 */
	starting_point = stcb->asoc.last_used_address;
sctp_from_the_top:
	if (stcb->asoc.last_used_address == NULL) {
		/* rotation wrapped: restart at the head of the bound list */
		start_at_beginning = 1;
		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
	}
	/* search beginning with the last used address */
	for (laddr = stcb->asoc.last_used_address; laddr;
	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		if (((non_asoc_addr_ok == 0) &&
		     (sctp_is_addr_restricted(stcb, sifa))) ||
		    (non_asoc_addr_ok &&
		     (sctp_is_addr_restricted(stcb, sifa)) &&
		     (!sctp_is_addr_pending(stcb, sifa)))) {
			/* on the no-no list */
			continue;
		}
		/* remember where to resume next time */
		stcb->asoc.last_used_address = laddr;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (start_at_beginning == 0) {
		/* scanned only the tail; wrap and scan from the head once */
		stcb->asoc.last_used_address = NULL;
		goto sctp_from_the_top;
	}
	/* now try for any higher scope than the destination */
	stcb->asoc.last_used_address = starting_point;
	start_at_beginning = 0;
sctp_from_the_top2:
	if (stcb->asoc.last_used_address == NULL) {
		start_at_beginning = 1;
		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
	}
	/* search beginning with the last used address */
	for (laddr = stcb->asoc.last_used_address; laddr;
	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
			/* address is being deleted */
			continue;
		}
		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
						   dest_is_priv, fam);
		if (sifa == NULL)
			continue;
		if (((non_asoc_addr_ok == 0) &&
		     (sctp_is_addr_restricted(stcb, sifa))) ||
		    (non_asoc_addr_ok &&
		     (sctp_is_addr_restricted(stcb, sifa)) &&
		     (!sctp_is_addr_pending(stcb, sifa)))) {
			/* on the no-no list */
			continue;
		}
		stcb->asoc.last_used_address = laddr;
		atomic_add_int(&sifa->refcount, 1);
		return (sifa);
	}
	if (start_at_beginning == 0) {
		stcb->asoc.last_used_address = NULL;
		goto sctp_from_the_top2;
	}
	return (NULL);
}
|
2753 |
|
/*
 * Walk the address list of 'ifn' and return the addr_wanted-th (0-based)
 * preferred source address usable towards the destination cached in 'ro'.
 * Addresses are skipped if they are deferred-use (unless non_asoc_addr_ok),
 * not "preferred" for this scope/family, scope-mismatched link-locals,
 * topologically wrong for the next-hop (mobility case), out of association
 * scope, or on the association's restricted list.
 * Returns NULL if fewer than addr_wanted+1 addresses qualify.
 * NOTE: does NOT bump the refcount of the returned ifa; callers do that.
 * 'stcb' may be NULL (no association-level filtering is done then).
 */
static struct sctp_ifa *
sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
						 struct sctp_tcb *stcb,
						 int non_asoc_addr_ok,
						 uint8_t dest_is_loop,
						 uint8_t dest_is_priv,
						 int addr_wanted,
						 sa_family_t fam,
						 sctp_route_t *ro
						 )
{
	struct sctp_ifa *ifa, *sifa;
	int num_eligible_addr = 0;
#ifdef INET6
#ifdef SCTP_EMBEDDED_V6_SCOPE
	struct sockaddr_in6 sin6, lsa6;

	if (fam == AF_INET6) {
		/* Recover the embedded scope id of the destination once,
		 * so link-local scope comparisons below are meaningful. */
		memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
#ifdef SCTP_KAME
		(void)sa6_recoverscope(&sin6);
#else
		(void)in6_recoverscope(&sin6, &sin6.sin6_addr, NULL);
#endif  /* SCTP_KAME */
	}
#endif  /* SCTP_EMBEDDED_V6_SCOPE */
#endif	/* INET6 */
	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
		    (non_asoc_addr_ok == 0))
			continue;
		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
						  dest_is_priv, fam);
		if (sifa == NULL)
			continue;
#ifdef INET6
		if (fam == AF_INET6 &&
		    dest_is_loop &&
		    sifa->src_is_loop && sifa->src_is_priv) {
			/* don't allow fe80::1 to be a src on loop ::1, we don't list it
			 * to the peer so we will get an abort.
			 */
			continue;
		}
#ifdef SCTP_EMBEDDED_V6_SCOPE
		if (fam == AF_INET6 &&
		    IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
		    IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
			/* link-local <-> link-local must belong to the same scope. */
			memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
#ifdef SCTP_KAME
			(void)sa6_recoverscope(&lsa6);
#else
			(void)in6_recoverscope(&lsa6, &lsa6.sin6_addr, NULL);
#endif  /* SCTP_KAME */
			if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
				continue;
			}
		}
#endif  /* SCTP_EMBEDDED_V6_SCOPE */
#endif	/* INET6 */

#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
		/* Check if the IPv6 address matches to next-hop.
		   In the mobile case, old IPv6 address may be not deleted
		   from the interface. Then, the interface has previous and
		   new addresses.  We should use one corresponding to the
		   next-hop.  (by micchie)
		 */
#ifdef INET6
		if (stcb && fam == AF_INET6 &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
			if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
			    == 0) {
				continue;
			}
		}
#endif
#ifdef INET
		/* Avoid topologically incorrect IPv4 address */
		if (stcb && fam == AF_INET &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
			if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
				continue;
			}
		}
#endif
#endif
		if (stcb) {
			if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
				continue;
			}
			/* Restricted addresses are skipped, except that with
			 * non_asoc_addr_ok a merely-pending address is OK. */
			if (((non_asoc_addr_ok == 0) &&
			     (sctp_is_addr_restricted(stcb, sifa))) ||
			    (non_asoc_addr_ok &&
			     (sctp_is_addr_restricted(stcb, sifa)) &&
			     (!sctp_is_addr_pending(stcb, sifa)))) {
				/* on the no-no list */
				continue;
			}
		}
		if (num_eligible_addr >= addr_wanted) {
			return (sifa);
		}
		num_eligible_addr++;
	}
	return (NULL);
}
|
2865 |
|
2866 |
|
2867 static int |
|
2868 sctp_count_num_preferred_boundall(struct sctp_ifn *ifn, |
|
2869 struct sctp_tcb *stcb, |
|
2870 int non_asoc_addr_ok, |
|
2871 uint8_t dest_is_loop, |
|
2872 uint8_t dest_is_priv, |
|
2873 sa_family_t fam) |
|
2874 { |
|
2875 struct sctp_ifa *ifa, *sifa; |
|
2876 int num_eligible_addr = 0; |
|
2877 |
|
2878 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) { |
|
2879 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && |
|
2880 (non_asoc_addr_ok == 0)) { |
|
2881 continue; |
|
2882 } |
|
2883 sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop, |
|
2884 dest_is_priv, fam); |
|
2885 if (sifa == NULL) { |
|
2886 continue; |
|
2887 } |
|
2888 if (stcb) { |
|
2889 if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) { |
|
2890 continue; |
|
2891 } |
|
2892 if (((non_asoc_addr_ok == 0) && |
|
2893 (sctp_is_addr_restricted(stcb, sifa))) || |
|
2894 (non_asoc_addr_ok && |
|
2895 (sctp_is_addr_restricted(stcb, sifa)) && |
|
2896 (!sctp_is_addr_pending(stcb, sifa)))) { |
|
2897 /* |
|
2898 * It is restricted for some reason.. |
|
2899 * probably not yet added. |
|
2900 */ |
|
2901 continue; |
|
2902 } |
|
2903 } |
|
2904 num_eligible_addr++; |
|
2905 } |
|
2906 return (num_eligible_addr); |
|
2907 } |
|
2908 |
|
2909 static struct sctp_ifa * |
|
2910 sctp_choose_boundall(struct sctp_tcb *stcb, |
|
2911 struct sctp_nets *net, |
|
2912 sctp_route_t *ro, |
|
2913 uint32_t vrf_id, |
|
2914 uint8_t dest_is_priv, |
|
2915 uint8_t dest_is_loop, |
|
2916 int non_asoc_addr_ok, |
|
2917 sa_family_t fam) |
|
2918 { |
|
2919 int cur_addr_num = 0, num_preferred = 0; |
|
2920 void *ifn; |
|
2921 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn; |
|
2922 struct sctp_ifa *sctp_ifa, *sifa; |
|
2923 uint32_t ifn_index; |
|
2924 struct sctp_vrf *vrf; |
|
2925 #ifdef INET |
|
2926 int retried = 0; |
|
2927 #endif |
|
2928 |
|
2929 /*- |
|
2930 * For boundall we can use any address in the association. |
|
2931 * If non_asoc_addr_ok is set we can use any address (at least in |
|
2932 * theory). So we look for preferred addresses first. If we find one, |
|
2933 * we use it. Otherwise we next try to get an address on the |
|
2934 * interface, which we should be able to do (unless non_asoc_addr_ok |
|
2935 * is false and we are routed out that way). In these cases where we |
|
2936 * can't use the address of the interface we go through all the |
|
2937 * ifn's looking for an address we can use and fill that in. Punting |
|
2938 * means we send back address 0, which will probably cause problems |
|
2939 * actually since then IP will fill in the address of the route ifn, |
|
2940 * which means we probably already rejected it.. i.e. here comes an |
|
2941 * abort :-<. |
|
2942 */ |
|
2943 vrf = sctp_find_vrf(vrf_id); |
|
2944 if (vrf == NULL) |
|
2945 return (NULL); |
|
2946 |
|
2947 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); |
|
2948 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro); |
|
2949 SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn from route:%p ifn_index:%d\n", ifn, ifn_index); |
|
2950 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index); |
|
2951 if (sctp_ifn == NULL) { |
|
2952 /* ?? We don't have this guy ?? */ |
|
2953 SCTPDBG(SCTP_DEBUG_OUTPUT2,"No ifn emit interface?\n"); |
|
2954 goto bound_all_plan_b; |
|
2955 } |
|
2956 SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn_index:%d name:%s is emit interface\n", |
|
2957 ifn_index, sctp_ifn->ifn_name); |
|
2958 |
|
2959 if (net) { |
|
2960 cur_addr_num = net->indx_of_eligible_next_to_use; |
|
2961 } |
|
2962 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, |
|
2963 stcb, |
|
2964 non_asoc_addr_ok, |
|
2965 dest_is_loop, |
|
2966 dest_is_priv, fam); |
|
2967 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n", |
|
2968 num_preferred, sctp_ifn->ifn_name); |
|
2969 if (num_preferred == 0) { |
|
2970 /* |
|
2971 * no eligible addresses, we must use some other interface |
|
2972 * address if we can find one. |
|
2973 */ |
|
2974 goto bound_all_plan_b; |
|
2975 } |
|
2976 /* |
|
2977 * Ok we have num_eligible_addr set with how many we can use, this |
|
2978 * may vary from call to call due to addresses being deprecated |
|
2979 * etc.. |
|
2980 */ |
|
2981 if (cur_addr_num >= num_preferred) { |
|
2982 cur_addr_num = 0; |
|
2983 } |
|
2984 /* |
|
2985 * select the nth address from the list (where cur_addr_num is the |
|
2986 * nth) and 0 is the first one, 1 is the second one etc... |
|
2987 */ |
|
2988 SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num); |
|
2989 |
|
2990 sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop, |
|
2991 dest_is_priv, cur_addr_num, fam, ro); |
|
2992 |
|
2993 /* if sctp_ifa is NULL something changed??, fall to plan b. */ |
|
2994 if (sctp_ifa) { |
|
2995 atomic_add_int(&sctp_ifa->refcount, 1); |
|
2996 if (net) { |
|
2997 /* save off where the next one we will want */ |
|
2998 net->indx_of_eligible_next_to_use = cur_addr_num + 1; |
|
2999 } |
|
3000 return (sctp_ifa); |
|
3001 } |
|
3002 /* |
|
3003 * plan_b: Look at all interfaces and find a preferred address. If |
|
3004 * no preferred fall through to plan_c. |
|
3005 */ |
|
3006 bound_all_plan_b: |
|
3007 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n"); |
|
3008 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { |
|
3009 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n", |
|
3010 sctp_ifn->ifn_name); |
|
3011 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { |
|
3012 /* wrong base scope */ |
|
3013 SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n"); |
|
3014 continue; |
|
3015 } |
|
3016 if ((sctp_ifn == looked_at) && looked_at) { |
|
3017 /* already looked at this guy */ |
|
3018 SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n"); |
|
3019 continue; |
|
3020 } |
|
3021 num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok, |
|
3022 dest_is_loop, dest_is_priv, fam); |
|
3023 SCTPDBG(SCTP_DEBUG_OUTPUT2, |
|
3024 "Found ifn:%p %d preferred source addresses\n", |
|
3025 ifn, num_preferred); |
|
3026 if (num_preferred == 0) { |
|
3027 /* None on this interface. */ |
|
3028 SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefered -- skipping to next\n"); |
|
3029 continue; |
|
3030 } |
|
3031 SCTPDBG(SCTP_DEBUG_OUTPUT2, |
|
3032 "num preferred:%d on interface:%p cur_addr_num:%d\n", |
|
3033 num_preferred, (void *)sctp_ifn, cur_addr_num); |
|
3034 |
|
3035 /* |
|
3036 * Ok we have num_eligible_addr set with how many we can |
|
3037 * use, this may vary from call to call due to addresses |
|
3038 * being deprecated etc.. |
|
3039 */ |
|
3040 if (cur_addr_num >= num_preferred) { |
|
3041 cur_addr_num = 0; |
|
3042 } |
|
3043 sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop, |
|
3044 dest_is_priv, cur_addr_num, fam, ro); |
|
3045 if (sifa == NULL) |
|
3046 continue; |
|
3047 if (net) { |
|
3048 net->indx_of_eligible_next_to_use = cur_addr_num + 1; |
|
3049 SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n", |
|
3050 cur_addr_num); |
|
3051 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:"); |
|
3052 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa); |
|
3053 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:"); |
|
3054 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa); |
|
3055 } |
|
3056 atomic_add_int(&sifa->refcount, 1); |
|
3057 return (sifa); |
|
3058 } |
|
3059 #ifdef INET |
|
3060 again_with_private_addresses_allowed: |
|
3061 #endif |
|
3062 /* plan_c: do we have an acceptable address on the emit interface */ |
|
3063 sifa = NULL; |
|
3064 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Trying Plan C: find acceptable on interface\n"); |
|
3065 if (emit_ifn == NULL) { |
|
3066 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jump to Plan D - no emit_ifn\n"); |
|
3067 goto plan_d; |
|
3068 } |
|
3069 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) { |
|
3070 SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa); |
|
3071 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && |
|
3072 (non_asoc_addr_ok == 0)) { |
|
3073 SCTPDBG(SCTP_DEBUG_OUTPUT2,"Defer\n"); |
|
3074 continue; |
|
3075 } |
|
3076 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, |
|
3077 dest_is_priv, fam); |
|
3078 if (sifa == NULL) { |
|
3079 SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n"); |
|
3080 continue; |
|
3081 } |
|
3082 if (stcb) { |
|
3083 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) { |
|
3084 SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n"); |
|
3085 sifa = NULL; |
|
3086 continue; |
|
3087 } |
|
3088 if (((non_asoc_addr_ok == 0) && |
|
3089 (sctp_is_addr_restricted(stcb, sifa))) || |
|
3090 (non_asoc_addr_ok && |
|
3091 (sctp_is_addr_restricted(stcb, sifa)) && |
|
3092 (!sctp_is_addr_pending(stcb, sifa)))) { |
|
3093 /* |
|
3094 * It is restricted for some |
|
3095 * reason.. probably not yet added. |
|
3096 */ |
|
3097 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its resticted\n"); |
|
3098 sifa = NULL; |
|
3099 continue; |
|
3100 } |
|
3101 } else { |
|
3102 SCTP_PRINTF("Stcb is null - no print\n"); |
|
3103 } |
|
3104 atomic_add_int(&sifa->refcount, 1); |
|
3105 goto out; |
|
3106 } |
|
3107 plan_d: |
|
3108 /* |
|
3109 * plan_d: We are in trouble. No preferred address on the emit |
|
3110 * interface. And not even a preferred address on all interfaces. |
|
3111 * Go out and see if we can find an acceptable address somewhere |
|
3112 * amongst all interfaces. |
|
3113 */ |
|
3114 SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at); |
|
3115 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { |
|
3116 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { |
|
3117 /* wrong base scope */ |
|
3118 continue; |
|
3119 } |
|
3120 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { |
|
3121 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && |
|
3122 (non_asoc_addr_ok == 0)) |
|
3123 continue; |
|
3124 sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, |
|
3125 dest_is_loop, |
|
3126 dest_is_priv, fam); |
|
3127 if (sifa == NULL) |
|
3128 continue; |
|
3129 if (stcb) { |
|
3130 if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) { |
|
3131 sifa = NULL; |
|
3132 continue; |
|
3133 } |
|
3134 if (((non_asoc_addr_ok == 0) && |
|
3135 (sctp_is_addr_restricted(stcb, sifa))) || |
|
3136 (non_asoc_addr_ok && |
|
3137 (sctp_is_addr_restricted(stcb, sifa)) && |
|
3138 (!sctp_is_addr_pending(stcb, sifa)))) { |
|
3139 /* |
|
3140 * It is restricted for some |
|
3141 * reason.. probably not yet added. |
|
3142 */ |
|
3143 sifa = NULL; |
|
3144 continue; |
|
3145 } |
|
3146 } |
|
3147 goto out; |
|
3148 } |
|
3149 } |
|
3150 #ifdef INET |
|
3151 if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) { |
|
3152 stcb->asoc.scope.ipv4_local_scope = 1; |
|
3153 retried = 1; |
|
3154 goto again_with_private_addresses_allowed; |
|
3155 } else if (retried == 1) { |
|
3156 stcb->asoc.scope.ipv4_local_scope = 0; |
|
3157 } |
|
3158 #endif |
|
3159 out: |
|
3160 #ifdef INET |
|
3161 if (sifa) { |
|
3162 if (retried == 1) { |
|
3163 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { |
|
3164 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { |
|
3165 /* wrong base scope */ |
|
3166 continue; |
|
3167 } |
|
3168 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { |
|
3169 struct sctp_ifa *tmp_sifa; |
|
3170 |
|
3171 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && |
|
3172 (non_asoc_addr_ok == 0)) |
|
3173 continue; |
|
3174 tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, |
|
3175 dest_is_loop, |
|
3176 dest_is_priv, fam); |
|
3177 if (tmp_sifa == NULL) { |
|
3178 continue; |
|
3179 } |
|
3180 if (tmp_sifa == sifa) { |
|
3181 continue; |
|
3182 } |
|
3183 if (stcb) { |
|
3184 if (sctp_is_address_in_scope(tmp_sifa, |
|
3185 &stcb->asoc.scope, 0) == 0) { |
|
3186 continue; |
|
3187 } |
|
3188 if (((non_asoc_addr_ok == 0) && |
|
3189 (sctp_is_addr_restricted(stcb, tmp_sifa))) || |
|
3190 (non_asoc_addr_ok && |
|
3191 (sctp_is_addr_restricted(stcb, tmp_sifa)) && |
|
3192 (!sctp_is_addr_pending(stcb, tmp_sifa)))) { |
|
3193 /* |
|
3194 * It is restricted for some |
|
3195 * reason.. probably not yet added. |
|
3196 */ |
|
3197 continue; |
|
3198 } |
|
3199 } |
|
3200 if ((tmp_sifa->address.sin.sin_family == AF_INET) && |
|
3201 (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) { |
|
3202 sctp_add_local_addr_restricted(stcb, tmp_sifa); |
|
3203 } |
|
3204 } |
|
3205 } |
|
3206 } |
|
3207 atomic_add_int(&sifa->refcount, 1); |
|
3208 } |
|
3209 #endif |
|
3210 return (sifa); |
|
3211 } |
|
3212 |
|
3213 |
|
3214 |
|
/*
 * Top-level source address selection: given the endpoint, optional
 * association (tcb may be NULL), destination route and optional net,
 * pick a local source address.  Allocates/validates the cached route,
 * classifies the destination's scope (loopback/private), then dispatches
 * to the boundall or bound-specific chooser under the address-list read
 * lock.  Returns an ifa whose refcount the chooser has incremented, or
 * NULL if no route or no usable address exists.
 */
struct sctp_ifa *
sctp_source_address_selection(struct sctp_inpcb *inp,
			      struct sctp_tcb *stcb,
			      sctp_route_t *ro,
			      struct sctp_nets *net,
			      int non_asoc_addr_ok, uint32_t vrf_id)
{
	struct sctp_ifa *answer;
	uint8_t dest_is_priv, dest_is_loop;
	sa_family_t fam;
#ifdef INET
	struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
#endif
#ifdef INET6
	struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
#endif

	/**
	 * Rules: - Find the route if needed, cache if I can. - Look at
	 * interface address in route, Is it in the bound list. If so we
	 * have the best source. - If not we must rotate amongst the
	 * addresses.
	 *
	 * Cavets and issues
	 *
	 * Do we need to pay attention to scope. We can have a private address
	 * or a global address we are sourcing or sending to. So if we draw
	 * it out
	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
	 * For V4
	 * ------------------------------------------
	 *      source     *      dest  *  result
	 * -----------------------------------------
	 * <a>  Private    *    Global  *  NAT
	 * -----------------------------------------
	 * <b>  Private    *    Private *  No problem
	 * -----------------------------------------
	 * <c>  Global     *    Private *  Huh, How will this work?
	 * -----------------------------------------
	 * <d>  Global     *    Global  *  No Problem
	 *------------------------------------------
	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
	 * For V6
	 *------------------------------------------
	 *      source     *      dest  *  result
	 * -----------------------------------------
	 * <a>  Linklocal  *    Global  *
	 * -----------------------------------------
	 * <b>  Linklocal  * Linklocal  *  No problem
	 * -----------------------------------------
	 * <c>  Global     * Linklocal  *  Huh, How will this work?
	 * -----------------------------------------
	 * <d>  Global     *    Global  *  No Problem
	 *------------------------------------------
	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
	 *
	 * And then we add to that what happens if there are multiple addresses
	 * assigned to an interface.  Remember the ifa on a ifn is a linked
	 * list of addresses. So one interface can have more than one IP
	 * address. What happens if we have both a private and a global
	 * address? Do we then use context of destination to sort out which
	 * one is best? And what about NAT's sending P->G may get you a NAT
	 * translation, or should you select the G thats on the interface in
	 * preference.
	 *
	 * Decisions:
	 *
	 * - count the number of addresses on the interface.
	 * - if it is one, no problem except case <c>.
	 *   For <a> we will assume a NAT out there.
	 * - if there are more than one, then we need to worry about scope P
	 *   or G. We should prefer G -> G and P -> P if possible.
	 *   Then as a secondary fall back to mixed types G->P being a last
	 *   ditch one.
	 * - The above all works for bound all, but bound specific we need to
	 *   use the same concept but instead only consider the bound
	 *   addresses. If the bound set is NOT assigned to the interface then
	 *   we must use rotation amongst the bound addresses..
	 */
	if (ro->ro_rt == NULL) {
		/*
		 * Need a route to cache.
		 */
		SCTP_RTALLOC(ro, vrf_id);
	}
	if (ro->ro_rt == NULL) {
		/* No route to the destination: cannot pick a source. */
		return (NULL);
	}
	fam = ro->ro_dst.sa_family;
	dest_is_priv = dest_is_loop = 0;
	/* Setup our scopes for the destination */
	switch (fam) {
#ifdef INET
	case AF_INET:
		/* Scope based on outbound address */
		if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
			dest_is_loop = 1;
			if (net != NULL) {
				/* mark it as local */
				net->addr_is_local = 1;
			}
		} else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
			dest_is_priv = 1;
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		/* Scope based on outbound address */
#if defined(__Userspace_os_Windows)
		/* On Windows userspace no real-loop route check is available. */
		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
#else
		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
		    SCTP_ROUTE_IS_REAL_LOOP(ro)) {
#endif
			/*
			 * If the address is a loopback address, which
			 * consists of "::1" OR "fe80::1%lo0", we are loopback
			 * scope. But we don't use dest_is_priv (link local
			 * addresses).
			 */
			dest_is_loop = 1;
			if (net != NULL) {
				/* mark it as local */
				net->addr_is_local = 1;
			}
		} else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
			dest_is_priv = 1;
		}
		break;
#endif
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
	/* Hold the address-list read lock across the chooser calls. */
	SCTP_IPI_ADDR_RLOCK();
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * Bound all case
		 */
		answer = sctp_choose_boundall(stcb, net, ro, vrf_id,
					      dest_is_priv, dest_is_loop,
					      non_asoc_addr_ok, fam);
		SCTP_IPI_ADDR_RUNLOCK();
		return (answer);
	}
	/*
	 * Subset bound case
	 */
	if (stcb) {
		answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
							vrf_id,	dest_is_priv,
							dest_is_loop,
							non_asoc_addr_ok, fam);
	} else {
		answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
						       non_asoc_addr_ok,
						       dest_is_priv,
						       dest_is_loop, fam);
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (answer);
}
|
3378 |
|
/*
 * Scan the control mbuf chain for a cmsg of type 'c_type' at level
 * IPPROTO_SCTP and copy its payload into 'data' (which must hold at
 * least 'cpsize' bytes).  As a compatibility shim, when c_type is
 * SCTP_SNDRCV the newer SCTP_SNDINFO/SCTP_PRINFO/SCTP_AUTHINFO cmsgs
 * are accepted instead and merged into a struct sctp_sndrcvinfo.
 * Returns 1 if something was copied out, 0 otherwise.  Malformed or
 * truncated cmsgs end the scan with whatever was found so far.
 */
static int
sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
{
#if defined(__Userspace_os_Windows)
	WSACMSGHDR cmh;
#else
	struct cmsghdr cmh;
#endif
	int tlen, at, found;
	struct sctp_sndinfo sndinfo;
	struct sctp_prinfo prinfo;
	struct sctp_authinfo authinfo;

	tlen = SCTP_BUF_LEN(control);
	at = 0;
	found = 0;
	/*
	 * Independent of how many mbufs, find the c_type inside the control
	 * structure and copy out the data.
	 */
	while (at < tlen) {
		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
			/* There is not enough room for one more. */
			return (found);
		}
		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
			/* We don't have a complete CMSG header. */
			return (found);
		}
		if (((int)cmh.cmsg_len + at) > tlen) {
			/* We don't have the complete CMSG. */
			return (found);
		}
		if ((cmh.cmsg_level == IPPROTO_SCTP) &&
		    ((c_type == cmh.cmsg_type) ||
		     ((c_type == SCTP_SNDRCV) &&
		      ((cmh.cmsg_type == SCTP_SNDINFO) ||
		       (cmh.cmsg_type == SCTP_PRINFO) ||
		       (cmh.cmsg_type == SCTP_AUTHINFO))))) {
			if (c_type == cmh.cmsg_type) {
				/* Payload must be large enough for the caller's buffer. */
				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < cpsize) {
					return (found);
				}
				/* It is exactly what we want. Copy it out. */
				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), cpsize, (caddr_t)data);
				return (1);
			} else {
				/* SCTP_SNDRCV requested but a newer-style cmsg
				 * was supplied: merge its fields into the
				 * caller's sctp_sndrcvinfo. */
				struct sctp_sndrcvinfo *sndrcvinfo;

				sndrcvinfo = (struct sctp_sndrcvinfo *)data;
				if (found == 0) {
					if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
						return (found);
					}
					/* First hit: start from a clean slate. */
					memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
				}
				switch (cmh.cmsg_type) {
				case SCTP_SNDINFO:
					if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_sndinfo)) {
						return (found);
					}
					m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
					sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
					sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
					sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
					sndrcvinfo->sinfo_context = sndinfo.snd_context;
					sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
					break;
				case SCTP_PRINFO:
					if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_prinfo)) {
						return (found);
					}
					m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
					if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
						sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
					} else {
						sndrcvinfo->sinfo_timetolive = 0;
					}
					/* The PR policy is encoded in the flags field. */
					sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
					break;
				case SCTP_AUTHINFO:
					if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_authinfo)) {
						return (found);
					}
					m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
					sndrcvinfo->sinfo_keynumber_valid = 1;
					sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
					break;
				default:
					return (found);
				}
				found = 1;
			}
		}
		at += CMSG_ALIGN(cmh.cmsg_len);
	}
	return (found);
}
|
3478 |
|
/*
 * Apply INIT-time control messages (SCTP_INIT, SCTP_DSTADDRV4/V6) from the
 * control mbuf chain to a freshly created association.  SCTP_INIT tunes
 * stream counts/timeouts (growing the outbound stream array if needed);
 * the DSTADDR cmsgs add remote addresses to the association.
 * Returns 0 on success; on failure returns 1 with *error set to
 * EINVAL (malformed cmsg / bad address) or ENOBUFS (address add failed).
 * Called with the tcb lock held; it is dropped around the stream-array
 * allocation and re-acquired (see SCTP_TCB_UNLOCK/SCTP_TCB_LOCK below).
 */
static int
sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
{
#if defined(__Userspace_os_Windows)
	WSACMSGHDR cmh;
#else
	struct cmsghdr cmh;
#endif
	int tlen, at;
	struct sctp_initmsg initmsg;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif

	tlen = SCTP_BUF_LEN(control);
	at = 0;
	while (at < tlen) {
		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
			/* There is not enough room for one more. */
			*error = EINVAL;
			return (1);
		}
		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
			/* We don't have a complete CMSG header. */
			*error = EINVAL;
			return (1);
		}
		if (((int)cmh.cmsg_len + at) > tlen) {
			/* We don't have the complete CMSG. */
			*error = EINVAL;
			return (1);
		}
		if (cmh.cmsg_level == IPPROTO_SCTP) {
			switch (cmh.cmsg_type) {
			case SCTP_INIT:
				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_initmsg)) {
					*error = EINVAL;
					return (1);
				}
				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
				/* Zero fields mean "keep the default". */
				if (initmsg.sinit_max_attempts)
					stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
				if (initmsg.sinit_num_ostreams)
					stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
				if (initmsg.sinit_max_instreams)
					stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
				if (initmsg.sinit_max_init_timeo)
					stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
				if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
					struct sctp_stream_out *tmp_str;
					unsigned int i;

					/* Default is NOT correct */
					SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
						stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
					/* Drop the tcb lock while allocating;
					 * SCTP_MALLOC may sleep. */
					SCTP_TCB_UNLOCK(stcb);
					SCTP_MALLOC(tmp_str,
					            struct sctp_stream_out *,
					            (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
					            SCTP_M_STRMO);
					SCTP_TCB_LOCK(stcb);
					if (tmp_str != NULL) {
						/* Swap in the larger stream array. */
						SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
						stcb->asoc.strmout = tmp_str;
						stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
					} else {
						/* Allocation failed: keep the current count. */
						stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
					}
					/* (Re)initialize every outbound stream. */
					for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
						TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
						stcb->asoc.strmout[i].chunks_on_queues = 0;
						stcb->asoc.strmout[i].next_sequence_send = 0;
						stcb->asoc.strmout[i].stream_no = i;
						stcb->asoc.strmout[i].last_msg_incomplete = 0;
						stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL);
					}
				}
				break;
#ifdef INET
			case SCTP_DSTADDRV4:
				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
					*error = EINVAL;
					return (1);
				}
				memset(&sin, 0, sizeof(struct sockaddr_in));
				sin.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
				sin.sin_len = sizeof(struct sockaddr_in);
#endif
				sin.sin_port = stcb->rport;
				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
				/* Reject wildcard, broadcast and multicast peers. */
				if ((sin.sin_addr.s_addr == INADDR_ANY) ||
				    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
				    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
					*error = EINVAL;
					return (1);
				}
				if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
				                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
					*error = ENOBUFS;
					return (1);
				}
				break;
#endif
#ifdef INET6
			case SCTP_DSTADDRV6:
				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
					*error = EINVAL;
					return (1);
				}
				memset(&sin6, 0, sizeof(struct sockaddr_in6));
				sin6.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
				sin6.sin6_len = sizeof(struct sockaddr_in6);
#endif
				sin6.sin6_port = stcb->rport;
				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
				if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
				    IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
					*error = EINVAL;
					return (1);
				}
#ifdef INET
				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
					/* V4-mapped addresses are added as plain IPv4. */
					in6_sin6_2_sin(&sin, &sin6);
					if ((sin.sin_addr.s_addr == INADDR_ANY) ||
					    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
					    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
						*error = EINVAL;
						return (1);
					}
					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL,
					                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
						*error = ENOBUFS;
						return (1);
					}
				} else
#endif
					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL,
					                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
						*error = ENOBUFS;
						return (1);
					}
				break;
#endif
			default:
				break;
			}
		}
		at += CMSG_ALIGN(cmh.cmsg_len);
	}
	return (0);
}
|
3636 |
|
/*
 * Walk the cmsg chain in "control" looking for SCTP_DSTADDRV4/SCTP_DSTADDRV6
 * destination addresses and return the first existing association (TCB)
 * found for one of them.  "port" is the peer port (already in network byte
 * order) paired with each candidate address.  On malformed cmsg data,
 * *error is set to EINVAL and NULL is returned; a NULL return with *error
 * untouched means no matching association was found.
 */
static struct sctp_tcb *
sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
    uint16_t port,
    struct mbuf *control,
    struct sctp_nets **net_p,
    int *error)
{
#if defined(__Userspace_os_Windows)
	WSACMSGHDR cmh;
#else
	struct cmsghdr cmh;
#endif
	int tlen, at;
	struct sctp_tcb *stcb;
	struct sockaddr *addr;
#ifdef INET
	struct sockaddr_in sin;
#endif
#ifdef INET6
	struct sockaddr_in6 sin6;
#endif

	tlen = SCTP_BUF_LEN(control);
	at = 0;
	while (at < tlen) {
		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
			/* There is not enough room for one more. */
			*error = EINVAL;
			return (NULL);
		}
		/* Copy out the header; the chain may not be contiguous. */
		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
			/* We don't have a complete CMSG header. */
			*error = EINVAL;
			return (NULL);
		}
		if (((int)cmh.cmsg_len + at) > tlen) {
			/* We don't have the complete CMSG. */
			*error = EINVAL;
			return (NULL);
		}
		if (cmh.cmsg_level == IPPROTO_SCTP) {
			switch (cmh.cmsg_type) {
#ifdef INET
			case SCTP_DSTADDRV4:
				/* Payload must hold at least an IPv4 address. */
				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
					*error = EINVAL;
					return (NULL);
				}
				memset(&sin, 0, sizeof(struct sockaddr_in));
				sin.sin_family = AF_INET;
#ifdef HAVE_SIN_LEN
				sin.sin_len = sizeof(struct sockaddr_in);
#endif
				sin.sin_port = port;
				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
				addr = (struct sockaddr *)&sin;
				break;
#endif
#ifdef INET6
			case SCTP_DSTADDRV6:
				/* Payload must hold at least an IPv6 address. */
				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
					*error = EINVAL;
					return (NULL);
				}
				memset(&sin6, 0, sizeof(struct sockaddr_in6));
				sin6.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
				sin6.sin6_len = sizeof(struct sockaddr_in6);
#endif
				sin6.sin6_port = port;
				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
#ifdef INET
				/* Look up a V4-mapped V6 address as plain IPv4. */
				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
					in6_sin6_2_sin(&sin, &sin6);
					addr = (struct sockaddr *)&sin;
				} else
#endif
					addr = (struct sockaddr *)&sin6;
				break;
#endif
			default:
				/* Not a destination-address cmsg; ignore it. */
				addr = NULL;
				break;
			}
			if (addr) {
				stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
				if (stcb != NULL) {
					return (stcb);
				}
			}
		}
		at += CMSG_ALIGN(cmh.cmsg_len);
	}
	return (NULL);
}
|
3733 |
|
/*
 * Build the STATE-COOKIE parameter used in an INIT-ACK.  The resulting
 * mbuf chain is:
 *   [sctp_paramhdr + sctp_state_cookie] -> [copy of peer's INIT]
 *       -> [copy of our INIT-ACK] -> [signature placeholder]
 * The signature area is zero-filled here; *signature is pointed at it so
 * the caller can compute and insert the HMAC afterwards.  Returns NULL on
 * any allocation failure (partially built chains are freed).
 */
static struct mbuf *
sctp_add_cookie(struct mbuf *init, int init_offset,
    struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
{
	struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
	struct sctp_state_cookie *stc;
	struct sctp_paramhdr *ph;
	uint8_t *foo;
	int sig_offset;
	uint16_t cookie_sz;

	mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr)), 0,
	    M_NOWAIT, 1, MT_DATA);
	if (mret == NULL) {
		return (NULL);
	}
	/* Duplicate the peer's INIT chunk starting at init_offset. */
	copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
	if (copy_init == NULL) {
		sctp_m_freem(mret);
		return (NULL);
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		struct mbuf *mat;

		for (mat = copy_init; mat; mat = SCTP_BUF_NEXT(mat)) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_ICOPY);
			}
		}
	}
#endif
	/* Duplicate our INIT-ACK chunk starting at initack_offset. */
	copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
	    M_NOWAIT);
	if (copy_initack == NULL) {
		sctp_m_freem(mret);
		sctp_m_freem(copy_init);
		return (NULL);
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		struct mbuf *mat;

		for (mat = copy_initack; mat; mat = SCTP_BUF_NEXT(mat)) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_ICOPY);
			}
		}
	}
#endif
	/* easy side we just drop it on the end */
	ph = mtod(mret, struct sctp_paramhdr *);
	SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr);
	stc = (struct sctp_state_cookie *)((caddr_t)ph +
	    sizeof(struct sctp_paramhdr));
	ph->param_type = htons(SCTP_STATE_COOKIE);
	ph->param_length = 0;	/* fill in at the end, once total size is known */
	/* Fill in the stc cookie data */
	memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));

	/* tack the INIT and then the INIT-ACK onto the chain */
	cookie_sz = 0;
	for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_init;
			break;
		}
	}
	for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_initack;
			break;
		}
	}
	/*
	 * Walk to the tail of the chain; m_at is deliberately left pointing
	 * at the last mbuf so the signature mbuf can be appended below.
	 */
	for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			break;
		}
	}
	sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA);
	if (sig == NULL) {
		/* no space, so free the entire chain */
		sctp_m_freem(mret);
		return (NULL);
	}
	SCTP_BUF_LEN(sig) = 0;
	SCTP_BUF_NEXT(m_at) = sig;
	sig_offset = 0;
	/*
	 * Zero the signature area and hand its location back to the caller,
	 * who computes the HMAC over the finished cookie and writes it here.
	 */
	foo = (uint8_t *) (mtod(sig, caddr_t) + sig_offset);
	memset(foo, 0, SCTP_SIGNATURE_SIZE);
	*signature = foo;
	SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
	cookie_sz += SCTP_SIGNATURE_SIZE;
	ph->param_length = htons(cookie_sz);
	return (mret);
}
|
3835 |
|
3836 |
|
3837 static uint8_t |
|
3838 sctp_get_ect(struct sctp_tcb *stcb) |
|
3839 { |
|
3840 if ((stcb != NULL) && (stcb->asoc.ecn_allowed == 1)) { |
|
3841 return (SCTP_ECT0_BIT); |
|
3842 } else { |
|
3843 return (0); |
|
3844 } |
|
3845 } |
|
3846 |
|
#if defined(INET) || defined(INET6)
/*
 * React to a failed route/source-address selection for "net": log the
 * destination, mark a confirmed reachable address unreachable (notifying
 * the ULP), and if it was the primary destination, install an alternate
 * net and drop the cached source address so it gets re-selected.
 */
static void
sctp_handle_no_route(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int so_locked)
{
	struct sctp_nets *alt;

	SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");

	if (net == NULL) {
		return;
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
	if ((net->dest_state & SCTP_ADDR_CONFIRMED) &&
	    (net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
		/* Confirmed + reachable -> transition it down and tell the user. */
		SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
		    stcb, 0,
		    (void *)net,
		    so_locked);
		net->dest_state &= ~(SCTP_ADDR_REACHABLE | SCTP_ADDR_PF);
	}
	if ((stcb != NULL) &&
	    (net == stcb->asoc.primary_destination)) {
		/* The primary path is gone; switch over to an alternate. */
		alt = sctp_find_alternate_net(stcb, net, 0);
		if (alt != net) {
			if (stcb->asoc.alternate) {
				sctp_free_remote_addr(stcb->asoc.alternate);
			}
			stcb->asoc.alternate = alt;
			atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
			/* Invalidate the cached source address for re-selection. */
			if (net->ro._s_addr) {
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
			}
			net->src_addr_selected = 0;
		}
	}
}
#endif
|
3892 |
|
3893 static int |
|
3894 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, |
|
3895 struct sctp_tcb *stcb, /* may be NULL */ |
|
3896 struct sctp_nets *net, |
|
3897 struct sockaddr *to, |
|
3898 struct mbuf *m, |
|
3899 uint32_t auth_offset, |
|
3900 struct sctp_auth_chunk *auth, |
|
3901 uint16_t auth_keyid, |
|
3902 int nofragment_flag, |
|
3903 int ecn_ok, |
|
3904 int out_of_asoc_ok, |
|
3905 uint16_t src_port, |
|
3906 uint16_t dest_port, |
|
3907 uint32_t v_tag, |
|
3908 uint16_t port, |
|
3909 union sctp_sockstore *over_addr, |
|
3910 #if defined(__FreeBSD__) |
|
3911 uint8_t use_mflowid, uint32_t mflowid, |
|
3912 #endif |
|
3913 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) |
|
3914 int so_locked SCTP_UNUSED |
|
3915 #else |
|
3916 int so_locked |
|
3917 #endif |
|
3918 ) |
|
3919 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */ |
|
3920 { |
|
3921 /** |
|
3922 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header |
|
3923 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure: |
|
3924 * - fill in the HMAC digest of any AUTH chunk in the packet. |
|
3925 * - calculate and fill in the SCTP checksum. |
|
3926 * - prepend an IP address header. |
|
3927 * - if boundall use INADDR_ANY. |
|
3928 * - if boundspecific do source address selection. |
|
3929 * - set fragmentation option for ipV4. |
|
3930 * - On return from IP output, check/adjust mtu size of output |
|
3931 * interface and smallest_mtu size as well. |
|
3932 */ |
|
3933 /* Will need ifdefs around this */ |
|
3934 #ifdef __Panda__ |
|
3935 pakhandle_type o_pak; |
|
3936 #endif |
|
3937 struct mbuf *newm; |
|
3938 struct sctphdr *sctphdr; |
|
3939 int packet_length; |
|
3940 int ret; |
|
3941 #if defined(INET) || defined(INET6) |
|
3942 uint32_t vrf_id; |
|
3943 #endif |
|
3944 #if defined(INET) || defined(INET6) |
|
3945 #if !defined(__Panda__) |
|
3946 struct mbuf *o_pak; |
|
3947 #endif |
|
3948 sctp_route_t *ro = NULL; |
|
3949 struct udphdr *udp = NULL; |
|
3950 #endif |
|
3951 uint8_t tos_value; |
|
3952 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) |
|
3953 struct socket *so = NULL; |
|
3954 #endif |
|
3955 |
|
3956 #if defined(__APPLE__) |
|
3957 if (so_locked) { |
|
3958 sctp_lock_assert(SCTP_INP_SO(inp)); |
|
3959 SCTP_TCB_LOCK_ASSERT(stcb); |
|
3960 } else { |
|
3961 sctp_unlock_assert(SCTP_INP_SO(inp)); |
|
3962 } |
|
3963 #endif |
|
3964 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) { |
|
3965 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT); |
|
3966 sctp_m_freem(m); |
|
3967 return (EFAULT); |
|
3968 } |
|
3969 #if defined(INET) || defined(INET6) |
|
3970 if (stcb) { |
|
3971 vrf_id = stcb->asoc.vrf_id; |
|
3972 } else { |
|
3973 vrf_id = inp->def_vrf_id; |
|
3974 } |
|
3975 #endif |
|
3976 /* fill in the HMAC digest for any AUTH chunk in the packet */ |
|
3977 if ((auth != NULL) && (stcb != NULL)) { |
|
3978 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid); |
|
3979 } |
|
3980 |
|
3981 if (net) { |
|
3982 tos_value = net->dscp; |
|
3983 } else if (stcb) { |
|
3984 tos_value = stcb->asoc.default_dscp; |
|
3985 } else { |
|
3986 tos_value = inp->sctp_ep.default_dscp; |
|
3987 } |
|
3988 |
|
3989 switch (to->sa_family) { |
|
3990 #ifdef INET |
|
3991 case AF_INET: |
|
3992 { |
|
3993 struct ip *ip = NULL; |
|
3994 sctp_route_t iproute; |
|
3995 int len; |
|
3996 |
|
3997 len = sizeof(struct ip) + sizeof(struct sctphdr); |
|
3998 if (port) { |
|
3999 len += sizeof(struct udphdr); |
|
4000 } |
|
4001 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA); |
|
4002 if (newm == NULL) { |
|
4003 sctp_m_freem(m); |
|
4004 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
4005 return (ENOMEM); |
|
4006 } |
|
4007 SCTP_ALIGN_TO_END(newm, len); |
|
4008 SCTP_BUF_LEN(newm) = len; |
|
4009 SCTP_BUF_NEXT(newm) = m; |
|
4010 m = newm; |
|
4011 #if defined(__FreeBSD__) |
|
4012 if (net != NULL) { |
|
4013 #ifdef INVARIANTS |
|
4014 if (net->flowidset == 0) { |
|
4015 panic("Flow ID not set"); |
|
4016 } |
|
4017 #endif |
|
4018 m->m_pkthdr.flowid = net->flowid; |
|
4019 m->m_flags |= M_FLOWID; |
|
4020 } else { |
|
4021 if (use_mflowid != 0) { |
|
4022 m->m_pkthdr.flowid = mflowid; |
|
4023 m->m_flags |= M_FLOWID; |
|
4024 } |
|
4025 } |
|
4026 #endif |
|
4027 packet_length = sctp_calculate_len(m); |
|
4028 ip = mtod(m, struct ip *); |
|
4029 ip->ip_v = IPVERSION; |
|
4030 ip->ip_hl = (sizeof(struct ip) >> 2); |
|
4031 if (tos_value == 0) { |
|
4032 /* |
|
4033 * This means especially, that it is not set at the |
|
4034 * SCTP layer. So use the value from the IP layer. |
|
4035 */ |
|
4036 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) |
|
4037 tos_value = inp->ip_inp.inp.inp_ip_tos; |
|
4038 #else |
|
4039 tos_value = inp->inp_ip_tos; |
|
4040 #endif |
|
4041 } |
|
4042 tos_value &= 0xfc; |
|
4043 if (ecn_ok) { |
|
4044 tos_value |= sctp_get_ect(stcb); |
|
4045 } |
|
4046 if ((nofragment_flag) && (port == 0)) { |
|
4047 #if defined(__FreeBSD__) |
|
4048 #if __FreeBSD_version >= 1000000 |
|
4049 ip->ip_off = htons(IP_DF); |
|
4050 #else |
|
4051 ip->ip_off = IP_DF; |
|
4052 #endif |
|
4053 #elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__) || defined(__Userspace__) |
|
4054 ip->ip_off = IP_DF; |
|
4055 #else |
|
4056 ip->ip_off = htons(IP_DF); |
|
4057 #endif |
|
4058 } else { |
|
4059 #if defined(__FreeBSD__) && __FreeBSD_version >= 1000000 |
|
4060 ip->ip_off = htons(0); |
|
4061 #else |
|
4062 ip->ip_off = 0; |
|
4063 #endif |
|
4064 } |
|
4065 #if defined(__FreeBSD__) |
|
4066 /* FreeBSD has a function for ip_id's */ |
|
4067 ip->ip_id = ip_newid(); |
|
4068 #elif defined(RANDOM_IP_ID) |
|
4069 /* Apple has RANDOM_IP_ID switch */ |
|
4070 ip->ip_id = htons(ip_randomid()); |
|
4071 #elif defined(__Userspace__) |
|
4072 ip->ip_id = htons(SCTP_IP_ID(inp)++); |
|
4073 #else |
|
4074 ip->ip_id = SCTP_IP_ID(inp)++; |
|
4075 #endif |
|
4076 |
|
4077 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) |
|
4078 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl; |
|
4079 #else |
|
4080 ip->ip_ttl = inp->inp_ip_ttl; |
|
4081 #endif |
|
4082 #if defined(__FreeBSD__) && __FreeBSD_version >= 1000000 |
|
4083 ip->ip_len = htons(packet_length); |
|
4084 #else |
|
4085 ip->ip_len = packet_length; |
|
4086 #endif |
|
4087 ip->ip_tos = tos_value; |
|
4088 if (port) { |
|
4089 ip->ip_p = IPPROTO_UDP; |
|
4090 } else { |
|
4091 ip->ip_p = IPPROTO_SCTP; |
|
4092 } |
|
4093 ip->ip_sum = 0; |
|
4094 if (net == NULL) { |
|
4095 ro = &iproute; |
|
4096 memset(&iproute, 0, sizeof(iproute)); |
|
4097 #ifdef HAVE_SA_LEN |
|
4098 memcpy(&ro->ro_dst, to, to->sa_len); |
|
4099 #else |
|
4100 memcpy(&ro->ro_dst, to, sizeof(struct sockaddr_in)); |
|
4101 #endif |
|
4102 } else { |
|
4103 ro = (sctp_route_t *)&net->ro; |
|
4104 } |
|
4105 /* Now the address selection part */ |
|
4106 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr; |
|
4107 |
|
4108 /* call the routine to select the src address */ |
|
4109 if (net && out_of_asoc_ok == 0) { |
|
4110 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) { |
|
4111 sctp_free_ifa(net->ro._s_addr); |
|
4112 net->ro._s_addr = NULL; |
|
4113 net->src_addr_selected = 0; |
|
4114 if (ro->ro_rt) { |
|
4115 RTFREE(ro->ro_rt); |
|
4116 ro->ro_rt = NULL; |
|
4117 } |
|
4118 } |
|
4119 if (net->src_addr_selected == 0) { |
|
4120 /* Cache the source address */ |
|
4121 net->ro._s_addr = sctp_source_address_selection(inp,stcb, |
|
4122 ro, net, 0, |
|
4123 vrf_id); |
|
4124 net->src_addr_selected = 1; |
|
4125 } |
|
4126 if (net->ro._s_addr == NULL) { |
|
4127 /* No route to host */ |
|
4128 net->src_addr_selected = 0; |
|
4129 sctp_handle_no_route(stcb, net, so_locked); |
|
4130 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); |
|
4131 sctp_m_freem(m); |
|
4132 return (EHOSTUNREACH); |
|
4133 } |
|
4134 ip->ip_src = net->ro._s_addr->address.sin.sin_addr; |
|
4135 } else { |
|
4136 if (over_addr == NULL) { |
|
4137 struct sctp_ifa *_lsrc; |
|
4138 |
|
4139 _lsrc = sctp_source_address_selection(inp, stcb, ro, |
|
4140 net, |
|
4141 out_of_asoc_ok, |
|
4142 vrf_id); |
|
4143 if (_lsrc == NULL) { |
|
4144 sctp_handle_no_route(stcb, net, so_locked); |
|
4145 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); |
|
4146 sctp_m_freem(m); |
|
4147 return (EHOSTUNREACH); |
|
4148 } |
|
4149 ip->ip_src = _lsrc->address.sin.sin_addr; |
|
4150 sctp_free_ifa(_lsrc); |
|
4151 } else { |
|
4152 ip->ip_src = over_addr->sin.sin_addr; |
|
4153 SCTP_RTALLOC(ro, vrf_id); |
|
4154 } |
|
4155 } |
|
4156 if (port) { |
|
4157 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) { |
|
4158 sctp_handle_no_route(stcb, net, so_locked); |
|
4159 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); |
|
4160 sctp_m_freem(m); |
|
4161 return (EHOSTUNREACH); |
|
4162 } |
|
4163 udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); |
|
4164 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)); |
|
4165 udp->uh_dport = port; |
|
4166 udp->uh_ulen = htons(packet_length - sizeof(struct ip)); |
|
4167 #if !defined(__Windows__) && !defined(__Userspace__) |
|
4168 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000) |
|
4169 if (V_udp_cksum) { |
|
4170 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP)); |
|
4171 } else { |
|
4172 udp->uh_sum = 0; |
|
4173 } |
|
4174 #else |
|
4175 udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP)); |
|
4176 #endif |
|
4177 #else |
|
4178 udp->uh_sum = 0; |
|
4179 #endif |
|
4180 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr)); |
|
4181 } else { |
|
4182 sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip)); |
|
4183 } |
|
4184 |
|
4185 sctphdr->src_port = src_port; |
|
4186 sctphdr->dest_port = dest_port; |
|
4187 sctphdr->v_tag = v_tag; |
|
4188 sctphdr->checksum = 0; |
|
4189 |
|
4190 /* |
|
4191 * If source address selection fails and we find no route |
|
4192 * then the ip_output should fail as well with a |
|
4193 * NO_ROUTE_TO_HOST type error. We probably should catch |
|
4194 * that somewhere and abort the association right away |
|
4195 * (assuming this is an INIT being sent). |
|
4196 */ |
|
4197 if (ro->ro_rt == NULL) { |
|
4198 /* |
|
4199 * src addr selection failed to find a route (or |
|
4200 * valid source addr), so we can't get there from |
|
4201 * here (yet)! |
|
4202 */ |
|
4203 sctp_handle_no_route(stcb, net, so_locked); |
|
4204 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); |
|
4205 sctp_m_freem(m); |
|
4206 return (EHOSTUNREACH); |
|
4207 } |
|
4208 if (ro != &iproute) { |
|
4209 memcpy(&iproute, ro, sizeof(*ro)); |
|
4210 } |
|
4211 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n", |
|
4212 (uint32_t) (ntohl(ip->ip_src.s_addr))); |
|
4213 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n", |
|
4214 (uint32_t)(ntohl(ip->ip_dst.s_addr))); |
|
4215 SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n", |
|
4216 (void *)ro->ro_rt); |
|
4217 |
|
4218 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) { |
|
4219 /* failed to prepend data, give up */ |
|
4220 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
4221 sctp_m_freem(m); |
|
4222 return (ENOMEM); |
|
4223 } |
|
4224 SCTP_ATTACH_CHAIN(o_pak, m, packet_length); |
|
4225 if (port) { |
|
4226 #if defined(SCTP_WITH_NO_CSUM) |
|
4227 SCTP_STAT_INCR(sctps_sendnocrc); |
|
4228 #else |
|
4229 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr)); |
|
4230 SCTP_STAT_INCR(sctps_sendswcrc); |
|
4231 #endif |
|
4232 #if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000) |
|
4233 if (V_udp_cksum) { |
|
4234 SCTP_ENABLE_UDP_CSUM(o_pak); |
|
4235 } |
|
4236 #else |
|
4237 SCTP_ENABLE_UDP_CSUM(o_pak); |
|
4238 #endif |
|
4239 } else { |
|
4240 #if defined(SCTP_WITH_NO_CSUM) |
|
4241 SCTP_STAT_INCR(sctps_sendnocrc); |
|
4242 #else |
|
4243 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000 |
|
4244 m->m_pkthdr.csum_flags = CSUM_SCTP; |
|
4245 m->m_pkthdr.csum_data = 0; |
|
4246 SCTP_STAT_INCR(sctps_sendhwcrc); |
|
4247 #else |
|
4248 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) && |
|
4249 (stcb) && (stcb->asoc.scope.loopback_scope))) { |
|
4250 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip)); |
|
4251 SCTP_STAT_INCR(sctps_sendswcrc); |
|
4252 } else { |
|
4253 SCTP_STAT_INCR(sctps_sendnocrc); |
|
4254 } |
|
4255 #endif |
|
4256 #endif |
|
4257 } |
|
4258 #ifdef SCTP_PACKET_LOGGING |
|
4259 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) |
|
4260 sctp_packet_log(o_pak); |
|
4261 #endif |
|
4262 /* send it out. table id is taken from stcb */ |
|
4263 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) |
|
4264 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) { |
|
4265 so = SCTP_INP_SO(inp); |
|
4266 SCTP_SOCKET_UNLOCK(so, 0); |
|
4267 } |
|
4268 #endif |
|
4269 SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id); |
|
4270 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) |
|
4271 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) { |
|
4272 atomic_add_int(&stcb->asoc.refcnt, 1); |
|
4273 SCTP_TCB_UNLOCK(stcb); |
|
4274 SCTP_SOCKET_LOCK(so, 0); |
|
4275 SCTP_TCB_LOCK(stcb); |
|
4276 atomic_subtract_int(&stcb->asoc.refcnt, 1); |
|
4277 } |
|
4278 #endif |
|
4279 SCTP_STAT_INCR(sctps_sendpackets); |
|
4280 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); |
|
4281 if (ret) |
|
4282 SCTP_STAT_INCR(sctps_senderrors); |
|
4283 |
|
4284 SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret); |
|
4285 if (net == NULL) { |
|
4286 /* free tempy routes */ |
|
4287 #if defined(__FreeBSD__) && __FreeBSD_version > 901000 |
|
4288 RO_RTFREE(ro); |
|
4289 #else |
|
4290 if (ro->ro_rt) { |
|
4291 RTFREE(ro->ro_rt); |
|
4292 ro->ro_rt = NULL; |
|
4293 } |
|
4294 #endif |
|
4295 } else { |
|
4296 /* PMTU check versus smallest asoc MTU goes here */ |
|
4297 if ((ro->ro_rt != NULL) && |
|
4298 (net->ro._s_addr)) { |
|
4299 uint32_t mtu; |
|
4300 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt); |
|
4301 if (net->port) { |
|
4302 mtu -= sizeof(struct udphdr); |
|
4303 } |
|
4304 if (mtu && (stcb->asoc.smallest_mtu > mtu)) { |
|
4305 sctp_mtu_size_reset(inp, &stcb->asoc, mtu); |
|
4306 net->mtu = mtu; |
|
4307 } |
|
4308 } else if (ro->ro_rt == NULL) { |
|
4309 /* route was freed */ |
|
4310 if (net->ro._s_addr && |
|
4311 net->src_addr_selected) { |
|
4312 sctp_free_ifa(net->ro._s_addr); |
|
4313 net->ro._s_addr = NULL; |
|
4314 } |
|
4315 net->src_addr_selected = 0; |
|
4316 } |
|
4317 } |
|
4318 return (ret); |
|
4319 } |
|
4320 #endif |
|
4321 #ifdef INET6 |
|
4322 case AF_INET6: |
|
4323 { |
|
4324 uint32_t flowlabel, flowinfo; |
|
4325 struct ip6_hdr *ip6h; |
|
4326 struct route_in6 ip6route; |
|
4327 #if !(defined(__Panda__) || defined(__Userspace__)) |
|
4328 struct ifnet *ifp; |
|
4329 #endif |
|
4330 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp; |
|
4331 int prev_scope = 0; |
|
4332 #ifdef SCTP_EMBEDDED_V6_SCOPE |
|
4333 struct sockaddr_in6 lsa6_storage; |
|
4334 int error; |
|
4335 #endif |
|
4336 u_short prev_port = 0; |
|
4337 int len; |
|
4338 |
|
4339 if (net) { |
|
4340 flowlabel = net->flowlabel; |
|
4341 } else if (stcb) { |
|
4342 flowlabel = stcb->asoc.default_flowlabel; |
|
4343 } else { |
|
4344 flowlabel = inp->sctp_ep.default_flowlabel; |
|
4345 } |
|
4346 if (flowlabel == 0) { |
|
4347 /* |
|
4348 * This means especially, that it is not set at the |
|
4349 * SCTP layer. So use the value from the IP layer. |
|
4350 */ |
|
4351 #if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)) |
|
4352 flowlabel = ntohl(inp->ip_inp.inp.inp_flow); |
|
4353 #else |
|
4354 flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo); |
|
4355 #endif |
|
4356 } |
|
4357 flowlabel &= 0x000fffff; |
|
4358 len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr); |
|
4359 if (port) { |
|
4360 len += sizeof(struct udphdr); |
|
4361 } |
|
4362 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA); |
|
4363 if (newm == NULL) { |
|
4364 sctp_m_freem(m); |
|
4365 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
4366 return (ENOMEM); |
|
4367 } |
|
4368 SCTP_ALIGN_TO_END(newm, len); |
|
4369 SCTP_BUF_LEN(newm) = len; |
|
4370 SCTP_BUF_NEXT(newm) = m; |
|
4371 m = newm; |
|
4372 #if defined(__FreeBSD__) |
|
4373 if (net != NULL) { |
|
4374 #ifdef INVARIANTS |
|
4375 if (net->flowidset == 0) { |
|
4376 panic("Flow ID not set"); |
|
4377 } |
|
4378 #endif |
|
4379 m->m_pkthdr.flowid = net->flowid; |
|
4380 m->m_flags |= M_FLOWID; |
|
4381 } else { |
|
4382 if (use_mflowid != 0) { |
|
4383 m->m_pkthdr.flowid = mflowid; |
|
4384 m->m_flags |= M_FLOWID; |
|
4385 } |
|
4386 } |
|
4387 #endif |
|
4388 packet_length = sctp_calculate_len(m); |
|
4389 |
|
4390 ip6h = mtod(m, struct ip6_hdr *); |
|
4391 /* protect *sin6 from overwrite */ |
|
4392 sin6 = (struct sockaddr_in6 *)to; |
|
4393 tmp = *sin6; |
|
4394 sin6 = &tmp; |
|
4395 |
|
4396 #ifdef SCTP_EMBEDDED_V6_SCOPE |
|
4397 /* KAME hack: embed scopeid */ |
|
4398 #if defined(__APPLE__) |
|
4399 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) |
|
4400 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0) |
|
4401 #else |
|
4402 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0) |
|
4403 #endif |
|
4404 #elif defined(SCTP_KAME) |
|
4405 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) |
|
4406 #else |
|
4407 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0) |
|
4408 #endif |
|
4409 { |
|
4410 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
4411 return (EINVAL); |
|
4412 } |
|
4413 #endif /* SCTP_EMBEDDED_V6_SCOPE */ |
|
4414 if (net == NULL) { |
|
4415 memset(&ip6route, 0, sizeof(ip6route)); |
|
4416 ro = (sctp_route_t *)&ip6route; |
|
4417 #ifdef HAVE_SIN6_LEN |
|
4418 memcpy(&ro->ro_dst, sin6, sin6->sin6_len); |
|
4419 #else |
|
4420 memcpy(&ro->ro_dst, sin6, sizeof(struct sockaddr_in6)); |
|
4421 #endif |
|
4422 } else { |
|
4423 ro = (sctp_route_t *)&net->ro; |
|
4424 } |
|
4425 /* |
|
4426 * We assume here that inp_flow is in host byte order within |
|
4427 * the TCB! |
|
4428 */ |
|
4429 if (tos_value == 0) { |
|
4430 /* |
|
4431 * This means especially, that it is not set at the |
|
4432 * SCTP layer. So use the value from the IP layer. |
|
4433 */ |
|
4434 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) |
|
4435 #if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)) |
|
4436 tos_value = (ntohl(inp->ip_inp.inp.inp_flow) >> 20) & 0xff; |
|
4437 #else |
|
4438 tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff; |
|
4439 #endif |
|
4440 #endif |
|
4441 } |
|
4442 tos_value &= 0xfc; |
|
4443 if (ecn_ok) { |
|
4444 tos_value |= sctp_get_ect(stcb); |
|
4445 } |
|
4446 flowinfo = 0x06; |
|
4447 flowinfo <<= 8; |
|
4448 flowinfo |= tos_value; |
|
4449 flowinfo <<= 20; |
|
4450 flowinfo |= flowlabel; |
|
4451 ip6h->ip6_flow = htonl(flowinfo); |
|
4452 if (port) { |
|
4453 ip6h->ip6_nxt = IPPROTO_UDP; |
|
4454 } else { |
|
4455 ip6h->ip6_nxt = IPPROTO_SCTP; |
|
4456 } |
|
4457 ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr)); |
|
4458 ip6h->ip6_dst = sin6->sin6_addr; |
|
4459 |
|
4460 /* |
|
4461 * Add SRC address selection here: we can only reuse to a |
|
4462 * limited degree the kame src-addr-sel, since we can try |
|
4463 * their selection but it may not be bound. |
|
4464 */ |
|
4465 bzero(&lsa6_tmp, sizeof(lsa6_tmp)); |
|
4466 lsa6_tmp.sin6_family = AF_INET6; |
|
4467 #ifdef HAVE_SIN6_LEN |
|
4468 lsa6_tmp.sin6_len = sizeof(lsa6_tmp); |
|
4469 #endif |
|
4470 lsa6 = &lsa6_tmp; |
|
4471 if (net && out_of_asoc_ok == 0) { |
|
4472 if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) { |
|
4473 sctp_free_ifa(net->ro._s_addr); |
|
4474 net->ro._s_addr = NULL; |
|
4475 net->src_addr_selected = 0; |
|
4476 if (ro->ro_rt) { |
|
4477 RTFREE(ro->ro_rt); |
|
4478 ro->ro_rt = NULL; |
|
4479 } |
|
4480 } |
|
4481 if (net->src_addr_selected == 0) { |
|
4482 #ifdef SCTP_EMBEDDED_V6_SCOPE |
|
4483 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; |
|
4484 /* KAME hack: embed scopeid */ |
|
4485 #if defined(__APPLE__) |
|
4486 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) |
|
4487 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0) |
|
4488 #else |
|
4489 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0) |
|
4490 #endif |
|
4491 #elif defined(SCTP_KAME) |
|
4492 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) |
|
4493 #else |
|
4494 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0) |
|
4495 #endif |
|
4496 { |
|
4497 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
4498 return (EINVAL); |
|
4499 } |
|
4500 #endif /* SCTP_EMBEDDED_V6_SCOPE */ |
|
4501 /* Cache the source address */ |
|
4502 net->ro._s_addr = sctp_source_address_selection(inp, |
|
4503 stcb, |
|
4504 ro, |
|
4505 net, |
|
4506 0, |
|
4507 vrf_id); |
|
4508 #ifdef SCTP_EMBEDDED_V6_SCOPE |
|
4509 #ifdef SCTP_KAME |
|
4510 (void)sa6_recoverscope(sin6); |
|
4511 #else |
|
4512 (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL); |
|
4513 #endif /* SCTP_KAME */ |
|
4514 #endif /* SCTP_EMBEDDED_V6_SCOPE */ |
|
4515 net->src_addr_selected = 1; |
|
4516 } |
|
4517 if (net->ro._s_addr == NULL) { |
|
4518 SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n"); |
|
4519 net->src_addr_selected = 0; |
|
4520 sctp_handle_no_route(stcb, net, so_locked); |
|
4521 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); |
|
4522 sctp_m_freem(m); |
|
4523 return (EHOSTUNREACH); |
|
4524 } |
|
4525 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr; |
|
4526 } else { |
|
4527 #ifdef SCTP_EMBEDDED_V6_SCOPE |
|
4528 sin6 = (struct sockaddr_in6 *)&ro->ro_dst; |
|
4529 /* KAME hack: embed scopeid */ |
|
4530 #if defined(__APPLE__) |
|
4531 #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) |
|
4532 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0) |
|
4533 #else |
|
4534 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0) |
|
4535 #endif |
|
4536 #elif defined(SCTP_KAME) |
|
4537 if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) |
|
4538 #else |
|
4539 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0) |
|
4540 #endif |
|
4541 { |
|
4542 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
4543 return (EINVAL); |
|
4544 } |
|
4545 #endif /* SCTP_EMBEDDED_V6_SCOPE */ |
|
4546 if (over_addr == NULL) { |
|
4547 struct sctp_ifa *_lsrc; |
|
4548 |
|
4549 _lsrc = sctp_source_address_selection(inp, stcb, ro, |
|
4550 net, |
|
4551 out_of_asoc_ok, |
|
4552 vrf_id); |
|
4553 if (_lsrc == NULL) { |
|
4554 sctp_handle_no_route(stcb, net, so_locked); |
|
4555 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); |
|
4556 sctp_m_freem(m); |
|
4557 return (EHOSTUNREACH); |
|
4558 } |
|
4559 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr; |
|
4560 sctp_free_ifa(_lsrc); |
|
4561 } else { |
|
4562 lsa6->sin6_addr = over_addr->sin6.sin6_addr; |
|
4563 SCTP_RTALLOC(ro, vrf_id); |
|
4564 } |
|
4565 #ifdef SCTP_EMBEDDED_V6_SCOPE |
|
4566 #ifdef SCTP_KAME |
|
4567 (void)sa6_recoverscope(sin6); |
|
4568 #else |
|
4569 (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL); |
|
4570 #endif /* SCTP_KAME */ |
|
4571 #endif /* SCTP_EMBEDDED_V6_SCOPE */ |
|
4572 } |
|
4573 lsa6->sin6_port = inp->sctp_lport; |
|
4574 |
|
4575 if (ro->ro_rt == NULL) { |
|
4576 /* |
|
4577 * src addr selection failed to find a route (or |
|
4578 * valid source addr), so we can't get there from |
|
4579 * here! |
|
4580 */ |
|
4581 sctp_handle_no_route(stcb, net, so_locked); |
|
4582 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); |
|
4583 sctp_m_freem(m); |
|
4584 return (EHOSTUNREACH); |
|
4585 } |
|
4586 #ifndef SCOPEDROUTING |
|
4587 #ifdef SCTP_EMBEDDED_V6_SCOPE |
|
4588 /* |
|
4589 * XXX: sa6 may not have a valid sin6_scope_id in the |
|
4590 * non-SCOPEDROUTING case. |
|
4591 */ |
|
4592 bzero(&lsa6_storage, sizeof(lsa6_storage)); |
|
4593 lsa6_storage.sin6_family = AF_INET6; |
|
4594 #ifdef HAVE_SIN6_LEN |
|
4595 lsa6_storage.sin6_len = sizeof(lsa6_storage); |
|
4596 #endif |
|
4597 #ifdef SCTP_KAME |
|
4598 lsa6_storage.sin6_addr = lsa6->sin6_addr; |
|
4599 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) { |
|
4600 #else |
|
4601 if ((error = in6_recoverscope(&lsa6_storage, &lsa6->sin6_addr, |
|
4602 NULL)) != 0) { |
|
4603 #endif /* SCTP_KAME */ |
|
4604 SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error); |
|
4605 sctp_m_freem(m); |
|
4606 return (error); |
|
4607 } |
|
4608 /* XXX */ |
|
4609 lsa6_storage.sin6_addr = lsa6->sin6_addr; |
|
4610 lsa6_storage.sin6_port = inp->sctp_lport; |
|
4611 lsa6 = &lsa6_storage; |
|
4612 #endif /* SCTP_EMBEDDED_V6_SCOPE */ |
|
4613 #endif /* SCOPEDROUTING */ |
|
4614 ip6h->ip6_src = lsa6->sin6_addr; |
|
4615 |
|
4616 if (port) { |
|
4617 if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) { |
|
4618 sctp_handle_no_route(stcb, net, so_locked); |
|
4619 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); |
|
4620 sctp_m_freem(m); |
|
4621 return (EHOSTUNREACH); |
|
4622 } |
|
4623 udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr)); |
|
4624 udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)); |
|
4625 udp->uh_dport = port; |
|
4626 udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr)); |
|
4627 udp->uh_sum = 0; |
|
4628 sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr)); |
|
4629 } else { |
|
4630 sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr)); |
|
4631 } |
|
4632 |
|
4633 sctphdr->src_port = src_port; |
|
4634 sctphdr->dest_port = dest_port; |
|
4635 sctphdr->v_tag = v_tag; |
|
4636 sctphdr->checksum = 0; |
|
4637 |
|
4638 /* |
|
4639 * We set the hop limit now since there is a good chance |
|
4640 * that our ro pointer is now filled |
|
4641 */ |
|
4642 ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro); |
|
4643 #if !(defined(__Panda__) || defined(__Userspace__)) |
|
4644 ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); |
|
4645 #endif |
|
4646 |
|
4647 #ifdef SCTP_DEBUG |
|
4648 /* Copy to be sure something bad is not happening */ |
|
4649 sin6->sin6_addr = ip6h->ip6_dst; |
|
4650 lsa6->sin6_addr = ip6h->ip6_src; |
|
4651 #endif |
|
4652 |
|
4653 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n"); |
|
4654 SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: "); |
|
4655 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6); |
|
4656 SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: "); |
|
4657 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6); |
|
4658 if (net) { |
|
4659 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; |
|
4660 /* preserve the port and scope for link local send */ |
|
4661 prev_scope = sin6->sin6_scope_id; |
|
4662 prev_port = sin6->sin6_port; |
|
4663 } |
|
4664 |
|
4665 if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) { |
|
4666 /* failed to prepend data, give up */ |
|
4667 sctp_m_freem(m); |
|
4668 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
4669 return (ENOMEM); |
|
4670 } |
|
4671 SCTP_ATTACH_CHAIN(o_pak, m, packet_length); |
|
4672 if (port) { |
|
4673 #if defined(SCTP_WITH_NO_CSUM) |
|
4674 SCTP_STAT_INCR(sctps_sendnocrc); |
|
4675 #else |
|
4676 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr)); |
|
4677 SCTP_STAT_INCR(sctps_sendswcrc); |
|
4678 #endif |
|
4679 #if defined(__Windows__) |
|
4680 udp->uh_sum = 0; |
|
4681 #elif !defined(__Userspace__) |
|
4682 if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) { |
|
4683 udp->uh_sum = 0xffff; |
|
4684 } |
|
4685 #endif |
|
4686 } else { |
|
4687 #if defined(SCTP_WITH_NO_CSUM) |
|
4688 SCTP_STAT_INCR(sctps_sendnocrc); |
|
4689 #else |
|
4690 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000 |
|
4691 #if __FreeBSD_version < 900000 |
|
4692 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr)); |
|
4693 SCTP_STAT_INCR(sctps_sendswcrc); |
|
4694 #else |
|
4695 #if __FreeBSD_version > 901000 |
|
4696 m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6; |
|
4697 #else |
|
4698 m->m_pkthdr.csum_flags = CSUM_SCTP; |
|
4699 #endif |
|
4700 m->m_pkthdr.csum_data = 0; |
|
4701 SCTP_STAT_INCR(sctps_sendhwcrc); |
|
4702 #endif |
|
4703 #else |
|
4704 if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) && |
|
4705 (stcb) && (stcb->asoc.scope.loopback_scope))) { |
|
4706 sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr)); |
|
4707 SCTP_STAT_INCR(sctps_sendswcrc); |
|
4708 } else { |
|
4709 SCTP_STAT_INCR(sctps_sendnocrc); |
|
4710 } |
|
4711 #endif |
|
4712 #endif |
|
4713 } |
|
4714 /* send it out. table id is taken from stcb */ |
|
4715 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) |
|
4716 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) { |
|
4717 so = SCTP_INP_SO(inp); |
|
4718 SCTP_SOCKET_UNLOCK(so, 0); |
|
4719 } |
|
4720 #endif |
|
4721 #ifdef SCTP_PACKET_LOGGING |
|
4722 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) |
|
4723 sctp_packet_log(o_pak); |
|
4724 #endif |
|
4725 #if !(defined(__Panda__) || defined(__Userspace__)) |
|
4726 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id); |
|
4727 #else |
|
4728 SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, NULL, stcb, vrf_id); |
|
4729 #endif |
|
4730 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) |
|
4731 if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) { |
|
4732 atomic_add_int(&stcb->asoc.refcnt, 1); |
|
4733 SCTP_TCB_UNLOCK(stcb); |
|
4734 SCTP_SOCKET_LOCK(so, 0); |
|
4735 SCTP_TCB_LOCK(stcb); |
|
4736 atomic_subtract_int(&stcb->asoc.refcnt, 1); |
|
4737 } |
|
4738 #endif |
|
4739 if (net) { |
|
4740 /* for link local this must be done */ |
|
4741 sin6->sin6_scope_id = prev_scope; |
|
4742 sin6->sin6_port = prev_port; |
|
4743 } |
|
4744 SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret); |
|
4745 SCTP_STAT_INCR(sctps_sendpackets); |
|
4746 SCTP_STAT_INCR_COUNTER64(sctps_outpackets); |
|
4747 if (ret) { |
|
4748 SCTP_STAT_INCR(sctps_senderrors); |
|
4749 } |
|
4750 if (net == NULL) { |
|
4751 /* Now if we had a temp route free it */ |
|
4752 #if defined(__FreeBSD__) && __FreeBSD_version > 901000 |
|
4753 RO_RTFREE(ro); |
|
4754 #else |
|
4755 if (ro->ro_rt) { |
|
4756 RTFREE(ro->ro_rt); |
|
4757 ro->ro_rt = NULL; |
|
4758 } |
|
4759 #endif |
|
4760 } else { |
|
4761 /* PMTU check versus smallest asoc MTU goes here */ |
|
4762 if (ro->ro_rt == NULL) { |
|
4763 /* Route was freed */ |
|
4764 if (net->ro._s_addr && |
|
4765 net->src_addr_selected) { |
|
4766 sctp_free_ifa(net->ro._s_addr); |
|
4767 net->ro._s_addr = NULL; |
|
4768 } |
|
4769 net->src_addr_selected = 0; |
|
4770 } |
|
4771 if ((ro->ro_rt != NULL) && |
|
4772 (net->ro._s_addr)) { |
|
4773 uint32_t mtu; |
|
4774 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt); |
|
4775 if (mtu && |
|
4776 (stcb->asoc.smallest_mtu > mtu)) { |
|
4777 sctp_mtu_size_reset(inp, &stcb->asoc, mtu); |
|
4778 net->mtu = mtu; |
|
4779 if (net->port) { |
|
4780 net->mtu -= sizeof(struct udphdr); |
|
4781 } |
|
4782 } |
|
4783 } |
|
4784 #if !defined(__Panda__) && !defined(__Userspace__) |
|
4785 else if (ifp) { |
|
4786 #if defined(__Windows__) |
|
4787 #define ND_IFINFO(ifp) (ifp) |
|
4788 #define linkmtu if_mtu |
|
4789 #endif |
|
4790 if (ND_IFINFO(ifp)->linkmtu && |
|
4791 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) { |
|
4792 sctp_mtu_size_reset(inp, |
|
4793 &stcb->asoc, |
|
4794 ND_IFINFO(ifp)->linkmtu); |
|
4795 } |
|
4796 } |
|
4797 #endif |
|
4798 } |
|
4799 return (ret); |
|
4800 } |
|
4801 #endif |
|
4802 #if defined(__Userspace__) |
|
4803 case AF_CONN: |
|
4804 { |
|
4805 char *buffer; |
|
4806 struct sockaddr_conn *sconn; |
|
4807 int len; |
|
4808 |
|
4809 sconn = (struct sockaddr_conn *)to; |
|
4810 len = sizeof(struct sctphdr); |
|
4811 newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA); |
|
4812 if (newm == NULL) { |
|
4813 sctp_m_freem(m); |
|
4814 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
4815 return (ENOMEM); |
|
4816 } |
|
4817 SCTP_ALIGN_TO_END(newm, len); |
|
4818 SCTP_BUF_LEN(newm) = len; |
|
4819 SCTP_BUF_NEXT(newm) = m; |
|
4820 m = newm; |
|
4821 packet_length = sctp_calculate_len(m); |
|
4822 sctphdr = mtod(m, struct sctphdr *); |
|
4823 sctphdr->src_port = src_port; |
|
4824 sctphdr->dest_port = dest_port; |
|
4825 sctphdr->v_tag = v_tag; |
|
4826 sctphdr->checksum = 0; |
|
4827 #if defined(SCTP_WITH_NO_CSUM) |
|
4828 SCTP_STAT_INCR(sctps_sendnocrc); |
|
4829 #else |
|
4830 sctphdr->checksum = sctp_calculate_cksum(m, 0); |
|
4831 SCTP_STAT_INCR(sctps_sendswcrc); |
|
4832 #endif |
|
4833 if (tos_value == 0) { |
|
4834 tos_value = inp->ip_inp.inp.inp_ip_tos; |
|
4835 } |
|
4836 tos_value &= 0xfc; |
|
4837 if (ecn_ok) { |
|
4838 tos_value |= sctp_get_ect(stcb); |
|
4839 } |
|
4840 /* Don't alloc/free for each packet */ |
|
4841 if ((buffer = malloc(packet_length)) != NULL) { |
|
4842 m_copydata(m, 0, packet_length, buffer); |
|
4843 ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, packet_length, tos_value, nofragment_flag); |
|
4844 free(buffer); |
|
4845 } else { |
|
4846 ret = ENOMEM; |
|
4847 } |
|
4848 sctp_m_freem(m); |
|
4849 return (ret); |
|
4850 } |
|
4851 #endif |
|
4852 default: |
|
4853 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n", |
|
4854 ((struct sockaddr *)to)->sa_family); |
|
4855 sctp_m_freem(m); |
|
4856 SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT); |
|
4857 return (EFAULT); |
|
4858 } |
|
4859 } |
|
4860 |
|
4861 |
|
4862 void |
|
4863 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked |
|
4864 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) |
|
4865 SCTP_UNUSED |
|
4866 #endif |
|
4867 ) |
|
4868 { |
|
4869 struct mbuf *m; |
|
4870 struct sctp_nets *net; |
|
4871 struct sctp_init_chunk *init; |
|
4872 struct sctp_supported_addr_param *sup_addr; |
|
4873 struct sctp_adaptation_layer_indication *ali; |
|
4874 struct sctp_supported_chunk_types_param *pr_supported; |
|
4875 struct sctp_paramhdr *ph; |
|
4876 int cnt_inits_to = 0; |
|
4877 int ret; |
|
4878 uint16_t num_ext, chunk_len, padding_len, parameter_len; |
|
4879 |
|
4880 #if defined(__APPLE__) |
|
4881 if (so_locked) { |
|
4882 sctp_lock_assert(SCTP_INP_SO(inp)); |
|
4883 } else { |
|
4884 sctp_unlock_assert(SCTP_INP_SO(inp)); |
|
4885 } |
|
4886 #endif |
|
4887 /* INIT's always go to the primary (and usually ONLY address) */ |
|
4888 net = stcb->asoc.primary_destination; |
|
4889 if (net == NULL) { |
|
4890 net = TAILQ_FIRST(&stcb->asoc.nets); |
|
4891 if (net == NULL) { |
|
4892 /* TSNH */ |
|
4893 return; |
|
4894 } |
|
4895 /* we confirm any address we send an INIT to */ |
|
4896 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; |
|
4897 (void)sctp_set_primary_addr(stcb, NULL, net); |
|
4898 } else { |
|
4899 /* we confirm any address we send an INIT to */ |
|
4900 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; |
|
4901 } |
|
4902 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n"); |
|
4903 #ifdef INET6 |
|
4904 if (net->ro._l_addr.sa.sa_family == AF_INET6) { |
|
4905 /* |
|
4906 * special hook, if we are sending to link local it will not |
|
4907 * show up in our private address count. |
|
4908 */ |
|
4909 if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr)) |
|
4910 cnt_inits_to = 1; |
|
4911 } |
|
4912 #endif |
|
4913 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
|
4914 /* This case should not happen */ |
|
4915 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n"); |
|
4916 return; |
|
4917 } |
|
4918 /* start the INIT timer */ |
|
4919 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net); |
|
4920 |
|
4921 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA); |
|
4922 if (m == NULL) { |
|
4923 /* No memory, INIT timer will re-attempt. */ |
|
4924 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n"); |
|
4925 return; |
|
4926 } |
|
4927 chunk_len = (uint16_t)sizeof(struct sctp_init_chunk); |
|
4928 padding_len = 0; |
|
4929 /* |
|
4930 * assume peer supports asconf in order to be able to queue |
|
4931 * local address changes while an INIT is in flight and before |
|
4932 * the assoc is established. |
|
4933 */ |
|
4934 stcb->asoc.peer_supports_asconf = 1; |
|
4935 /* Now lets put the chunk header in place */ |
|
4936 init = mtod(m, struct sctp_init_chunk *); |
|
4937 /* now the chunk header */ |
|
4938 init->ch.chunk_type = SCTP_INITIATION; |
|
4939 init->ch.chunk_flags = 0; |
|
4940 /* fill in later from mbuf we build */ |
|
4941 init->ch.chunk_length = 0; |
|
4942 /* place in my tag */ |
|
4943 init->init.initiate_tag = htonl(stcb->asoc.my_vtag); |
|
4944 /* set up some of the credits. */ |
|
4945 init->init.a_rwnd = htonl(max(inp->sctp_socket?SCTP_SB_LIMIT_RCV(inp->sctp_socket):0, |
|
4946 SCTP_MINIMAL_RWND)); |
|
4947 init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams); |
|
4948 init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams); |
|
4949 init->init.initial_tsn = htonl(stcb->asoc.init_seq_number); |
|
4950 |
|
4951 if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) { |
|
4952 uint8_t i; |
|
4953 |
|
4954 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); |
|
4955 if (stcb->asoc.scope.ipv4_addr_legal) { |
|
4956 parameter_len += (uint16_t)sizeof(uint16_t); |
|
4957 } |
|
4958 if (stcb->asoc.scope.ipv6_addr_legal) { |
|
4959 parameter_len += (uint16_t)sizeof(uint16_t); |
|
4960 } |
|
4961 sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t) + chunk_len); |
|
4962 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE); |
|
4963 sup_addr->ph.param_length = htons(parameter_len); |
|
4964 i = 0; |
|
4965 if (stcb->asoc.scope.ipv4_addr_legal) { |
|
4966 sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS); |
|
4967 } |
|
4968 if (stcb->asoc.scope.ipv6_addr_legal) { |
|
4969 sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS); |
|
4970 } |
|
4971 padding_len = 4 - 2 * i; |
|
4972 chunk_len += parameter_len; |
|
4973 } |
|
4974 |
|
4975 /* Adaptation layer indication parameter */ |
|
4976 if (inp->sctp_ep.adaptation_layer_indicator_provided) { |
|
4977 if (padding_len > 0) { |
|
4978 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); |
|
4979 chunk_len += padding_len; |
|
4980 padding_len = 0; |
|
4981 } |
|
4982 parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication); |
|
4983 ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len); |
|
4984 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION); |
|
4985 ali->ph.param_length = htons(parameter_len); |
|
4986 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator); |
|
4987 chunk_len += parameter_len; |
|
4988 } |
|
4989 |
|
4990 if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) { |
|
4991 /* Add NAT friendly parameter. */ |
|
4992 if (padding_len > 0) { |
|
4993 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); |
|
4994 chunk_len += padding_len; |
|
4995 padding_len = 0; |
|
4996 } |
|
4997 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); |
|
4998 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len); |
|
4999 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT); |
|
5000 ph->param_length = htons(parameter_len); |
|
5001 chunk_len += parameter_len; |
|
5002 } |
|
5003 |
|
5004 /* now any cookie time extensions */ |
|
5005 if (stcb->asoc.cookie_preserve_req) { |
|
5006 struct sctp_cookie_perserve_param *cookie_preserve; |
|
5007 |
|
5008 if (padding_len > 0) { |
|
5009 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); |
|
5010 chunk_len += padding_len; |
|
5011 padding_len = 0; |
|
5012 } |
|
5013 parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param); |
|
5014 cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t) + chunk_len); |
|
5015 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE); |
|
5016 cookie_preserve->ph.param_length = htons(parameter_len); |
|
5017 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req); |
|
5018 stcb->asoc.cookie_preserve_req = 0; |
|
5019 chunk_len += parameter_len; |
|
5020 } |
|
5021 |
|
5022 /* ECN parameter */ |
|
5023 if (stcb->asoc.ecn_allowed == 1) { |
|
5024 if (padding_len > 0) { |
|
5025 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); |
|
5026 chunk_len += padding_len; |
|
5027 padding_len = 0; |
|
5028 } |
|
5029 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); |
|
5030 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len); |
|
5031 ph->param_type = htons(SCTP_ECN_CAPABLE); |
|
5032 ph->param_length = htons(parameter_len); |
|
5033 chunk_len += parameter_len; |
|
5034 } |
|
5035 |
|
5036 /* And now tell the peer we do support PR-SCTP. */ |
|
5037 if (padding_len > 0) { |
|
5038 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); |
|
5039 chunk_len += padding_len; |
|
5040 padding_len = 0; |
|
5041 } |
|
5042 parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); |
|
5043 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len); |
|
5044 ph->param_type = htons(SCTP_PRSCTP_SUPPORTED); |
|
5045 ph->param_length = htons(parameter_len); |
|
5046 chunk_len += parameter_len; |
|
5047 |
|
5048 /* And now tell the peer we do all the extensions */ |
|
5049 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len); |
|
5050 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT); |
|
5051 num_ext = 0; |
|
5052 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF; |
|
5053 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK; |
|
5054 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN; |
|
5055 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED; |
|
5056 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET; |
|
5057 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) { |
|
5058 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION; |
|
5059 } |
|
5060 if (stcb->asoc.sctp_nr_sack_on_off == 1) { |
|
5061 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK; |
|
5062 } |
|
5063 parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext; |
|
5064 pr_supported->ph.param_length = htons(parameter_len); |
|
5065 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; |
|
5066 chunk_len += parameter_len; |
|
5067 |
|
5068 /* add authentication parameters */ |
|
5069 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) { |
|
5070 /* attach RANDOM parameter, if available */ |
|
5071 if (stcb->asoc.authinfo.random != NULL) { |
|
5072 struct sctp_auth_random *randp; |
|
5073 |
|
5074 if (padding_len > 0) { |
|
5075 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); |
|
5076 chunk_len += padding_len; |
|
5077 padding_len = 0; |
|
5078 } |
|
5079 randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len); |
|
5080 parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len; |
|
5081 /* random key already contains the header */ |
|
5082 memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len); |
|
5083 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; |
|
5084 chunk_len += parameter_len; |
|
5085 } |
|
5086 /* add HMAC_ALGO parameter */ |
|
5087 if ((stcb->asoc.local_hmacs != NULL) && |
|
5088 (stcb->asoc.local_hmacs->num_algo > 0)) { |
|
5089 struct sctp_auth_hmac_algo *hmacs; |
|
5090 |
|
5091 if (padding_len > 0) { |
|
5092 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); |
|
5093 chunk_len += padding_len; |
|
5094 padding_len = 0; |
|
5095 } |
|
5096 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len); |
|
5097 parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) + |
|
5098 stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t)); |
|
5099 hmacs->ph.param_type = htons(SCTP_HMAC_LIST); |
|
5100 hmacs->ph.param_length = htons(parameter_len); |
|
5101 sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids); |
|
5102 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; |
|
5103 chunk_len += parameter_len; |
|
5104 } |
|
5105 /* add CHUNKS parameter */ |
|
5106 if (sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks) > 0) { |
|
5107 struct sctp_auth_chunk_list *chunks; |
|
5108 |
|
5109 if (padding_len > 0) { |
|
5110 memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); |
|
5111 chunk_len += padding_len; |
|
5112 padding_len = 0; |
|
5113 } |
|
5114 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len); |
|
5115 parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) + |
|
5116 sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks)); |
|
5117 chunks->ph.param_type = htons(SCTP_CHUNK_LIST); |
|
5118 chunks->ph.param_length = htons(parameter_len); |
|
5119 sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types); |
|
5120 padding_len = SCTP_SIZE32(parameter_len) - parameter_len; |
|
5121 chunk_len += parameter_len; |
|
5122 } |
|
5123 } |
|
5124 SCTP_BUF_LEN(m) = chunk_len; |
|
5125 |
|
5126 /* now the addresses */ |
|
5127 /* To optimize this we could put the scoping stuff |
|
5128 * into a structure and remove the individual uint8's from |
|
5129 * the assoc structure. Then we could just sifa in the |
|
5130 * address within the stcb. But for now this is a quick |
|
5131 * hack to get the address stuff teased apart. |
|
5132 */ |
|
5133 sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope, m, cnt_inits_to, &padding_len, &chunk_len); |
|
5134 |
|
5135 init->ch.chunk_length = htons(chunk_len); |
|
5136 if (padding_len > 0) { |
|
5137 struct mbuf *m_at, *mp_last; |
|
5138 |
|
5139 mp_last = NULL; |
|
5140 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { |
|
5141 if (SCTP_BUF_NEXT(m_at) == NULL) |
|
5142 mp_last = m_at; |
|
5143 } |
|
5144 if ((mp_last == NULL) || sctp_add_pad_tombuf(mp_last, padding_len)) { |
|
5145 sctp_m_freem(m); |
|
5146 return; |
|
5147 } |
|
5148 } |
|
5149 SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n"); |
|
5150 ret = sctp_lowlevel_chunk_output(inp, stcb, net, |
|
5151 (struct sockaddr *)&net->ro._l_addr, |
|
5152 m, 0, NULL, 0, 0, 0, 0, |
|
5153 inp->sctp_lport, stcb->rport, htonl(0), |
|
5154 net->port, NULL, |
|
5155 #if defined(__FreeBSD__) |
|
5156 0, 0, |
|
5157 #endif |
|
5158 so_locked); |
|
5159 SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret); |
|
5160 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); |
|
5161 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); |
|
5162 } |
|
5163 |
|
5164 struct mbuf * |
|
5165 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt, |
|
5166 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly) |
|
5167 { |
|
5168 /* |
|
5169 * Given a mbuf containing an INIT or INIT-ACK with the param_offset |
|
5170 * being equal to the beginning of the params i.e. (iphlen + |
|
5171 * sizeof(struct sctp_init_msg) parse through the parameters to the |
|
5172 * end of the mbuf verifying that all parameters are known. |
|
5173 * |
|
5174 * For unknown parameters build and return a mbuf with |
|
5175 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop |
|
5176 * processing this chunk stop, and set *abort_processing to 1. |
|
5177 * |
|
5178 * By having param_offset be pre-set to where parameters begin it is |
|
5179 * hoped that this routine may be reused in the future by new |
|
5180 * features. |
|
5181 */ |
|
5182 struct sctp_paramhdr *phdr, params; |
|
5183 |
|
5184 struct mbuf *mat, *op_err; |
|
5185 char tempbuf[SCTP_PARAM_BUFFER_SIZE]; |
|
5186 int at, limit, pad_needed; |
|
5187 uint16_t ptype, plen, padded_size; |
|
5188 int err_at; |
|
5189 |
|
5190 *abort_processing = 0; |
|
5191 mat = in_initpkt; |
|
5192 err_at = 0; |
|
5193 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk); |
|
5194 at = param_offset; |
|
5195 op_err = NULL; |
|
5196 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n"); |
|
5197 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); |
|
5198 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) { |
|
5199 ptype = ntohs(phdr->param_type); |
|
5200 plen = ntohs(phdr->param_length); |
|
5201 if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) { |
|
5202 /* wacked parameter */ |
|
5203 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen); |
|
5204 goto invalid_size; |
|
5205 } |
|
5206 limit -= SCTP_SIZE32(plen); |
|
5207 /*- |
|
5208 * All parameters for all chunks that we know/understand are |
|
5209 * listed here. We process them other places and make |
|
5210 * appropriate stop actions per the upper bits. However this |
|
5211 * is the generic routine processor's can call to get back |
|
5212 * an operr.. to either incorporate (init-ack) or send. |
|
5213 */ |
|
5214 padded_size = SCTP_SIZE32(plen); |
|
5215 switch (ptype) { |
|
5216 /* Param's with variable size */ |
|
5217 case SCTP_HEARTBEAT_INFO: |
|
5218 case SCTP_STATE_COOKIE: |
|
5219 case SCTP_UNRECOG_PARAM: |
|
5220 case SCTP_ERROR_CAUSE_IND: |
|
5221 /* ok skip fwd */ |
|
5222 at += padded_size; |
|
5223 break; |
|
5224 /* Param's with variable size within a range */ |
|
5225 case SCTP_CHUNK_LIST: |
|
5226 case SCTP_SUPPORTED_CHUNK_EXT: |
|
5227 if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) { |
|
5228 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen); |
|
5229 goto invalid_size; |
|
5230 } |
|
5231 at += padded_size; |
|
5232 break; |
|
5233 case SCTP_SUPPORTED_ADDRTYPE: |
|
5234 if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) { |
|
5235 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen); |
|
5236 goto invalid_size; |
|
5237 } |
|
5238 at += padded_size; |
|
5239 break; |
|
5240 case SCTP_RANDOM: |
|
5241 if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) { |
|
5242 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen); |
|
5243 goto invalid_size; |
|
5244 } |
|
5245 at += padded_size; |
|
5246 break; |
|
5247 case SCTP_SET_PRIM_ADDR: |
|
5248 case SCTP_DEL_IP_ADDRESS: |
|
5249 case SCTP_ADD_IP_ADDRESS: |
|
5250 if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) && |
|
5251 (padded_size != sizeof(struct sctp_asconf_addr_param))) { |
|
5252 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen); |
|
5253 goto invalid_size; |
|
5254 } |
|
5255 at += padded_size; |
|
5256 break; |
|
5257 /* Param's with a fixed size */ |
|
5258 case SCTP_IPV4_ADDRESS: |
|
5259 if (padded_size != sizeof(struct sctp_ipv4addr_param)) { |
|
5260 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen); |
|
5261 goto invalid_size; |
|
5262 } |
|
5263 at += padded_size; |
|
5264 break; |
|
5265 case SCTP_IPV6_ADDRESS: |
|
5266 if (padded_size != sizeof(struct sctp_ipv6addr_param)) { |
|
5267 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen); |
|
5268 goto invalid_size; |
|
5269 } |
|
5270 at += padded_size; |
|
5271 break; |
|
5272 case SCTP_COOKIE_PRESERVE: |
|
5273 if (padded_size != sizeof(struct sctp_cookie_perserve_param)) { |
|
5274 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen); |
|
5275 goto invalid_size; |
|
5276 } |
|
5277 at += padded_size; |
|
5278 break; |
|
5279 case SCTP_HAS_NAT_SUPPORT: |
|
5280 *nat_friendly = 1; |
|
5281 /* fall through */ |
|
5282 case SCTP_PRSCTP_SUPPORTED: |
|
5283 |
|
5284 if (padded_size != sizeof(struct sctp_paramhdr)) { |
|
5285 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen); |
|
5286 goto invalid_size; |
|
5287 } |
|
5288 at += padded_size; |
|
5289 break; |
|
5290 case SCTP_ECN_CAPABLE: |
|
5291 if (padded_size != sizeof(struct sctp_ecn_supported_param)) { |
|
5292 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen); |
|
5293 goto invalid_size; |
|
5294 } |
|
5295 at += padded_size; |
|
5296 break; |
|
5297 case SCTP_ULP_ADAPTATION: |
|
5298 if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) { |
|
5299 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adapatation %d\n", plen); |
|
5300 goto invalid_size; |
|
5301 } |
|
5302 at += padded_size; |
|
5303 break; |
|
5304 case SCTP_SUCCESS_REPORT: |
|
5305 if (padded_size != sizeof(struct sctp_asconf_paramhdr)) { |
|
5306 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen); |
|
5307 goto invalid_size; |
|
5308 } |
|
5309 at += padded_size; |
|
5310 break; |
|
5311 case SCTP_HOSTNAME_ADDRESS: |
|
5312 { |
|
5313 /* We can NOT handle HOST NAME addresses!! */ |
|
5314 int l_len; |
|
5315 |
|
5316 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n"); |
|
5317 *abort_processing = 1; |
|
5318 if (op_err == NULL) { |
|
5319 /* Ok need to try to get a mbuf */ |
|
5320 #ifdef INET6 |
|
5321 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); |
|
5322 #else |
|
5323 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); |
|
5324 #endif |
|
5325 l_len += plen; |
|
5326 l_len += sizeof(struct sctp_paramhdr); |
|
5327 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA); |
|
5328 if (op_err) { |
|
5329 SCTP_BUF_LEN(op_err) = 0; |
|
5330 /* |
|
5331 * pre-reserve space for ip and sctp |
|
5332 * header and chunk hdr |
|
5333 */ |
|
5334 #ifdef INET6 |
|
5335 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); |
|
5336 #else |
|
5337 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); |
|
5338 #endif |
|
5339 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); |
|
5340 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); |
|
5341 } |
|
5342 } |
|
5343 if (op_err) { |
|
5344 /* If we have space */ |
|
5345 struct sctp_paramhdr s; |
|
5346 |
|
5347 if (err_at % 4) { |
|
5348 uint32_t cpthis = 0; |
|
5349 |
|
5350 pad_needed = 4 - (err_at % 4); |
|
5351 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); |
|
5352 err_at += pad_needed; |
|
5353 } |
|
5354 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR); |
|
5355 s.param_length = htons(sizeof(s) + plen); |
|
5356 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); |
|
5357 err_at += sizeof(s); |
|
5358 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf),plen)); |
|
5359 if (phdr == NULL) { |
|
5360 sctp_m_freem(op_err); |
|
5361 /* |
|
5362 * we are out of memory but we still |
|
5363 * need to have a look at what to do |
|
5364 * (the system is in trouble |
|
5365 * though). |
|
5366 */ |
|
5367 return (NULL); |
|
5368 } |
|
5369 m_copyback(op_err, err_at, plen, (caddr_t)phdr); |
|
5370 } |
|
5371 return (op_err); |
|
5372 break; |
|
5373 } |
|
5374 default: |
|
5375 /* |
|
5376 * we do not recognize the parameter figure out what |
|
5377 * we do. |
|
5378 */ |
|
5379 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype); |
|
5380 if ((ptype & 0x4000) == 0x4000) { |
|
5381 /* Report bit is set?? */ |
|
5382 SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n"); |
|
5383 if (op_err == NULL) { |
|
5384 int l_len; |
|
5385 /* Ok need to try to get an mbuf */ |
|
5386 #ifdef INET6 |
|
5387 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); |
|
5388 #else |
|
5389 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); |
|
5390 #endif |
|
5391 l_len += plen; |
|
5392 l_len += sizeof(struct sctp_paramhdr); |
|
5393 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA); |
|
5394 if (op_err) { |
|
5395 SCTP_BUF_LEN(op_err) = 0; |
|
5396 #ifdef INET6 |
|
5397 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); |
|
5398 #else |
|
5399 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); |
|
5400 #endif |
|
5401 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); |
|
5402 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); |
|
5403 } |
|
5404 } |
|
5405 if (op_err) { |
|
5406 /* If we have space */ |
|
5407 struct sctp_paramhdr s; |
|
5408 |
|
5409 if (err_at % 4) { |
|
5410 uint32_t cpthis = 0; |
|
5411 |
|
5412 pad_needed = 4 - (err_at % 4); |
|
5413 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); |
|
5414 err_at += pad_needed; |
|
5415 } |
|
5416 s.param_type = htons(SCTP_UNRECOG_PARAM); |
|
5417 s.param_length = htons(sizeof(s) + plen); |
|
5418 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); |
|
5419 err_at += sizeof(s); |
|
5420 if (plen > sizeof(tempbuf)) { |
|
5421 plen = sizeof(tempbuf); |
|
5422 } |
|
5423 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf),plen)); |
|
5424 if (phdr == NULL) { |
|
5425 sctp_m_freem(op_err); |
|
5426 /* |
|
5427 * we are out of memory but |
|
5428 * we still need to have a |
|
5429 * look at what to do (the |
|
5430 * system is in trouble |
|
5431 * though). |
|
5432 */ |
|
5433 op_err = NULL; |
|
5434 goto more_processing; |
|
5435 } |
|
5436 m_copyback(op_err, err_at, plen, (caddr_t)phdr); |
|
5437 err_at += plen; |
|
5438 } |
|
5439 } |
|
5440 more_processing: |
|
5441 if ((ptype & 0x8000) == 0x0000) { |
|
5442 SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n"); |
|
5443 return (op_err); |
|
5444 } else { |
|
5445 /* skip this chunk and continue processing */ |
|
5446 SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n"); |
|
5447 at += SCTP_SIZE32(plen); |
|
5448 } |
|
5449 break; |
|
5450 |
|
5451 } |
|
5452 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); |
|
5453 } |
|
5454 return (op_err); |
|
5455 invalid_size: |
|
5456 SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n"); |
|
5457 *abort_processing = 1; |
|
5458 if ((op_err == NULL) && phdr) { |
|
5459 int l_len; |
|
5460 #ifdef INET6 |
|
5461 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); |
|
5462 #else |
|
5463 l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); |
|
5464 #endif |
|
5465 l_len += (2 * sizeof(struct sctp_paramhdr)); |
|
5466 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA); |
|
5467 if (op_err) { |
|
5468 SCTP_BUF_LEN(op_err) = 0; |
|
5469 #ifdef INET6 |
|
5470 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); |
|
5471 #else |
|
5472 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); |
|
5473 #endif |
|
5474 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); |
|
5475 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); |
|
5476 } |
|
5477 } |
|
5478 if ((op_err) && phdr) { |
|
5479 struct sctp_paramhdr s; |
|
5480 |
|
5481 if (err_at % 4) { |
|
5482 uint32_t cpthis = 0; |
|
5483 |
|
5484 pad_needed = 4 - (err_at % 4); |
|
5485 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); |
|
5486 err_at += pad_needed; |
|
5487 } |
|
5488 s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
|
5489 s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr)); |
|
5490 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); |
|
5491 err_at += sizeof(s); |
|
5492 /* Only copy back the p-hdr that caused the issue */ |
|
5493 m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr); |
|
5494 } |
|
5495 return (op_err); |
|
5496 } |
|
5497 |
|
5498 static int |
|
5499 sctp_are_there_new_addresses(struct sctp_association *asoc, |
|
5500 struct mbuf *in_initpkt, int offset, struct sockaddr *src) |
|
5501 { |
|
5502 /* |
|
5503 * Given a INIT packet, look through the packet to verify that there |
|
5504 * are NO new addresses. As we go through the parameters add reports |
|
5505 * of any un-understood parameters that require an error. Also we |
|
5506 * must return (1) to drop the packet if we see a un-understood |
|
5507 * parameter that tells us to drop the chunk. |
|
5508 */ |
|
5509 struct sockaddr *sa_touse; |
|
5510 struct sockaddr *sa; |
|
5511 struct sctp_paramhdr *phdr, params; |
|
5512 uint16_t ptype, plen; |
|
5513 uint8_t fnd; |
|
5514 struct sctp_nets *net; |
|
5515 #ifdef INET |
|
5516 struct sockaddr_in sin4, *sa4; |
|
5517 #endif |
|
5518 #ifdef INET6 |
|
5519 struct sockaddr_in6 sin6, *sa6; |
|
5520 #endif |
|
5521 |
|
5522 #ifdef INET |
|
5523 memset(&sin4, 0, sizeof(sin4)); |
|
5524 sin4.sin_family = AF_INET; |
|
5525 #ifdef HAVE_SIN_LEN |
|
5526 sin4.sin_len = sizeof(sin4); |
|
5527 #endif |
|
5528 #endif |
|
5529 #ifdef INET6 |
|
5530 memset(&sin6, 0, sizeof(sin6)); |
|
5531 sin6.sin6_family = AF_INET6; |
|
5532 #ifdef HAVE_SIN6_LEN |
|
5533 sin6.sin6_len = sizeof(sin6); |
|
5534 #endif |
|
5535 #endif |
|
5536 /* First what about the src address of the pkt ? */ |
|
5537 fnd = 0; |
|
5538 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
5539 sa = (struct sockaddr *)&net->ro._l_addr; |
|
5540 if (sa->sa_family == src->sa_family) { |
|
5541 #ifdef INET |
|
5542 if (sa->sa_family == AF_INET) { |
|
5543 struct sockaddr_in *src4; |
|
5544 |
|
5545 sa4 = (struct sockaddr_in *)sa; |
|
5546 src4 = (struct sockaddr_in *)src; |
|
5547 if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) { |
|
5548 fnd = 1; |
|
5549 break; |
|
5550 } |
|
5551 } |
|
5552 #endif |
|
5553 #ifdef INET6 |
|
5554 if (sa->sa_family == AF_INET6) { |
|
5555 struct sockaddr_in6 *src6; |
|
5556 |
|
5557 sa6 = (struct sockaddr_in6 *)sa; |
|
5558 src6 = (struct sockaddr_in6 *)src; |
|
5559 if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) { |
|
5560 fnd = 1; |
|
5561 break; |
|
5562 } |
|
5563 } |
|
5564 #endif |
|
5565 } |
|
5566 } |
|
5567 if (fnd == 0) { |
|
5568 /* New address added! no need to look futher. */ |
|
5569 return (1); |
|
5570 } |
|
5571 /* Ok so far lets munge through the rest of the packet */ |
|
5572 offset += sizeof(struct sctp_init_chunk); |
|
5573 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params)); |
|
5574 while (phdr) { |
|
5575 sa_touse = NULL; |
|
5576 ptype = ntohs(phdr->param_type); |
|
5577 plen = ntohs(phdr->param_length); |
|
5578 switch (ptype) { |
|
5579 #ifdef INET |
|
5580 case SCTP_IPV4_ADDRESS: |
|
5581 { |
|
5582 struct sctp_ipv4addr_param *p4, p4_buf; |
|
5583 |
|
5584 phdr = sctp_get_next_param(in_initpkt, offset, |
|
5585 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf)); |
|
5586 if (plen != sizeof(struct sctp_ipv4addr_param) || |
|
5587 phdr == NULL) { |
|
5588 return (1); |
|
5589 } |
|
5590 p4 = (struct sctp_ipv4addr_param *)phdr; |
|
5591 sin4.sin_addr.s_addr = p4->addr; |
|
5592 sa_touse = (struct sockaddr *)&sin4; |
|
5593 break; |
|
5594 } |
|
5595 #endif |
|
5596 #ifdef INET6 |
|
5597 case SCTP_IPV6_ADDRESS: |
|
5598 { |
|
5599 struct sctp_ipv6addr_param *p6, p6_buf; |
|
5600 |
|
5601 phdr = sctp_get_next_param(in_initpkt, offset, |
|
5602 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf)); |
|
5603 if (plen != sizeof(struct sctp_ipv6addr_param) || |
|
5604 phdr == NULL) { |
|
5605 return (1); |
|
5606 } |
|
5607 p6 = (struct sctp_ipv6addr_param *)phdr; |
|
5608 memcpy((caddr_t)&sin6.sin6_addr, p6->addr, |
|
5609 sizeof(p6->addr)); |
|
5610 sa_touse = (struct sockaddr *)&sin6; |
|
5611 break; |
|
5612 } |
|
5613 #endif |
|
5614 default: |
|
5615 sa_touse = NULL; |
|
5616 break; |
|
5617 } |
|
5618 if (sa_touse) { |
|
5619 /* ok, sa_touse points to one to check */ |
|
5620 fnd = 0; |
|
5621 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
5622 sa = (struct sockaddr *)&net->ro._l_addr; |
|
5623 if (sa->sa_family != sa_touse->sa_family) { |
|
5624 continue; |
|
5625 } |
|
5626 #ifdef INET |
|
5627 if (sa->sa_family == AF_INET) { |
|
5628 sa4 = (struct sockaddr_in *)sa; |
|
5629 if (sa4->sin_addr.s_addr == |
|
5630 sin4.sin_addr.s_addr) { |
|
5631 fnd = 1; |
|
5632 break; |
|
5633 } |
|
5634 } |
|
5635 #endif |
|
5636 #ifdef INET6 |
|
5637 if (sa->sa_family == AF_INET6) { |
|
5638 sa6 = (struct sockaddr_in6 *)sa; |
|
5639 if (SCTP6_ARE_ADDR_EQUAL( |
|
5640 sa6, &sin6)) { |
|
5641 fnd = 1; |
|
5642 break; |
|
5643 } |
|
5644 } |
|
5645 #endif |
|
5646 } |
|
5647 if (!fnd) { |
|
5648 /* New addr added! no need to look further */ |
|
5649 return (1); |
|
5650 } |
|
5651 } |
|
5652 offset += SCTP_SIZE32(plen); |
|
5653 phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params)); |
|
5654 } |
|
5655 return (0); |
|
5656 } |
|
5657 |
|
/*
 * Given an MBUF chain that was sent into us containing an INIT, build an
 * INIT-ACK with COOKIE and send it back. We assume that the in_initpkt has
 * done a pullup to include the IPv6/IPv4 header, the SCTP header and the
 * initial part of the INIT message (i.e. the struct sctp_init_msg).
 */
|
5664 void |
|
5665 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, |
|
5666 struct mbuf *init_pkt, int iphlen, int offset, |
|
5667 struct sockaddr *src, struct sockaddr *dst, |
|
5668 struct sctphdr *sh, struct sctp_init_chunk *init_chk, |
|
5669 #if defined(__FreeBSD__) |
|
5670 uint8_t use_mflowid, uint32_t mflowid, |
|
5671 #endif |
|
5672 uint32_t vrf_id, uint16_t port, int hold_inp_lock) |
|
5673 { |
|
5674 struct sctp_association *asoc; |
|
5675 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last; |
|
5676 struct sctp_init_ack_chunk *initack; |
|
5677 struct sctp_adaptation_layer_indication *ali; |
|
5678 struct sctp_ecn_supported_param *ecn; |
|
5679 struct sctp_prsctp_supported_param *prsctp; |
|
5680 struct sctp_supported_chunk_types_param *pr_supported; |
|
5681 union sctp_sockstore *over_addr; |
|
5682 #ifdef INET |
|
5683 struct sockaddr_in *dst4 = (struct sockaddr_in *)dst; |
|
5684 struct sockaddr_in *src4 = (struct sockaddr_in *)src; |
|
5685 struct sockaddr_in *sin; |
|
5686 #endif |
|
5687 #ifdef INET6 |
|
5688 struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst; |
|
5689 struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src; |
|
5690 struct sockaddr_in6 *sin6; |
|
5691 #endif |
|
5692 #if defined(__Userspace__) |
|
5693 struct sockaddr_conn *dstconn = (struct sockaddr_conn *)dst; |
|
5694 struct sockaddr_conn *srcconn = (struct sockaddr_conn *)src; |
|
5695 struct sockaddr_conn *sconn; |
|
5696 #endif |
|
5697 struct sockaddr *to; |
|
5698 struct sctp_state_cookie stc; |
|
5699 struct sctp_nets *net = NULL; |
|
5700 uint8_t *signature = NULL; |
|
5701 int cnt_inits_to = 0; |
|
5702 uint16_t his_limit, i_want; |
|
5703 int abort_flag, padval; |
|
5704 int num_ext; |
|
5705 int p_len; |
|
5706 int nat_friendly = 0; |
|
5707 struct socket *so; |
|
5708 |
|
5709 if (stcb) { |
|
5710 asoc = &stcb->asoc; |
|
5711 } else { |
|
5712 asoc = NULL; |
|
5713 } |
|
5714 mp_last = NULL; |
|
5715 if ((asoc != NULL) && |
|
5716 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) && |
|
5717 (sctp_are_there_new_addresses(asoc, init_pkt, offset, src))) { |
|
5718 /* new addresses, out of here in non-cookie-wait states */ |
|
5719 /* |
|
5720 * Send a ABORT, we don't add the new address error clause |
|
5721 * though we even set the T bit and copy in the 0 tag.. this |
|
5722 * looks no different than if no listener was present. |
|
5723 */ |
|
5724 sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, NULL, |
|
5725 #if defined(__FreeBSD__) |
|
5726 use_mflowid, mflowid, |
|
5727 #endif |
|
5728 vrf_id, port); |
|
5729 return; |
|
5730 } |
|
5731 abort_flag = 0; |
|
5732 op_err = sctp_arethere_unrecognized_parameters(init_pkt, |
|
5733 (offset + sizeof(struct sctp_init_chunk)), |
|
5734 &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly); |
|
5735 if (abort_flag) { |
|
5736 do_a_abort: |
|
5737 sctp_send_abort(init_pkt, iphlen, src, dst, sh, |
|
5738 init_chk->init.initiate_tag, op_err, |
|
5739 #if defined(__FreeBSD__) |
|
5740 use_mflowid, mflowid, |
|
5741 #endif |
|
5742 vrf_id, port); |
|
5743 return; |
|
5744 } |
|
5745 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); |
|
5746 if (m == NULL) { |
|
5747 /* No memory, INIT timer will re-attempt. */ |
|
5748 if (op_err) |
|
5749 sctp_m_freem(op_err); |
|
5750 return; |
|
5751 } |
|
5752 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk); |
|
5753 |
|
5754 /* |
|
5755 * We might not overwrite the identification[] completely and on |
|
5756 * some platforms time_entered will contain some padding. |
|
5757 * Therefore zero out the cookie to avoid putting |
|
5758 * uninitialized memory on the wire. |
|
5759 */ |
|
5760 memset(&stc, 0, sizeof(struct sctp_state_cookie)); |
|
5761 |
|
5762 /* the time I built cookie */ |
|
5763 (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered); |
|
5764 |
|
5765 /* populate any tie tags */ |
|
5766 if (asoc != NULL) { |
|
5767 /* unlock before tag selections */ |
|
5768 stc.tie_tag_my_vtag = asoc->my_vtag_nonce; |
|
5769 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce; |
|
5770 stc.cookie_life = asoc->cookie_life; |
|
5771 net = asoc->primary_destination; |
|
5772 } else { |
|
5773 stc.tie_tag_my_vtag = 0; |
|
5774 stc.tie_tag_peer_vtag = 0; |
|
5775 /* life I will award this cookie */ |
|
5776 stc.cookie_life = inp->sctp_ep.def_cookie_life; |
|
5777 } |
|
5778 |
|
5779 /* copy in the ports for later check */ |
|
5780 stc.myport = sh->dest_port; |
|
5781 stc.peerport = sh->src_port; |
|
5782 |
|
5783 /* |
|
5784 * If we wanted to honor cookie life extentions, we would add to |
|
5785 * stc.cookie_life. For now we should NOT honor any extension |
|
5786 */ |
|
5787 stc.site_scope = stc.local_scope = stc.loopback_scope = 0; |
|
5788 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { |
|
5789 stc.ipv6_addr_legal = 1; |
|
5790 if (SCTP_IPV6_V6ONLY(inp)) { |
|
5791 stc.ipv4_addr_legal = 0; |
|
5792 } else { |
|
5793 stc.ipv4_addr_legal = 1; |
|
5794 } |
|
5795 #if defined(__Userspace__) |
|
5796 stc.conn_addr_legal = 0; |
|
5797 #endif |
|
5798 } else { |
|
5799 stc.ipv6_addr_legal = 0; |
|
5800 #if defined(__Userspace__) |
|
5801 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) { |
|
5802 stc.conn_addr_legal = 1; |
|
5803 stc.ipv4_addr_legal = 0; |
|
5804 } else { |
|
5805 stc.conn_addr_legal = 0; |
|
5806 stc.ipv4_addr_legal = 1; |
|
5807 } |
|
5808 #else |
|
5809 stc.ipv4_addr_legal = 1; |
|
5810 #endif |
|
5811 } |
|
5812 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE |
|
5813 stc.ipv4_scope = 1; |
|
5814 #else |
|
5815 stc.ipv4_scope = 0; |
|
5816 #endif |
|
5817 if (net == NULL) { |
|
5818 to = src; |
|
5819 switch (dst->sa_family) { |
|
5820 #ifdef INET |
|
5821 case AF_INET: |
|
5822 { |
|
5823 /* lookup address */ |
|
5824 stc.address[0] = src4->sin_addr.s_addr; |
|
5825 stc.address[1] = 0; |
|
5826 stc.address[2] = 0; |
|
5827 stc.address[3] = 0; |
|
5828 stc.addr_type = SCTP_IPV4_ADDRESS; |
|
5829 /* local from address */ |
|
5830 stc.laddress[0] = dst4->sin_addr.s_addr; |
|
5831 stc.laddress[1] = 0; |
|
5832 stc.laddress[2] = 0; |
|
5833 stc.laddress[3] = 0; |
|
5834 stc.laddr_type = SCTP_IPV4_ADDRESS; |
|
5835 /* scope_id is only for v6 */ |
|
5836 stc.scope_id = 0; |
|
5837 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE |
|
5838 if (IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) { |
|
5839 stc.ipv4_scope = 1; |
|
5840 } |
|
5841 #else |
|
5842 stc.ipv4_scope = 1; |
|
5843 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */ |
|
5844 /* Must use the address in this case */ |
|
5845 if (sctp_is_address_on_local_host(src, vrf_id)) { |
|
5846 stc.loopback_scope = 1; |
|
5847 stc.ipv4_scope = 1; |
|
5848 stc.site_scope = 1; |
|
5849 stc.local_scope = 0; |
|
5850 } |
|
5851 break; |
|
5852 } |
|
5853 #endif |
|
5854 #ifdef INET6 |
|
5855 case AF_INET6: |
|
5856 { |
|
5857 stc.addr_type = SCTP_IPV6_ADDRESS; |
|
5858 memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr)); |
|
5859 #if defined(__FreeBSD__) && (((__FreeBSD_version < 900000) && (__FreeBSD_version >= 804000)) || (__FreeBSD_version > 900000)) |
|
5860 stc.scope_id = in6_getscope(&src6->sin6_addr); |
|
5861 #else |
|
5862 stc.scope_id = 0; |
|
5863 #endif |
|
5864 if (sctp_is_address_on_local_host(src, vrf_id)) { |
|
5865 stc.loopback_scope = 1; |
|
5866 stc.local_scope = 0; |
|
5867 stc.site_scope = 1; |
|
5868 stc.ipv4_scope = 1; |
|
5869 } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr)) { |
|
5870 /* |
|
5871 * If the new destination is a LINK_LOCAL we |
|
5872 * must have common both site and local |
|
5873 * scope. Don't set local scope though since |
|
5874 * we must depend on the source to be added |
|
5875 * implicitly. We cannot assure just because |
|
5876 * we share one link that all links are |
|
5877 * common. |
|
5878 */ |
|
5879 #if defined(__APPLE__) |
|
5880 /* Mac OS X currently doesn't have in6_getscope() */ |
|
5881 stc.scope_id = src6->sin6_addr.s6_addr16[1]; |
|
5882 #endif |
|
5883 stc.local_scope = 0; |
|
5884 stc.site_scope = 1; |
|
5885 stc.ipv4_scope = 1; |
|
5886 /* |
|
5887 * we start counting for the private address |
|
5888 * stuff at 1. since the link local we |
|
5889 * source from won't show up in our scoped |
|
5890 * count. |
|
5891 */ |
|
5892 cnt_inits_to = 1; |
|
5893 /* pull out the scope_id from incoming pkt */ |
|
5894 } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr)) { |
|
5895 /* |
|
5896 * If the new destination is SITE_LOCAL then |
|
5897 * we must have site scope in common. |
|
5898 */ |
|
5899 stc.site_scope = 1; |
|
5900 } |
|
5901 memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr)); |
|
5902 stc.laddr_type = SCTP_IPV6_ADDRESS; |
|
5903 break; |
|
5904 } |
|
5905 #endif |
|
5906 #if defined(__Userspace__) |
|
5907 case AF_CONN: |
|
5908 { |
|
5909 /* lookup address */ |
|
5910 stc.address[0] = 0; |
|
5911 stc.address[1] = 0; |
|
5912 stc.address[2] = 0; |
|
5913 stc.address[3] = 0; |
|
5914 memcpy(&stc.address, &srcconn->sconn_addr, sizeof(void *)); |
|
5915 stc.addr_type = SCTP_CONN_ADDRESS; |
|
5916 /* local from address */ |
|
5917 stc.laddress[0] = 0; |
|
5918 stc.laddress[1] = 0; |
|
5919 stc.laddress[2] = 0; |
|
5920 stc.laddress[3] = 0; |
|
5921 memcpy(&stc.laddress, &dstconn->sconn_addr, sizeof(void *)); |
|
5922 stc.laddr_type = SCTP_CONN_ADDRESS; |
|
5923 /* scope_id is only for v6 */ |
|
5924 stc.scope_id = 0; |
|
5925 break; |
|
5926 } |
|
5927 #endif |
|
5928 default: |
|
5929 /* TSNH */ |
|
5930 goto do_a_abort; |
|
5931 break; |
|
5932 } |
|
5933 } else { |
|
5934 /* set the scope per the existing tcb */ |
|
5935 |
|
5936 #ifdef INET6 |
|
5937 struct sctp_nets *lnet; |
|
5938 #endif |
|
5939 |
|
5940 stc.loopback_scope = asoc->scope.loopback_scope; |
|
5941 stc.ipv4_scope = asoc->scope.ipv4_local_scope; |
|
5942 stc.site_scope = asoc->scope.site_scope; |
|
5943 stc.local_scope = asoc->scope.local_scope; |
|
5944 #ifdef INET6 |
|
5945 /* Why do we not consider IPv4 LL addresses? */ |
|
5946 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { |
|
5947 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) { |
|
5948 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) { |
|
5949 /* |
|
5950 * if we have a LL address, start |
|
5951 * counting at 1. |
|
5952 */ |
|
5953 cnt_inits_to = 1; |
|
5954 } |
|
5955 } |
|
5956 } |
|
5957 #endif |
|
5958 /* use the net pointer */ |
|
5959 to = (struct sockaddr *)&net->ro._l_addr; |
|
5960 switch (to->sa_family) { |
|
5961 #ifdef INET |
|
5962 case AF_INET: |
|
5963 sin = (struct sockaddr_in *)to; |
|
5964 stc.address[0] = sin->sin_addr.s_addr; |
|
5965 stc.address[1] = 0; |
|
5966 stc.address[2] = 0; |
|
5967 stc.address[3] = 0; |
|
5968 stc.addr_type = SCTP_IPV4_ADDRESS; |
|
5969 if (net->src_addr_selected == 0) { |
|
5970 /* |
|
5971 * strange case here, the INIT should have |
|
5972 * did the selection. |
|
5973 */ |
|
5974 net->ro._s_addr = sctp_source_address_selection(inp, |
|
5975 stcb, (sctp_route_t *)&net->ro, |
|
5976 net, 0, vrf_id); |
|
5977 if (net->ro._s_addr == NULL) |
|
5978 return; |
|
5979 |
|
5980 net->src_addr_selected = 1; |
|
5981 |
|
5982 } |
|
5983 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr; |
|
5984 stc.laddress[1] = 0; |
|
5985 stc.laddress[2] = 0; |
|
5986 stc.laddress[3] = 0; |
|
5987 stc.laddr_type = SCTP_IPV4_ADDRESS; |
|
5988 /* scope_id is only for v6 */ |
|
5989 stc.scope_id = 0; |
|
5990 break; |
|
5991 #endif |
|
5992 #ifdef INET6 |
|
5993 case AF_INET6: |
|
5994 sin6 = (struct sockaddr_in6 *)to; |
|
5995 memcpy(&stc.address, &sin6->sin6_addr, |
|
5996 sizeof(struct in6_addr)); |
|
5997 stc.addr_type = SCTP_IPV6_ADDRESS; |
|
5998 stc.scope_id = sin6->sin6_scope_id; |
|
5999 if (net->src_addr_selected == 0) { |
|
6000 /* |
|
6001 * strange case here, the INIT should have |
|
6002 * done the selection. |
|
6003 */ |
|
6004 net->ro._s_addr = sctp_source_address_selection(inp, |
|
6005 stcb, (sctp_route_t *)&net->ro, |
|
6006 net, 0, vrf_id); |
|
6007 if (net->ro._s_addr == NULL) |
|
6008 return; |
|
6009 |
|
6010 net->src_addr_selected = 1; |
|
6011 } |
|
6012 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr, |
|
6013 sizeof(struct in6_addr)); |
|
6014 stc.laddr_type = SCTP_IPV6_ADDRESS; |
|
6015 break; |
|
6016 #endif |
|
6017 #if defined(__Userspace__) |
|
6018 case AF_CONN: |
|
6019 sconn = (struct sockaddr_conn *)to; |
|
6020 stc.address[0] = 0; |
|
6021 stc.address[1] = 0; |
|
6022 stc.address[2] = 0; |
|
6023 stc.address[3] = 0; |
|
6024 memcpy(&stc.address, &sconn->sconn_addr, sizeof(void *)); |
|
6025 stc.addr_type = SCTP_CONN_ADDRESS; |
|
6026 stc.laddress[0] = 0; |
|
6027 stc.laddress[1] = 0; |
|
6028 stc.laddress[2] = 0; |
|
6029 stc.laddress[3] = 0; |
|
6030 memcpy(&stc.laddress, &sconn->sconn_addr, sizeof(void *)); |
|
6031 stc.laddr_type = SCTP_CONN_ADDRESS; |
|
6032 stc.scope_id = 0; |
|
6033 break; |
|
6034 #endif |
|
6035 } |
|
6036 } |
|
6037 /* Now lets put the SCTP header in place */ |
|
6038 initack = mtod(m, struct sctp_init_ack_chunk *); |
|
6039 /* Save it off for quick ref */ |
|
6040 stc.peers_vtag = init_chk->init.initiate_tag; |
|
6041 /* who are we */ |
|
6042 memcpy(stc.identification, SCTP_VERSION_STRING, |
|
6043 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification))); |
|
6044 memset(stc.reserved, 0, SCTP_RESERVE_SPACE); |
|
6045 /* now the chunk header */ |
|
6046 initack->ch.chunk_type = SCTP_INITIATION_ACK; |
|
6047 initack->ch.chunk_flags = 0; |
|
6048 /* fill in later from mbuf we build */ |
|
6049 initack->ch.chunk_length = 0; |
|
6050 /* place in my tag */ |
|
6051 if ((asoc != NULL) && |
|
6052 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || |
|
6053 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) || |
|
6054 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) { |
|
6055 /* re-use the v-tags and init-seq here */ |
|
6056 initack->init.initiate_tag = htonl(asoc->my_vtag); |
|
6057 initack->init.initial_tsn = htonl(asoc->init_seq_number); |
|
6058 } else { |
|
6059 uint32_t vtag, itsn; |
|
6060 if (hold_inp_lock) { |
|
6061 SCTP_INP_INCR_REF(inp); |
|
6062 SCTP_INP_RUNLOCK(inp); |
|
6063 } |
|
6064 if (asoc) { |
|
6065 atomic_add_int(&asoc->refcnt, 1); |
|
6066 SCTP_TCB_UNLOCK(stcb); |
|
6067 new_tag: |
|
6068 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1); |
|
6069 if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) { |
|
6070 /* Got a duplicate vtag on some guy behind a nat |
|
6071 * make sure we don't use it. |
|
6072 */ |
|
6073 goto new_tag; |
|
6074 } |
|
6075 initack->init.initiate_tag = htonl(vtag); |
|
6076 /* get a TSN to use too */ |
|
6077 itsn = sctp_select_initial_TSN(&inp->sctp_ep); |
|
6078 initack->init.initial_tsn = htonl(itsn); |
|
6079 SCTP_TCB_LOCK(stcb); |
|
6080 atomic_add_int(&asoc->refcnt, -1); |
|
6081 } else { |
|
6082 vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1); |
|
6083 initack->init.initiate_tag = htonl(vtag); |
|
6084 /* get a TSN to use too */ |
|
6085 initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep)); |
|
6086 } |
|
6087 if (hold_inp_lock) { |
|
6088 SCTP_INP_RLOCK(inp); |
|
6089 SCTP_INP_DECR_REF(inp); |
|
6090 } |
|
6091 } |
|
6092 /* save away my tag to */ |
|
6093 stc.my_vtag = initack->init.initiate_tag; |
|
6094 |
|
6095 /* set up some of the credits. */ |
|
6096 so = inp->sctp_socket; |
|
6097 if (so == NULL) { |
|
6098 /* memory problem */ |
|
6099 sctp_m_freem(m); |
|
6100 return; |
|
6101 } else { |
|
6102 initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND)); |
|
6103 } |
|
6104 /* set what I want */ |
|
6105 his_limit = ntohs(init_chk->init.num_inbound_streams); |
|
6106 /* choose what I want */ |
|
6107 if (asoc != NULL) { |
|
6108 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) { |
|
6109 i_want = asoc->streamoutcnt; |
|
6110 } else { |
|
6111 i_want = inp->sctp_ep.pre_open_stream_count; |
|
6112 } |
|
6113 } else { |
|
6114 i_want = inp->sctp_ep.pre_open_stream_count; |
|
6115 } |
|
6116 if (his_limit < i_want) { |
|
6117 /* I Want more :< */ |
|
6118 initack->init.num_outbound_streams = init_chk->init.num_inbound_streams; |
|
6119 } else { |
|
6120 /* I can have what I want :> */ |
|
6121 initack->init.num_outbound_streams = htons(i_want); |
|
6122 } |
|
6123 /* tell him his limit. */ |
|
6124 initack->init.num_inbound_streams = |
|
6125 htons(inp->sctp_ep.max_open_streams_intome); |
|
6126 |
|
6127 /* adaptation layer indication parameter */ |
|
6128 if (inp->sctp_ep.adaptation_layer_indicator_provided) { |
|
6129 ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack)); |
|
6130 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION); |
|
6131 ali->ph.param_length = htons(sizeof(*ali)); |
|
6132 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator); |
|
6133 SCTP_BUF_LEN(m) += sizeof(*ali); |
|
6134 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali)); |
|
6135 } else { |
|
6136 ecn = (struct sctp_ecn_supported_param *)((caddr_t)initack + sizeof(*initack)); |
|
6137 } |
|
6138 |
|
6139 /* ECN parameter */ |
|
6140 if (((asoc != NULL) && (asoc->ecn_allowed == 1)) || |
|
6141 (inp->sctp_ecn_enable == 1)) { |
|
6142 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE); |
|
6143 ecn->ph.param_length = htons(sizeof(*ecn)); |
|
6144 SCTP_BUF_LEN(m) += sizeof(*ecn); |
|
6145 |
|
6146 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn + |
|
6147 sizeof(*ecn)); |
|
6148 } else { |
|
6149 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn); |
|
6150 } |
|
6151 /* And now tell the peer we do pr-sctp */ |
|
6152 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED); |
|
6153 prsctp->ph.param_length = htons(sizeof(*prsctp)); |
|
6154 SCTP_BUF_LEN(m) += sizeof(*prsctp); |
|
6155 if (nat_friendly) { |
|
6156 /* Add NAT friendly parameter */ |
|
6157 struct sctp_paramhdr *ph; |
|
6158 |
|
6159 ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m)); |
|
6160 ph->param_type = htons(SCTP_HAS_NAT_SUPPORT); |
|
6161 ph->param_length = htons(sizeof(struct sctp_paramhdr)); |
|
6162 SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr); |
|
6163 } |
|
6164 /* And now tell the peer we do all the extensions */ |
|
6165 pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m)); |
|
6166 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT); |
|
6167 num_ext = 0; |
|
6168 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF; |
|
6169 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK; |
|
6170 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN; |
|
6171 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED; |
|
6172 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET; |
|
6173 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) |
|
6174 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION; |
|
6175 if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) |
|
6176 pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK; |
|
6177 p_len = sizeof(*pr_supported) + num_ext; |
|
6178 pr_supported->ph.param_length = htons(p_len); |
|
6179 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len); |
|
6180 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); |
|
6181 |
|
6182 /* add authentication parameters */ |
|
6183 if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) { |
|
6184 struct sctp_auth_random *randp; |
|
6185 struct sctp_auth_hmac_algo *hmacs; |
|
6186 struct sctp_auth_chunk_list *chunks; |
|
6187 uint16_t random_len; |
|
6188 |
|
6189 /* generate and add RANDOM parameter */ |
|
6190 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT; |
|
6191 randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m)); |
|
6192 randp->ph.param_type = htons(SCTP_RANDOM); |
|
6193 p_len = sizeof(*randp) + random_len; |
|
6194 randp->ph.param_length = htons(p_len); |
|
6195 SCTP_READ_RANDOM(randp->random_data, random_len); |
|
6196 /* zero out any padding required */ |
|
6197 bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len); |
|
6198 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); |
|
6199 |
|
6200 /* add HMAC_ALGO parameter */ |
|
6201 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m)); |
|
6202 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs, |
|
6203 (uint8_t *) hmacs->hmac_ids); |
|
6204 if (p_len > 0) { |
|
6205 p_len += sizeof(*hmacs); |
|
6206 hmacs->ph.param_type = htons(SCTP_HMAC_LIST); |
|
6207 hmacs->ph.param_length = htons(p_len); |
|
6208 /* zero out any padding required */ |
|
6209 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len); |
|
6210 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); |
|
6211 } |
|
6212 /* add CHUNKS parameter */ |
|
6213 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m)); |
|
6214 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks, |
|
6215 chunks->chunk_types); |
|
6216 if (p_len > 0) { |
|
6217 p_len += sizeof(*chunks); |
|
6218 chunks->ph.param_type = htons(SCTP_CHUNK_LIST); |
|
6219 chunks->ph.param_length = htons(p_len); |
|
6220 /* zero out any padding required */ |
|
6221 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len); |
|
6222 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); |
|
6223 } |
|
6224 } |
|
6225 m_at = m; |
|
6226 /* now the addresses */ |
|
6227 { |
|
6228 struct sctp_scoping scp; |
|
6229 /* To optimize this we could put the scoping stuff |
|
6230 * into a structure and remove the individual uint8's from |
|
6231 * the stc structure. Then we could just sifa in the |
|
6232 * address within the stc.. but for now this is a quick |
|
6233 * hack to get the address stuff teased apart. |
|
6234 */ |
|
6235 scp.ipv4_addr_legal = stc.ipv4_addr_legal; |
|
6236 scp.ipv6_addr_legal = stc.ipv6_addr_legal; |
|
6237 #if defined(__Userspace__) |
|
6238 scp.conn_addr_legal = stc.conn_addr_legal; |
|
6239 #endif |
|
6240 scp.loopback_scope = stc.loopback_scope; |
|
6241 scp.ipv4_local_scope = stc.ipv4_scope; |
|
6242 scp.local_scope = stc.local_scope; |
|
6243 scp.site_scope = stc.site_scope; |
|
6244 m_at = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_at, cnt_inits_to, NULL, NULL); |
|
6245 } |
|
6246 |
|
6247 /* tack on the operational error if present */ |
|
6248 if (op_err) { |
|
6249 struct mbuf *ol; |
|
6250 int llen; |
|
6251 llen = 0; |
|
6252 ol = op_err; |
|
6253 |
|
6254 while (ol) { |
|
6255 llen += SCTP_BUF_LEN(ol); |
|
6256 ol = SCTP_BUF_NEXT(ol); |
|
6257 } |
|
6258 if (llen % 4) { |
|
6259 /* must add a pad to the param */ |
|
6260 uint32_t cpthis = 0; |
|
6261 int padlen; |
|
6262 |
|
6263 padlen = 4 - (llen % 4); |
|
6264 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis); |
|
6265 } |
|
6266 while (SCTP_BUF_NEXT(m_at) != NULL) { |
|
6267 m_at = SCTP_BUF_NEXT(m_at); |
|
6268 } |
|
6269 SCTP_BUF_NEXT(m_at) = op_err; |
|
6270 while (SCTP_BUF_NEXT(m_at) != NULL) { |
|
6271 m_at = SCTP_BUF_NEXT(m_at); |
|
6272 } |
|
6273 } |
|
6274 /* pre-calulate the size and update pkt header and chunk header */ |
|
6275 p_len = 0; |
|
6276 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) { |
|
6277 p_len += SCTP_BUF_LEN(m_tmp); |
|
6278 if (SCTP_BUF_NEXT(m_tmp) == NULL) { |
|
6279 /* m_tmp should now point to last one */ |
|
6280 break; |
|
6281 } |
|
6282 } |
|
6283 |
|
6284 /* Now we must build a cookie */ |
|
6285 m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature); |
|
6286 if (m_cookie == NULL) { |
|
6287 /* memory problem */ |
|
6288 sctp_m_freem(m); |
|
6289 return; |
|
6290 } |
|
6291 /* Now append the cookie to the end and update the space/size */ |
|
6292 SCTP_BUF_NEXT(m_tmp) = m_cookie; |
|
6293 |
|
6294 for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) { |
|
6295 p_len += SCTP_BUF_LEN(m_tmp); |
|
6296 if (SCTP_BUF_NEXT(m_tmp) == NULL) { |
|
6297 /* m_tmp should now point to last one */ |
|
6298 mp_last = m_tmp; |
|
6299 break; |
|
6300 } |
|
6301 } |
|
6302 /* Place in the size, but we don't include |
|
6303 * the last pad (if any) in the INIT-ACK. |
|
6304 */ |
|
6305 initack->ch.chunk_length = htons(p_len); |
|
6306 |
|
6307 /* Time to sign the cookie, we don't sign over the cookie |
|
6308 * signature though thus we set trailer. |
|
6309 */ |
|
6310 (void)sctp_hmac_m(SCTP_HMAC, |
|
6311 (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)], |
|
6312 SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr), |
|
6313 (uint8_t *)signature, SCTP_SIGNATURE_SIZE); |
|
6314 /* |
|
6315 * We sifa 0 here to NOT set IP_DF if its IPv4, we ignore the return |
|
6316 * here since the timer will drive a retranmission. |
|
6317 */ |
|
6318 padval = p_len % 4; |
|
6319 if ((padval) && (mp_last)) { |
|
6320 /* see my previous comments on mp_last */ |
|
6321 if (sctp_add_pad_tombuf(mp_last, (4 - padval))) { |
|
6322 /* Houston we have a problem, no space */ |
|
6323 sctp_m_freem(m); |
|
6324 return; |
|
6325 } |
|
6326 } |
|
6327 if (stc.loopback_scope) { |
|
6328 over_addr = (union sctp_sockstore *)dst; |
|
6329 } else { |
|
6330 over_addr = NULL; |
|
6331 } |
|
6332 |
|
6333 (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0, |
|
6334 0, 0, |
|
6335 inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag, |
|
6336 port, over_addr, |
|
6337 #if defined(__FreeBSD__) |
|
6338 use_mflowid, mflowid, |
|
6339 #endif |
|
6340 SCTP_SO_NOT_LOCKED); |
|
6341 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); |
|
6342 } |
|
6343 |
|
6344 |
|
6345 static void |
|
6346 sctp_prune_prsctp(struct sctp_tcb *stcb, |
|
6347 struct sctp_association *asoc, |
|
6348 struct sctp_sndrcvinfo *srcv, |
|
6349 int dataout) |
|
6350 { |
|
6351 int freed_spc = 0; |
|
6352 struct sctp_tmit_chunk *chk, *nchk; |
|
6353 |
|
6354 SCTP_TCB_LOCK_ASSERT(stcb); |
|
6355 if ((asoc->peer_supports_prsctp) && |
|
6356 (asoc->sent_queue_cnt_removeable > 0)) { |
|
6357 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { |
|
6358 /* |
|
6359 * Look for chunks marked with the PR_SCTP flag AND |
|
6360 * the buffer space flag. If the one being sent is |
|
6361 * equal or greater priority then purge the old one |
|
6362 * and free some space. |
|
6363 */ |
|
6364 if (PR_SCTP_BUF_ENABLED(chk->flags)) { |
|
6365 /* |
|
6366 * This one is PR-SCTP AND buffer space |
|
6367 * limited type |
|
6368 */ |
|
6369 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) { |
|
6370 /* |
|
6371 * Lower numbers equates to higher |
|
6372 * priority so if the one we are |
|
6373 * looking at has a larger or equal |
|
6374 * priority we want to drop the data |
|
6375 * and NOT retransmit it. |
|
6376 */ |
|
6377 if (chk->data) { |
|
6378 /* |
|
6379 * We release the book_size |
|
6380 * if the mbuf is here |
|
6381 */ |
|
6382 int ret_spc; |
|
6383 uint8_t sent; |
|
6384 |
|
6385 if (chk->sent > SCTP_DATAGRAM_UNSENT) |
|
6386 sent = 1; |
|
6387 else |
|
6388 sent = 0; |
|
6389 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk, |
|
6390 sent, |
|
6391 SCTP_SO_LOCKED); |
|
6392 freed_spc += ret_spc; |
|
6393 if (freed_spc >= dataout) { |
|
6394 return; |
|
6395 } |
|
6396 } /* if chunk was present */ |
|
6397 } /* if of sufficent priority */ |
|
6398 } /* if chunk has enabled */ |
|
6399 } /* tailqforeach */ |
|
6400 |
|
6401 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { |
|
6402 /* Here we must move to the sent queue and mark */ |
|
6403 if (PR_SCTP_BUF_ENABLED(chk->flags)) { |
|
6404 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) { |
|
6405 if (chk->data) { |
|
6406 /* |
|
6407 * We release the book_size |
|
6408 * if the mbuf is here |
|
6409 */ |
|
6410 int ret_spc; |
|
6411 |
|
6412 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk, |
|
6413 0, SCTP_SO_LOCKED); |
|
6414 |
|
6415 freed_spc += ret_spc; |
|
6416 if (freed_spc >= dataout) { |
|
6417 return; |
|
6418 } |
|
6419 } /* end if chk->data */ |
|
6420 } /* end if right class */ |
|
6421 } /* end if chk pr-sctp */ |
|
6422 } /* tailqforeachsafe (chk) */ |
|
6423 } /* if enabled in asoc */ |
|
6424 } |
|
6425 |
|
6426 int |
|
6427 sctp_get_frag_point(struct sctp_tcb *stcb, |
|
6428 struct sctp_association *asoc) |
|
6429 { |
|
6430 int siz, ovh; |
|
6431 |
|
6432 /* |
|
6433 * For endpoints that have both v6 and v4 addresses we must reserve |
|
6434 * room for the ipv6 header, for those that are only dealing with V4 |
|
6435 * we use a larger frag point. |
|
6436 */ |
|
6437 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { |
|
6438 ovh = SCTP_MED_OVERHEAD; |
|
6439 } else { |
|
6440 ovh = SCTP_MED_V4_OVERHEAD; |
|
6441 } |
|
6442 |
|
6443 if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu) |
|
6444 siz = asoc->smallest_mtu - ovh; |
|
6445 else |
|
6446 siz = (stcb->asoc.sctp_frag_point - ovh); |
|
6447 /* |
|
6448 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) { |
|
6449 */ |
|
6450 /* A data chunk MUST fit in a cluster */ |
|
6451 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */ |
|
6452 /* } */ |
|
6453 |
|
6454 /* adjust for an AUTH chunk if DATA requires auth */ |
|
6455 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) |
|
6456 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); |
|
6457 |
|
6458 if (siz % 4) { |
|
6459 /* make it an even word boundary please */ |
|
6460 siz -= (siz % 4); |
|
6461 } |
|
6462 return (siz); |
|
6463 } |
|
6464 |
|
/*
 * Derive and record the PR-SCTP policy for a pending stream-queue entry
 * from its sinfo_flags/timetolive, and precompute the drop criterion in
 * sp->ts: an absolute expiry time (TTL policy), a priority (BUF policy),
 * or a retransmission limit (RTX policy).  Entries with no policy and no
 * positive lifetime are left untouched.
 */
static void
sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
{
	/*
	 * We assume that the user wants PR_SCTP_TTL if the user
	 * provides a positive lifetime but does not specify any
	 * PR_SCTP policy.
	 */
	if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
	} else if (sp->timetolive > 0) {
		/* no explicit policy, positive lifetime: default to TTL */
		sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
	} else {
		return;
	}
	switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
	case CHUNK_FLAGS_PR_SCTP_BUF:
		/*
		 * Time to live is a priority stored in tv_sec when
		 * doing the buffer drop thing.
		 */
		sp->ts.tv_sec = sp->timetolive;
		sp->ts.tv_usec = 0;
		break;
	case CHUNK_FLAGS_PR_SCTP_TTL:
	{
		struct timeval tv;

		/* ts = now + timetolive (timetolive is in milliseconds) */
		(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
		tv.tv_sec = sp->timetolive / 1000;
		tv.tv_usec = (sp->timetolive * 1000) % 1000000;
		/* TODO sctp_constants.h needs alternative time macros when
		 * _KERNEL is undefined.
		 */
#ifndef __FreeBSD__
		timeradd(&sp->ts, &tv, &sp->ts);
#else
		timevaladd(&sp->ts, &tv);
#endif
	}
		break;
	case CHUNK_FLAGS_PR_SCTP_RTX:
		/*
		 * Time to live is a the number or retransmissions
		 * stored in tv_sec.
		 */
		sp->ts.tv_sec = sp->timetolive;
		sp->ts.tv_usec = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_USRREQ1,
		    "Unknown PR_SCTP policy %u.\n",
		    PR_SCTP_POLICY(sp->sinfo_flags));
		break;
	}
}
|
6521 |
|
/*
 * Append a complete message (mbuf chain 'm') to the outgoing stream
 * queue selected by srcv->sinfo_stream and hand it to the stream
 * scheduler.  On success, ownership of 'm' transfers to the queued
 * entry; on any error path 'm' is freed here.  Returns 0 or an errno.
 * Caller holds the TCB lock; pass hold_stcb_lock != 0 when the SEND
 * lock is already held.
 */
static int
sctp_msg_append(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct mbuf *m,
    struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
{
	int error = 0;
	struct mbuf *at;
	struct sctp_stream_queue_pending *sp = NULL;
	struct sctp_stream_out *strm;

	/* Given an mbuf chain, put it
	 * into the association send queue and
	 * place it on the wheel
	 */
	if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
		/* Invalid stream number */
		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		error = EINVAL;
		goto out_now;
	}
	/* cannot interleave into a stream another partial send is locked on */
	if ((stcb->asoc.stream_locked) &&
	    (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		error = EINVAL;
		goto out_now;
	}
	strm = &stcb->asoc.strmout[srcv->sinfo_stream];
	/* Now can we send this? */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
		/* got data while shutting down */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
		error = ECONNRESET;
		goto out_now;
	}
	sctp_alloc_a_strmoq(stcb, sp);
	if (sp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		error = ENOMEM;
		goto out_now;
	}
	/* copy the send parameters into the pending-queue entry */
	sp->sinfo_flags = srcv->sinfo_flags;
	sp->timetolive = srcv->sinfo_timetolive;
	sp->ppid = srcv->sinfo_ppid;
	sp->context = srcv->sinfo_context;
	if (sp->sinfo_flags & SCTP_ADDR_OVER) {
		/* caller pinned a destination; take a reference on it */
		sp->net = net;
		atomic_add_int(&sp->net->ref_count, 1);
	} else {
		sp->net = NULL;
	}
	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
	sp->stream = srcv->sinfo_stream;
	/* the entire message arrives here at once, so it is complete */
	sp->msg_is_complete = 1;
	sp->sender_all_done = 1;
	sp->some_taken = 0;
	sp->data = m;
	sp->tail_mbuf = NULL;
	sctp_set_prsctp_policy(sp);
	/* We could in theory (for sendall) sifa the length
	 * in, but we would still have to hunt through the
	 * chain since we need to setup the tail_mbuf
	 */
	sp->length = 0;
	for (at = m; at; at = SCTP_BUF_NEXT(at)) {
		if (SCTP_BUF_NEXT(at) == NULL)
			sp->tail_mbuf = at;
		sp->length += SCTP_BUF_LEN(at);
	}
	if (srcv->sinfo_keynumber_valid) {
		sp->auth_keyid = srcv->sinfo_keynumber;
	} else {
		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
	}
	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
		/* DATA must be authenticated: pin the key for this entry */
		sctp_auth_key_acquire(stcb, sp->auth_keyid);
		sp->holds_key_ref = 1;
	}
	if (hold_stcb_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	sctp_snd_sb_alloc(stcb, sp->length);
	atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
	TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
	stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
	/* ownership of the chain now belongs to sp; do not free below */
	m = NULL;
	if (hold_stcb_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
out_now:
	if (m) {
		sctp_m_freem(m);
	}
	return (error);
}
|
6620 |
|
6621 |
|
/*
 * Append the contents of 'clonechain' to 'outchain', returning the head
 * of the (possibly newly allocated) output chain, or NULL on failure
 * (in which case 'outchain' has been freed).  *endofchain caches the
 * last mbuf of the output chain and is updated on return.
 *
 * Three strategies:
 *  - can_take_mbuf:  take ownership of clonechain and link it on directly;
 *  - small payloads (when !copy_by_ref and sizeofcpy fits the sysctl'd
 *    mbuf threshold):  flat-copy the bytes into the tail mbuf(s);
 *  - otherwise:  reference-copy via SCTP_M_COPYM and link the copy on.
 */
static struct mbuf *
sctp_copy_mbufchain(struct mbuf *clonechain,
    struct mbuf *outchain,
    struct mbuf **endofchain,
    int can_take_mbuf,
    int sizeofcpy,
    uint8_t copy_by_ref)
{
	struct mbuf *m;
	struct mbuf *appendchain;
	caddr_t cp;
	int len;

	if (endofchain == NULL) {
		/* error */
	error_out:
		if (outchain)
			sctp_m_freem(outchain);
		return (NULL);
	}
	if (can_take_mbuf) {
		/* caller gives up ownership; just link it on */
		appendchain = clonechain;
	} else {
		if (!copy_by_ref &&
#if defined(__Panda__)
		    0
#else
		    /* small enough to flat-copy into ordinary mbufs? */
		    (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
#endif
		    ) {
			/* Its not in a cluster */
			if (*endofchain == NULL) {
				/* lets get a mbuf cluster */
				if (outchain == NULL) {
					/* This is the general case */
				new_mbuf:
					outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
					if (outchain == NULL) {
						goto error_out;
					}
					SCTP_BUF_LEN(outchain) = 0;
					*endofchain = outchain;
					/* get the prepend space */
					SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV+4));
				} else {
					/* We really should not get a NULL in endofchain */
					/* find end */
					m = outchain;
					while (m) {
						if (SCTP_BUF_NEXT(m) == NULL) {
							*endofchain = m;
							break;
						}
						m = SCTP_BUF_NEXT(m);
					}
					/* sanity */
					if (*endofchain == NULL) {
						/* huh, TSNH XXX maybe we should panic */
						sctp_m_freem(outchain);
						goto new_mbuf;
					}
				}
				/* get the new end of length */
				len = M_TRAILINGSPACE(*endofchain);
			} else {
				/* how much is left at the end? */
				len = M_TRAILINGSPACE(*endofchain);
			}
			/* Find the end of the data, for appending */
			cp = (mtod((*endofchain), caddr_t) + SCTP_BUF_LEN((*endofchain)));

			/* Now lets copy it out */
			if (len >= sizeofcpy) {
				/* It all fits, copy it in */
				m_copydata(clonechain, 0, sizeofcpy, cp);
				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
			} else {
				/* fill up the end of the chain */
				if (len > 0) {
					m_copydata(clonechain, 0, len, cp);
					SCTP_BUF_LEN((*endofchain)) += len;
					/* now we need another one */
					sizeofcpy -= len;
				}
				m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
				if (m == NULL) {
					/* We failed */
					goto error_out;
				}
				SCTP_BUF_NEXT((*endofchain)) = m;
				*endofchain = m;
				cp = mtod((*endofchain), caddr_t);
				/* copy the remainder into the fresh mbuf */
				m_copydata(clonechain, len, sizeofcpy, cp);
				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
			}
			return (outchain);
		} else {
			/* copy the old fashion way */
			appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
#ifdef SCTP_MBUF_LOGGING
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
				struct mbuf *mat;

				for (mat = appendchain; mat; mat = SCTP_BUF_NEXT(mat)) {
					if (SCTP_BUF_IS_EXTENDED(mat)) {
						sctp_log_mb(mat, SCTP_MBUF_ICOPY);
					}
				}
			}
#endif
		}
	}
	if (appendchain == NULL) {
		/* error */
		if (outchain)
			sctp_m_freem(outchain);
		return (NULL);
	}
	if (outchain) {
		/* tack on to the end */
		if (*endofchain != NULL) {
			SCTP_BUF_NEXT(((*endofchain))) = appendchain;
		} else {
			m = outchain;
			while (m) {
				if (SCTP_BUF_NEXT(m) == NULL) {
					SCTP_BUF_NEXT(m) = appendchain;
					break;
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		/*
		 * save off the end and update the end-chain
		 * postion
		 */
		m = appendchain;
		while (m) {
			if (SCTP_BUF_NEXT(m) == NULL) {
				*endofchain = m;
				break;
			}
			m = SCTP_BUF_NEXT(m);
		}
		return (outchain);
	} else {
		/* save off the end and update the end-chain postion */
		m = appendchain;
		while (m) {
			if (SCTP_BUF_NEXT(m) == NULL) {
				*endofchain = m;
				break;
			}
			m = SCTP_BUF_NEXT(m);
		}
		return (appendchain);
	}
}
|
6780 |
|
6781 static int |
|
6782 sctp_med_chunk_output(struct sctp_inpcb *inp, |
|
6783 struct sctp_tcb *stcb, |
|
6784 struct sctp_association *asoc, |
|
6785 int *num_out, |
|
6786 int *reason_code, |
|
6787 int control_only, int from_where, |
|
6788 struct timeval *now, int *now_filled, int frag_point, int so_locked |
|
6789 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) |
|
6790 SCTP_UNUSED |
|
6791 #endif |
|
6792 ); |
|
6793 |
|
6794 static void |
|
6795 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr, |
|
6796 uint32_t val SCTP_UNUSED) |
|
6797 { |
|
6798 struct sctp_copy_all *ca; |
|
6799 struct mbuf *m; |
|
6800 int ret = 0; |
|
6801 int added_control = 0; |
|
6802 int un_sent, do_chunk_output = 1; |
|
6803 struct sctp_association *asoc; |
|
6804 struct sctp_nets *net; |
|
6805 |
|
6806 ca = (struct sctp_copy_all *)ptr; |
|
6807 if (ca->m == NULL) { |
|
6808 return; |
|
6809 } |
|
6810 if (ca->inp != inp) { |
|
6811 /* TSNH */ |
|
6812 return; |
|
6813 } |
|
6814 if (ca->sndlen > 0) { |
|
6815 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT); |
|
6816 if (m == NULL) { |
|
6817 /* can't copy so we are done */ |
|
6818 ca->cnt_failed++; |
|
6819 return; |
|
6820 } |
|
6821 #ifdef SCTP_MBUF_LOGGING |
|
6822 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { |
|
6823 struct mbuf *mat; |
|
6824 |
|
6825 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) { |
|
6826 if (SCTP_BUF_IS_EXTENDED(mat)) { |
|
6827 sctp_log_mb(mat, SCTP_MBUF_ICOPY); |
|
6828 } |
|
6829 } |
|
6830 } |
|
6831 #endif |
|
6832 } else { |
|
6833 m = NULL; |
|
6834 } |
|
6835 SCTP_TCB_LOCK_ASSERT(stcb); |
|
6836 if (stcb->asoc.alternate) { |
|
6837 net = stcb->asoc.alternate; |
|
6838 } else { |
|
6839 net = stcb->asoc.primary_destination; |
|
6840 } |
|
6841 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) { |
|
6842 /* Abort this assoc with m as the user defined reason */ |
|
6843 if (m != NULL) { |
|
6844 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT); |
|
6845 } else { |
|
6846 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), |
|
6847 0, M_NOWAIT, 1, MT_DATA); |
|
6848 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr); |
|
6849 } |
|
6850 if (m != NULL) { |
|
6851 struct sctp_paramhdr *ph; |
|
6852 |
|
6853 ph = mtod(m, struct sctp_paramhdr *); |
|
6854 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); |
|
6855 ph->param_length = htons(sizeof(struct sctp_paramhdr) + ca->sndlen); |
|
6856 } |
|
6857 /* We add one here to keep the assoc from |
|
6858 * dis-appearing on us. |
|
6859 */ |
|
6860 atomic_add_int(&stcb->asoc.refcnt, 1); |
|
6861 sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED); |
|
6862 /* sctp_abort_an_association calls sctp_free_asoc() |
|
6863 * free association will NOT free it since we |
|
6864 * incremented the refcnt .. we do this to prevent |
|
6865 * it being freed and things getting tricky since |
|
6866 * we could end up (from free_asoc) calling inpcb_free |
|
6867 * which would get a recursive lock call to the |
|
6868 * iterator lock.. But as a consequence of that the |
|
6869 * stcb will return to us un-locked.. since free_asoc |
|
6870 * returns with either no TCB or the TCB unlocked, we |
|
6871 * must relock.. to unlock in the iterator timer :-0 |
|
6872 */ |
|
6873 SCTP_TCB_LOCK(stcb); |
|
6874 atomic_add_int(&stcb->asoc.refcnt, -1); |
|
6875 goto no_chunk_output; |
|
6876 } else { |
|
6877 if (m) { |
|
6878 ret = sctp_msg_append(stcb, net, m, |
|
6879 &ca->sndrcv, 1); |
|
6880 } |
|
6881 asoc = &stcb->asoc; |
|
6882 if (ca->sndrcv.sinfo_flags & SCTP_EOF) { |
|
6883 /* shutdown this assoc */ |
|
6884 int cnt; |
|
6885 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED); |
|
6886 |
|
6887 if (TAILQ_EMPTY(&asoc->send_queue) && |
|
6888 TAILQ_EMPTY(&asoc->sent_queue) && |
|
6889 (cnt == 0)) { |
|
6890 if (asoc->locked_on_sending) { |
|
6891 goto abort_anyway; |
|
6892 } |
|
6893 /* there is nothing queued to send, so I'm done... */ |
|
6894 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && |
|
6895 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && |
|
6896 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { |
|
6897 /* only send SHUTDOWN the first time through */ |
|
6898 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { |
|
6899 SCTP_STAT_DECR_GAUGE32(sctps_currestab); |
|
6900 } |
|
6901 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); |
|
6902 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); |
|
6903 sctp_stop_timers_for_shutdown(stcb); |
|
6904 sctp_send_shutdown(stcb, net); |
|
6905 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, |
|
6906 net); |
|
6907 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, |
|
6908 asoc->primary_destination); |
|
6909 added_control = 1; |
|
6910 do_chunk_output = 0; |
|
6911 } |
|
6912 } else { |
|
6913 /* |
|
6914 * we still got (or just got) data to send, so set |
|
6915 * SHUTDOWN_PENDING |
|
6916 */ |
|
6917 /* |
|
6918 * XXX sockets draft says that SCTP_EOF should be |
|
6919 * sent with no data. currently, we will allow user |
|
6920 * data to be sent first and move to |
|
6921 * SHUTDOWN-PENDING |
|
6922 */ |
|
6923 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && |
|
6924 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && |
|
6925 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { |
|
6926 if (asoc->locked_on_sending) { |
|
6927 /* Locked to send out the data */ |
|
6928 struct sctp_stream_queue_pending *sp; |
|
6929 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); |
|
6930 if (sp) { |
|
6931 if ((sp->length == 0) && (sp->msg_is_complete == 0)) |
|
6932 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; |
|
6933 } |
|
6934 } |
|
6935 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; |
|
6936 if (TAILQ_EMPTY(&asoc->send_queue) && |
|
6937 TAILQ_EMPTY(&asoc->sent_queue) && |
|
6938 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { |
|
6939 abort_anyway: |
|
6940 atomic_add_int(&stcb->asoc.refcnt, 1); |
|
6941 sctp_abort_an_association(stcb->sctp_ep, stcb, |
|
6942 NULL, SCTP_SO_NOT_LOCKED); |
|
6943 atomic_add_int(&stcb->asoc.refcnt, -1); |
|
6944 goto no_chunk_output; |
|
6945 } |
|
6946 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, |
|
6947 asoc->primary_destination); |
|
6948 } |
|
6949 } |
|
6950 |
|
6951 } |
|
6952 } |
|
6953 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + |
|
6954 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk))); |
|
6955 |
|
6956 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && |
|
6957 (stcb->asoc.total_flight > 0) && |
|
6958 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) { |
|
6959 do_chunk_output = 0; |
|
6960 } |
|
6961 if (do_chunk_output) |
|
6962 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED); |
|
6963 else if (added_control) { |
|
6964 int num_out = 0, reason = 0, now_filled = 0; |
|
6965 struct timeval now; |
|
6966 int frag_point; |
|
6967 frag_point = sctp_get_frag_point(stcb, &stcb->asoc); |
|
6968 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out, |
|
6969 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED); |
|
6970 } |
|
6971 no_chunk_output: |
|
6972 if (ret) { |
|
6973 ca->cnt_failed++; |
|
6974 } else { |
|
6975 ca->cnt_sent++; |
|
6976 } |
|
6977 } |
|
6978 |
|
6979 static void |
|
6980 sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED) |
|
6981 { |
|
6982 struct sctp_copy_all *ca; |
|
6983 |
|
6984 ca = (struct sctp_copy_all *)ptr; |
|
6985 /* |
|
6986 * Do a notify here? Kacheong suggests that the notify be done at |
|
6987 * the send time.. so you would push up a notification if any send |
|
6988 * failed. Don't know if this is feasable since the only failures we |
|
6989 * have is "memory" related and if you cannot get an mbuf to send |
|
6990 * the data you surely can't get an mbuf to send up to notify the |
|
6991 * user you can't send the data :-> |
|
6992 */ |
|
6993 |
|
6994 /* now free everything */ |
|
6995 sctp_m_freem(ca->m); |
|
6996 SCTP_FREE(ca, SCTP_M_COPYAL); |
|
6997 } |
|
6998 |
|
6999 |
|
/*
 * Reserve leading space in mbuf 'm' so that 'len' bytes of payload end
 * up long-aligned at the tail of the cluster.
 * FIX: the expansion was missing a closing parenthesis for the
 * SCTP_BUF_RESV_UF() invocation, so the macro could never compile if
 * used (it is currently unreferenced, which is why this was latent).
 */
#define MC_ALIGN(m, len) do {						\
	SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1))); \
} while (0)
|
7003 |
|
7004 |
|
7005 |
|
7006 static struct mbuf * |
|
7007 sctp_copy_out_all(struct uio *uio, int len) |
|
7008 { |
|
7009 struct mbuf *ret, *at; |
|
7010 int left, willcpy, cancpy, error; |
|
7011 |
|
7012 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA); |
|
7013 if (ret == NULL) { |
|
7014 /* TSNH */ |
|
7015 return (NULL); |
|
7016 } |
|
7017 left = len; |
|
7018 SCTP_BUF_LEN(ret) = 0; |
|
7019 /* save space for the data chunk header */ |
|
7020 cancpy = M_TRAILINGSPACE(ret); |
|
7021 willcpy = min(cancpy, left); |
|
7022 at = ret; |
|
7023 while (left > 0) { |
|
7024 /* Align data to the end */ |
|
7025 error = uiomove(mtod(at, caddr_t), willcpy, uio); |
|
7026 if (error) { |
|
7027 err_out_now: |
|
7028 sctp_m_freem(at); |
|
7029 return (NULL); |
|
7030 } |
|
7031 SCTP_BUF_LEN(at) = willcpy; |
|
7032 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0; |
|
7033 left -= willcpy; |
|
7034 if (left > 0) { |
|
7035 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 1, MT_DATA); |
|
7036 if (SCTP_BUF_NEXT(at) == NULL) { |
|
7037 goto err_out_now; |
|
7038 } |
|
7039 at = SCTP_BUF_NEXT(at); |
|
7040 SCTP_BUF_LEN(at) = 0; |
|
7041 cancpy = M_TRAILINGSPACE(at); |
|
7042 willcpy = min(cancpy, left); |
|
7043 } |
|
7044 } |
|
7045 return (ret); |
|
7046 } |
|
7047 |
|
/*
 * Implement SCTP_SENDALL: stage one copy of the user message (from 'uio'
 * or an already-built chain 'm') in a sctp_copy_all structure and kick
 * off a PCB iterator that replays it to every association on 'inp'.
 * Returns 0 on success or an errno; on failure everything staged here is
 * freed.  On success the iterator owns 'ca' (freed in
 * sctp_sendall_completes).
 */
static int
sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
    struct sctp_sndrcvinfo *srcv)
{
	int ret;
	struct sctp_copy_all *ca;

	SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
	    SCTP_M_COPYAL);
	if (ca == NULL) {
		sctp_m_freem(m);
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
		return (ENOMEM);
	}
	memset(ca, 0, sizeof(struct sctp_copy_all));

	ca->inp = inp;
	if (srcv) {
		/*
		 * NOTE(review): copies sizeof(struct sctp_nonpad_sndrcvinfo)
		 * bytes out of a struct sctp_sndrcvinfo * — assumes the
		 * nonpad layout is a prefix of (or identical to) the source
		 * struct; confirm against the struct definitions.
		 */
		memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
	}
	/*
	 * take off the sendall flag, it would be bad if we failed to do
	 * this :-0
	 */
	ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
	/* get length and mbuf chain */
	if (uio) {
#if defined(__APPLE__)
#if defined(APPLE_LEOPARD)
		ca->sndlen = uio->uio_resid;
#else
		ca->sndlen = uio_resid(uio);
#endif
#else
		ca->sndlen = uio->uio_resid;
#endif
#if defined(__APPLE__)
		/* drop the socket lock across the (possibly sleeping) copy */
		SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 0);
#endif
		ca->m = sctp_copy_out_all(uio, ca->sndlen);
#if defined(__APPLE__)
		SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 0);
#endif
		if (ca->m == NULL) {
			SCTP_FREE(ca, SCTP_M_COPYAL);
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
			return (ENOMEM);
		}
	} else {
		/* Gather the length of the send */
		struct mbuf *mat;

		ca->sndlen = 0;
		for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
			ca->sndlen += SCTP_BUF_LEN(mat);
		}
	}
	/* replay the staged message to every association on this endpoint */
	ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
	    SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
	    SCTP_ASOC_ANY_STATE,
	    (void *)ca, 0,
	    sctp_sendall_completes, inp, 1);
	if (ret) {
		SCTP_PRINTF("Failed to initiate iterator for sendall\n");
		SCTP_FREE(ca, SCTP_M_COPYAL);
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
		return (EFAULT);
	}
	return (0);
}
|
7118 |
|
7119 |
|
7120 void |
|
7121 sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc) |
|
7122 { |
|
7123 struct sctp_tmit_chunk *chk, *nchk; |
|
7124 |
|
7125 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) { |
|
7126 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { |
|
7127 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); |
|
7128 if (chk->data) { |
|
7129 sctp_m_freem(chk->data); |
|
7130 chk->data = NULL; |
|
7131 } |
|
7132 asoc->ctrl_queue_cnt--; |
|
7133 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
|
7134 } |
|
7135 } |
|
7136 } |
|
7137 |
|
7138 void |
|
7139 sctp_toss_old_asconf(struct sctp_tcb *stcb) |
|
7140 { |
|
7141 struct sctp_association *asoc; |
|
7142 struct sctp_tmit_chunk *chk, *nchk; |
|
7143 struct sctp_asconf_chunk *acp; |
|
7144 |
|
7145 asoc = &stcb->asoc; |
|
7146 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) { |
|
7147 /* find SCTP_ASCONF chunk in queue */ |
|
7148 if (chk->rec.chunk_id.id == SCTP_ASCONF) { |
|
7149 if (chk->data) { |
|
7150 acp = mtod(chk->data, struct sctp_asconf_chunk *); |
|
7151 if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) { |
|
7152 /* Not Acked yet */ |
|
7153 break; |
|
7154 } |
|
7155 } |
|
7156 TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next); |
|
7157 if (chk->data) { |
|
7158 sctp_m_freem(chk->data); |
|
7159 chk->data = NULL; |
|
7160 } |
|
7161 asoc->ctrl_queue_cnt--; |
|
7162 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
|
7163 } |
|
7164 } |
|
7165 } |
|
7166 |
|
7167 |
|
/*
 * After a transmit, move the 'bundle_at' chunks in data_list[] from the
 * send queue to the sent queue (kept in TSN order), stamp send-time
 * bookkeeping, update flight size and the peer's receive window, and
 * give the congestion controller its packet-transmitted callback.
 */
static void
sctp_clean_up_datalist(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk **data_list,
    int bundle_at,
    struct sctp_nets *net)
{
	int i;
	struct sctp_tmit_chunk *tp1;

	for (i = 0; i < bundle_at; i++) {
		/* off of the send queue */
		TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
		asoc->send_queue_cnt--;
		if (i > 0) {
			/*
			 * Any chunk NOT 0 you zap the time chunk 0 gets
			 * zapped or set based on if a RTO measurment is
			 * needed.
			 */
			data_list[i]->do_rtt = 0;
		}
		/* record time */
		data_list[i]->sent_rcv_time = net->last_sent_time;
		data_list[i]->rec.data.cwnd_at_send = net->cwnd;
		data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
		if (data_list[i]->whoTo == NULL) {
			/* chunk had no pinned destination; bind it to 'net' */
			data_list[i]->whoTo = net;
			atomic_add_int(&net->ref_count, 1);
		}
		/* on to the sent queue */
		tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
		if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
			struct sctp_tmit_chunk *tpp;

			/* need to move back */
			/* walk backwards until a TSN <= ours is found */
		back_up_more:
			tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
			if (tpp == NULL) {
				/* smaller than everything queued: goes first */
				TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
				goto all_done;
			}
			tp1 = tpp;
			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) {
				goto back_up_more;
			}
			TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
		} else {
			/* common case: append in TSN order at the tail */
			TAILQ_INSERT_TAIL(&asoc->sent_queue,
			    data_list[i],
			    sctp_next);
		}
	all_done:
		/* This does not lower until the cum-ack passes it */
		asoc->sent_queue_cnt++;
		if ((asoc->peers_rwnd <= 0) &&
		    (asoc->total_flight == 0) &&
		    (bundle_at == 1)) {
			/* Mark the chunk as being a window probe */
			SCTP_STAT_INCR(sctps_windowprobed);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xC2, 3);
#endif
		data_list[i]->sent = SCTP_DATAGRAM_SENT;
		data_list[i]->snd_count = 1;
		data_list[i]->rec.data.chunk_was_revoked = 0;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
			sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
			    data_list[i]->whoTo->flight_size,
			    data_list[i]->book_size,
			    (uintptr_t)data_list[i]->whoTo,
			    data_list[i]->rec.data.TSN_seq);
		}
		sctp_flight_size_increase(data_list[i]);
		sctp_total_flight_increase(stcb, data_list[i]);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
			sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
			    asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
		}
		/* charge the chunk (plus per-chunk overhead) to peer rwnd */
		asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
		    (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
	}
	if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
		(*asoc->cc_functions.sctp_cwnd_update_packet_transmitted)(stcb, net);
	}
}
|
7259 |
|
7260 static void |
|
7261 sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked |
|
7262 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) |
|
7263 SCTP_UNUSED |
|
7264 #endif |
|
7265 ) |
|
7266 { |
|
7267 struct sctp_tmit_chunk *chk, *nchk; |
|
7268 |
|
7269 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) { |
|
7270 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || |
|
7271 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */ |
|
7272 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) || |
|
7273 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) || |
|
7274 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) || |
|
7275 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) || |
|
7276 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) || |
|
7277 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) || |
|
7278 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) || |
|
7279 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) || |
|
7280 (chk->rec.chunk_id.id == SCTP_ECN_CWR) || |
|
7281 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) { |
|
7282 /* Stray chunks must be cleaned up */ |
|
7283 clean_up_anyway: |
|
7284 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); |
|
7285 if (chk->data) { |
|
7286 sctp_m_freem(chk->data); |
|
7287 chk->data = NULL; |
|
7288 } |
|
7289 asoc->ctrl_queue_cnt--; |
|
7290 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) |
|
7291 asoc->fwd_tsn_cnt--; |
|
7292 sctp_free_a_chunk(stcb, chk, so_locked); |
|
7293 } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) { |
|
7294 /* special handling, we must look into the param */ |
|
7295 if (chk != asoc->str_reset) { |
|
7296 goto clean_up_anyway; |
|
7297 } |
|
7298 } |
|
7299 } |
|
7300 } |
|
7301 |
|
7302 |
|
7303 static int |
|
7304 sctp_can_we_split_this(struct sctp_tcb *stcb, |
|
7305 uint32_t length, |
|
7306 uint32_t goal_mtu, uint32_t frag_point, int eeor_on) |
|
7307 { |
|
7308 /* Make a decision on if I should split a |
|
7309 * msg into multiple parts. This is only asked of |
|
7310 * incomplete messages. |
|
7311 */ |
|
7312 if (eeor_on) { |
|
7313 /* If we are doing EEOR we need to always send |
|
7314 * it if its the entire thing, since it might |
|
7315 * be all the guy is putting in the hopper. |
|
7316 */ |
|
7317 if (goal_mtu >= length) { |
|
7318 /*- |
|
7319 * If we have data outstanding, |
|
7320 * we get another chance when the sack |
|
7321 * arrives to transmit - wait for more data |
|
7322 */ |
|
7323 if (stcb->asoc.total_flight == 0) { |
|
7324 /* If nothing is in flight, we zero |
|
7325 * the packet counter. |
|
7326 */ |
|
7327 return (length); |
|
7328 } |
|
7329 return (0); |
|
7330 |
|
7331 } else { |
|
7332 /* You can fill the rest */ |
|
7333 return (goal_mtu); |
|
7334 } |
|
7335 } |
|
7336 /*- |
|
7337 * For those strange folk that make the send buffer |
|
7338 * smaller than our fragmentation point, we can't |
|
7339 * get a full msg in so we have to allow splitting. |
|
7340 */ |
|
7341 if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) { |
|
7342 return (length); |
|
7343 } |
|
7344 |
|
7345 if ((length <= goal_mtu) || |
|
7346 ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) { |
|
7347 /* Sub-optimial residual don't split in non-eeor mode. */ |
|
7348 return (0); |
|
7349 } |
|
7350 /* If we reach here length is larger |
|
7351 * than the goal_mtu. Do we wish to split |
|
7352 * it for the sake of packet putting together? |
|
7353 */ |
|
7354 if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) { |
|
7355 /* Its ok to split it */ |
|
7356 return (min(goal_mtu, frag_point)); |
|
7357 } |
|
7358 /* Nope, can't split */ |
|
7359 return (0); |
|
7360 |
|
7361 } |
|
7362 |
|
/*
 * Move data for one stream from its stream outqueue onto the association's
 * send_queue, building at most one DATA chunk of up to goal_mtu bytes
 * (further capped by frag_point).
 *
 * Out-parameters (NOTE(review): semantics inferred from this function and
 * its visible caller sctp_fill_outqueue — confirm against other callers):
 *  - *locked: set to 1 when the caller must stay on this stream because an
 *             incomplete message remains at its head.
 *  - *giveup: set to 1 when nothing (more) can be moved from this stream.
 *  - *bail:   set to 1 on a resource failure (mbuf/chunk allocation).
 * Returns the number of payload bytes moved (0 if none).
 *
 * Locking: caller holds the TCB lock.  The TCB send lock is acquired on
 * demand (tracked in send_lock_up) whenever the stream queue itself is
 * modified, and is always released at the "out_of" exit label.
 */
static uint32_t
sctp_move_to_outqueue(struct sctp_tcb *stcb,
                      struct sctp_stream_out *strq,
                      uint32_t goal_mtu,
                      uint32_t frag_point,
                      int *locked,
                      int *giveup,
                      int eeor_mode,
                      int *bail,
                      int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
                      SCTP_UNUSED
#endif
	)
{
	/* Move from the stream to the send_queue keeping track of the total */
	struct sctp_association *asoc;
	struct sctp_stream_queue_pending *sp;
	struct sctp_tmit_chunk *chk;
	struct sctp_data_chunk *dchkh;
	uint32_t to_move, length;
	uint8_t rcv_flags = 0;
	uint8_t some_taken;
	uint8_t send_lock_up = 0;	/* 1 while we hold the TCB send lock */

	SCTP_TCB_LOCK_ASSERT(stcb);
	asoc = &stcb->asoc;
one_more_time:
	/*sa_ignore FREED_MEMORY*/
	sp = TAILQ_FIRST(&strq->outqueue);
	if (sp == NULL) {
		/* Empty queue: re-check under the send lock, a sender may
		 * be racing us appending a new message.
		 */
		*locked = 0;
		if (send_lock_up == 0) {
			SCTP_TCB_SEND_LOCK(stcb);
			send_lock_up = 1;
		}
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp) {
			goto one_more_time;
		}
		if (strq->last_msg_incomplete) {
			SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
				    strq->stream_no,
				    strq->last_msg_incomplete);
			strq->last_msg_incomplete = 0;
		}
		to_move = 0;
		if (send_lock_up) {
			SCTP_TCB_SEND_UNLOCK(stcb);
			send_lock_up = 0;
		}
		goto out_of;
	}
	if ((sp->msg_is_complete) && (sp->length == 0)) {
		if (sp->sender_all_done) {
			/* We are doing differed cleanup. Last
			 * time through when we took all the data
			 * the sender_all_done was not set.
			 */
			if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
				SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
				SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
					    sp->sender_all_done,
					    sp->length,
					    sp->msg_is_complete,
					    sp->put_last_out,
					    send_lock_up);
			}
			/* Removing the queue's last entry requires the send lock. */
			if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) {
				SCTP_TCB_SEND_LOCK(stcb);
				send_lock_up = 1;
			}
			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
			TAILQ_REMOVE(&strq->outqueue, sp, next);
			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			if (sp->data) {
				sctp_m_freem(sp->data);
				sp->data = NULL;
			}
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* we can't be locked to it */
			*locked = 0;
			stcb->asoc.locked_on_sending = NULL;
			if (send_lock_up) {
				SCTP_TCB_SEND_UNLOCK(stcb);
				send_lock_up = 0;
			}
			/* back to get the next msg */
			goto one_more_time;
		} else {
			/* sender just finished this but
			 * still holds a reference
			 */
			*locked = 1;
			*giveup = 1;
			to_move = 0;
			goto out_of;
		}
	} else {
		/* is there some to get */
		if (sp->length == 0) {
			/* no */
			*locked = 1;
			*giveup = 1;
			to_move = 0;
			goto out_of;
		} else if (sp->discard_rest) {
			/* The remainder of this message was aborted; drop it. */
			if (send_lock_up == 0) {
				SCTP_TCB_SEND_LOCK(stcb);
				send_lock_up = 1;
			}
			/* Whack down the size */
			atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
			if ((stcb->sctp_socket != NULL) &&
			    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
				atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
			}
			if (sp->data) {
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
			}
			sp->length = 0;
			sp->some_taken = 1;
			*locked = 1;
			*giveup = 1;
			to_move = 0;
			goto out_of;
		}
	}
	/* Snapshot so we can restore it if we must undo the move below. */
	some_taken = sp->some_taken;
	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
		sp->msg_is_complete = 1;
	}
re_look:
	length = sp->length;
	if (sp->msg_is_complete) {
		/* The message is complete */
		to_move = min(length, frag_point);
		if (to_move == length) {
			/* All of it fits in the MTU */
			if (sp->some_taken) {
				rcv_flags |= SCTP_DATA_LAST_FRAG;
				sp->put_last_out = 1;
			} else {
				rcv_flags |= SCTP_DATA_NOT_FRAG;
				sp->put_last_out = 1;
			}
		} else {
			/* Not all of it fits, we fragment */
			if (sp->some_taken == 0) {
				rcv_flags |= SCTP_DATA_FIRST_FRAG;
			}
			sp->some_taken = 1;
		}
	} else {
		to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
		if (to_move) {
			/*-
			 * We use a snapshot of length in case it
			 * is expanding during the compare.
			 */
			uint32_t llen;

			llen = length;
			if (to_move >= llen) {
				to_move = llen;
				if (send_lock_up == 0) {
					/*-
					 * We are taking all of an incomplete msg
					 * thus we need a send lock.
					 */
					SCTP_TCB_SEND_LOCK(stcb);
					send_lock_up = 1;
					if (sp->msg_is_complete) {
						/* the sender finished the msg */
						goto re_look;
					}
				}
			}
			if (sp->some_taken == 0) {
				rcv_flags |= SCTP_DATA_FIRST_FRAG;
				sp->some_taken = 1;
			}
		} else {
			/* Nothing to take. */
			if (sp->some_taken) {
				*locked = 1;
			}
			*giveup = 1;
			to_move = 0;
			goto out_of;
		}
	}

	/* If we reach here, we can copy out a chunk */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* No chunk memory */
		*giveup = 1;
		to_move = 0;
		goto out_of;
	}
	/* Setup for unordered if needed by looking
	 * at the user sent info flags.
	 */
	if (sp->sinfo_flags & SCTP_UNORDERED) {
		rcv_flags |= SCTP_DATA_UNORDERED;
	}
	if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) ||
	    ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) {
		rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
	}
	/* clear out the chunk before setting up */
	memset(chk, 0, sizeof(*chk));
	chk->rec.data.rcv_flags = rcv_flags;

	if (to_move >= length) {
		/* we think we can steal the whole thing */
		if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
			SCTP_TCB_SEND_LOCK(stcb);
			send_lock_up = 1;
		}
		if (to_move < sp->length) {
			/* bail, it changed */
			goto dont_do_it;
		}
		/* Steal the mbuf chain by reference instead of copying. */
		chk->data = sp->data;
		chk->last_mbuf = sp->tail_mbuf;
		/* register the stealing */
		sp->data = sp->tail_mbuf = NULL;
	} else {
		struct mbuf *m;
	dont_do_it:
		chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
		chk->last_mbuf = NULL;
		if (chk->data == NULL) {
			/* Copy failed: undo the flag change and report bail. */
			sp->some_taken = some_taken;
			sctp_free_a_chunk(stcb, chk, so_locked);
			*bail = 1;
			to_move = 0;
			goto out_of;
		}
#ifdef SCTP_MBUF_LOGGING
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
			struct mbuf *mat;

			for (mat = chk->data; mat; mat = SCTP_BUF_NEXT(mat)) {
				if (SCTP_BUF_IS_EXTENDED(mat)) {
					sctp_log_mb(mat, SCTP_MBUF_ICOPY);
				}
			}
		}
#endif
		/* Pull off the data */
		m_adj(sp->data, to_move);
		/* Now lets work our way down and compact it */
		m = sp->data;
		while (m && (SCTP_BUF_LEN(m) == 0)) {
			sp->data = SCTP_BUF_NEXT(m);
			SCTP_BUF_NEXT(m) = NULL;
			if (sp->tail_mbuf == m) {
				/*-
				 * Freeing tail? TSNH since
				 * we supposedly were taking less
				 * than the sp->length.
				 */
#ifdef INVARIANTS
				panic("Huh, freing tail? - TSNH");
#else
				SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
				sp->tail_mbuf = sp->data = NULL;
				sp->length = 0;
#endif

			}
			sctp_m_free(m);
			m = sp->data;
		}
	}
	if (SCTP_BUF_IS_EXTENDED(chk->data)) {
		chk->copy_by_ref = 1;
	} else {
		chk->copy_by_ref = 0;
	}
	/* get last_mbuf and counts of mb useage
	 * This is ugly but hopefully its only one mbuf.
	 */
	if (chk->last_mbuf == NULL) {
		chk->last_mbuf = chk->data;
		while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
			chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
		}
	}

	if (to_move > length) {
		/*- This should not happen either
		 * since we always lower to_move to the size
		 * of sp->length if its larger.
		 */
#ifdef INVARIANTS
		panic("Huh, how can to_move be larger?");
#else
		SCTP_PRINTF("Huh, how can to_move be larger?\n");
		sp->length = 0;
#endif
	} else {
		atomic_subtract_int(&sp->length, to_move);
	}
	if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) {
		/* Not enough room for a chunk header, get some */
		struct mbuf *m;
		m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 0, MT_DATA);
		if (m == NULL) {
			/*
			 * we're in trouble here. _PREPEND below will free
			 * all the data if there is no leading space, so we
			 * must put the data back and restore.
			 */
			if (send_lock_up == 0) {
				SCTP_TCB_SEND_LOCK(stcb);
				send_lock_up = 1;
			}
			if (chk->data == NULL) {
				/* unsteal the data */
				sp->data = chk->data;
				sp->tail_mbuf = chk->last_mbuf;
			} else {
				struct mbuf *m_tmp;
				/* reassemble the data */
				m_tmp = sp->data;
				sp->data = chk->data;
				SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
			}
			sp->some_taken = some_taken;
			atomic_add_int(&sp->length, to_move);
			chk->data = NULL;
			*bail = 1;
			sctp_free_a_chunk(stcb, chk, so_locked);
			to_move = 0;
			goto out_of;
		} else {
			SCTP_BUF_LEN(m) = 0;
			SCTP_BUF_NEXT(m) = chk->data;
			chk->data = m;
			M_ALIGN(chk->data, 4);
		}
	}
	SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_NOWAIT);
	if (chk->data == NULL) {
		/* HELP, TSNH since we assured it would not above? */
#ifdef INVARIANTS
		panic("prepend failes HELP?");
#else
		SCTP_PRINTF("prepend fails HELP?\n");
		sctp_free_a_chunk(stcb, chk, so_locked);
#endif
		*bail = 1;
		to_move = 0;
		goto out_of;
	}
	sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
	chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk));
	chk->book_size_scale = 0;
	chk->sent = SCTP_DATAGRAM_UNSENT;

	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->pad_inplace = 0;
	chk->no_fr_allowed = 0;
	chk->rec.data.stream_seq = strq->next_sequence_send;
	/* The stream sequence number only advances on the last fragment. */
	if (rcv_flags & SCTP_DATA_LAST_FRAG) {
		strq->next_sequence_send++;
	}
	chk->rec.data.stream_number = sp->stream;
	chk->rec.data.payloadtype = sp->ppid;
	chk->rec.data.context = sp->context;
	chk->rec.data.doing_fast_retransmit = 0;

	chk->rec.data.timetodrop = sp->ts;
	chk->flags = sp->act_flags;

	if (sp->net) {
		chk->whoTo = sp->net;
		atomic_add_int(&chk->whoTo->ref_count, 1);
	} else
		chk->whoTo = NULL;

	if (sp->holds_key_ref) {
		chk->auth_keyid = sp->auth_keyid;
		sctp_auth_key_acquire(stcb, chk->auth_keyid);
		chk->holds_key_ref = 1;
	}

	/* Assign the TSN (atomically on platforms that need it). */
#if defined(__FreeBSD__) || defined(__Panda__)
	chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
#else
	chk->rec.data.TSN_seq = asoc->sending_seq++;
#endif
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
		sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
			       (uintptr_t)stcb, sp->length,
			       (uint32_t)((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
			       chk->rec.data.TSN_seq);
	}
	dchkh = mtod(chk->data, struct sctp_data_chunk *);
	/*
	 * Put the rest of the things in place now. Size was done
	 * earlier in previous loop prior to padding.
	 */

#ifdef SCTP_ASOCLOG_OF_TSNS
	SCTP_TCB_LOCK_ASSERT(stcb);
	if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
		asoc->tsn_out_at = 0;
		asoc->tsn_out_wrapped = 1;
	}
	asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
	asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
	asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
	asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
	asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
	asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
	asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
	asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
	asoc->tsn_out_at++;
#endif

	/* Fill in the on-wire DATA chunk header. */
	dchkh->ch.chunk_type = SCTP_DATA;
	dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
	dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
	dchkh->dp.stream_id = htons(strq->stream_no);
	dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
	dchkh->dp.protocol_id = chk->rec.data.payloadtype;
	dchkh->ch.chunk_length = htons(chk->send_size);
	/* Now advance the chk->send_size by the actual pad needed. */
	if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
		/* need a pad */
		struct mbuf *lm;
		int pads;

		pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
		if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
			chk->pad_inplace = 1;
		}
		if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
			/* pad added an mbuf */
			chk->last_mbuf = lm;
		}
		chk->send_size += pads;
	}
	if (PR_SCTP_ENABLED(chk->flags)) {
		asoc->pr_sctp_cnt++;
	}
	if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
		/* All done pull and kill the message */
		atomic_subtract_int(&asoc->stream_queue_cnt, 1);
		if (sp->put_last_out == 0) {
			SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
			SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
				    sp->sender_all_done,
				    sp->length,
				    sp->msg_is_complete,
				    sp->put_last_out,
				    send_lock_up);
		}
		if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
			SCTP_TCB_SEND_LOCK(stcb);
			send_lock_up = 1;
		}
		TAILQ_REMOVE(&strq->outqueue, sp, next);
		stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
		if (sp->net) {
			sctp_free_remote_addr(sp->net);
			sp->net = NULL;
		}
		if (sp->data) {
			sctp_m_freem(sp->data);
			sp->data = NULL;
		}
		sctp_free_a_strmoq(stcb, sp, so_locked);

		/* we can't be locked to it */
		*locked = 0;
		stcb->asoc.locked_on_sending = NULL;
	} else {
		/* more to go, we are locked */
		*locked = 1;
	}
	asoc->chunks_on_out_queue++;
	strq->chunks_on_queues++;
	TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
	asoc->send_queue_cnt++;
out_of:
	if (send_lock_up) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	return (to_move);
}
|
7867 |
|
7868 |
|
7869 static void |
|
7870 sctp_fill_outqueue(struct sctp_tcb *stcb, |
|
7871 struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked |
|
7872 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) |
|
7873 SCTP_UNUSED |
|
7874 #endif |
|
7875 ) |
|
7876 { |
|
7877 struct sctp_association *asoc; |
|
7878 struct sctp_stream_out *strq; |
|
7879 int goal_mtu, moved_how_much, total_moved = 0, bail = 0; |
|
7880 int locked, giveup; |
|
7881 |
|
7882 SCTP_TCB_LOCK_ASSERT(stcb); |
|
7883 asoc = &stcb->asoc; |
|
7884 switch (net->ro._l_addr.sa.sa_family) { |
|
7885 #ifdef INET |
|
7886 case AF_INET: |
|
7887 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; |
|
7888 break; |
|
7889 #endif |
|
7890 #ifdef INET6 |
|
7891 case AF_INET6: |
|
7892 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD; |
|
7893 break; |
|
7894 #endif |
|
7895 #if defined(__Userspace__) |
|
7896 case AF_CONN: |
|
7897 goal_mtu = net->mtu - sizeof(struct sctphdr); |
|
7898 break; |
|
7899 #endif |
|
7900 default: |
|
7901 /* TSNH */ |
|
7902 goal_mtu = net->mtu; |
|
7903 break; |
|
7904 } |
|
7905 /* Need an allowance for the data chunk header too */ |
|
7906 goal_mtu -= sizeof(struct sctp_data_chunk); |
|
7907 |
|
7908 /* must make even word boundary */ |
|
7909 goal_mtu &= 0xfffffffc; |
|
7910 if (asoc->locked_on_sending) { |
|
7911 /* We are stuck on one stream until the message completes. */ |
|
7912 strq = asoc->locked_on_sending; |
|
7913 locked = 1; |
|
7914 } else { |
|
7915 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc); |
|
7916 locked = 0; |
|
7917 } |
|
7918 while ((goal_mtu > 0) && strq) { |
|
7919 giveup = 0; |
|
7920 bail = 0; |
|
7921 moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked, |
|
7922 &giveup, eeor_mode, &bail, so_locked); |
|
7923 if (moved_how_much) |
|
7924 stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much); |
|
7925 |
|
7926 if (locked) { |
|
7927 asoc->locked_on_sending = strq; |
|
7928 if ((moved_how_much == 0) || (giveup) || bail) |
|
7929 /* no more to move for now */ |
|
7930 break; |
|
7931 } else { |
|
7932 asoc->locked_on_sending = NULL; |
|
7933 if ((giveup) || bail) { |
|
7934 break; |
|
7935 } |
|
7936 strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc); |
|
7937 if (strq == NULL) { |
|
7938 break; |
|
7939 } |
|
7940 } |
|
7941 total_moved += moved_how_much; |
|
7942 goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk)); |
|
7943 goal_mtu &= 0xfffffffc; |
|
7944 } |
|
7945 if (bail) |
|
7946 *quit_now = 1; |
|
7947 |
|
7948 stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc); |
|
7949 |
|
7950 if (total_moved == 0) { |
|
7951 if ((stcb->asoc.sctp_cmt_on_off == 0) && |
|
7952 (net == stcb->asoc.primary_destination)) { |
|
7953 /* ran dry for primary network net */ |
|
7954 SCTP_STAT_INCR(sctps_primary_randry); |
|
7955 } else if (stcb->asoc.sctp_cmt_on_off > 0) { |
|
7956 /* ran dry with CMT on */ |
|
7957 SCTP_STAT_INCR(sctps_cmt_randry); |
|
7958 } |
|
7959 } |
|
7960 } |
|
7961 |
|
7962 void |
|
7963 sctp_fix_ecn_echo(struct sctp_association *asoc) |
|
7964 { |
|
7965 struct sctp_tmit_chunk *chk; |
|
7966 |
|
7967 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { |
|
7968 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) { |
|
7969 chk->sent = SCTP_DATAGRAM_UNSENT; |
|
7970 } |
|
7971 } |
|
7972 } |
|
7973 |
|
7974 void |
|
7975 sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net) |
|
7976 { |
|
7977 struct sctp_association *asoc; |
|
7978 struct sctp_tmit_chunk *chk; |
|
7979 struct sctp_stream_queue_pending *sp; |
|
7980 unsigned int i; |
|
7981 |
|
7982 if (net == NULL) { |
|
7983 return; |
|
7984 } |
|
7985 asoc = &stcb->asoc; |
|
7986 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { |
|
7987 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) { |
|
7988 if (sp->net == net) { |
|
7989 sctp_free_remote_addr(sp->net); |
|
7990 sp->net = NULL; |
|
7991 } |
|
7992 } |
|
7993 } |
|
7994 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { |
|
7995 if (chk->whoTo == net) { |
|
7996 sctp_free_remote_addr(chk->whoTo); |
|
7997 chk->whoTo = NULL; |
|
7998 } |
|
7999 } |
|
8000 } |
|
8001 |
|
8002 int |
|
8003 sctp_med_chunk_output(struct sctp_inpcb *inp, |
|
8004 struct sctp_tcb *stcb, |
|
8005 struct sctp_association *asoc, |
|
8006 int *num_out, |
|
8007 int *reason_code, |
|
8008 int control_only, int from_where, |
|
8009 struct timeval *now, int *now_filled, int frag_point, int so_locked |
|
8010 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) |
|
8011 SCTP_UNUSED |
|
8012 #endif |
|
8013 ) |
|
8014 { |
|
8015 /** |
|
8016 * Ok this is the generic chunk service queue. we must do the |
|
8017 * following: - Service the stream queue that is next, moving any |
|
8018 * message (note I must get a complete message i.e. FIRST/MIDDLE and |
|
8019 * LAST to the out queue in one pass) and assigning TSN's - Check to |
|
8020 * see if the cwnd/rwnd allows any output, if so we go ahead and |
|
8021 * fomulate and send the low level chunks. Making sure to combine |
|
8022 * any control in the control chunk queue also. |
|
8023 */ |
|
8024 struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL; |
|
8025 struct mbuf *outchain, *endoutchain; |
|
8026 struct sctp_tmit_chunk *chk, *nchk; |
|
8027 |
|
8028 /* temp arrays for unlinking */ |
|
8029 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING]; |
|
8030 int no_fragmentflg, error; |
|
8031 unsigned int max_rwnd_per_dest, max_send_per_dest; |
|
8032 int one_chunk, hbflag, skip_data_for_this_net; |
|
8033 int asconf, cookie, no_out_cnt; |
|
8034 int bundle_at, ctl_cnt, no_data_chunks, eeor_mode; |
|
8035 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out; |
|
8036 int tsns_sent = 0; |
|
8037 uint32_t auth_offset = 0; |
|
8038 struct sctp_auth_chunk *auth = NULL; |
|
8039 uint16_t auth_keyid; |
|
8040 int override_ok = 1; |
|
8041 int skip_fill_up = 0; |
|
8042 int data_auth_reqd = 0; |
|
8043 /* JRS 5/14/07 - Add flag for whether a heartbeat is sent to |
|
8044 the destination. */ |
|
8045 int quit_now = 0; |
|
8046 |
|
8047 #if defined(__APPLE__) |
|
8048 if (so_locked) { |
|
8049 sctp_lock_assert(SCTP_INP_SO(inp)); |
|
8050 } else { |
|
8051 sctp_unlock_assert(SCTP_INP_SO(inp)); |
|
8052 } |
|
8053 #endif |
|
8054 *num_out = 0; |
|
8055 auth_keyid = stcb->asoc.authinfo.active_keyid; |
|
8056 |
|
8057 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || |
|
8058 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) || |
|
8059 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) { |
|
8060 eeor_mode = 1; |
|
8061 } else { |
|
8062 eeor_mode = 0; |
|
8063 } |
|
8064 ctl_cnt = no_out_cnt = asconf = cookie = 0; |
|
8065 /* |
|
8066 * First lets prime the pump. For each destination, if there is room |
|
8067 * in the flight size, attempt to pull an MTU's worth out of the |
|
8068 * stream queues into the general send_queue |
|
8069 */ |
|
8070 #ifdef SCTP_AUDITING_ENABLED |
|
8071 sctp_audit_log(0xC2, 2); |
|
8072 #endif |
|
8073 SCTP_TCB_LOCK_ASSERT(stcb); |
|
8074 hbflag = 0; |
|
8075 if ((control_only) || (asoc->stream_reset_outstanding)) |
|
8076 no_data_chunks = 1; |
|
8077 else |
|
8078 no_data_chunks = 0; |
|
8079 |
|
8080 /* Nothing to possible to send? */ |
|
8081 if ((TAILQ_EMPTY(&asoc->control_send_queue) || |
|
8082 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) && |
|
8083 TAILQ_EMPTY(&asoc->asconf_send_queue) && |
|
8084 TAILQ_EMPTY(&asoc->send_queue) && |
|
8085 stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) { |
|
8086 nothing_to_send: |
|
8087 *reason_code = 9; |
|
8088 return (0); |
|
8089 } |
|
8090 if (asoc->peers_rwnd == 0) { |
|
8091 /* No room in peers rwnd */ |
|
8092 *reason_code = 1; |
|
8093 if (asoc->total_flight > 0) { |
|
8094 /* we are allowed one chunk in flight */ |
|
8095 no_data_chunks = 1; |
|
8096 } |
|
8097 } |
|
8098 if (stcb->asoc.ecn_echo_cnt_onq) { |
|
8099 /* Record where a sack goes, if any */ |
|
8100 if (no_data_chunks && |
|
8101 (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) { |
|
8102 /* Nothing but ECNe to send - we don't do that */ |
|
8103 goto nothing_to_send; |
|
8104 } |
|
8105 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { |
|
8106 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || |
|
8107 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) { |
|
8108 sack_goes_to = chk->whoTo; |
|
8109 break; |
|
8110 } |
|
8111 } |
|
8112 } |
|
8113 max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets); |
|
8114 if (stcb->sctp_socket) |
|
8115 max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets; |
|
8116 else |
|
8117 max_send_per_dest = 0; |
|
8118 if (no_data_chunks == 0) { |
|
8119 /* How many non-directed chunks are there? */ |
|
8120 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { |
|
8121 if (chk->whoTo == NULL) { |
|
8122 /* We already have non-directed |
|
8123 * chunks on the queue, no need |
|
8124 * to do a fill-up. |
|
8125 */ |
|
8126 skip_fill_up = 1; |
|
8127 break; |
|
8128 } |
|
8129 } |
|
8130 |
|
8131 } |
|
8132 if ((no_data_chunks == 0) && |
|
8133 (skip_fill_up == 0) && |
|
8134 (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) { |
|
8135 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
8136 /* |
|
8137 * This for loop we are in takes in |
|
8138 * each net, if its's got space in cwnd and |
|
8139 * has data sent to it (when CMT is off) then it |
|
8140 * calls sctp_fill_outqueue for the net. This gets |
|
8141 * data on the send queue for that network. |
|
8142 * |
|
8143 * In sctp_fill_outqueue TSN's are assigned and |
|
8144 * data is copied out of the stream buffers. Note |
|
8145 * mostly copy by reference (we hope). |
|
8146 */ |
|
8147 net->window_probe = 0; |
|
8148 if ((net != stcb->asoc.alternate) && |
|
8149 ((net->dest_state & SCTP_ADDR_PF) || |
|
8150 (!(net->dest_state & SCTP_ADDR_REACHABLE)) || |
|
8151 (net->dest_state & SCTP_ADDR_UNCONFIRMED))) { |
|
8152 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { |
|
8153 sctp_log_cwnd(stcb, net, 1, |
|
8154 SCTP_CWND_LOG_FILL_OUTQ_CALLED); |
|
8155 } |
|
8156 continue; |
|
8157 } |
|
8158 if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) && |
|
8159 (net->flight_size == 0)) { |
|
8160 (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net); |
|
8161 } |
|
8162 if (net->flight_size >= net->cwnd) { |
|
8163 /* skip this network, no room - can't fill */ |
|
8164 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { |
|
8165 sctp_log_cwnd(stcb, net, 3, |
|
8166 SCTP_CWND_LOG_FILL_OUTQ_CALLED); |
|
8167 } |
|
8168 continue; |
|
8169 } |
|
8170 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { |
|
8171 sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED); |
|
8172 } |
|
8173 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked); |
|
8174 if (quit_now) { |
|
8175 /* memory alloc failure */ |
|
8176 no_data_chunks = 1; |
|
8177 break; |
|
8178 } |
|
8179 } |
|
8180 } |
|
8181 /* now service each destination and send out what we can for it */ |
|
8182 /* Nothing to send? */ |
|
8183 if (TAILQ_EMPTY(&asoc->control_send_queue) && |
|
8184 TAILQ_EMPTY(&asoc->asconf_send_queue) && |
|
8185 TAILQ_EMPTY(&asoc->send_queue)) { |
|
8186 *reason_code = 8; |
|
8187 return (0); |
|
8188 } |
|
8189 |
|
8190 if (asoc->sctp_cmt_on_off > 0) { |
|
8191 /* get the last start point */ |
|
8192 start_at = asoc->last_net_cmt_send_started; |
|
8193 if (start_at == NULL) { |
|
8194 /* null so to beginning */ |
|
8195 start_at = TAILQ_FIRST(&asoc->nets); |
|
8196 } else { |
|
8197 start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next); |
|
8198 if (start_at == NULL) { |
|
8199 start_at = TAILQ_FIRST(&asoc->nets); |
|
8200 } |
|
8201 } |
|
8202 asoc->last_net_cmt_send_started = start_at; |
|
8203 } else { |
|
8204 start_at = TAILQ_FIRST(&asoc->nets); |
|
8205 } |
|
8206 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { |
|
8207 if (chk->whoTo == NULL) { |
|
8208 if (asoc->alternate) { |
|
8209 chk->whoTo = asoc->alternate; |
|
8210 } else { |
|
8211 chk->whoTo = asoc->primary_destination; |
|
8212 } |
|
8213 atomic_add_int(&chk->whoTo->ref_count, 1); |
|
8214 } |
|
8215 } |
|
8216 old_start_at = NULL; |
|
8217 again_one_more_time: |
|
8218 for (net = start_at ; net != NULL; net = TAILQ_NEXT(net, sctp_next)) { |
|
8219 /* how much can we send? */ |
|
8220 /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */ |
|
8221 if (old_start_at && (old_start_at == net)) { |
|
8222 /* through list completely. */
|
8223 break; |
|
8224 } |
|
8225 tsns_sent = 0xa; |
|
8226 if (TAILQ_EMPTY(&asoc->control_send_queue) && |
|
8227 TAILQ_EMPTY(&asoc->asconf_send_queue) && |
|
8228 (net->flight_size >= net->cwnd)) { |
|
8229 /* Nothing on control or asconf and flight is full, we can skip |
|
8230 * even in the CMT case. |
|
8231 */ |
|
8232 continue; |
|
8233 } |
|
8234 bundle_at = 0; |
|
8235 endoutchain = outchain = NULL; |
|
8236 no_fragmentflg = 1; |
|
8237 one_chunk = 0; |
|
8238 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { |
|
8239 skip_data_for_this_net = 1; |
|
8240 } else { |
|
8241 skip_data_for_this_net = 0; |
|
8242 } |
|
8243 #if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) || defined(__APPLE__)) |
|
8244 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) { |
|
8245 /* |
|
8246 * if we have a route and an ifp check to see if we |
|
8247 * have room to send to this guy |
|
8248 */ |
|
8249 struct ifnet *ifp; |
|
8250 |
|
8251 ifp = net->ro.ro_rt->rt_ifp; |
|
8252 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) { |
|
8253 SCTP_STAT_INCR(sctps_ifnomemqueued); |
|
8254 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { |
|
8255 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED); |
|
8256 } |
|
8257 continue; |
|
8258 } |
|
8259 } |
|
8260 #endif |
|
8261 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) { |
|
8262 #ifdef INET |
|
8263 case AF_INET: |
|
8264 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr)); |
|
8265 break; |
|
8266 #endif |
|
8267 #ifdef INET6 |
|
8268 case AF_INET6: |
|
8269 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)); |
|
8270 break; |
|
8271 #endif |
|
8272 #if defined(__Userspace__) |
|
8273 case AF_CONN: |
|
8274 mtu = net->mtu - sizeof(struct sctphdr); |
|
8275 break; |
|
8276 #endif |
|
8277 default: |
|
8278 /* TSNH */ |
|
8279 mtu = net->mtu; |
|
8280 break; |
|
8281 } |
|
8282 mx_mtu = mtu; |
|
8283 to_out = 0; |
|
8284 if (mtu > asoc->peers_rwnd) { |
|
8285 if (asoc->total_flight > 0) { |
|
8286 /* We have a packet in flight somewhere */ |
|
8287 r_mtu = asoc->peers_rwnd; |
|
8288 } else { |
|
8289 /* We are always allowed to send one MTU out */ |
|
8290 one_chunk = 1; |
|
8291 r_mtu = mtu; |
|
8292 } |
|
8293 } else { |
|
8294 r_mtu = mtu; |
|
8295 } |
|
8296 /************************/ |
|
8297 /* ASCONF transmission */ |
|
8298 /************************/ |
|
8299 /* Now first lets go through the asconf queue */ |
|
8300 TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) { |
|
8301 if (chk->rec.chunk_id.id != SCTP_ASCONF) { |
|
8302 continue; |
|
8303 } |
|
8304 if (chk->whoTo == NULL) { |
|
8305 if (asoc->alternate == NULL) { |
|
8306 if (asoc->primary_destination != net) { |
|
8307 break; |
|
8308 } |
|
8309 } else { |
|
8310 if (asoc->alternate != net) { |
|
8311 break; |
|
8312 } |
|
8313 } |
|
8314 } else { |
|
8315 if (chk->whoTo != net) { |
|
8316 break; |
|
8317 } |
|
8318 } |
|
8319 if (chk->data == NULL) { |
|
8320 break; |
|
8321 } |
|
8322 if (chk->sent != SCTP_DATAGRAM_UNSENT && |
|
8323 chk->sent != SCTP_DATAGRAM_RESEND) { |
|
8324 break; |
|
8325 } |
|
8326 /* |
|
8327 * if no AUTH is yet included and this chunk |
|
8328 * requires it, make sure to account for it. We |
|
8329 * don't apply the size until the AUTH chunk is |
|
8330 * actually added below in case there is no room for |
|
8331 * this chunk. NOTE: we overload the use of "omtu" |
|
8332 * here |
|
8333 */ |
|
8334 if ((auth == NULL) && |
|
8335 sctp_auth_is_required_chunk(chk->rec.chunk_id.id, |
|
8336 stcb->asoc.peer_auth_chunks)) { |
|
8337 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); |
|
8338 } else |
|
8339 omtu = 0; |
|
8340 /* Here we do NOT factor the r_mtu */ |
|
8341 if ((chk->send_size < (int)(mtu - omtu)) || |
|
8342 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { |
|
8343 /* |
|
8344 * We probably should glom the mbuf chain |
|
8345 * from the chk->data for control but the |
|
8346 * problem is it becomes yet one more level |
|
8347 * of tracking to do if for some reason |
|
8348 * output fails. Then I have got to |
|
8349 * reconstruct the merged control chain.. el |
|
8350 * yucko.. for now we take the easy way and |
|
8351 * do the copy |
|
8352 */ |
|
8353 /* |
|
8354 * Add an AUTH chunk, if chunk requires it |
|
8355 * save the offset into the chain for AUTH |
|
8356 */ |
|
8357 if ((auth == NULL) && |
|
8358 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, |
|
8359 stcb->asoc.peer_auth_chunks))) { |
|
8360 outchain = sctp_add_auth_chunk(outchain, |
|
8361 &endoutchain, |
|
8362 &auth, |
|
8363 &auth_offset, |
|
8364 stcb, |
|
8365 chk->rec.chunk_id.id); |
|
8366 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); |
|
8367 } |
|
8368 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, |
|
8369 (int)chk->rec.chunk_id.can_take_data, |
|
8370 chk->send_size, chk->copy_by_ref); |
|
8371 if (outchain == NULL) { |
|
8372 *reason_code = 8; |
|
8373 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
8374 return (ENOMEM); |
|
8375 } |
|
8376 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); |
|
8377 /* update our MTU size */ |
|
8378 if (mtu > (chk->send_size + omtu)) |
|
8379 mtu -= (chk->send_size + omtu); |
|
8380 else |
|
8381 mtu = 0; |
|
8382 to_out += (chk->send_size + omtu); |
|
8383 /* Do clear IP_DF ? */ |
|
8384 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { |
|
8385 no_fragmentflg = 0; |
|
8386 } |
|
8387 if (chk->rec.chunk_id.can_take_data) |
|
8388 chk->data = NULL; |
|
8389 /* |
|
8390 * set hb flag since we can |
|
8391 * use these for RTO |
|
8392 */ |
|
8393 hbflag = 1; |
|
8394 asconf = 1; |
|
8395 /* |
|
8396 * should sysctl this: don't |
|
8397 * bundle data with ASCONF |
|
8398 * since it requires AUTH |
|
8399 */ |
|
8400 no_data_chunks = 1; |
|
8401 chk->sent = SCTP_DATAGRAM_SENT; |
|
8402 if (chk->whoTo == NULL) { |
|
8403 chk->whoTo = net; |
|
8404 atomic_add_int(&net->ref_count, 1); |
|
8405 } |
|
8406 chk->snd_count++; |
|
8407 if (mtu == 0) { |
|
8408 /* |
|
8409 * Ok we are out of room but we can |
|
8410 * output without affecting the
|
8411 * flight size since this little guy |
|
8412 * is a control only packet. |
|
8413 */ |
|
8414 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net); |
|
8415 /* |
|
8416 * do NOT clear the asconf |
|
8417 * flag as it is used to do |
|
8418 * appropriate source address |
|
8419 * selection. |
|
8420 */ |
|
8421 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, |
|
8422 (struct sockaddr *)&net->ro._l_addr, |
|
8423 outchain, auth_offset, auth, |
|
8424 stcb->asoc.authinfo.active_keyid, |
|
8425 no_fragmentflg, 0, asconf, |
|
8426 inp->sctp_lport, stcb->rport, |
|
8427 htonl(stcb->asoc.peer_vtag), |
|
8428 net->port, NULL, |
|
8429 #if defined(__FreeBSD__) |
|
8430 0, 0, |
|
8431 #endif |
|
8432 so_locked))) { |
|
8433 if (error == ENOBUFS) { |
|
8434 asoc->ifp_had_enobuf = 1; |
|
8435 SCTP_STAT_INCR(sctps_lowlevelerr); |
|
8436 } |
|
8437 if (from_where == 0) { |
|
8438 SCTP_STAT_INCR(sctps_lowlevelerrusr); |
|
8439 } |
|
8440 if (*now_filled == 0) { |
|
8441 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); |
|
8442 *now_filled = 1; |
|
8443 *now = net->last_sent_time; |
|
8444 } else { |
|
8445 net->last_sent_time = *now; |
|
8446 } |
|
8447 hbflag = 0; |
|
8448 /* error, could not output */ |
|
8449 if (error == EHOSTUNREACH) { |
|
8450 /* |
|
8451 * Destination went |
|
8452 * unreachable |
|
8453 * during this send |
|
8454 */ |
|
8455 sctp_move_chunks_from_net(stcb, net); |
|
8456 } |
|
8457 *reason_code = 7; |
|
8458 continue; |
|
8459 } else |
|
8460 asoc->ifp_had_enobuf = 0; |
|
8461 if (*now_filled == 0) { |
|
8462 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); |
|
8463 *now_filled = 1; |
|
8464 *now = net->last_sent_time; |
|
8465 } else { |
|
8466 net->last_sent_time = *now; |
|
8467 } |
|
8468 hbflag = 0; |
|
8469 /* |
|
8470 * increase the number we sent, if a |
|
8471 * cookie is sent we don't tell them |
|
8472 * any was sent out. |
|
8473 */ |
|
8474 outchain = endoutchain = NULL; |
|
8475 auth = NULL; |
|
8476 auth_offset = 0; |
|
8477 if (!no_out_cnt) |
|
8478 *num_out += ctl_cnt; |
|
8479 /* recalc a clean slate and setup */ |
|
8480 switch (net->ro._l_addr.sa.sa_family) { |
|
8481 #ifdef INET |
|
8482 case AF_INET: |
|
8483 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; |
|
8484 break; |
|
8485 #endif |
|
8486 #ifdef INET6 |
|
8487 case AF_INET6: |
|
8488 mtu = net->mtu - SCTP_MIN_OVERHEAD; |
|
8489 break; |
|
8490 #endif |
|
8491 #if defined(__Userspace__) |
|
8492 case AF_CONN: |
|
8493 mtu = net->mtu - sizeof(struct sctphdr); |
|
8494 break; |
|
8495 #endif |
|
8496 default: |
|
8497 /* TSNH */ |
|
8498 mtu = net->mtu; |
|
8499 break; |
|
8500 } |
|
8501 to_out = 0; |
|
8502 no_fragmentflg = 1; |
|
8503 } |
|
8504 } |
|
8505 } |
|
8506 /************************/ |
|
8507 /* Control transmission */ |
|
8508 /************************/ |
|
8509 /* Now first lets go through the control queue */ |
|
8510 TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) { |
|
8511 if ((sack_goes_to) && |
|
8512 (chk->rec.chunk_id.id == SCTP_ECN_ECHO) && |
|
8513 (chk->whoTo != sack_goes_to)) { |
|
8514 /* |
|
8515 * if we have a sack in queue, and we are looking at an |
|
8516 * ecn echo that is NOT queued to where the sack is going.. |
|
8517 */ |
|
8518 if (chk->whoTo == net) { |
|
8519 /* Don't transmit it to where its going (current net) */ |
|
8520 continue; |
|
8521 } else if (sack_goes_to == net) { |
|
8522 /* But do transmit it to this address */ |
|
8523 goto skip_net_check; |
|
8524 } |
|
8525 } |
|
8526 if (chk->whoTo == NULL) { |
|
8527 if (asoc->alternate == NULL) { |
|
8528 if (asoc->primary_destination != net) { |
|
8529 continue; |
|
8530 } |
|
8531 } else { |
|
8532 if (asoc->alternate != net) { |
|
8533 continue; |
|
8534 } |
|
8535 } |
|
8536 } else { |
|
8537 if (chk->whoTo != net) { |
|
8538 continue; |
|
8539 } |
|
8540 } |
|
8541 skip_net_check: |
|
8542 if (chk->data == NULL) { |
|
8543 continue; |
|
8544 } |
|
8545 if (chk->sent != SCTP_DATAGRAM_UNSENT) { |
|
8546 /* |
|
8547 * It must be unsent. Cookies and ASCONF's |
|
8548 * hang around but their timers will force
|
8549 * when marked for resend. |
|
8550 */ |
|
8551 continue; |
|
8552 } |
|
8553 /* |
|
8554 * if no AUTH is yet included and this chunk |
|
8555 * requires it, make sure to account for it. We |
|
8556 * don't apply the size until the AUTH chunk is |
|
8557 * actually added below in case there is no room for |
|
8558 * this chunk. NOTE: we overload the use of "omtu" |
|
8559 * here |
|
8560 */ |
|
8561 if ((auth == NULL) && |
|
8562 sctp_auth_is_required_chunk(chk->rec.chunk_id.id, |
|
8563 stcb->asoc.peer_auth_chunks)) { |
|
8564 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); |
|
8565 } else |
|
8566 omtu = 0; |
|
8567 /* Here we do NOT factor the r_mtu */ |
|
8568 if ((chk->send_size <= (int)(mtu - omtu)) || |
|
8569 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { |
|
8570 /* |
|
8571 * We probably should glom the mbuf chain |
|
8572 * from the chk->data for control but the |
|
8573 * problem is it becomes yet one more level |
|
8574 * of tracking to do if for some reason |
|
8575 * output fails. Then I have got to |
|
8576 * reconstruct the merged control chain.. el |
|
8577 * yucko.. for now we take the easy way and |
|
8578 * do the copy |
|
8579 */ |
|
8580 /* |
|
8581 * Add an AUTH chunk, if chunk requires it |
|
8582 * save the offset into the chain for AUTH |
|
8583 */ |
|
8584 if ((auth == NULL) && |
|
8585 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, |
|
8586 stcb->asoc.peer_auth_chunks))) { |
|
8587 outchain = sctp_add_auth_chunk(outchain, |
|
8588 &endoutchain, |
|
8589 &auth, |
|
8590 &auth_offset, |
|
8591 stcb, |
|
8592 chk->rec.chunk_id.id); |
|
8593 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); |
|
8594 } |
|
8595 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, |
|
8596 (int)chk->rec.chunk_id.can_take_data, |
|
8597 chk->send_size, chk->copy_by_ref); |
|
8598 if (outchain == NULL) { |
|
8599 *reason_code = 8; |
|
8600 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
8601 return (ENOMEM); |
|
8602 } |
|
8603 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); |
|
8604 /* update our MTU size */ |
|
8605 if (mtu > (chk->send_size + omtu)) |
|
8606 mtu -= (chk->send_size + omtu); |
|
8607 else |
|
8608 mtu = 0; |
|
8609 to_out += (chk->send_size + omtu); |
|
8610 /* Do clear IP_DF ? */ |
|
8611 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { |
|
8612 no_fragmentflg = 0; |
|
8613 } |
|
8614 if (chk->rec.chunk_id.can_take_data) |
|
8615 chk->data = NULL; |
|
8616 /* Mark things to be removed, if needed */ |
|
8617 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || |
|
8618 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */ |
|
8619 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) || |
|
8620 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) || |
|
8621 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) || |
|
8622 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) || |
|
8623 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) || |
|
8624 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) || |
|
8625 (chk->rec.chunk_id.id == SCTP_ECN_CWR) || |
|
8626 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) || |
|
8627 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) { |
|
8628 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) { |
|
8629 hbflag = 1; |
|
8630 } |
|
8631 /* remove these chunks at the end */ |
|
8632 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || |
|
8633 (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) { |
|
8634 /* turn off the timer */ |
|
8635 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { |
|
8636 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, |
|
8637 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT+SCTP_LOC_1); |
|
8638 } |
|
8639 } |
|
8640 ctl_cnt++; |
|
8641 } else { |
|
8642 /* |
|
8643 * Other chunks, since they have |
|
8644 * timers running (i.e. COOKIE) |
|
8645 * we just "trust" that it |
|
8646 * gets sent or retransmitted. |
|
8647 */ |
|
8648 ctl_cnt++; |
|
8649 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { |
|
8650 cookie = 1; |
|
8651 no_out_cnt = 1; |
|
8652 } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) { |
|
8653 /* |
|
8654 * Increment ecne send count here |
|
8655 * this means we may be over-zealous in |
|
8656 * our counting if the send fails, but its |
|
8657 * the best place to do it (we used to do |
|
8658 * it in the queue of the chunk, but that did |
|
8659 * not tell how many times it was sent. |
|
8660 */ |
|
8661 SCTP_STAT_INCR(sctps_sendecne); |
|
8662 } |
|
8663 chk->sent = SCTP_DATAGRAM_SENT; |
|
8664 if (chk->whoTo == NULL) { |
|
8665 chk->whoTo = net; |
|
8666 atomic_add_int(&net->ref_count, 1); |
|
8667 } |
|
8668 chk->snd_count++; |
|
8669 } |
|
8670 if (mtu == 0) { |
|
8671 /* |
|
8672 * Ok we are out of room but we can |
|
8673 * output without affecting the
|
8674 * flight size since this little guy |
|
8675 * is a control only packet. |
|
8676 */ |
|
8677 if (asconf) { |
|
8678 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net); |
|
8679 /* |
|
8680 * do NOT clear the asconf |
|
8681 * flag as it is used to do |
|
8682 * appropriate source address |
|
8683 * selection. |
|
8684 */ |
|
8685 } |
|
8686 if (cookie) { |
|
8687 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net); |
|
8688 cookie = 0; |
|
8689 } |
|
8690 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, |
|
8691 (struct sockaddr *)&net->ro._l_addr, |
|
8692 outchain, |
|
8693 auth_offset, auth, |
|
8694 stcb->asoc.authinfo.active_keyid, |
|
8695 no_fragmentflg, 0, asconf, |
|
8696 inp->sctp_lport, stcb->rport, |
|
8697 htonl(stcb->asoc.peer_vtag), |
|
8698 net->port, NULL, |
|
8699 #if defined(__FreeBSD__) |
|
8700 0, 0, |
|
8701 #endif |
|
8702 so_locked))) { |
|
8703 if (error == ENOBUFS) { |
|
8704 asoc->ifp_had_enobuf = 1; |
|
8705 SCTP_STAT_INCR(sctps_lowlevelerr); |
|
8706 } |
|
8707 if (from_where == 0) { |
|
8708 SCTP_STAT_INCR(sctps_lowlevelerrusr); |
|
8709 } |
|
8710 /* error, could not output */ |
|
8711 if (hbflag) { |
|
8712 if (*now_filled == 0) { |
|
8713 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); |
|
8714 *now_filled = 1; |
|
8715 *now = net->last_sent_time; |
|
8716 } else { |
|
8717 net->last_sent_time = *now; |
|
8718 } |
|
8719 hbflag = 0; |
|
8720 } |
|
8721 if (error == EHOSTUNREACH) { |
|
8722 /* |
|
8723 * Destination went |
|
8724 * unreachable |
|
8725 * during this send |
|
8726 */ |
|
8727 sctp_move_chunks_from_net(stcb, net); |
|
8728 } |
|
8729 *reason_code = 7; |
|
8730 continue; |
|
8731 } else |
|
8732 asoc->ifp_had_enobuf = 0; |
|
8733 /* Only HB or ASCONF advances time */ |
|
8734 if (hbflag) { |
|
8735 if (*now_filled == 0) { |
|
8736 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); |
|
8737 *now_filled = 1; |
|
8738 *now = net->last_sent_time; |
|
8739 } else { |
|
8740 net->last_sent_time = *now; |
|
8741 } |
|
8742 hbflag = 0; |
|
8743 } |
|
8744 /* |
|
8745 * increase the number we sent, if a |
|
8746 * cookie is sent we don't tell them |
|
8747 * any was sent out. |
|
8748 */ |
|
8749 outchain = endoutchain = NULL; |
|
8750 auth = NULL; |
|
8751 auth_offset = 0; |
|
8752 if (!no_out_cnt) |
|
8753 *num_out += ctl_cnt; |
|
8754 /* recalc a clean slate and setup */ |
|
8755 switch (net->ro._l_addr.sa.sa_family) { |
|
8756 #ifdef INET |
|
8757 case AF_INET: |
|
8758 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; |
|
8759 break; |
|
8760 #endif |
|
8761 #ifdef INET6 |
|
8762 case AF_INET6: |
|
8763 mtu = net->mtu - SCTP_MIN_OVERHEAD; |
|
8764 break; |
|
8765 #endif |
|
8766 #if defined(__Userspace__) |
|
8767 case AF_CONN: |
|
8768 mtu = net->mtu - sizeof(struct sctphdr); |
|
8769 break; |
|
8770 #endif |
|
8771 default: |
|
8772 /* TSNH */ |
|
8773 mtu = net->mtu; |
|
8774 break; |
|
8775 } |
|
8776 to_out = 0; |
|
8777 no_fragmentflg = 1; |
|
8778 } |
|
8779 } |
|
8780 } |
|
8781 /* JRI: if dest is in PF state, do not send data to it */ |
|
8782 if ((asoc->sctp_cmt_on_off > 0) && |
|
8783 (net != stcb->asoc.alternate) && |
|
8784 (net->dest_state & SCTP_ADDR_PF)) { |
|
8785 goto no_data_fill; |
|
8786 } |
|
8787 if (net->flight_size >= net->cwnd) { |
|
8788 goto no_data_fill; |
|
8789 } |
|
8790 if ((asoc->sctp_cmt_on_off > 0) && |
|
8791 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) && |
|
8792 (net->flight_size > max_rwnd_per_dest)) { |
|
8793 goto no_data_fill; |
|
8794 } |
|
8795 /* |
|
8796 * We need a specific accounting for the usage of the |
|
8797 * send buffer. We also need to check the number of messages |
|
8798 * per net. For now, this is better than nothing and it |
|
8799 * disabled by default... |
|
8800 */ |
|
8801 if ((asoc->sctp_cmt_on_off > 0) && |
|
8802 (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) && |
|
8803 (max_send_per_dest > 0) && |
|
8804 (net->flight_size > max_send_per_dest)) { |
|
8805 goto no_data_fill; |
|
8806 } |
|
8807 /*********************/ |
|
8808 /* Data transmission */ |
|
8809 /*********************/ |
|
8810 /* |
|
8811 * if AUTH for DATA is required and no AUTH has been added |
|
8812 * yet, account for this in the mtu now... if no data can be |
|
8813 * bundled, this adjustment won't matter anyways since the |
|
8814 * packet will be going out... |
|
8815 */ |
|
8816 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, |
|
8817 stcb->asoc.peer_auth_chunks); |
|
8818 if (data_auth_reqd && (auth == NULL)) { |
|
8819 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); |
|
8820 } |
|
8821 /* now lets add any data within the MTU constraints */ |
|
8822 switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) { |
|
8823 #ifdef INET |
|
8824 case AF_INET: |
|
8825 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr))) |
|
8826 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr)); |
|
8827 else |
|
8828 omtu = 0; |
|
8829 break; |
|
8830 #endif |
|
8831 #ifdef INET6 |
|
8832 case AF_INET6: |
|
8833 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr))) |
|
8834 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)); |
|
8835 else |
|
8836 omtu = 0; |
|
8837 break; |
|
8838 #endif |
|
8839 #if defined(__Userspace__) |
|
8840 case AF_CONN: |
|
8841 if (net->mtu > sizeof(struct sctphdr)) { |
|
8842 omtu = net->mtu - sizeof(struct sctphdr); |
|
8843 } else { |
|
8844 omtu = 0; |
|
8845 } |
|
8846 break; |
|
8847 #endif |
|
8848 default: |
|
8849 /* TSNH */ |
|
8850 omtu = 0; |
|
8851 break; |
|
8852 } |
|
8853 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) && |
|
8854 (skip_data_for_this_net == 0)) || |
|
8855 (cookie)) { |
|
8856 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { |
|
8857 if (no_data_chunks) { |
|
8858 /* let only control go out */ |
|
8859 *reason_code = 1; |
|
8860 break; |
|
8861 } |
|
8862 if (net->flight_size >= net->cwnd) { |
|
8863 /* skip this net, no room for data */ |
|
8864 *reason_code = 2; |
|
8865 break; |
|
8866 } |
|
8867 if ((chk->whoTo != NULL) && |
|
8868 (chk->whoTo != net)) { |
|
8869 /* Don't send the chunk on this net */ |
|
8870 continue; |
|
8871 } |
|
8872 |
|
8873 if (asoc->sctp_cmt_on_off == 0) { |
|
8874 if ((asoc->alternate) && |
|
8875 (asoc->alternate != net) && |
|
8876 (chk->whoTo == NULL)) { |
|
8877 continue; |
|
8878 } else if ((net != asoc->primary_destination) && |
|
8879 (asoc->alternate == NULL) && |
|
8880 (chk->whoTo == NULL)) { |
|
8881 continue; |
|
8882 } |
|
8883 } |
|
8884 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) { |
|
8885 /*- |
|
8886 * strange, we have a chunk that is |
|
8887 * too big for its destination and
|
8888 * yet no fragment ok flag. |
|
8889 * Something went wrong when the |
|
8890 * PMTU changed...we did not mark |
|
8891 * this chunk for some reason?? I |
|
8892 * will fix it here by letting IP |
|
8893 * fragment it for now and printing |
|
8894 * a warning. This really should not |
|
8895 * happen ... |
|
8896 */ |
|
8897 SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n", |
|
8898 chk->send_size, mtu); |
|
8899 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; |
|
8900 } |
|
8901 if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && |
|
8902 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) { |
|
8903 struct sctp_data_chunk *dchkh; |
|
8904 |
|
8905 dchkh = mtod(chk->data, struct sctp_data_chunk *); |
|
8906 dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY; |
|
8907 } |
|
8908 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) || |
|
8909 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) { |
|
8910 /* ok we will add this one */ |
|
8911 |
|
8912 /* |
|
8913 * Add an AUTH chunk, if chunk |
|
8914 * requires it, save the offset into |
|
8915 * the chain for AUTH |
|
8916 */ |
|
8917 if (data_auth_reqd) { |
|
8918 if (auth == NULL) { |
|
8919 outchain = sctp_add_auth_chunk(outchain, |
|
8920 &endoutchain, |
|
8921 &auth, |
|
8922 &auth_offset, |
|
8923 stcb, |
|
8924 SCTP_DATA); |
|
8925 auth_keyid = chk->auth_keyid; |
|
8926 override_ok = 0; |
|
8927 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); |
|
8928 } else if (override_ok) { |
|
8929 /* use this data's keyid */ |
|
8930 auth_keyid = chk->auth_keyid; |
|
8931 override_ok = 0; |
|
8932 } else if (auth_keyid != chk->auth_keyid) { |
|
8933 /* different keyid, so done bundling */ |
|
8934 break; |
|
8935 } |
|
8936 } |
|
8937 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0, |
|
8938 chk->send_size, chk->copy_by_ref); |
|
8939 if (outchain == NULL) { |
|
8940 SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n"); |
|
8941 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
|
8942 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); |
|
8943 } |
|
8944 *reason_code = 3; |
|
8945 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
8946 return (ENOMEM); |
|
8947 } |
|
8948 /* update our MTU size */
|
8949 /* Do clear IP_DF ? */ |
|
8950 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { |
|
8951 no_fragmentflg = 0; |
|
8952 } |
|
8953 /* unsigned subtraction of mtu */ |
|
8954 if (mtu > chk->send_size) |
|
8955 mtu -= chk->send_size; |
|
8956 else |
|
8957 mtu = 0; |
|
8958 /* unsigned subtraction of r_mtu */ |
|
8959 if (r_mtu > chk->send_size) |
|
8960 r_mtu -= chk->send_size; |
|
8961 else |
|
8962 r_mtu = 0; |
|
8963 |
|
8964 to_out += chk->send_size; |
|
8965 if ((to_out > mx_mtu) && no_fragmentflg) { |
|
8966 #ifdef INVARIANTS |
|
8967 panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out); |
|
8968 #else |
|
8969 SCTP_PRINTF("Exceeding mtu of %d out size is %d\n", |
|
8970 mx_mtu, to_out); |
|
8971 #endif |
|
8972 } |
|
8973 chk->window_probe = 0; |
|
8974 data_list[bundle_at++] = chk; |
|
8975 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) { |
|
8976 break; |
|
8977 } |
|
8978 if (chk->sent == SCTP_DATAGRAM_UNSENT) { |
|
8979 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) { |
|
8980 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks); |
|
8981 } else { |
|
8982 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks); |
|
8983 } |
|
8984 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) && |
|
8985 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0)) |
|
8986 /* Count number of user msg's that were fragmented |
|
8987 * we do this by counting when we see a LAST fragment |
|
8988 * only. |
|
8989 */ |
|
8990 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs); |
|
8991 } |
|
8992 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) { |
|
8993 if ((one_chunk) && (stcb->asoc.total_flight == 0)) { |
|
8994 data_list[0]->window_probe = 1; |
|
8995 net->window_probe = 1; |
|
8996 } |
|
8997 break; |
|
8998 } |
|
8999 } else { |
|
9000 /* |
|
9001 * Must be sent in order of the |
|
9002 * TSN's (on a network) |
|
9003 */ |
|
9004 break; |
|
9005 } |
|
9006 } /* for (chunk gather loop for this net) */ |
|
9007 } /* if asoc.state OPEN */ |
|
9008 no_data_fill: |
|
9009 /* Is there something to send for this destination? */ |
|
9010 if (outchain) { |
|
9011 /* We may need to start a control timer or two */ |
|
9012 if (asconf) { |
|
9013 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, |
|
9014 stcb, net); |
|
9015 /* |
|
9016 * do NOT clear the asconf flag as it is used |
|
9017 * to do appropriate source address selection. |
|
9018 */ |
|
9019 } |
|
9020 if (cookie) { |
|
9021 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net); |
|
9022 cookie = 0; |
|
9023 } |
|
9024 /* must start a send timer if data is being sent */ |
|
9025 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) { |
|
9026 /* |
|
9027 * no timer running on this destination |
|
9028 * restart it. |
|
9029 */ |
|
9030 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); |
|
9031 } |
|
9032 /* Now send it, if there is anything to send :> */ |
|
9033 if ((error = sctp_lowlevel_chunk_output(inp, |
|
9034 stcb, |
|
9035 net, |
|
9036 (struct sockaddr *)&net->ro._l_addr, |
|
9037 outchain, |
|
9038 auth_offset, |
|
9039 auth, |
|
9040 auth_keyid, |
|
9041 no_fragmentflg, |
|
9042 bundle_at, |
|
9043 asconf, |
|
9044 inp->sctp_lport, stcb->rport, |
|
9045 htonl(stcb->asoc.peer_vtag), |
|
9046 net->port, NULL, |
|
9047 #if defined(__FreeBSD__) |
|
9048 0, 0, |
|
9049 #endif |
|
9050 so_locked))) { |
|
9051 /* error, we could not output */ |
|
9052 if (error == ENOBUFS) { |
|
9053 SCTP_STAT_INCR(sctps_lowlevelerr); |
|
9054 asoc->ifp_had_enobuf = 1; |
|
9055 } |
|
9056 if (from_where == 0) { |
|
9057 SCTP_STAT_INCR(sctps_lowlevelerrusr); |
|
9058 } |
|
9059 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error); |
|
9060 if (hbflag) { |
|
9061 if (*now_filled == 0) { |
|
9062 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); |
|
9063 *now_filled = 1; |
|
9064 *now = net->last_sent_time; |
|
9065 } else { |
|
9066 net->last_sent_time = *now; |
|
9067 } |
|
9068 hbflag = 0; |
|
9069 } |
|
9070 if (error == EHOSTUNREACH) { |
|
9071 /* |
|
9072 * Destination went unreachable |
|
9073 * during this send |
|
9074 */ |
|
9075 sctp_move_chunks_from_net(stcb, net); |
|
9076 } |
|
9077 *reason_code = 6; |
|
9078 /*- |
|
9079 * I add this line to be paranoid. As far as |
|
9080 * I can tell the continue, takes us back to |
|
9081 * the top of the for, but just to make sure |
|
9082 * I will reset these again here. |
|
9083 */ |
|
9084 ctl_cnt = bundle_at = 0; |
|
9085 continue; /* This takes us back to the for() for the nets. */ |
|
9086 } else { |
|
9087 asoc->ifp_had_enobuf = 0; |
|
9088 } |
|
9089 endoutchain = NULL; |
|
9090 auth = NULL; |
|
9091 auth_offset = 0; |
|
9092 if (bundle_at || hbflag) { |
|
9093 /* For data/asconf and hb set time */ |
|
9094 if (*now_filled == 0) { |
|
9095 (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); |
|
9096 *now_filled = 1; |
|
9097 *now = net->last_sent_time; |
|
9098 } else { |
|
9099 net->last_sent_time = *now; |
|
9100 } |
|
9101 } |
|
9102 if (!no_out_cnt) { |
|
9103 *num_out += (ctl_cnt + bundle_at); |
|
9104 } |
|
9105 if (bundle_at) { |
|
9106 /* setup for a RTO measurement */ |
|
9107 tsns_sent = data_list[0]->rec.data.TSN_seq; |
|
9108 /* fill time if not already filled */ |
|
9109 if (*now_filled == 0) { |
|
9110 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent); |
|
9111 *now_filled = 1; |
|
9112 *now = asoc->time_last_sent; |
|
9113 } else { |
|
9114 asoc->time_last_sent = *now; |
|
9115 } |
|
9116 if (net->rto_needed) { |
|
9117 data_list[0]->do_rtt = 1; |
|
9118 net->rto_needed = 0; |
|
9119 } |
|
9120 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at); |
|
9121 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net); |
|
9122 } |
|
9123 if (one_chunk) { |
|
9124 break; |
|
9125 } |
|
9126 } |
|
9127 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { |
|
9128 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND); |
|
9129 } |
|
9130 } |
|
9131 if (old_start_at == NULL) { |
|
9132 old_start_at = start_at; |
|
9133 start_at = TAILQ_FIRST(&asoc->nets); |
|
9134 if (old_start_at) |
|
9135 goto again_one_more_time; |
|
9136 } |
|
9137 |
|
9138 /* |
|
9139 * At the end there should be no NON timed chunks hanging on this |
|
9140 * queue. |
|
9141 */ |
|
9142 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { |
|
9143 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND); |
|
9144 } |
|
9145 if ((*num_out == 0) && (*reason_code == 0)) { |
|
9146 *reason_code = 4; |
|
9147 } else { |
|
9148 *reason_code = 5; |
|
9149 } |
|
9150 sctp_clean_up_ctl(stcb, asoc, so_locked); |
|
9151 return (0); |
|
9152 } |
|
9153 |
|
9154 void |
|
9155 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err) |
|
9156 { |
|
9157 /*- |
|
9158 * Prepend a OPERATIONAL_ERROR chunk header and put on the end of |
|
9159 * the control chunk queue. |
|
9160 */ |
|
9161 struct sctp_chunkhdr *hdr; |
|
9162 struct sctp_tmit_chunk *chk; |
|
9163 struct mbuf *mat; |
|
9164 |
|
9165 SCTP_TCB_LOCK_ASSERT(stcb); |
|
9166 sctp_alloc_a_chunk(stcb, chk); |
|
9167 if (chk == NULL) { |
|
9168 /* no memory */ |
|
9169 sctp_m_freem(op_err); |
|
9170 return; |
|
9171 } |
|
9172 chk->copy_by_ref = 0; |
|
9173 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT); |
|
9174 if (op_err == NULL) { |
|
9175 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
|
9176 return; |
|
9177 } |
|
9178 chk->send_size = 0; |
|
9179 mat = op_err; |
|
9180 while (mat != NULL) { |
|
9181 chk->send_size += SCTP_BUF_LEN(mat); |
|
9182 mat = SCTP_BUF_NEXT(mat); |
|
9183 } |
|
9184 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR; |
|
9185 chk->rec.chunk_id.can_take_data = 1; |
|
9186 chk->sent = SCTP_DATAGRAM_UNSENT; |
|
9187 chk->snd_count = 0; |
|
9188 chk->flags = 0; |
|
9189 chk->asoc = &stcb->asoc; |
|
9190 chk->data = op_err; |
|
9191 chk->whoTo = NULL; |
|
9192 hdr = mtod(op_err, struct sctp_chunkhdr *); |
|
9193 hdr->chunk_type = SCTP_OPERATION_ERROR; |
|
9194 hdr->chunk_flags = 0; |
|
9195 hdr->chunk_length = htons(chk->send_size); |
|
9196 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, |
|
9197 chk, |
|
9198 sctp_next); |
|
9199 chk->asoc->ctrl_queue_cnt++; |
|
9200 } |
|
9201 |
|
int
sctp_send_cookie_echo(struct mbuf *m,
    int offset,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/*-
	 * Pull the State Cookie parameter out of the received INIT-ACK
	 * (mbuf chain 'm', chunk starting at 'offset'), convert it into
	 * a COOKIE-ECHO chunk, and insert it at the FRONT of the control
	 * chunk queue so it is sent first.
	 *
	 * Returns 0 on success, -2 on mbuf-copy allocation failure,
	 * -3 when no STATE_COOKIE parameter is found, -5 when no chunk
	 * resources are available.
	 */
	int at;
	struct mbuf *cookie;
	struct sctp_paramhdr parm, *phdr;
	struct sctp_chunkhdr *hdr;
	struct sctp_tmit_chunk *chk;
	uint16_t ptype, plen;

	/* First find the cookie in the param area */
	cookie = NULL;
	at = offset + sizeof(struct sctp_init_chunk);

	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
		if (phdr == NULL) {
			/* ran off the end of the parameter list */
			return (-3);
		}
		ptype = ntohs(phdr->param_type);
		plen = ntohs(phdr->param_length);
		if (ptype == SCTP_STATE_COOKIE) {
			int pad;

			/* found the cookie */
			/*
			 * Round plen up to a 4-byte boundary so the copy
			 * below also captures the parameter's padding.
			 */
			if ((pad = (plen % 4))) {
				plen += 4 - pad;
			}
			cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
			if (cookie == NULL) {
				/* No memory */
				return (-2);
			}
#ifdef SCTP_MBUF_LOGGING
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
				struct mbuf *mat;

				for (mat = cookie; mat; mat = SCTP_BUF_NEXT(mat)) {
					if (SCTP_BUF_IS_EXTENDED(mat)) {
						sctp_log_mb(mat, SCTP_MBUF_ICOPY);
					}
				}
			}
#endif
			break;
		}
		at += SCTP_SIZE32(plen);
	} while (phdr);
	if (cookie == NULL) {
		/* Did not find the cookie */
		return (-3);
	}
	/* ok, we got the cookie lets change it into a cookie echo chunk */

	/* first the change from param to cookie */
	/*
	 * The parameter header is rewritten in place as a chunk header:
	 * the 16-bit param type is overwritten by the 8-bit chunk type
	 * plus flags; the length field is left as-is.
	 */
	hdr = mtod(cookie, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_COOKIE_ECHO;
	hdr->chunk_flags = 0;
	/* get the chunk stuff now and place it in the FRONT of the queue */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(cookie);
		return (-5);
	}
	chk->copy_by_ref = 0;
	/* NOTE: plen was rounded up above, so send_size includes padding. */
	chk->send_size = plen;
	chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
	chk->rec.chunk_id.can_take_data = 0;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
	chk->asoc = &stcb->asoc;
	chk->data = cookie;
	chk->whoTo = net;
	/* the queued chunk holds a reference on its destination net */
	atomic_add_int(&chk->whoTo->ref_count, 1);
	TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return (0);
}
|
9290 |
|
void
sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
    struct mbuf *m,
    int offset,
    int chk_length,
    struct sctp_nets *net)
{
	/*
	 * Take a received HEARTBEAT request (at 'offset' in mbuf chain
	 * 'm', 'chk_length' bytes long), copy it, rewrite the copy into
	 * a HEARTBEAT-ACK, and queue it on the control send queue
	 * towards 'net'.  The reply is silently dropped on any failure.
	 */
	struct mbuf *outchain;
	struct sctp_chunkhdr *chdr;
	struct sctp_tmit_chunk *chk;


	if (net == NULL)
		/* must have a net pointer */
		return;

	/* Copy the request; the ACK echoes the heartbeat info back. */
	outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
	if (outchain == NULL) {
		/* gak out of memory */
		return;
	}
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
		struct mbuf *mat;

		for (mat = outchain; mat; mat = SCTP_BUF_NEXT(mat)) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_ICOPY);
			}
		}
	}
#endif
	/* Rewrite the chunk header in place: request becomes an ack. */
	chdr = mtod(outchain, struct sctp_chunkhdr *);
	chdr->chunk_type = SCTP_HEARTBEAT_ACK;
	chdr->chunk_flags = 0;
	if (chk_length % 4) {
		/* need pad */
		uint32_t cpthis = 0;
		int padlen;

		padlen = 4 - (chk_length % 4);
		/* append zero bytes to bring the chunk to a 4-byte boundary */
		m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(outchain);
		return;
	}
	chk->copy_by_ref = 0;
	/* send_size is the unpadded chunk length; pad bytes lie past it */
	chk->send_size = chk_length;
	chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = outchain;
	chk->whoTo = net;
	/* the queued chunk holds a reference on its destination net */
	atomic_add_int(&chk->whoTo->ref_count, 1);
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
}
|
9357 |
|
9358 void |
|
9359 sctp_send_cookie_ack(struct sctp_tcb *stcb) |
|
9360 { |
|
9361 /* formulate and queue a cookie-ack back to sender */ |
|
9362 struct mbuf *cookie_ack; |
|
9363 struct sctp_chunkhdr *hdr; |
|
9364 struct sctp_tmit_chunk *chk; |
|
9365 |
|
9366 SCTP_TCB_LOCK_ASSERT(stcb); |
|
9367 |
|
9368 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER); |
|
9369 if (cookie_ack == NULL) { |
|
9370 /* no mbuf's */ |
|
9371 return; |
|
9372 } |
|
9373 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD); |
|
9374 sctp_alloc_a_chunk(stcb, chk); |
|
9375 if (chk == NULL) { |
|
9376 /* no memory */ |
|
9377 sctp_m_freem(cookie_ack); |
|
9378 return; |
|
9379 } |
|
9380 chk->copy_by_ref = 0; |
|
9381 chk->send_size = sizeof(struct sctp_chunkhdr); |
|
9382 chk->rec.chunk_id.id = SCTP_COOKIE_ACK; |
|
9383 chk->rec.chunk_id.can_take_data = 1; |
|
9384 chk->sent = SCTP_DATAGRAM_UNSENT; |
|
9385 chk->snd_count = 0; |
|
9386 chk->flags = 0; |
|
9387 chk->asoc = &stcb->asoc; |
|
9388 chk->data = cookie_ack; |
|
9389 if (chk->asoc->last_control_chunk_from != NULL) { |
|
9390 chk->whoTo = chk->asoc->last_control_chunk_from; |
|
9391 atomic_add_int(&chk->whoTo->ref_count, 1); |
|
9392 } else { |
|
9393 chk->whoTo = NULL; |
|
9394 } |
|
9395 hdr = mtod(cookie_ack, struct sctp_chunkhdr *); |
|
9396 hdr->chunk_type = SCTP_COOKIE_ACK; |
|
9397 hdr->chunk_flags = 0; |
|
9398 hdr->chunk_length = htons(chk->send_size); |
|
9399 SCTP_BUF_LEN(cookie_ack) = chk->send_size; |
|
9400 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); |
|
9401 chk->asoc->ctrl_queue_cnt++; |
|
9402 return; |
|
9403 } |
|
9404 |
|
9405 |
|
9406 void |
|
9407 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net) |
|
9408 { |
|
9409 /* formulate and queue a SHUTDOWN-ACK back to the sender */ |
|
9410 struct mbuf *m_shutdown_ack; |
|
9411 struct sctp_shutdown_ack_chunk *ack_cp; |
|
9412 struct sctp_tmit_chunk *chk; |
|
9413 |
|
9414 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER); |
|
9415 if (m_shutdown_ack == NULL) { |
|
9416 /* no mbuf's */ |
|
9417 return; |
|
9418 } |
|
9419 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD); |
|
9420 sctp_alloc_a_chunk(stcb, chk); |
|
9421 if (chk == NULL) { |
|
9422 /* no memory */ |
|
9423 sctp_m_freem(m_shutdown_ack); |
|
9424 return; |
|
9425 } |
|
9426 chk->copy_by_ref = 0; |
|
9427 chk->send_size = sizeof(struct sctp_chunkhdr); |
|
9428 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK; |
|
9429 chk->rec.chunk_id.can_take_data = 1; |
|
9430 chk->sent = SCTP_DATAGRAM_UNSENT; |
|
9431 chk->snd_count = 0; |
|
9432 chk->flags = 0; |
|
9433 chk->asoc = &stcb->asoc; |
|
9434 chk->data = m_shutdown_ack; |
|
9435 chk->whoTo = net; |
|
9436 if (chk->whoTo) { |
|
9437 atomic_add_int(&chk->whoTo->ref_count, 1); |
|
9438 } |
|
9439 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *); |
|
9440 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK; |
|
9441 ack_cp->ch.chunk_flags = 0; |
|
9442 ack_cp->ch.chunk_length = htons(chk->send_size); |
|
9443 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size; |
|
9444 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); |
|
9445 chk->asoc->ctrl_queue_cnt++; |
|
9446 return; |
|
9447 } |
|
9448 |
|
9449 void |
|
9450 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net) |
|
9451 { |
|
9452 /* formulate and queue a SHUTDOWN to the sender */ |
|
9453 struct mbuf *m_shutdown; |
|
9454 struct sctp_shutdown_chunk *shutdown_cp; |
|
9455 struct sctp_tmit_chunk *chk; |
|
9456 |
|
9457 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER); |
|
9458 if (m_shutdown == NULL) { |
|
9459 /* no mbuf's */ |
|
9460 return; |
|
9461 } |
|
9462 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD); |
|
9463 sctp_alloc_a_chunk(stcb, chk); |
|
9464 if (chk == NULL) { |
|
9465 /* no memory */ |
|
9466 sctp_m_freem(m_shutdown); |
|
9467 return; |
|
9468 } |
|
9469 chk->copy_by_ref = 0; |
|
9470 chk->send_size = sizeof(struct sctp_shutdown_chunk); |
|
9471 chk->rec.chunk_id.id = SCTP_SHUTDOWN; |
|
9472 chk->rec.chunk_id.can_take_data = 1; |
|
9473 chk->sent = SCTP_DATAGRAM_UNSENT; |
|
9474 chk->snd_count = 0; |
|
9475 chk->flags = 0; |
|
9476 chk->asoc = &stcb->asoc; |
|
9477 chk->data = m_shutdown; |
|
9478 chk->whoTo = net; |
|
9479 if (chk->whoTo) { |
|
9480 atomic_add_int(&chk->whoTo->ref_count, 1); |
|
9481 } |
|
9482 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *); |
|
9483 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN; |
|
9484 shutdown_cp->ch.chunk_flags = 0; |
|
9485 shutdown_cp->ch.chunk_length = htons(chk->send_size); |
|
9486 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn); |
|
9487 SCTP_BUF_LEN(m_shutdown) = chk->send_size; |
|
9488 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); |
|
9489 chk->asoc->ctrl_queue_cnt++; |
|
9490 return; |
|
9491 } |
|
9492 |
|
9493 void |
|
9494 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked) |
|
9495 { |
|
9496 /* |
|
9497 * formulate and queue an ASCONF to the peer. |
|
9498 * ASCONF parameters should be queued on the assoc queue. |
|
9499 */ |
|
9500 struct sctp_tmit_chunk *chk; |
|
9501 struct mbuf *m_asconf; |
|
9502 int len; |
|
9503 |
|
9504 SCTP_TCB_LOCK_ASSERT(stcb); |
|
9505 |
|
9506 if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) && |
|
9507 (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) { |
|
9508 /* can't send a new one if there is one in flight already */ |
|
9509 return; |
|
9510 } |
|
9511 |
|
9512 /* compose an ASCONF chunk, maximum length is PMTU */ |
|
9513 m_asconf = sctp_compose_asconf(stcb, &len, addr_locked); |
|
9514 if (m_asconf == NULL) { |
|
9515 return; |
|
9516 } |
|
9517 |
|
9518 sctp_alloc_a_chunk(stcb, chk); |
|
9519 if (chk == NULL) { |
|
9520 /* no memory */ |
|
9521 sctp_m_freem(m_asconf); |
|
9522 return; |
|
9523 } |
|
9524 |
|
9525 chk->copy_by_ref = 0; |
|
9526 chk->data = m_asconf; |
|
9527 chk->send_size = len; |
|
9528 chk->rec.chunk_id.id = SCTP_ASCONF; |
|
9529 chk->rec.chunk_id.can_take_data = 0; |
|
9530 chk->sent = SCTP_DATAGRAM_UNSENT; |
|
9531 chk->snd_count = 0; |
|
9532 chk->flags = CHUNK_FLAGS_FRAGMENT_OK; |
|
9533 chk->asoc = &stcb->asoc; |
|
9534 chk->whoTo = net; |
|
9535 if (chk->whoTo) { |
|
9536 atomic_add_int(&chk->whoTo->ref_count, 1); |
|
9537 } |
|
9538 TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next); |
|
9539 chk->asoc->ctrl_queue_cnt++; |
|
9540 return; |
|
9541 } |
|
9542 |
|
9543 void |
|
9544 sctp_send_asconf_ack(struct sctp_tcb *stcb) |
|
9545 { |
|
9546 /* |
|
9547 * formulate and queue a asconf-ack back to sender. |
|
9548 * the asconf-ack must be stored in the tcb. |
|
9549 */ |
|
9550 struct sctp_tmit_chunk *chk; |
|
9551 struct sctp_asconf_ack *ack, *latest_ack; |
|
9552 struct mbuf *m_ack; |
|
9553 struct sctp_nets *net = NULL; |
|
9554 |
|
9555 SCTP_TCB_LOCK_ASSERT(stcb); |
|
9556 /* Get the latest ASCONF-ACK */ |
|
9557 latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead); |
|
9558 if (latest_ack == NULL) { |
|
9559 return; |
|
9560 } |
|
9561 if (latest_ack->last_sent_to != NULL && |
|
9562 latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) { |
|
9563 /* we're doing a retransmission */ |
|
9564 net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0); |
|
9565 if (net == NULL) { |
|
9566 /* no alternate */ |
|
9567 if (stcb->asoc.last_control_chunk_from == NULL) { |
|
9568 if (stcb->asoc.alternate) { |
|
9569 net = stcb->asoc.alternate; |
|
9570 } else { |
|
9571 net = stcb->asoc.primary_destination; |
|
9572 } |
|
9573 } else { |
|
9574 net = stcb->asoc.last_control_chunk_from; |
|
9575 } |
|
9576 } |
|
9577 } else { |
|
9578 /* normal case */ |
|
9579 if (stcb->asoc.last_control_chunk_from == NULL) { |
|
9580 if (stcb->asoc.alternate) { |
|
9581 net = stcb->asoc.alternate; |
|
9582 } else { |
|
9583 net = stcb->asoc.primary_destination; |
|
9584 } |
|
9585 } else { |
|
9586 net = stcb->asoc.last_control_chunk_from; |
|
9587 } |
|
9588 } |
|
9589 latest_ack->last_sent_to = net; |
|
9590 |
|
9591 TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) { |
|
9592 if (ack->data == NULL) { |
|
9593 continue; |
|
9594 } |
|
9595 |
|
9596 /* copy the asconf_ack */ |
|
9597 m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT); |
|
9598 if (m_ack == NULL) { |
|
9599 /* couldn't copy it */ |
|
9600 return; |
|
9601 } |
|
9602 #ifdef SCTP_MBUF_LOGGING |
|
9603 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { |
|
9604 struct mbuf *mat; |
|
9605 |
|
9606 for (mat = m_ack; mat; mat = SCTP_BUF_NEXT(mat)) { |
|
9607 if (SCTP_BUF_IS_EXTENDED(mat)) { |
|
9608 sctp_log_mb(mat, SCTP_MBUF_ICOPY); |
|
9609 } |
|
9610 } |
|
9611 } |
|
9612 #endif |
|
9613 |
|
9614 sctp_alloc_a_chunk(stcb, chk); |
|
9615 if (chk == NULL) { |
|
9616 /* no memory */ |
|
9617 if (m_ack) |
|
9618 sctp_m_freem(m_ack); |
|
9619 return; |
|
9620 } |
|
9621 chk->copy_by_ref = 0; |
|
9622 |
|
9623 chk->whoTo = net; |
|
9624 if (chk->whoTo) { |
|
9625 atomic_add_int(&chk->whoTo->ref_count, 1); |
|
9626 } |
|
9627 chk->data = m_ack; |
|
9628 chk->send_size = 0; |
|
9629 /* Get size */ |
|
9630 chk->send_size = ack->len; |
|
9631 chk->rec.chunk_id.id = SCTP_ASCONF_ACK; |
|
9632 chk->rec.chunk_id.can_take_data = 1; |
|
9633 chk->sent = SCTP_DATAGRAM_UNSENT; |
|
9634 chk->snd_count = 0; |
|
9635 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */ |
|
9636 chk->asoc = &stcb->asoc; |
|
9637 |
|
9638 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); |
|
9639 chk->asoc->ctrl_queue_cnt++; |
|
9640 } |
|
9641 return; |
|
9642 } |
|
9643 |
|
9644 |
|
9645 static int |
|
9646 sctp_chunk_retransmission(struct sctp_inpcb *inp, |
|
9647 struct sctp_tcb *stcb, |
|
9648 struct sctp_association *asoc, |
|
9649 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked |
|
9650 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) |
|
9651 SCTP_UNUSED |
|
9652 #endif |
|
9653 ) |
|
9654 { |
|
9655 /*- |
|
9656 * send out one MTU of retransmission. If fast_retransmit is |
|
9657 * happening we ignore the cwnd. Otherwise we obey the cwnd and |
|
9658 * rwnd. For a Cookie or Asconf in the control chunk queue we |
|
9659 * retransmit them by themselves. |
|
9660 * |
|
9661 * For data chunks we will pick out the lowest TSN's in the sent_queue |
|
9662 * marked for resend and bundle them all together (up to a MTU of |
|
9663 * destination). The address to send to should have been |
|
9664 * selected/changed where the retransmission was marked (i.e. in FR |
|
9665 * or t3-timeout routines). |
|
9666 */ |
|
9667 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING]; |
|
9668 struct sctp_tmit_chunk *chk, *fwd; |
|
9669 struct mbuf *m, *endofchain; |
|
9670 struct sctp_nets *net = NULL; |
|
9671 uint32_t tsns_sent = 0; |
|
9672 int no_fragmentflg, bundle_at, cnt_thru; |
|
9673 unsigned int mtu; |
|
9674 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started; |
|
9675 struct sctp_auth_chunk *auth = NULL; |
|
9676 uint32_t auth_offset = 0; |
|
9677 uint16_t auth_keyid; |
|
9678 int override_ok = 1; |
|
9679 int data_auth_reqd = 0; |
|
9680 uint32_t dmtu = 0; |
|
9681 |
|
9682 #if defined(__APPLE__) |
|
9683 if (so_locked) { |
|
9684 sctp_lock_assert(SCTP_INP_SO(inp)); |
|
9685 } else { |
|
9686 sctp_unlock_assert(SCTP_INP_SO(inp)); |
|
9687 } |
|
9688 #endif |
|
9689 SCTP_TCB_LOCK_ASSERT(stcb); |
|
9690 tmr_started = ctl_cnt = bundle_at = error = 0; |
|
9691 no_fragmentflg = 1; |
|
9692 fwd_tsn = 0; |
|
9693 *cnt_out = 0; |
|
9694 fwd = NULL; |
|
9695 endofchain = m = NULL; |
|
9696 auth_keyid = stcb->asoc.authinfo.active_keyid; |
|
9697 #ifdef SCTP_AUDITING_ENABLED |
|
9698 sctp_audit_log(0xC3, 1); |
|
9699 #endif |
|
9700 if ((TAILQ_EMPTY(&asoc->sent_queue)) && |
|
9701 (TAILQ_EMPTY(&asoc->control_send_queue))) { |
|
9702 SCTPDBG(SCTP_DEBUG_OUTPUT1,"SCTP hits empty queue with cnt set to %d?\n", |
|
9703 asoc->sent_queue_retran_cnt); |
|
9704 asoc->sent_queue_cnt = 0; |
|
9705 asoc->sent_queue_cnt_removeable = 0; |
|
9706 /* send back 0/0 so we enter normal transmission */ |
|
9707 *cnt_out = 0; |
|
9708 return (0); |
|
9709 } |
|
9710 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { |
|
9711 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) || |
|
9712 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) || |
|
9713 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) { |
|
9714 if (chk->sent != SCTP_DATAGRAM_RESEND) { |
|
9715 continue; |
|
9716 } |
|
9717 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) { |
|
9718 if (chk != asoc->str_reset) { |
|
9719 /* |
|
9720 * not eligible for retran if its |
|
9721 * not ours |
|
9722 */ |
|
9723 continue; |
|
9724 } |
|
9725 } |
|
9726 ctl_cnt++; |
|
9727 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) { |
|
9728 fwd_tsn = 1; |
|
9729 } |
|
9730 /* |
|
9731 * Add an AUTH chunk, if chunk requires it save the |
|
9732 * offset into the chain for AUTH |
|
9733 */ |
|
9734 if ((auth == NULL) && |
|
9735 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, |
|
9736 stcb->asoc.peer_auth_chunks))) { |
|
9737 m = sctp_add_auth_chunk(m, &endofchain, |
|
9738 &auth, &auth_offset, |
|
9739 stcb, |
|
9740 chk->rec.chunk_id.id); |
|
9741 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); |
|
9742 } |
|
9743 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref); |
|
9744 break; |
|
9745 } |
|
9746 } |
|
9747 one_chunk = 0; |
|
9748 cnt_thru = 0; |
|
9749 /* do we have control chunks to retransmit? */ |
|
9750 if (m != NULL) { |
|
9751 /* Start a timer no matter if we suceed or fail */ |
|
9752 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { |
|
9753 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo); |
|
9754 } else if (chk->rec.chunk_id.id == SCTP_ASCONF) |
|
9755 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo); |
|
9756 chk->snd_count++; /* update our count */ |
|
9757 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo, |
|
9758 (struct sockaddr *)&chk->whoTo->ro._l_addr, m, |
|
9759 auth_offset, auth, stcb->asoc.authinfo.active_keyid, |
|
9760 no_fragmentflg, 0, 0, |
|
9761 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), |
|
9762 chk->whoTo->port, NULL, |
|
9763 #if defined(__FreeBSD__) |
|
9764 0, 0, |
|
9765 #endif |
|
9766 so_locked))) { |
|
9767 SCTP_STAT_INCR(sctps_lowlevelerr); |
|
9768 return (error); |
|
9769 } |
|
9770 endofchain = NULL; |
|
9771 auth = NULL; |
|
9772 auth_offset = 0; |
|
9773 /* |
|
9774 * We don't want to mark the net->sent time here since this |
|
9775 * we use this for HB and retrans cannot measure RTT |
|
9776 */ |
|
9777 /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */ |
|
9778 *cnt_out += 1; |
|
9779 chk->sent = SCTP_DATAGRAM_SENT; |
|
9780 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); |
|
9781 if (fwd_tsn == 0) { |
|
9782 return (0); |
|
9783 } else { |
|
9784 /* Clean up the fwd-tsn list */ |
|
9785 sctp_clean_up_ctl(stcb, asoc, so_locked); |
|
9786 return (0); |
|
9787 } |
|
9788 } |
|
9789 /* |
|
9790 * Ok, it is just data retransmission we need to do or that and a |
|
9791 * fwd-tsn with it all. |
|
9792 */ |
|
9793 if (TAILQ_EMPTY(&asoc->sent_queue)) { |
|
9794 return (SCTP_RETRAN_DONE); |
|
9795 } |
|
9796 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) || |
|
9797 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) { |
|
9798 /* not yet open, resend the cookie and that is it */ |
|
9799 return (1); |
|
9800 } |
|
9801 #ifdef SCTP_AUDITING_ENABLED |
|
9802 sctp_auditing(20, inp, stcb, NULL); |
|
9803 #endif |
|
9804 data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks); |
|
9805 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { |
|
9806 if (chk->sent != SCTP_DATAGRAM_RESEND) { |
|
9807 /* No, not sent to this net or not ready for rtx */ |
|
9808 continue; |
|
9809 } |
|
9810 if (chk->data == NULL) { |
|
9811 SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n", |
|
9812 chk->rec.data.TSN_seq, chk->snd_count, chk->sent); |
|
9813 continue; |
|
9814 } |
|
9815 if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) && |
|
9816 (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) { |
|
9817 /* Gak, we have exceeded max unlucky retran, abort! */ |
|
9818 SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n", |
|
9819 chk->snd_count, |
|
9820 SCTP_BASE_SYSCTL(sctp_max_retran_chunk)); |
|
9821 atomic_add_int(&stcb->asoc.refcnt, 1); |
|
9822 sctp_abort_an_association(stcb->sctp_ep, stcb, NULL, so_locked); |
|
9823 SCTP_TCB_LOCK(stcb); |
|
9824 atomic_subtract_int(&stcb->asoc.refcnt, 1); |
|
9825 return (SCTP_RETRAN_EXIT); |
|
9826 } |
|
9827 /* pick up the net */ |
|
9828 net = chk->whoTo; |
|
9829 switch (net->ro._l_addr.sa.sa_family) { |
|
9830 #ifdef INET |
|
9831 case AF_INET: |
|
9832 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; |
|
9833 break; |
|
9834 #endif |
|
9835 #ifdef INET6 |
|
9836 case AF_INET6: |
|
9837 mtu = net->mtu - SCTP_MIN_OVERHEAD; |
|
9838 break; |
|
9839 #endif |
|
9840 #if defined(__Userspace__) |
|
9841 case AF_CONN: |
|
9842 mtu = net->mtu - sizeof(struct sctphdr); |
|
9843 break; |
|
9844 #endif |
|
9845 default: |
|
9846 /* TSNH */ |
|
9847 mtu = net->mtu; |
|
9848 break; |
|
9849 } |
|
9850 |
|
9851 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) { |
|
9852 /* No room in peers rwnd */ |
|
9853 uint32_t tsn; |
|
9854 |
|
9855 tsn = asoc->last_acked_seq + 1; |
|
9856 if (tsn == chk->rec.data.TSN_seq) { |
|
9857 /* |
|
9858 * we make a special exception for this |
|
9859 * case. The peer has no rwnd but is missing |
|
9860 * the lowest chunk.. which is probably what |
|
9861 * is holding up the rwnd. |
|
9862 */ |
|
9863 goto one_chunk_around; |
|
9864 } |
|
9865 return (1); |
|
9866 } |
|
9867 one_chunk_around: |
|
9868 if (asoc->peers_rwnd < mtu) { |
|
9869 one_chunk = 1; |
|
9870 if ((asoc->peers_rwnd == 0) && |
|
9871 (asoc->total_flight == 0)) { |
|
9872 chk->window_probe = 1; |
|
9873 chk->whoTo->window_probe = 1; |
|
9874 } |
|
9875 } |
|
9876 #ifdef SCTP_AUDITING_ENABLED |
|
9877 sctp_audit_log(0xC3, 2); |
|
9878 #endif |
|
9879 bundle_at = 0; |
|
9880 m = NULL; |
|
9881 net->fast_retran_ip = 0; |
|
9882 if (chk->rec.data.doing_fast_retransmit == 0) { |
|
9883 /* |
|
9884 * if no FR in progress skip destination that have |
|
9885 * flight_size > cwnd. |
|
9886 */ |
|
9887 if (net->flight_size >= net->cwnd) { |
|
9888 continue; |
|
9889 } |
|
9890 } else { |
|
9891 /* |
|
9892 * Mark the destination net to have FR recovery |
|
9893 * limits put on it. |
|
9894 */ |
|
9895 *fr_done = 1; |
|
9896 net->fast_retran_ip = 1; |
|
9897 } |
|
9898 |
|
9899 /* |
|
9900 * if no AUTH is yet included and this chunk requires it, |
|
9901 * make sure to account for it. We don't apply the size |
|
9902 * until the AUTH chunk is actually added below in case |
|
9903 * there is no room for this chunk. |
|
9904 */ |
|
9905 if (data_auth_reqd && (auth == NULL)) { |
|
9906 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); |
|
9907 } else |
|
9908 dmtu = 0; |
|
9909 |
|
9910 if ((chk->send_size <= (mtu - dmtu)) || |
|
9911 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { |
|
9912 /* ok we will add this one */ |
|
9913 if (data_auth_reqd) { |
|
9914 if (auth == NULL) { |
|
9915 m = sctp_add_auth_chunk(m, |
|
9916 &endofchain, |
|
9917 &auth, |
|
9918 &auth_offset, |
|
9919 stcb, |
|
9920 SCTP_DATA); |
|
9921 auth_keyid = chk->auth_keyid; |
|
9922 override_ok = 0; |
|
9923 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); |
|
9924 } else if (override_ok) { |
|
9925 auth_keyid = chk->auth_keyid; |
|
9926 override_ok = 0; |
|
9927 } else if (chk->auth_keyid != auth_keyid) { |
|
9928 /* different keyid, so done bundling */ |
|
9929 break; |
|
9930 } |
|
9931 } |
|
9932 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref); |
|
9933 if (m == NULL) { |
|
9934 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
9935 return (ENOMEM); |
|
9936 } |
|
9937 /* Do clear IP_DF ? */ |
|
9938 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { |
|
9939 no_fragmentflg = 0; |
|
9940 } |
|
9941 /* upate our MTU size */ |
|
9942 if (mtu > (chk->send_size + dmtu)) |
|
9943 mtu -= (chk->send_size + dmtu); |
|
9944 else |
|
9945 mtu = 0; |
|
9946 data_list[bundle_at++] = chk; |
|
9947 if (one_chunk && (asoc->total_flight <= 0)) { |
|
9948 SCTP_STAT_INCR(sctps_windowprobed); |
|
9949 } |
|
9950 } |
|
9951 if (one_chunk == 0) { |
|
9952 /* |
|
9953 * now are there anymore forward from chk to pick |
|
9954 * up? |
|
9955 */ |
|
9956 for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) { |
|
9957 if (fwd->sent != SCTP_DATAGRAM_RESEND) { |
|
9958 /* Nope, not for retran */ |
|
9959 continue; |
|
9960 } |
|
9961 if (fwd->whoTo != net) { |
|
9962 /* Nope, not the net in question */ |
|
9963 continue; |
|
9964 } |
|
9965 if (data_auth_reqd && (auth == NULL)) { |
|
9966 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); |
|
9967 } else |
|
9968 dmtu = 0; |
|
9969 if (fwd->send_size <= (mtu - dmtu)) { |
|
9970 if (data_auth_reqd) { |
|
9971 if (auth == NULL) { |
|
9972 m = sctp_add_auth_chunk(m, |
|
9973 &endofchain, |
|
9974 &auth, |
|
9975 &auth_offset, |
|
9976 stcb, |
|
9977 SCTP_DATA); |
|
9978 auth_keyid = fwd->auth_keyid; |
|
9979 override_ok = 0; |
|
9980 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); |
|
9981 } else if (override_ok) { |
|
9982 auth_keyid = fwd->auth_keyid; |
|
9983 override_ok = 0; |
|
9984 } else if (fwd->auth_keyid != auth_keyid) { |
|
9985 /* different keyid, so done bundling */ |
|
9986 break; |
|
9987 } |
|
9988 } |
|
9989 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref); |
|
9990 if (m == NULL) { |
|
9991 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
9992 return (ENOMEM); |
|
9993 } |
|
9994 /* Do clear IP_DF ? */ |
|
9995 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) { |
|
9996 no_fragmentflg = 0; |
|
9997 } |
|
9998 /* upate our MTU size */ |
|
9999 if (mtu > (fwd->send_size + dmtu)) |
|
10000 mtu -= (fwd->send_size + dmtu); |
|
10001 else |
|
10002 mtu = 0; |
|
10003 data_list[bundle_at++] = fwd; |
|
10004 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) { |
|
10005 break; |
|
10006 } |
|
10007 } else { |
|
10008 /* can't fit so we are done */ |
|
10009 break; |
|
10010 } |
|
10011 } |
|
10012 } |
|
10013 /* Is there something to send for this destination? */ |
|
10014 if (m) { |
|
10015 /* |
|
10016 * No matter if we fail/or suceed we should start a |
|
10017 * timer. A failure is like a lost IP packet :-) |
|
10018 */ |
|
10019 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
|
10020 /* |
|
10021 * no timer running on this destination |
|
10022 * restart it. |
|
10023 */ |
|
10024 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); |
|
10025 tmr_started = 1; |
|
10026 } |
|
10027 /* Now lets send it, if there is anything to send :> */ |
|
10028 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, |
|
10029 (struct sockaddr *)&net->ro._l_addr, m, |
|
10030 auth_offset, auth, auth_keyid, |
|
10031 no_fragmentflg, 0, 0, |
|
10032 inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), |
|
10033 net->port, NULL, |
|
10034 #if defined(__FreeBSD__) |
|
10035 0, 0, |
|
10036 #endif |
|
10037 so_locked))) { |
|
10038 /* error, we could not output */ |
|
10039 SCTP_STAT_INCR(sctps_lowlevelerr); |
|
10040 return (error); |
|
10041 } |
|
10042 endofchain = NULL; |
|
10043 auth = NULL; |
|
10044 auth_offset = 0; |
|
10045 /* For HB's */ |
|
10046 /* |
|
10047 * We don't want to mark the net->sent time here |
|
10048 * since this we use this for HB and retrans cannot |
|
10049 * measure RTT |
|
10050 */ |
|
10051 /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */ |
|
10052 |
|
10053 /* For auto-close */ |
|
10054 cnt_thru++; |
|
10055 if (*now_filled == 0) { |
|
10056 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent); |
|
10057 *now = asoc->time_last_sent; |
|
10058 *now_filled = 1; |
|
10059 } else { |
|
10060 asoc->time_last_sent = *now; |
|
10061 } |
|
10062 *cnt_out += bundle_at; |
|
10063 #ifdef SCTP_AUDITING_ENABLED |
|
10064 sctp_audit_log(0xC4, bundle_at); |
|
10065 #endif |
|
10066 if (bundle_at) { |
|
10067 tsns_sent = data_list[0]->rec.data.TSN_seq; |
|
10068 } |
|
10069 for (i = 0; i < bundle_at; i++) { |
|
10070 SCTP_STAT_INCR(sctps_sendretransdata); |
|
10071 data_list[i]->sent = SCTP_DATAGRAM_SENT; |
|
10072 /* |
|
10073 * When we have a revoked data, and we |
|
10074 * retransmit it, then we clear the revoked |
|
10075 * flag since this flag dictates if we |
|
10076 * subtracted from the fs |
|
10077 */ |
|
10078 if (data_list[i]->rec.data.chunk_was_revoked) { |
|
10079 /* Deflate the cwnd */ |
|
10080 data_list[i]->whoTo->cwnd -= data_list[i]->book_size; |
|
10081 data_list[i]->rec.data.chunk_was_revoked = 0; |
|
10082 } |
|
10083 data_list[i]->snd_count++; |
|
10084 sctp_ucount_decr(asoc->sent_queue_retran_cnt); |
|
10085 /* record the time */ |
|
10086 data_list[i]->sent_rcv_time = asoc->time_last_sent; |
|
10087 if (data_list[i]->book_size_scale) { |
|
10088 /* |
|
10089 * need to double the book size on |
|
10090 * this one |
|
10091 */ |
|
10092 data_list[i]->book_size_scale = 0; |
|
10093 /* Since we double the booksize, we must |
|
10094 * also double the output queue size, since this |
|
10095 * get shrunk when we free by this amount. |
|
10096 */ |
|
10097 atomic_add_int(&((asoc)->total_output_queue_size),data_list[i]->book_size); |
|
10098 data_list[i]->book_size *= 2; |
|
10099 |
|
10100 |
|
10101 } else { |
|
10102 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { |
|
10103 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND, |
|
10104 asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); |
|
10105 } |
|
10106 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd, |
|
10107 (uint32_t) (data_list[i]->send_size + |
|
10108 SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))); |
|
10109 } |
|
10110 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
|
10111 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND, |
|
10112 data_list[i]->whoTo->flight_size, |
|
10113 data_list[i]->book_size, |
|
10114 (uintptr_t)data_list[i]->whoTo, |
|
10115 data_list[i]->rec.data.TSN_seq); |
|
10116 } |
|
10117 sctp_flight_size_increase(data_list[i]); |
|
10118 sctp_total_flight_increase(stcb, data_list[i]); |
|
10119 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { |
|
10120 /* SWS sender side engages */ |
|
10121 asoc->peers_rwnd = 0; |
|
10122 } |
|
10123 if ((i == 0) && |
|
10124 (data_list[i]->rec.data.doing_fast_retransmit)) { |
|
10125 SCTP_STAT_INCR(sctps_sendfastretrans); |
|
10126 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) && |
|
10127 (tmr_started == 0)) { |
|
10128 /*- |
|
10129 * ok we just fast-retrans'd |
|
10130 * the lowest TSN, i.e the |
|
10131 * first on the list. In |
|
10132 * this case we want to give |
|
10133 * some more time to get a |
|
10134 * SACK back without a |
|
10135 * t3-expiring. |
|
10136 */ |
|
10137 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net, |
|
10138 SCTP_FROM_SCTP_OUTPUT+SCTP_LOC_4); |
|
10139 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); |
|
10140 } |
|
10141 } |
|
10142 } |
|
10143 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { |
|
10144 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND); |
|
10145 } |
|
10146 #ifdef SCTP_AUDITING_ENABLED |
|
10147 sctp_auditing(21, inp, stcb, NULL); |
|
10148 #endif |
|
10149 } else { |
|
10150 /* None will fit */ |
|
10151 return (1); |
|
10152 } |
|
10153 if (asoc->sent_queue_retran_cnt <= 0) { |
|
10154 /* all done we have no more to retran */ |
|
10155 asoc->sent_queue_retran_cnt = 0; |
|
10156 break; |
|
10157 } |
|
10158 if (one_chunk) { |
|
10159 /* No more room in rwnd */ |
|
10160 return (1); |
|
10161 } |
|
10162 /* stop the for loop here. we sent out a packet */ |
|
10163 break; |
|
10164 } |
|
10165 return (0); |
|
10166 } |
|
10167 |
|
10168 static void |
|
10169 sctp_timer_validation(struct sctp_inpcb *inp, |
|
10170 struct sctp_tcb *stcb, |
|
10171 struct sctp_association *asoc) |
|
10172 { |
|
10173 struct sctp_nets *net; |
|
10174 |
|
10175 /* Validate that a timer is running somewhere */ |
|
10176 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { |
|
10177 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { |
|
10178 /* Here is a timer */ |
|
10179 return; |
|
10180 } |
|
10181 } |
|
10182 SCTP_TCB_LOCK_ASSERT(stcb); |
|
10183 /* Gak, we did not have a timer somewhere */ |
|
10184 SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n"); |
|
10185 if (asoc->alternate) { |
|
10186 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate); |
|
10187 } else { |
|
10188 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination); |
|
10189 } |
|
10190 return; |
|
10191 } |
|
10192 |
|
/*
 * Generic chunk service-queue routine: drive all pending output for an
 * association.  In order it (1) services pending retransmissions first,
 * (2) moves complete messages from the stream queues to the send queue
 * assigning TSNs, and (3) formulates and sends low-level packets as
 * cwnd/rwnd permit, bundling any queued control chunks.
 *
 * 'from_where' (SCTP_OUTPUT_FROM_*) identifies the caller context and
 * gates behavior: Nagle only applies to user sends, retransmission is
 * suppressed for HB-timer calls, and a T3-timeout caller is limited to a
 * single transmission.  'so_locked' tells lower layers whether the socket
 * lock is already held (asserted on __APPLE__).
 */
void
sctp_chunk_output (struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int from_where,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
    )
{
	/*-
	 * Ok this is the generic chunk service queue. we must do the
	 * following:
	 * - See if there are retransmits pending, if so we must
	 *   do these first.
	 * - Service the stream queue that is next, moving any
	 *   message (note I must get a complete message i.e.
	 *   FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
	 *   TSN's
	 * - Check to see if the cwnd/rwnd allows any output, if so we
	 *   go ahead and fomulate and send the low level chunks. Making sure
	 *   to combine any control in the control chunk queue also.
	 */
	struct sctp_association *asoc;
	struct sctp_nets *net;
	int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0;
	unsigned int burst_cnt = 0;	/* packets sent in the main output loop */
	struct timeval now;
	int now_filled = 0;		/* lazily-filled timestamp shared across calls below */
	int nagle_on;
	int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
	int un_sent = 0;		/* bytes queued but not yet in flight */
	int fr_done;
	unsigned int tot_frs = 0;	/* fast-retransmit rounds this invocation */

#if defined(__APPLE__)
	if (so_locked) {
		sctp_lock_assert(SCTP_INP_SO(inp));
	} else {
		sctp_unlock_assert(SCTP_INP_SO(inp));
	}
#endif
	asoc = &stcb->asoc;
	/* The Nagle algorithm is only applied when handling a send call. */
	if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
			nagle_on = 0;
		} else {
			nagle_on = 1;
		}
	} else {
		nagle_on = 0;
	}
	SCTP_TCB_LOCK_ASSERT(stcb);

	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);

	if ((un_sent <= 0) &&
	    (TAILQ_EMPTY(&asoc->control_send_queue)) &&
	    (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0)) {
		/* Nothing to do unless there is something to be sent left */
		return;
	}
	/* Do we have something to send, data or control AND
	 * a sack timer running, if so piggy-back the sack.
	 */
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
		sctp_send_sack(stcb, so_locked);
		(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
	}
	while (asoc->sent_queue_retran_cnt) {
		/*-
		 * Ok, it is retransmission time only, we send out only ONE
		 * packet with a single call off to the retran code.
		 */
		if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
			/*-
			 * Special hook for handling cookiess discarded
			 * by peer that carried data. Send cookie-ack only
			 * and then the next call with get the retran's.
			 */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
						    from_where,
						    &now, &now_filled, frag_point, so_locked);
			return;
		} else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
			/* if its not from a HB then do it */
			fr_done = 0;
			ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
			if (fr_done) {
				tot_frs++;
			}
		} else {
			/*
			 * its from any other place, we don't allow retran
			 * output (only control)
			 */
			ret = 1;
		}
		if (ret > 0) {
			/* Can't send anymore */
			/*-
			 * now lets push out control by calling med-level
			 * output once. this assures that we WILL send HB's
			 * if queued too.
			 */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
						    from_where,
						    &now, &now_filled, frag_point, so_locked);
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(8, inp, stcb, NULL);
#endif
			/* make sure some T3-rxt timer stays armed before leaving */
			sctp_timer_validation(inp, stcb, asoc);
			return;
		}
		if (ret < 0) {
			/*-
			 * The count was off.. retran is not happening so do
			 * the normal retransmission.
			 */
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(9, inp, stcb, NULL);
#endif
			if (ret == SCTP_RETRAN_EXIT) {
				return;
			}
			break;
		}
		if (from_where == SCTP_OUTPUT_FROM_T3) {
			/* Only one transmission allowed out of a timeout */
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(10, inp, stcb, NULL);
#endif
			/* Push out any control */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
						    &now, &now_filled, frag_point, so_locked);
			return;
		}
		if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
			/* Hit FR burst limit */
			return;
		}
		if ((num_out == 0) && (ret == 0)) {
			/* No more retrans to send */
			break;
		}
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_auditing(12, inp, stcb, NULL);
#endif
	/* Check for bad destinations, if they exist move chunks around. */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
			/*-
			 * if possible move things off of this address we
			 * still may send below due to the dormant state but
			 * we try to find an alternate address to send to
			 * and if we have one we move all queued data on the
			 * out wheel to this alternate address.
			 */
			if (net->ref_count > 1)
				sctp_move_chunks_from_net(stcb, net);
		} else {
			/*-
			 * if ((asoc->sat_network) || (net->addr_is_local))
			 * { burst_limit = asoc->max_burst *
			 * SCTP_SAT_NETWORK_BURST_INCR; }
			 */
			if (asoc->max_burst > 0) {
				if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
					if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
						/* JRS - Use the congestion control given in the congestion control module */
						asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
							sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
						}
						SCTP_STAT_INCR(sctps_maxburstqueued);
					}
					net->fast_retran_ip = 0;
				} else {
					if (net->flight_size == 0) {
						/* Should be decaying the cwnd here */
						;
					}
				}
			}
		}

	}
	burst_cnt = 0;
	do {
		/* one med-level call per packet burst iteration */
		error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
					      &reason_code, 0, from_where,
					      &now, &now_filled, frag_point, so_locked);
		if (error) {
			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
				sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
			}
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
				sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
			}
			break;
		}
		SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);

		tot_out += num_out;
		burst_cnt++;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
			sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
			if (num_out == 0) {
				sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
			}
		}
		if (nagle_on) {
			/*
			 * When the Nagle algorithm is used, look at how much
			 * is unsent, then if its smaller than an MTU and we
			 * have data in flight we stop, except if we are
			 * handling a fragmented user message.
			 */
			un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
				   (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
			if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
			    (stcb->asoc.total_flight > 0) &&
			    ((stcb->asoc.locked_on_sending == NULL) ||
			     sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
				break;
			}
		}
		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
		    TAILQ_EMPTY(&asoc->send_queue) &&
		    stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
			/* Nothing left to send */
			break;
		}
		if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
			/* Nothing left to send */
			break;
		}
	} while (num_out &&
		 ((asoc->max_burst == 0) ||
		  SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
		  (burst_cnt < asoc->max_burst)));

	if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
		if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
			/* packet-count max-burst tripped; remember for stats/next call */
			SCTP_STAT_INCR(sctps_maxburstqueued);
			asoc->burst_limit_applied = 1;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
				sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
			}
		} else {
			asoc->burst_limit_applied = 0;
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
		sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
		tot_out);

	/*-
	 * Now we need to clean up the control chunk chain if a ECNE is on
	 * it. It must be marked as UNSENT again so next call will continue
	 * to send it until such time that we get a CWR, to remove it.
	 */
	if (stcb->asoc.ecn_echo_cnt_onq)
		sctp_fix_ecn_echo(asoc);
	return;
}
|
10466 |
|
10467 |
|
/*
 * Public entry point for sending on an SCTP endpoint from the protocol
 * switch.  Validates the inpcb and its socket, then hands the mbuf chain
 * (data 'm' plus optional 'control') straight to sctp_sosend() on the
 * endpoint's socket.
 *
 * Returns 0 on success or an errno value (EINVAL for a NULL inpcb or a
 * detached socket; otherwise whatever sctp_sosend() returns).  The
 * platform #ifdef ladder only selects the native packet-handle and
 * thread/process types; the logic is identical on all platforms.
 */
int
sctp_output(
	struct sctp_inpcb *inp,
#if defined(__Panda__)
	pakhandle_type m,
#else
	struct mbuf *m,
#endif
	struct sockaddr *addr,
#if defined(__Panda__)
	pakhandle_type control,
#else
	struct mbuf *control,
#endif
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
	struct thread *p,
#elif defined(__Windows__)
	PKTHREAD p,
#else
#if defined(__APPLE__)
	struct proc *p SCTP_UNUSED,
#else
	struct proc *p,
#endif
#endif
	int flags)
{
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		return (EINVAL);
	}

	if (inp->sctp_socket == NULL) {
		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
		return (EINVAL);
	}
	/* No uio: the payload travels as the pre-built mbuf chain 'm'. */
	return (sctp_sosend(inp->sctp_socket,
			    addr,
			    (struct uio *)NULL,
			    m,
			    control,
#if defined(__APPLE__) || defined(__Panda__)
			    flags
#else
			    flags, p
#endif
			    ));
}
|
10516 |
|
/*
 * Queue (or refresh) a FORWARD-TSN chunk (RFC 3758) on the control send
 * queue, telling the peer to advance its cumulative TSN past abandoned
 * DATA chunks.  If a FORWARD-TSN is already queued it is recycled and
 * marked unsent; otherwise a new chunk + mbuf are allocated.  The body
 * is then (re)filled with the new cumulative TSN and one stream/sequence
 * pair per skipped ordered chunk, trimmed to fit a single MTU.
 */
void
send_forward_tsn(struct sctp_tcb *stcb,
		 struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;
	struct sctp_forward_tsn_chunk *fwdtsn;
	uint32_t advance_peer_ack_point;

	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
			/* mark it to unsent */
			chk->sent = SCTP_DATAGRAM_UNSENT;
			chk->snd_count = 0;
			/* Do we correct its output location? */
			if (chk->whoTo) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = NULL;
			}
			goto sctp_fill_in_rest;
		}
	}
	/* Ok if we reach here we must build one */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* out of chunk resources; silently give up, caller retries later */
		return;
	}
	asoc->fwd_tsn_cnt++;
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = asoc;
	chk->whoTo = NULL;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return;
	}
	/* leave room in front for the IP/SCTP headers */
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
	asoc->ctrl_queue_cnt++;
sctp_fill_in_rest:
	/*-
	 * Here we go through and fill out the part that deals with
	 * stream/seq of the ones we skip.
	 */
	SCTP_BUF_LEN(chk->data) = 0;
	{
		struct sctp_tmit_chunk *at, *tp1, *last;
		struct sctp_strseq *strseq;
		unsigned int cnt_of_space, i, ovh;
		unsigned int space_needed;
		unsigned int cnt_of_skipped = 0;

		/*
		 * Count the leading run of sent-queue chunks being skipped;
		 * unordered chunks are not reported in the strseq list.
		 */
		TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
			if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
			    (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
				/* no more to look at */
				break;
			}
			if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
				/* We don't report these */
				continue;
			}
			cnt_of_skipped++;
		}
		space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
				(cnt_of_skipped * sizeof(struct sctp_strseq)));

		cnt_of_space = M_TRAILINGSPACE(chk->data);

		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
			ovh = SCTP_MIN_OVERHEAD;
		} else {
			ovh = SCTP_MIN_V4_OVERHEAD;
		}
		if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
			/* trim to a mtu size */
			cnt_of_space = asoc->smallest_mtu - ovh;
		}
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
				       0xff, 0, cnt_of_skipped,
				       asoc->advanced_peer_ack_point);

		}
		advance_peer_ack_point = asoc->advanced_peer_ack_point;
		if (cnt_of_space < space_needed) {
			/*-
			 * ok we must trim down the chunk by lowering the
			 * advance peer ack point.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
					       0xff, 0xff, cnt_of_space,
					       space_needed);
			}
			/* how many strseq entries actually fit in the mbuf/MTU */
			cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
			cnt_of_skipped /= sizeof(struct sctp_strseq);
			/*-
			 * Go through and find the TSN that will be the one
			 * we report.
			 */
			at = TAILQ_FIRST(&asoc->sent_queue);
			if (at != NULL) {
				for (i = 0; i < cnt_of_skipped; i++) {
					tp1 = TAILQ_NEXT(at, sctp_next);
					if (tp1 == NULL) {
						break;
					}
					at = tp1;
				}
			}
			if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
					       0xff, cnt_of_skipped, at->rec.data.TSN_seq,
					       asoc->advanced_peer_ack_point);
			}
			last = at;
			/*-
			 * last now points to last one I can report, update
			 * peer ack point
			 */
			if (last)
				advance_peer_ack_point = last->rec.data.TSN_seq;
			space_needed = sizeof(struct sctp_forward_tsn_chunk) +
				       cnt_of_skipped * sizeof(struct sctp_strseq);
		}
		chk->send_size = space_needed;
		/* Setup the chunk */
		fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
		fwdtsn->ch.chunk_length = htons(chk->send_size);
		fwdtsn->ch.chunk_flags = 0;
		fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
		fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
		SCTP_BUF_LEN(chk->data) = chk->send_size;
		fwdtsn++;
		/*-
		 * Move pointer to after the fwdtsn and transfer to the
		 * strseq pointer.
		 */
		strseq = (struct sctp_strseq *)fwdtsn;
		/*-
		 * Now populate the strseq list. This is done blindly
		 * without pulling out duplicate stream info. This is
		 * inefficent but won't harm the process since the peer will
		 * look at these in sequence and will thus release anything.
		 * It could mean we exceed the PMTU and chop off some that
		 * we could have included.. but this is unlikely (aka 1432/4
		 * would mean 300+ stream seq's would have to be reported in
		 * one FWD-TSN. With a bit of work we can later FIX this to
		 * optimize and pull out duplcates.. but it does add more
		 * overhead. So for now... not!
		 */
		at = TAILQ_FIRST(&asoc->sent_queue);
		for (i = 0; i < cnt_of_skipped; i++) {
			tp1 = TAILQ_NEXT(at, sctp_next);
			if (tp1 == NULL)
				break;
			if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
				/* We don't report these */
				i--;
				at = tp1;
				continue;
			}
			if (at->rec.data.TSN_seq == advance_peer_ack_point) {
				at->rec.data.fwd_tsn_cnt = 0;
			}
			/*
			 * NOTE(review): ntohs() is used where htons() would
			 * be expected for wire output; the two are identical
			 * on all supported platforms, but confirm intent.
			 */
			strseq->stream = ntohs(at->rec.data.stream_number);
			strseq->sequence = ntohs(at->rec.data.stream_seq);
			strseq++;
			at = tp1;
		}
	}
	return;
}
|
10695 |
|
10696 void |
|
10697 sctp_send_sack(struct sctp_tcb *stcb, int so_locked |
|
10698 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) |
|
10699 SCTP_UNUSED |
|
10700 #endif |
|
10701 ) |
|
10702 { |
|
10703 /*- |
|
10704 * Queue up a SACK or NR-SACK in the control queue. |
|
10705 * We must first check to see if a SACK or NR-SACK is |
|
10706 * somehow on the control queue. |
|
10707 * If so, we will take and and remove the old one. |
|
10708 */ |
|
10709 struct sctp_association *asoc; |
|
10710 struct sctp_tmit_chunk *chk, *a_chk; |
|
10711 struct sctp_sack_chunk *sack; |
|
10712 struct sctp_nr_sack_chunk *nr_sack; |
|
10713 struct sctp_gap_ack_block *gap_descriptor; |
|
10714 struct sack_track *selector; |
|
10715 int mergeable = 0; |
|
10716 int offset; |
|
10717 caddr_t limit; |
|
10718 uint32_t *dup; |
|
10719 int limit_reached = 0; |
|
10720 unsigned int i, siz, j; |
|
10721 unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space; |
|
10722 int num_dups = 0; |
|
10723 int space_req; |
|
10724 uint32_t highest_tsn; |
|
10725 uint8_t flags; |
|
10726 uint8_t type; |
|
10727 uint8_t tsn_map; |
|
10728 |
|
10729 if ((stcb->asoc.sctp_nr_sack_on_off == 1) && |
|
10730 (stcb->asoc.peer_supports_nr_sack == 1)) { |
|
10731 type = SCTP_NR_SELECTIVE_ACK; |
|
10732 } else { |
|
10733 type = SCTP_SELECTIVE_ACK; |
|
10734 } |
|
10735 a_chk = NULL; |
|
10736 asoc = &stcb->asoc; |
|
10737 SCTP_TCB_LOCK_ASSERT(stcb); |
|
10738 if (asoc->last_data_chunk_from == NULL) { |
|
10739 /* Hmm we never received anything */ |
|
10740 return; |
|
10741 } |
|
10742 sctp_slide_mapping_arrays(stcb); |
|
10743 sctp_set_rwnd(stcb, asoc); |
|
10744 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { |
|
10745 if (chk->rec.chunk_id.id == type) { |
|
10746 /* Hmm, found a sack already on queue, remove it */ |
|
10747 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); |
|
10748 asoc->ctrl_queue_cnt--; |
|
10749 a_chk = chk; |
|
10750 if (a_chk->data) { |
|
10751 sctp_m_freem(a_chk->data); |
|
10752 a_chk->data = NULL; |
|
10753 } |
|
10754 if (a_chk->whoTo) { |
|
10755 sctp_free_remote_addr(a_chk->whoTo); |
|
10756 a_chk->whoTo = NULL; |
|
10757 } |
|
10758 break; |
|
10759 } |
|
10760 } |
|
10761 if (a_chk == NULL) { |
|
10762 sctp_alloc_a_chunk(stcb, a_chk); |
|
10763 if (a_chk == NULL) { |
|
10764 /* No memory so we drop the idea, and set a timer */ |
|
10765 if (stcb->asoc.delayed_ack) { |
|
10766 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, |
|
10767 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5); |
|
10768 sctp_timer_start(SCTP_TIMER_TYPE_RECV, |
|
10769 stcb->sctp_ep, stcb, NULL); |
|
10770 } else { |
|
10771 stcb->asoc.send_sack = 1; |
|
10772 } |
|
10773 return; |
|
10774 } |
|
10775 a_chk->copy_by_ref = 0; |
|
10776 a_chk->rec.chunk_id.id = type; |
|
10777 a_chk->rec.chunk_id.can_take_data = 1; |
|
10778 } |
|
10779 /* Clear our pkt counts */ |
|
10780 asoc->data_pkts_seen = 0; |
|
10781 |
|
10782 a_chk->asoc = asoc; |
|
10783 a_chk->snd_count = 0; |
|
10784 a_chk->send_size = 0; /* fill in later */ |
|
10785 a_chk->sent = SCTP_DATAGRAM_UNSENT; |
|
10786 a_chk->whoTo = NULL; |
|
10787 |
|
10788 if ((asoc->numduptsns) || |
|
10789 (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE))) { |
|
10790 /*- |
|
10791 * Ok, we have some duplicates or the destination for the |
|
10792 * sack is unreachable, lets see if we can select an |
|
10793 * alternate than asoc->last_data_chunk_from |
|
10794 */ |
|
10795 if ((asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE) && |
|
10796 (asoc->used_alt_onsack > asoc->numnets)) { |
|
10797 /* We used an alt last time, don't this time */ |
|
10798 a_chk->whoTo = NULL; |
|
10799 } else { |
|
10800 asoc->used_alt_onsack++; |
|
10801 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0); |
|
10802 } |
|
10803 if (a_chk->whoTo == NULL) { |
|
10804 /* Nope, no alternate */ |
|
10805 a_chk->whoTo = asoc->last_data_chunk_from; |
|
10806 asoc->used_alt_onsack = 0; |
|
10807 } |
|
10808 } else { |
|
10809 /* |
|
10810 * No duplicates so we use the last place we received data |
|
10811 * from. |
|
10812 */ |
|
10813 asoc->used_alt_onsack = 0; |
|
10814 a_chk->whoTo = asoc->last_data_chunk_from; |
|
10815 } |
|
10816 if (a_chk->whoTo) { |
|
10817 atomic_add_int(&a_chk->whoTo->ref_count, 1); |
|
10818 } |
|
10819 if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) { |
|
10820 highest_tsn = asoc->highest_tsn_inside_map; |
|
10821 } else { |
|
10822 highest_tsn = asoc->highest_tsn_inside_nr_map; |
|
10823 } |
|
10824 if (highest_tsn == asoc->cumulative_tsn) { |
|
10825 /* no gaps */ |
|
10826 if (type == SCTP_SELECTIVE_ACK) { |
|
10827 space_req = sizeof(struct sctp_sack_chunk); |
|
10828 } else { |
|
10829 space_req = sizeof(struct sctp_nr_sack_chunk); |
|
10830 } |
|
10831 } else { |
|
10832 /* gaps get a cluster */ |
|
10833 space_req = MCLBYTES; |
|
10834 } |
|
10835 /* Ok now lets formulate a MBUF with our sack */ |
|
10836 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA); |
|
10837 if ((a_chk->data == NULL) || |
|
10838 (a_chk->whoTo == NULL)) { |
|
10839 /* rats, no mbuf memory */ |
|
10840 if (a_chk->data) { |
|
10841 /* was a problem with the destination */ |
|
10842 sctp_m_freem(a_chk->data); |
|
10843 a_chk->data = NULL; |
|
10844 } |
|
10845 sctp_free_a_chunk(stcb, a_chk, so_locked); |
|
10846 /* sa_ignore NO_NULL_CHK */ |
|
10847 if (stcb->asoc.delayed_ack) { |
|
10848 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, |
|
10849 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6); |
|
10850 sctp_timer_start(SCTP_TIMER_TYPE_RECV, |
|
10851 stcb->sctp_ep, stcb, NULL); |
|
10852 } else { |
|
10853 stcb->asoc.send_sack = 1; |
|
10854 } |
|
10855 return; |
|
10856 } |
|
10857 /* ok, lets go through and fill it in */ |
|
10858 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD); |
|
10859 space = M_TRAILINGSPACE(a_chk->data); |
|
10860 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) { |
|
10861 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD); |
|
10862 } |
|
10863 limit = mtod(a_chk->data, caddr_t); |
|
10864 limit += space; |
|
10865 |
|
10866 flags = 0; |
|
10867 |
|
10868 if ((asoc->sctp_cmt_on_off > 0) && |
|
10869 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { |
|
10870 /*- |
|
10871 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been |
|
10872 * received, then set high bit to 1, else 0. Reset |
|
10873 * pkts_rcvd. |
|
10874 */ |
|
10875 flags |= (asoc->cmt_dac_pkts_rcvd << 6); |
|
10876 asoc->cmt_dac_pkts_rcvd = 0; |
|
10877 } |
|
10878 #ifdef SCTP_ASOCLOG_OF_TSNS |
|
10879 stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn; |
|
10880 stcb->asoc.cumack_log_atsnt++; |
|
10881 if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) { |
|
10882 stcb->asoc.cumack_log_atsnt = 0; |
|
10883 } |
|
10884 #endif |
|
10885 /* reset the readers interpretation */ |
|
10886 stcb->freed_by_sorcv_sincelast = 0; |
|
10887 |
|
10888 if (type == SCTP_SELECTIVE_ACK) { |
|
10889 sack = mtod(a_chk->data, struct sctp_sack_chunk *); |
|
10890 nr_sack = NULL; |
|
10891 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk)); |
|
10892 if (highest_tsn > asoc->mapping_array_base_tsn) { |
|
10893 siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8; |
|
10894 } else { |
|
10895 siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8; |
|
10896 } |
|
10897 } else { |
|
10898 sack = NULL; |
|
10899 nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *); |
|
10900 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk)); |
|
10901 if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) { |
|
10902 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8; |
|
10903 } else { |
|
10904 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8; |
|
10905 } |
|
10906 } |
|
10907 |
|
10908 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) { |
|
10909 offset = 1; |
|
10910 } else { |
|
10911 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn; |
|
10912 } |
|
10913 if (((type == SCTP_SELECTIVE_ACK) && |
|
10914 SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) || |
|
10915 ((type == SCTP_NR_SELECTIVE_ACK) && |
|
10916 SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) { |
|
10917 /* we have a gap .. maybe */ |
|
10918 for (i = 0; i < siz; i++) { |
|
10919 tsn_map = asoc->mapping_array[i]; |
|
10920 if (type == SCTP_SELECTIVE_ACK) { |
|
10921 tsn_map |= asoc->nr_mapping_array[i]; |
|
10922 } |
|
10923 if (i == 0) { |
|
10924 /* |
|
10925 * Clear all bits corresponding to TSNs |
|
10926 * smaller or equal to the cumulative TSN. |
|
10927 */ |
|
10928 tsn_map &= (~0 << (1 - offset)); |
|
10929 } |
|
10930 selector = &sack_array[tsn_map]; |
|
10931 if (mergeable && selector->right_edge) { |
|
10932 /* |
|
10933 * Backup, left and right edges were ok to |
|
10934 * merge. |
|
10935 */ |
|
10936 num_gap_blocks--; |
|
10937 gap_descriptor--; |
|
10938 } |
|
10939 if (selector->num_entries == 0) |
|
10940 mergeable = 0; |
|
10941 else { |
|
10942 for (j = 0; j < selector->num_entries; j++) { |
|
10943 if (mergeable && selector->right_edge) { |
|
10944 /* |
|
10945 * do a merge by NOT setting |
|
10946 * the left side |
|
10947 */ |
|
10948 mergeable = 0; |
|
10949 } else { |
|
10950 /* |
|
10951 * no merge, set the left |
|
10952 * side |
|
10953 */ |
|
10954 mergeable = 0; |
|
10955 gap_descriptor->start = htons((selector->gaps[j].start + offset)); |
|
10956 } |
|
10957 gap_descriptor->end = htons((selector->gaps[j].end + offset)); |
|
10958 num_gap_blocks++; |
|
10959 gap_descriptor++; |
|
10960 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) { |
|
10961 /* no more room */ |
|
10962 limit_reached = 1; |
|
10963 break; |
|
10964 } |
|
10965 } |
|
10966 if (selector->left_edge) { |
|
10967 mergeable = 1; |
|
10968 } |
|
10969 } |
|
10970 if (limit_reached) { |
|
10971 /* Reached the limit stop */ |
|
10972 break; |
|
10973 } |
|
10974 offset += 8; |
|
10975 } |
|
10976 } |
|
10977 if ((type == SCTP_NR_SELECTIVE_ACK) && |
|
10978 (limit_reached == 0)) { |
|
10979 |
|
10980 mergeable = 0; |
|
10981 |
|
10982 if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) { |
|
10983 siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8; |
|
10984 } else { |
|
10985 siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8; |
|
10986 } |
|
10987 |
|
10988 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) { |
|
10989 offset = 1; |
|
10990 } else { |
|
10991 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn; |
|
10992 } |
|
10993 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) { |
|
10994 /* we have a gap .. maybe */ |
|
10995 for (i = 0; i < siz; i++) { |
|
10996 tsn_map = asoc->nr_mapping_array[i]; |
|
10997 if (i == 0) { |
|
10998 /* |
|
10999 * Clear all bits corresponding to TSNs |
|
11000 * smaller or equal to the cumulative TSN. |
|
11001 */ |
|
11002 tsn_map &= (~0 << (1 - offset)); |
|
11003 } |
|
11004 selector = &sack_array[tsn_map]; |
|
11005 if (mergeable && selector->right_edge) { |
|
11006 /* |
|
11007 * Backup, left and right edges were ok to |
|
11008 * merge. |
|
11009 */ |
|
11010 num_nr_gap_blocks--; |
|
11011 gap_descriptor--; |
|
11012 } |
|
11013 if (selector->num_entries == 0) |
|
11014 mergeable = 0; |
|
11015 else { |
|
11016 for (j = 0; j < selector->num_entries; j++) { |
|
11017 if (mergeable && selector->right_edge) { |
|
11018 /* |
|
11019 * do a merge by NOT setting |
|
11020 * the left side |
|
11021 */ |
|
11022 mergeable = 0; |
|
11023 } else { |
|
11024 /* |
|
11025 * no merge, set the left |
|
11026 * side |
|
11027 */ |
|
11028 mergeable = 0; |
|
11029 gap_descriptor->start = htons((selector->gaps[j].start + offset)); |
|
11030 } |
|
11031 gap_descriptor->end = htons((selector->gaps[j].end + offset)); |
|
11032 num_nr_gap_blocks++; |
|
11033 gap_descriptor++; |
|
11034 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) { |
|
11035 /* no more room */ |
|
11036 limit_reached = 1; |
|
11037 break; |
|
11038 } |
|
11039 } |
|
11040 if (selector->left_edge) { |
|
11041 mergeable = 1; |
|
11042 } |
|
11043 } |
|
11044 if (limit_reached) { |
|
11045 /* Reached the limit stop */ |
|
11046 break; |
|
11047 } |
|
11048 offset += 8; |
|
11049 } |
|
11050 } |
|
11051 } |
|
11052 /* now we must add any dups we are going to report. */ |
|
11053 if ((limit_reached == 0) && (asoc->numduptsns)) { |
|
11054 dup = (uint32_t *) gap_descriptor; |
|
11055 for (i = 0; i < asoc->numduptsns; i++) { |
|
11056 *dup = htonl(asoc->dup_tsns[i]); |
|
11057 dup++; |
|
11058 num_dups++; |
|
11059 if (((caddr_t)dup + sizeof(uint32_t)) > limit) { |
|
11060 /* no more room */ |
|
11061 break; |
|
11062 } |
|
11063 } |
|
11064 asoc->numduptsns = 0; |
|
11065 } |
|
11066 /* |
|
11067 * now that the chunk is prepared queue it to the control chunk |
|
11068 * queue. |
|
11069 */ |
|
11070 if (type == SCTP_SELECTIVE_ACK) { |
|
11071 a_chk->send_size = sizeof(struct sctp_sack_chunk) + |
|
11072 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) + |
|
11073 num_dups * sizeof(int32_t); |
|
11074 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size; |
|
11075 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn); |
|
11076 sack->sack.a_rwnd = htonl(asoc->my_rwnd); |
|
11077 sack->sack.num_gap_ack_blks = htons(num_gap_blocks); |
|
11078 sack->sack.num_dup_tsns = htons(num_dups); |
|
11079 sack->ch.chunk_type = type; |
|
11080 sack->ch.chunk_flags = flags; |
|
11081 sack->ch.chunk_length = htons(a_chk->send_size); |
|
11082 } else { |
|
11083 a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) + |
|
11084 (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) + |
|
11085 num_dups * sizeof(int32_t); |
|
11086 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size; |
|
11087 nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn); |
|
11088 nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd); |
|
11089 nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks); |
|
11090 nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks); |
|
11091 nr_sack->nr_sack.num_dup_tsns = htons(num_dups); |
|
11092 nr_sack->nr_sack.reserved = 0; |
|
11093 nr_sack->ch.chunk_type = type; |
|
11094 nr_sack->ch.chunk_flags = flags; |
|
11095 nr_sack->ch.chunk_length = htons(a_chk->send_size); |
|
11096 } |
|
11097 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next); |
|
11098 asoc->my_last_reported_rwnd = asoc->my_rwnd; |
|
11099 asoc->ctrl_queue_cnt++; |
|
11100 asoc->send_sack = 0; |
|
11101 SCTP_STAT_INCR(sctps_sendsacks); |
|
11102 return; |
|
11103 } |
|
11104 |
|
11105 void |
|
11106 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked |
|
11107 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) |
|
11108 SCTP_UNUSED |
|
11109 #endif |
|
11110 ) |
|
11111 { |
|
11112 struct mbuf *m_abort, *m, *m_last; |
|
11113 struct mbuf *m_out, *m_end = NULL; |
|
11114 struct sctp_abort_chunk *abort; |
|
11115 struct sctp_auth_chunk *auth = NULL; |
|
11116 struct sctp_nets *net; |
|
11117 uint32_t vtag; |
|
11118 uint32_t auth_offset = 0; |
|
11119 uint16_t cause_len, chunk_len, padding_len; |
|
11120 |
|
11121 #if defined(__APPLE__) |
|
11122 if (so_locked) { |
|
11123 sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep)); |
|
11124 } else { |
|
11125 sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep)); |
|
11126 } |
|
11127 #endif |
|
11128 SCTP_TCB_LOCK_ASSERT(stcb); |
|
11129 /*- |
|
11130 * Add an AUTH chunk, if chunk requires it and save the offset into |
|
11131 * the chain for AUTH |
|
11132 */ |
|
11133 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION, |
|
11134 stcb->asoc.peer_auth_chunks)) { |
|
11135 m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset, |
|
11136 stcb, SCTP_ABORT_ASSOCIATION); |
|
11137 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); |
|
11138 } else { |
|
11139 m_out = NULL; |
|
11140 } |
|
11141 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER); |
|
11142 if (m_abort == NULL) { |
|
11143 if (m_out) { |
|
11144 sctp_m_freem(m_out); |
|
11145 } |
|
11146 if (operr) { |
|
11147 sctp_m_freem(operr); |
|
11148 } |
|
11149 return; |
|
11150 } |
|
11151 /* link in any error */ |
|
11152 SCTP_BUF_NEXT(m_abort) = operr; |
|
11153 cause_len = 0; |
|
11154 m_last = NULL; |
|
11155 for (m = operr; m; m = SCTP_BUF_NEXT(m)) { |
|
11156 cause_len += (uint16_t)SCTP_BUF_LEN(m); |
|
11157 if (SCTP_BUF_NEXT(m) == NULL) { |
|
11158 m_last = m; |
|
11159 } |
|
11160 } |
|
11161 SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk); |
|
11162 chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len; |
|
11163 padding_len = SCTP_SIZE32(chunk_len) - chunk_len; |
|
11164 if (m_out == NULL) { |
|
11165 /* NO Auth chunk prepended, so reserve space in front */ |
|
11166 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD); |
|
11167 m_out = m_abort; |
|
11168 } else { |
|
11169 /* Put AUTH chunk at the front of the chain */ |
|
11170 SCTP_BUF_NEXT(m_end) = m_abort; |
|
11171 } |
|
11172 if (stcb->asoc.alternate) { |
|
11173 net = stcb->asoc.alternate; |
|
11174 } else { |
|
11175 net = stcb->asoc.primary_destination; |
|
11176 } |
|
11177 /* Fill in the ABORT chunk header. */ |
|
11178 abort = mtod(m_abort, struct sctp_abort_chunk *); |
|
11179 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION; |
|
11180 if (stcb->asoc.peer_vtag == 0) { |
|
11181 /* This happens iff the assoc is in COOKIE-WAIT state. */ |
|
11182 vtag = stcb->asoc.my_vtag; |
|
11183 abort->ch.chunk_flags = SCTP_HAD_NO_TCB; |
|
11184 } else { |
|
11185 vtag = stcb->asoc.peer_vtag; |
|
11186 abort->ch.chunk_flags = 0; |
|
11187 } |
|
11188 abort->ch.chunk_length = htons(chunk_len); |
|
11189 /* Add padding, if necessary. */ |
|
11190 if (padding_len > 0) { |
|
11191 if ((m_last == NULL) || sctp_add_pad_tombuf(m_last, padding_len)) { |
|
11192 sctp_m_freem(m_out); |
|
11193 return; |
|
11194 } |
|
11195 } |
|
11196 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net, |
|
11197 (struct sockaddr *)&net->ro._l_addr, |
|
11198 m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0, |
|
11199 stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag), |
|
11200 stcb->asoc.primary_destination->port, NULL, |
|
11201 #if defined(__FreeBSD__) |
|
11202 0, 0, |
|
11203 #endif |
|
11204 so_locked); |
|
11205 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); |
|
11206 } |
|
11207 |
|
11208 void |
|
11209 sctp_send_shutdown_complete(struct sctp_tcb *stcb, |
|
11210 struct sctp_nets *net, |
|
11211 int reflect_vtag) |
|
11212 { |
|
11213 /* formulate and SEND a SHUTDOWN-COMPLETE */ |
|
11214 struct mbuf *m_shutdown_comp; |
|
11215 struct sctp_shutdown_complete_chunk *shutdown_complete; |
|
11216 uint32_t vtag; |
|
11217 uint8_t flags; |
|
11218 |
|
11219 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER); |
|
11220 if (m_shutdown_comp == NULL) { |
|
11221 /* no mbuf's */ |
|
11222 return; |
|
11223 } |
|
11224 if (reflect_vtag) { |
|
11225 flags = SCTP_HAD_NO_TCB; |
|
11226 vtag = stcb->asoc.my_vtag; |
|
11227 } else { |
|
11228 flags = 0; |
|
11229 vtag = stcb->asoc.peer_vtag; |
|
11230 } |
|
11231 shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *); |
|
11232 shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE; |
|
11233 shutdown_complete->ch.chunk_flags = flags; |
|
11234 shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk)); |
|
11235 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk); |
|
11236 (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net, |
|
11237 (struct sockaddr *)&net->ro._l_addr, |
|
11238 m_shutdown_comp, 0, NULL, 0, 1, 0, 0, |
|
11239 stcb->sctp_ep->sctp_lport, stcb->rport, |
|
11240 htonl(vtag), |
|
11241 net->port, NULL, |
|
11242 #if defined(__FreeBSD__) |
|
11243 0, 0, |
|
11244 #endif |
|
11245 SCTP_SO_NOT_LOCKED); |
|
11246 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); |
|
11247 return; |
|
11248 } |
|
11249 |
|
/*
 * Build and send a stand-alone control response (e.g. ABORT or
 * SHUTDOWN-COMPLETE) directly from a received packet's addressing info,
 * without an association.  'src'/'dst' and 'sh' describe the RECEIVED
 * packet, so source and destination are swapped when the reply headers
 * are constructed below.  'cause' is an optional mbuf chain of error
 * causes; ownership is taken (linked into the packet or freed).  'port'
 * non-zero requests UDP encapsulation (RFC 6951).
 */
#if defined(__FreeBSD__)
static void
sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
                   struct sctphdr *sh, uint32_t vtag,
                   uint8_t type, struct mbuf *cause,
                   uint8_t use_mflowid, uint32_t mflowid,
                   uint32_t vrf_id, uint16_t port)
#else
static void
sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
                   struct sctphdr *sh, uint32_t vtag,
                   uint8_t type, struct mbuf *cause,
                   uint32_t vrf_id SCTP_UNUSED, uint16_t port)
#endif
{
#ifdef __Panda__
    pakhandle_type o_pak;
#else
    struct mbuf *o_pak;
#endif
    struct mbuf *mout;
    struct sctphdr *shout;
    struct sctp_chunkhdr *ch;
    struct udphdr *udp;
    int len, cause_len, padding_len;
#if defined(INET) || defined(INET6)
    int ret;          /* only used by the INET/INET6 output macros */
#endif
#ifdef INET
#if defined(__APPLE__) || defined(__Panda__)
    sctp_route_t ro;
#endif
    struct sockaddr_in *src_sin, *dst_sin;
    struct ip *ip;
#endif
#ifdef INET6
    struct sockaddr_in6 *src_sin6, *dst_sin6;
    struct ip6_hdr *ip6;
#endif

    /* Compute the length of the cause and add final padding. */
    cause_len = 0;
    if (cause != NULL) {
        struct mbuf *m_at, *m_last = NULL;

        for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
            if (SCTP_BUF_NEXT(m_at) == NULL)
                m_last = m_at;
            cause_len += SCTP_BUF_LEN(m_at);
        }
        /* Pad the causes out to a 4-byte boundary. */
        padding_len = cause_len % 4;
        if (padding_len != 0) {
            padding_len = 4 - padding_len;
        }
        if (padding_len != 0) {
            if (sctp_add_pad_tombuf(m_last, padding_len)) {
                sctp_m_freem(cause);
                return;
            }
        }
    } else {
        padding_len = 0;
    }
    /* Get an mbuf for the header. */
    len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
    switch (dst->sa_family) {
#ifdef INET
    case AF_INET:
        len += sizeof(struct ip);
        break;
#endif
#ifdef INET6
    case AF_INET6:
        len += sizeof(struct ip6_hdr);
        break;
#endif
    default:
        break;
    }
    if (port) {
        /* UDP encapsulation requested: reserve room for the UDP header. */
        len += sizeof(struct udphdr);
    }
#if defined(__APPLE__)
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
    mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
#else
    mout = sctp_get_mbuf_for_msg(len + SCTP_MAX_LINKHDR, 1, M_NOWAIT, 1, MT_DATA);
#endif
#else
    mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
#endif
    if (mout == NULL) {
        if (cause) {
            sctp_m_freem(cause);
        }
        return;
    }
#if defined(__APPLE__)
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
    SCTP_BUF_RESV_UF(mout, max_linkhdr);
#else
    SCTP_BUF_RESV_UF(mout, SCTP_MAX_LINKHDR);
#endif
#else
    SCTP_BUF_RESV_UF(mout, max_linkhdr);
#endif
    SCTP_BUF_LEN(mout) = len;
    SCTP_BUF_NEXT(mout) = cause;
#if defined(__FreeBSD__)
    /* Propagate the received packet's flow id so the reply stays on
     * the same hardware queue/path. */
    if (use_mflowid != 0) {
        mout->m_pkthdr.flowid = mflowid;
        mout->m_flags |= M_FLOWID;
    }
#endif
#ifdef INET
    ip = NULL;
#endif
#ifdef INET6
    ip6 = NULL;
#endif
    switch (dst->sa_family) {
#ifdef INET
    case AF_INET:
        src_sin = (struct sockaddr_in *)src;
        dst_sin = (struct sockaddr_in *)dst;
        ip = mtod(mout, struct ip *);
        ip->ip_v = IPVERSION;
        ip->ip_hl = (sizeof(struct ip) >> 2);
        ip->ip_tos = 0;
#if defined(__FreeBSD__)
        ip->ip_id = ip_newid();
#elif defined(__APPLE__)
#if RANDOM_IP_ID
        ip->ip_id = ip_randomid();
#else
        ip->ip_id = htons(ip_id++);
#endif
#else
        ip->ip_id = htons(ip_id++);
#endif
        ip->ip_off = 0;
        ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
        if (port) {
            ip->ip_p = IPPROTO_UDP;
        } else {
            ip->ip_p = IPPROTO_SCTP;
        }
        /* Reply goes back: our source is the received packet's
         * destination and vice versa. */
        ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
        ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
        ip->ip_sum = 0;
        len = sizeof(struct ip);
        shout = (struct sctphdr *)((caddr_t)ip + len);
        break;
#endif
#ifdef INET6
    case AF_INET6:
        src_sin6 = (struct sockaddr_in6 *)src;
        dst_sin6 = (struct sockaddr_in6 *)dst;
        ip6 = mtod(mout, struct ip6_hdr *);
        ip6->ip6_flow = htonl(0x60000000);
#if defined(__FreeBSD__)
        if (V_ip6_auto_flowlabel) {
            ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
        }
#endif
#if defined(__Userspace__)
        ip6->ip6_hlim = IPv6_HOP_LIMIT;
#else
        ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
#endif
        if (port) {
            ip6->ip6_nxt = IPPROTO_UDP;
        } else {
            ip6->ip6_nxt = IPPROTO_SCTP;
        }
        /* Swapped for the same reason as the INET case above. */
        ip6->ip6_src = dst_sin6->sin6_addr;
        ip6->ip6_dst = src_sin6->sin6_addr;
        len = sizeof(struct ip6_hdr);
        shout = (struct sctphdr *)((caddr_t)ip6 + len);
        break;
#endif
    default:
        /* No network header (e.g. AF_CONN): SCTP header starts the mbuf. */
        len = 0;
        shout = mtod(mout, struct sctphdr *);
        break;
    }
    if (port) {
        /* NOTE: htons() of the sysctl value is a no-op for this
         * zero-check; the value is compared against 0 either way. */
        if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
            sctp_m_freem(mout);
            return;
        }
        udp = (struct udphdr *)shout;
        udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
        udp->uh_dport = port;
        udp->uh_sum = 0;
        udp->uh_ulen = htons(sizeof(struct udphdr) +
                             sizeof(struct sctphdr) +
                             sizeof(struct sctp_chunkhdr) +
                             cause_len + padding_len);
        len += sizeof(struct udphdr);
        shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
    } else {
        udp = NULL;
    }
    /* SCTP common header: ports swapped relative to the received packet. */
    shout->src_port = sh->dest_port;
    shout->dest_port = sh->src_port;
    shout->checksum = 0;
    if (vtag) {
        shout->v_tag = htonl(vtag);
    } else {
        /* No tag supplied: reflect the received one. */
        shout->v_tag = sh->v_tag;
    }
    len += sizeof(struct sctphdr);
    ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
    ch->chunk_type = type;
    if (vtag) {
        ch->chunk_flags = 0;
    } else {
        /* Reflected tag implies we had no TCB: set the T-bit. */
        ch->chunk_flags = SCTP_HAD_NO_TCB;
    }
    ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len);
    len += sizeof(struct sctp_chunkhdr);
    len += cause_len + padding_len;

    if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
        sctp_m_freem(mout);
        return;
    }
    SCTP_ATTACH_CHAIN(o_pak, mout, len);
    switch (dst->sa_family) {
#ifdef INET
    case AF_INET:
#if defined(__APPLE__) || defined(__Panda__)
        /* zap the stack pointer to the route */
        bzero(&ro, sizeof(sctp_route_t));
#if defined(__Panda__)
        ro._l_addr.sa.sa_family = AF_INET;
#endif
#endif
        if (port) {
#if !defined(__Windows__) && !defined(__Userspace__)
#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
            if (V_udp_cksum) {
                udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
            } else {
                udp->uh_sum = 0;
            }
#else
            udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
#endif
#else
            udp->uh_sum = 0;
#endif
        }
#if defined(__FreeBSD__)
#if __FreeBSD_version >= 1000000
        ip->ip_len = htons(len);
#else
        ip->ip_len = len;
#endif
#elif defined(__APPLE__) || defined(__Userspace__)
        ip->ip_len = len;
#else
        ip->ip_len = htons(len);
#endif
        if (port) {
#if defined(SCTP_WITH_NO_CSUM)
            SCTP_STAT_INCR(sctps_sendnocrc);
#else
            /* With UDP encapsulation the CRC32c must be computed in
             * software over the inner SCTP packet. */
            shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
            SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
            if (V_udp_cksum) {
                SCTP_ENABLE_UDP_CSUM(o_pak);
            }
#else
            SCTP_ENABLE_UDP_CSUM(o_pak);
#endif
        } else {
#if defined(SCTP_WITH_NO_CSUM)
            SCTP_STAT_INCR(sctps_sendnocrc);
#else
#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
            /* Offload the CRC32c to the NIC where supported. */
            mout->m_pkthdr.csum_flags = CSUM_SCTP;
            mout->m_pkthdr.csum_data = 0;
            SCTP_STAT_INCR(sctps_sendhwcrc);
#else
            shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip));
            SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#endif
        }
#ifdef SCTP_PACKET_LOGGING
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
            sctp_packet_log(o_pak);
        }
#endif
#if defined(__APPLE__) || defined(__Panda__)
        SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
        /* Free the route if we got one back */
        if (ro.ro_rt) {
            RTFREE(ro.ro_rt);
            ro.ro_rt = NULL;
        }
#else
        SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
#endif
        break;
#endif
#ifdef INET6
    case AF_INET6:
        ip6->ip6_plen = len - sizeof(struct ip6_hdr);
        if (port) {
#if defined(SCTP_WITH_NO_CSUM)
            SCTP_STAT_INCR(sctps_sendnocrc);
#else
            shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
            SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#if defined(__Windows__)
            udp->uh_sum = 0;
#elif !defined(__Userspace__)
            /* UDP checksum is mandatory over IPv6; 0 means "no checksum"
             * so a computed 0 is replaced by 0xffff. */
            if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
                udp->uh_sum = 0xffff;
            }
#endif
        } else {
#if defined(SCTP_WITH_NO_CSUM)
            SCTP_STAT_INCR(sctps_sendnocrc);
#else
#if defined(__FreeBSD__) && __FreeBSD_version >= 900000
#if __FreeBSD_version > 901000
            mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
#else
            mout->m_pkthdr.csum_flags = CSUM_SCTP;
#endif
            mout->m_pkthdr.csum_data = 0;
            SCTP_STAT_INCR(sctps_sendhwcrc);
#else
            shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr));
            SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#endif
        }
#ifdef SCTP_PACKET_LOGGING
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
            sctp_packet_log(o_pak);
        }
#endif
        SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
        break;
#endif
#if defined(__Userspace__)
    case AF_CONN:
    {
        char *buffer;
        struct sockaddr_conn *sconn;

        sconn = (struct sockaddr_conn *)src;
#if defined(SCTP_WITH_NO_CSUM)
        SCTP_STAT_INCR(sctps_sendnocrc);
#else
        shout->checksum = sctp_calculate_cksum(mout, 0);
        SCTP_STAT_INCR(sctps_sendswcrc);
#endif
#ifdef SCTP_PACKET_LOGGING
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
            sctp_packet_log(mout);
        }
#endif
        /* Don't alloc/free for each packet */
        if ((buffer = malloc(len)) != NULL) {
            m_copydata(mout, 0, len, buffer);
            SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, len, 0, 0);
            free(buffer);
        }
        sctp_m_freem(mout);
        break;
    }
#endif
    default:
        SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
                dst->sa_family);
        sctp_m_freem(mout);
        SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
        return;
    }
    SCTP_STAT_INCR(sctps_sendpackets);
    SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
    SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
    return;
}
|
11643 |
|
/*
 * Send a SHUTDOWN-COMPLETE in response to a received packet for which
 * no TCB exists (out-of-the-blue case).  vtag 0 tells
 * sctp_send_resp_msg() to reflect the received packet's verification
 * tag and to set the T-bit on the chunk.
 */
void
sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
                             struct sctphdr *sh,
#if defined(__FreeBSD__)
                             uint8_t use_mflowid, uint32_t mflowid,
#endif
                             uint32_t vrf_id, uint16_t port)
{
    sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
#if defined(__FreeBSD__)
                       use_mflowid, mflowid,
#endif
                       vrf_id, port);
}
|
11658 |
|
/*
 * Build a HEARTBEAT request for the given destination and queue it on
 * the association's control chunk queue (it is sent later by the
 * regular output path).  The heartbeat carries the destination address
 * and the current time so the RTT can be measured from the echoed
 * HEARTBEAT-ACK.
 */
void
sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
    struct sctp_tmit_chunk *chk;
    struct sctp_heartbeat_chunk *hb;
    struct timeval now;

    SCTP_TCB_LOCK_ASSERT(stcb);
    if (net == NULL) {
        return;
    }
    (void)SCTP_GETTIME_TIMEVAL(&now);
    /* Only address families we know how to embed in the HB parameter. */
    switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
    case AF_INET:
        break;
#endif
#ifdef INET6
    case AF_INET6:
        break;
#endif
#if defined(__Userspace__)
    case AF_CONN:
        break;
#endif
    default:
        return;
    }
    sctp_alloc_a_chunk(stcb, chk);
    if (chk == NULL) {
        SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
        return;
    }

    chk->copy_by_ref = 0;
    chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
    chk->rec.chunk_id.can_take_data = 1;
    chk->asoc = &stcb->asoc;
    chk->send_size = sizeof(struct sctp_heartbeat_chunk);

    chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
    if (chk->data == NULL) {
        sctp_free_a_chunk(stcb, chk, so_locked);
        return;
    }
    SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
    SCTP_BUF_LEN(chk->data) = chk->send_size;
    chk->sent = SCTP_DATAGRAM_UNSENT;
    chk->snd_count = 0;
    chk->whoTo = net;
    atomic_add_int(&chk->whoTo->ref_count, 1);
    /* Now we have a mbuf that we can fill in with the details */
    hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
    memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
    /* fill out chunk header */
    hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
    hb->ch.chunk_flags = 0;
    hb->ch.chunk_length = htons(chk->send_size);
    /* Fill out hb parameter */
    hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
    hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
    /* Timestamp for RTT measurement; echoed back in the HEARTBEAT-ACK. */
    hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
    hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
    /* Did our user request this one, put it in */
    hb->heartbeat.hb_info.addr_family = net->ro._l_addr.sa.sa_family;
#ifdef HAVE_SA_LEN
    hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
#else
    /* Platforms without sa_len: derive the length from the family. */
    switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
    case AF_INET:
        hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in);
        break;
#endif
#ifdef INET6
    case AF_INET6:
        hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in6);
        break;
#endif
#if defined(__Userspace__)
    case AF_CONN:
        hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_conn);
        break;
#endif
    default:
        hb->heartbeat.hb_info.addr_len = 0;
        break;
    }
#endif
    if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
        /*
         * we only take from the entropy pool if the address is not
         * confirmed.
         */
        net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
        net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
    } else {
        net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
        net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
    }
    switch (net->ro._l_addr.sa.sa_family) {
#ifdef INET
    case AF_INET:
        memcpy(hb->heartbeat.hb_info.address,
               &net->ro._l_addr.sin.sin_addr,
               sizeof(net->ro._l_addr.sin.sin_addr));
        break;
#endif
#ifdef INET6
    case AF_INET6:
        memcpy(hb->heartbeat.hb_info.address,
               &net->ro._l_addr.sin6.sin6_addr,
               sizeof(net->ro._l_addr.sin6.sin6_addr));
        break;
#endif
#if defined(__Userspace__)
    case AF_CONN:
        memcpy(hb->heartbeat.hb_info.address,
               &net->ro._l_addr.sconn.sconn_addr,
               sizeof(net->ro._l_addr.sconn.sconn_addr));
        break;
#endif
    default:
        /*
         * NOTE(review): unreachable in practice -- unsupported
         * families were filtered by the first switch above.  If it
         * were reached, returning here would leak 'chk', its mbuf,
         * and the net ref_count taken above; confirm before relying
         * on this path.
         */
        return;
        break;
    }
    net->hb_responded = 0;
    TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
    stcb->asoc.ctrl_queue_cnt++;
    SCTP_STAT_INCR(sctps_sendheartbeat);
    return;
}
|
11795 |
|
11796 void |
|
11797 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net, |
|
11798 uint32_t high_tsn) |
|
11799 { |
|
11800 struct sctp_association *asoc; |
|
11801 struct sctp_ecne_chunk *ecne; |
|
11802 struct sctp_tmit_chunk *chk; |
|
11803 |
|
11804 if (net == NULL) { |
|
11805 return; |
|
11806 } |
|
11807 asoc = &stcb->asoc; |
|
11808 SCTP_TCB_LOCK_ASSERT(stcb); |
|
11809 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { |
|
11810 if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) { |
|
11811 /* found a previous ECN_ECHO update it if needed */ |
|
11812 uint32_t cnt, ctsn; |
|
11813 ecne = mtod(chk->data, struct sctp_ecne_chunk *); |
|
11814 ctsn = ntohl(ecne->tsn); |
|
11815 if (SCTP_TSN_GT(high_tsn, ctsn)) { |
|
11816 ecne->tsn = htonl(high_tsn); |
|
11817 SCTP_STAT_INCR(sctps_queue_upd_ecne); |
|
11818 } |
|
11819 cnt = ntohl(ecne->num_pkts_since_cwr); |
|
11820 cnt++; |
|
11821 ecne->num_pkts_since_cwr = htonl(cnt); |
|
11822 return; |
|
11823 } |
|
11824 } |
|
11825 /* nope could not find one to update so we must build one */ |
|
11826 sctp_alloc_a_chunk(stcb, chk); |
|
11827 if (chk == NULL) { |
|
11828 return; |
|
11829 } |
|
11830 chk->copy_by_ref = 0; |
|
11831 SCTP_STAT_INCR(sctps_queue_upd_ecne); |
|
11832 chk->rec.chunk_id.id = SCTP_ECN_ECHO; |
|
11833 chk->rec.chunk_id.can_take_data = 0; |
|
11834 chk->asoc = &stcb->asoc; |
|
11835 chk->send_size = sizeof(struct sctp_ecne_chunk); |
|
11836 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER); |
|
11837 if (chk->data == NULL) { |
|
11838 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
|
11839 return; |
|
11840 } |
|
11841 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); |
|
11842 SCTP_BUF_LEN(chk->data) = chk->send_size; |
|
11843 chk->sent = SCTP_DATAGRAM_UNSENT; |
|
11844 chk->snd_count = 0; |
|
11845 chk->whoTo = net; |
|
11846 atomic_add_int(&chk->whoTo->ref_count, 1); |
|
11847 |
|
11848 stcb->asoc.ecn_echo_cnt_onq++; |
|
11849 ecne = mtod(chk->data, struct sctp_ecne_chunk *); |
|
11850 ecne->ch.chunk_type = SCTP_ECN_ECHO; |
|
11851 ecne->ch.chunk_flags = 0; |
|
11852 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk)); |
|
11853 ecne->tsn = htonl(high_tsn); |
|
11854 ecne->num_pkts_since_cwr = htonl(1); |
|
11855 TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next); |
|
11856 asoc->ctrl_queue_cnt++; |
|
11857 } |
|
11858 |
|
11859 void |
|
11860 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net, |
|
11861 struct mbuf *m, int len, int iphlen, int bad_crc) |
|
11862 { |
|
11863 struct sctp_association *asoc; |
|
11864 struct sctp_pktdrop_chunk *drp; |
|
11865 struct sctp_tmit_chunk *chk; |
|
11866 uint8_t *datap; |
|
11867 int was_trunc = 0; |
|
11868 int fullsz = 0; |
|
11869 long spc; |
|
11870 int offset; |
|
11871 struct sctp_chunkhdr *ch, chunk_buf; |
|
11872 unsigned int chk_length; |
|
11873 |
|
11874 if (!stcb) { |
|
11875 return; |
|
11876 } |
|
11877 asoc = &stcb->asoc; |
|
11878 SCTP_TCB_LOCK_ASSERT(stcb); |
|
11879 if (asoc->peer_supports_pktdrop == 0) { |
|
11880 /*- |
|
11881 * peer must declare support before I send one. |
|
11882 */ |
|
11883 return; |
|
11884 } |
|
11885 if (stcb->sctp_socket == NULL) { |
|
11886 return; |
|
11887 } |
|
11888 sctp_alloc_a_chunk(stcb, chk); |
|
11889 if (chk == NULL) { |
|
11890 return; |
|
11891 } |
|
11892 chk->copy_by_ref = 0; |
|
11893 len -= iphlen; |
|
11894 chk->send_size = len; |
|
11895 /* Validate that we do not have an ABORT in here. */ |
|
11896 offset = iphlen + sizeof(struct sctphdr); |
|
11897 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, |
|
11898 sizeof(*ch), (uint8_t *) & chunk_buf); |
|
11899 while (ch != NULL) { |
|
11900 chk_length = ntohs(ch->chunk_length); |
|
11901 if (chk_length < sizeof(*ch)) { |
|
11902 /* break to abort land */ |
|
11903 break; |
|
11904 } |
|
11905 switch (ch->chunk_type) { |
|
11906 case SCTP_PACKET_DROPPED: |
|
11907 case SCTP_ABORT_ASSOCIATION: |
|
11908 case SCTP_INITIATION_ACK: |
|
11909 /** |
|
11910 * We don't respond with an PKT-DROP to an ABORT |
|
11911 * or PKT-DROP. We also do not respond to an |
|
11912 * INIT-ACK, because we can't know if the initiation |
|
11913 * tag is correct or not. |
|
11914 */ |
|
11915 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
|
11916 return; |
|
11917 default: |
|
11918 break; |
|
11919 } |
|
11920 offset += SCTP_SIZE32(chk_length); |
|
11921 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, |
|
11922 sizeof(*ch), (uint8_t *) & chunk_buf); |
|
11923 } |
|
11924 |
|
11925 if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) > |
|
11926 min(stcb->asoc.smallest_mtu, MCLBYTES)) { |
|
11927 /* only send 1 mtu worth, trim off the |
|
11928 * excess on the end. |
|
11929 */ |
|
11930 fullsz = len; |
|
11931 len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD; |
|
11932 was_trunc = 1; |
|
11933 } |
|
11934 chk->asoc = &stcb->asoc; |
|
11935 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); |
|
11936 if (chk->data == NULL) { |
|
11937 jump_out: |
|
11938 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
|
11939 return; |
|
11940 } |
|
11941 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); |
|
11942 drp = mtod(chk->data, struct sctp_pktdrop_chunk *); |
|
11943 if (drp == NULL) { |
|
11944 sctp_m_freem(chk->data); |
|
11945 chk->data = NULL; |
|
11946 goto jump_out; |
|
11947 } |
|
11948 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) + |
|
11949 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD)); |
|
11950 chk->book_size_scale = 0; |
|
11951 if (was_trunc) { |
|
11952 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED; |
|
11953 drp->trunc_len = htons(fullsz); |
|
11954 /* Len is already adjusted to size minus overhead above |
|
11955 * take out the pkt_drop chunk itself from it. |
|
11956 */ |
|
11957 chk->send_size = len - sizeof(struct sctp_pktdrop_chunk); |
|
11958 len = chk->send_size; |
|
11959 } else { |
|
11960 /* no truncation needed */ |
|
11961 drp->ch.chunk_flags = 0; |
|
11962 drp->trunc_len = htons(0); |
|
11963 } |
|
11964 if (bad_crc) { |
|
11965 drp->ch.chunk_flags |= SCTP_BADCRC; |
|
11966 } |
|
11967 chk->send_size += sizeof(struct sctp_pktdrop_chunk); |
|
11968 SCTP_BUF_LEN(chk->data) = chk->send_size; |
|
11969 chk->sent = SCTP_DATAGRAM_UNSENT; |
|
11970 chk->snd_count = 0; |
|
11971 if (net) { |
|
11972 /* we should hit here */ |
|
11973 chk->whoTo = net; |
|
11974 atomic_add_int(&chk->whoTo->ref_count, 1); |
|
11975 } else { |
|
11976 chk->whoTo = NULL; |
|
11977 } |
|
11978 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED; |
|
11979 chk->rec.chunk_id.can_take_data = 1; |
|
11980 drp->ch.chunk_type = SCTP_PACKET_DROPPED; |
|
11981 drp->ch.chunk_length = htons(chk->send_size); |
|
11982 spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket); |
|
11983 if (spc < 0) { |
|
11984 spc = 0; |
|
11985 } |
|
11986 drp->bottle_bw = htonl(spc); |
|
11987 if (asoc->my_rwnd) { |
|
11988 drp->current_onq = htonl(asoc->size_on_reasm_queue + |
|
11989 asoc->size_on_all_streams + |
|
11990 asoc->my_rwnd_control_len + |
|
11991 stcb->sctp_socket->so_rcv.sb_cc); |
|
11992 } else { |
|
11993 /*- |
|
11994 * If my rwnd is 0, possibly from mbuf depletion as well as |
|
11995 * space used, tell the peer there is NO space aka onq == bw |
|
11996 */ |
|
11997 drp->current_onq = htonl(spc); |
|
11998 } |
|
11999 drp->reserved = 0; |
|
12000 datap = drp->data; |
|
12001 m_copydata(m, iphlen, len, (caddr_t)datap); |
|
12002 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); |
|
12003 asoc->ctrl_queue_cnt++; |
|
12004 } |
|
12005 |
|
12006 void |
|
12007 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override) |
|
12008 { |
|
12009 struct sctp_association *asoc; |
|
12010 struct sctp_cwr_chunk *cwr; |
|
12011 struct sctp_tmit_chunk *chk; |
|
12012 |
|
12013 SCTP_TCB_LOCK_ASSERT(stcb); |
|
12014 if (net == NULL) { |
|
12015 return; |
|
12016 } |
|
12017 asoc = &stcb->asoc; |
|
12018 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { |
|
12019 if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) { |
|
12020 /* found a previous CWR queued to same destination update it if needed */ |
|
12021 uint32_t ctsn; |
|
12022 cwr = mtod(chk->data, struct sctp_cwr_chunk *); |
|
12023 ctsn = ntohl(cwr->tsn); |
|
12024 if (SCTP_TSN_GT(high_tsn, ctsn)) { |
|
12025 cwr->tsn = htonl(high_tsn); |
|
12026 } |
|
12027 if (override & SCTP_CWR_REDUCE_OVERRIDE) { |
|
12028 /* Make sure override is carried */ |
|
12029 cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE; |
|
12030 } |
|
12031 return; |
|
12032 } |
|
12033 } |
|
12034 sctp_alloc_a_chunk(stcb, chk); |
|
12035 if (chk == NULL) { |
|
12036 return; |
|
12037 } |
|
12038 chk->copy_by_ref = 0; |
|
12039 chk->rec.chunk_id.id = SCTP_ECN_CWR; |
|
12040 chk->rec.chunk_id.can_take_data = 1; |
|
12041 chk->asoc = &stcb->asoc; |
|
12042 chk->send_size = sizeof(struct sctp_cwr_chunk); |
|
12043 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER); |
|
12044 if (chk->data == NULL) { |
|
12045 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
|
12046 return; |
|
12047 } |
|
12048 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); |
|
12049 SCTP_BUF_LEN(chk->data) = chk->send_size; |
|
12050 chk->sent = SCTP_DATAGRAM_UNSENT; |
|
12051 chk->snd_count = 0; |
|
12052 chk->whoTo = net; |
|
12053 atomic_add_int(&chk->whoTo->ref_count, 1); |
|
12054 cwr = mtod(chk->data, struct sctp_cwr_chunk *); |
|
12055 cwr->ch.chunk_type = SCTP_ECN_CWR; |
|
12056 cwr->ch.chunk_flags = override; |
|
12057 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk)); |
|
12058 cwr->tsn = htonl(high_tsn); |
|
12059 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); |
|
12060 asoc->ctrl_queue_cnt++; |
|
12061 } |
|
12062 |
|
12063 void |
|
12064 sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk, |
|
12065 int number_entries, uint16_t * list, |
|
12066 uint32_t seq, uint32_t resp_seq, uint32_t last_sent) |
|
12067 { |
|
12068 uint16_t len, old_len, i; |
|
12069 struct sctp_stream_reset_out_request *req_out; |
|
12070 struct sctp_chunkhdr *ch; |
|
12071 |
|
12072 ch = mtod(chk->data, struct sctp_chunkhdr *); |
|
12073 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); |
|
12074 |
|
12075 /* get to new offset for the param. */ |
|
12076 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len); |
|
12077 /* now how long will this param be? */ |
|
12078 len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries)); |
|
12079 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST); |
|
12080 req_out->ph.param_length = htons(len); |
|
12081 req_out->request_seq = htonl(seq); |
|
12082 req_out->response_seq = htonl(resp_seq); |
|
12083 req_out->send_reset_at_tsn = htonl(last_sent); |
|
12084 if (number_entries) { |
|
12085 for (i = 0; i < number_entries; i++) { |
|
12086 req_out->list_of_streams[i] = htons(list[i]); |
|
12087 } |
|
12088 } |
|
12089 if (SCTP_SIZE32(len) > len) { |
|
12090 /*- |
|
12091 * Need to worry about the pad we may end up adding to the |
|
12092 * end. This is easy since the struct is either aligned to 4 |
|
12093 * bytes or 2 bytes off. |
|
12094 */ |
|
12095 req_out->list_of_streams[number_entries] = 0; |
|
12096 } |
|
12097 /* now fix the chunk length */ |
|
12098 ch->chunk_length = htons(len + old_len); |
|
12099 chk->book_size = len + old_len; |
|
12100 chk->book_size_scale = 0; |
|
12101 chk->send_size = SCTP_SIZE32(chk->book_size); |
|
12102 SCTP_BUF_LEN(chk->data) = chk->send_size; |
|
12103 return; |
|
12104 } |
|
12105 |
|
12106 static void |
|
12107 sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk, |
|
12108 int number_entries, uint16_t *list, |
|
12109 uint32_t seq) |
|
12110 { |
|
12111 uint16_t len, old_len, i; |
|
12112 struct sctp_stream_reset_in_request *req_in; |
|
12113 struct sctp_chunkhdr *ch; |
|
12114 |
|
12115 ch = mtod(chk->data, struct sctp_chunkhdr *); |
|
12116 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); |
|
12117 |
|
12118 /* get to new offset for the param. */ |
|
12119 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len); |
|
12120 /* now how long will this param be? */ |
|
12121 len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries)); |
|
12122 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST); |
|
12123 req_in->ph.param_length = htons(len); |
|
12124 req_in->request_seq = htonl(seq); |
|
12125 if (number_entries) { |
|
12126 for (i = 0; i < number_entries; i++) { |
|
12127 req_in->list_of_streams[i] = htons(list[i]); |
|
12128 } |
|
12129 } |
|
12130 if (SCTP_SIZE32(len) > len) { |
|
12131 /*- |
|
12132 * Need to worry about the pad we may end up adding to the |
|
12133 * end. This is easy since the struct is either aligned to 4 |
|
12134 * bytes or 2 bytes off. |
|
12135 */ |
|
12136 req_in->list_of_streams[number_entries] = 0; |
|
12137 } |
|
12138 /* now fix the chunk length */ |
|
12139 ch->chunk_length = htons(len + old_len); |
|
12140 chk->book_size = len + old_len; |
|
12141 chk->book_size_scale = 0; |
|
12142 chk->send_size = SCTP_SIZE32(chk->book_size); |
|
12143 SCTP_BUF_LEN(chk->data) = chk->send_size; |
|
12144 return; |
|
12145 } |
|
12146 |
|
12147 static void |
|
12148 sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk, |
|
12149 uint32_t seq) |
|
12150 { |
|
12151 uint16_t len, old_len; |
|
12152 struct sctp_stream_reset_tsn_request *req_tsn; |
|
12153 struct sctp_chunkhdr *ch; |
|
12154 |
|
12155 ch = mtod(chk->data, struct sctp_chunkhdr *); |
|
12156 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); |
|
12157 |
|
12158 /* get to new offset for the param. */ |
|
12159 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len); |
|
12160 /* now how long will this param be? */ |
|
12161 len = sizeof(struct sctp_stream_reset_tsn_request); |
|
12162 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST); |
|
12163 req_tsn->ph.param_length = htons(len); |
|
12164 req_tsn->request_seq = htonl(seq); |
|
12165 |
|
12166 /* now fix the chunk length */ |
|
12167 ch->chunk_length = htons(len + old_len); |
|
12168 chk->send_size = len + old_len; |
|
12169 chk->book_size = SCTP_SIZE32(chk->send_size); |
|
12170 chk->book_size_scale = 0; |
|
12171 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); |
|
12172 return; |
|
12173 } |
|
12174 |
|
12175 void |
|
12176 sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk, |
|
12177 uint32_t resp_seq, uint32_t result) |
|
12178 { |
|
12179 uint16_t len, old_len; |
|
12180 struct sctp_stream_reset_response *resp; |
|
12181 struct sctp_chunkhdr *ch; |
|
12182 |
|
12183 ch = mtod(chk->data, struct sctp_chunkhdr *); |
|
12184 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); |
|
12185 |
|
12186 /* get to new offset for the param. */ |
|
12187 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len); |
|
12188 /* now how long will this param be? */ |
|
12189 len = sizeof(struct sctp_stream_reset_response); |
|
12190 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE); |
|
12191 resp->ph.param_length = htons(len); |
|
12192 resp->response_seq = htonl(resp_seq); |
|
12193 resp->result = ntohl(result); |
|
12194 |
|
12195 /* now fix the chunk length */ |
|
12196 ch->chunk_length = htons(len + old_len); |
|
12197 chk->book_size = len + old_len; |
|
12198 chk->book_size_scale = 0; |
|
12199 chk->send_size = SCTP_SIZE32(chk->book_size); |
|
12200 SCTP_BUF_LEN(chk->data) = chk->send_size; |
|
12201 return; |
|
12202 } |
|
12203 |
|
12204 void |
|
12205 sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk, |
|
12206 uint32_t resp_seq, uint32_t result, |
|
12207 uint32_t send_una, uint32_t recv_next) |
|
12208 { |
|
12209 uint16_t len, old_len; |
|
12210 struct sctp_stream_reset_response_tsn *resp; |
|
12211 struct sctp_chunkhdr *ch; |
|
12212 |
|
12213 ch = mtod(chk->data, struct sctp_chunkhdr *); |
|
12214 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); |
|
12215 |
|
12216 /* get to new offset for the param. */ |
|
12217 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len); |
|
12218 /* now how long will this param be? */ |
|
12219 len = sizeof(struct sctp_stream_reset_response_tsn); |
|
12220 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE); |
|
12221 resp->ph.param_length = htons(len); |
|
12222 resp->response_seq = htonl(resp_seq); |
|
12223 resp->result = htonl(result); |
|
12224 resp->senders_next_tsn = htonl(send_una); |
|
12225 resp->receivers_next_tsn = htonl(recv_next); |
|
12226 |
|
12227 /* now fix the chunk length */ |
|
12228 ch->chunk_length = htons(len + old_len); |
|
12229 chk->book_size = len + old_len; |
|
12230 chk->send_size = SCTP_SIZE32(chk->book_size); |
|
12231 chk->book_size_scale = 0; |
|
12232 SCTP_BUF_LEN(chk->data) = chk->send_size; |
|
12233 return; |
|
12234 } |
|
12235 |
|
12236 static void |
|
12237 sctp_add_an_out_stream(struct sctp_tmit_chunk *chk, |
|
12238 uint32_t seq, |
|
12239 uint16_t adding) |
|
12240 { |
|
12241 uint16_t len, old_len; |
|
12242 struct sctp_chunkhdr *ch; |
|
12243 struct sctp_stream_reset_add_strm *addstr; |
|
12244 |
|
12245 ch = mtod(chk->data, struct sctp_chunkhdr *); |
|
12246 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); |
|
12247 |
|
12248 /* get to new offset for the param. */ |
|
12249 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len); |
|
12250 /* now how long will this param be? */ |
|
12251 len = sizeof(struct sctp_stream_reset_add_strm); |
|
12252 |
|
12253 /* Fill it out. */ |
|
12254 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS); |
|
12255 addstr->ph.param_length = htons(len); |
|
12256 addstr->request_seq = htonl(seq); |
|
12257 addstr->number_of_streams = htons(adding); |
|
12258 addstr->reserved = 0; |
|
12259 |
|
12260 /* now fix the chunk length */ |
|
12261 ch->chunk_length = htons(len + old_len); |
|
12262 chk->send_size = len + old_len; |
|
12263 chk->book_size = SCTP_SIZE32(chk->send_size); |
|
12264 chk->book_size_scale = 0; |
|
12265 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); |
|
12266 return; |
|
12267 } |
|
12268 |
|
12269 static void |
|
12270 sctp_add_an_in_stream(struct sctp_tmit_chunk *chk, |
|
12271 uint32_t seq, |
|
12272 uint16_t adding) |
|
12273 { |
|
12274 uint16_t len, old_len; |
|
12275 struct sctp_chunkhdr *ch; |
|
12276 struct sctp_stream_reset_add_strm *addstr; |
|
12277 |
|
12278 ch = mtod(chk->data, struct sctp_chunkhdr *); |
|
12279 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); |
|
12280 |
|
12281 /* get to new offset for the param. */ |
|
12282 addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len); |
|
12283 /* now how long will this param be? */ |
|
12284 len = sizeof(struct sctp_stream_reset_add_strm); |
|
12285 /* Fill it out. */ |
|
12286 addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS); |
|
12287 addstr->ph.param_length = htons(len); |
|
12288 addstr->request_seq = htonl(seq); |
|
12289 addstr->number_of_streams = htons(adding); |
|
12290 addstr->reserved = 0; |
|
12291 |
|
12292 /* now fix the chunk length */ |
|
12293 ch->chunk_length = htons(len + old_len); |
|
12294 chk->send_size = len + old_len; |
|
12295 chk->book_size = SCTP_SIZE32(chk->send_size); |
|
12296 chk->book_size_scale = 0; |
|
12297 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); |
|
12298 return; |
|
12299 } |
|
12300 |
|
12301 int |
|
12302 sctp_send_str_reset_req(struct sctp_tcb *stcb, |
|
12303 int number_entries, uint16_t *list, |
|
12304 uint8_t send_out_req, |
|
12305 uint8_t send_in_req, |
|
12306 uint8_t send_tsn_req, |
|
12307 uint8_t add_stream, |
|
12308 uint16_t adding_o, |
|
12309 uint16_t adding_i, uint8_t peer_asked) |
|
12310 { |
|
12311 |
|
12312 struct sctp_association *asoc; |
|
12313 struct sctp_tmit_chunk *chk; |
|
12314 struct sctp_chunkhdr *ch; |
|
12315 uint32_t seq; |
|
12316 |
|
12317 asoc = &stcb->asoc; |
|
12318 if (asoc->stream_reset_outstanding) { |
|
12319 /*- |
|
12320 * Already one pending, must get ACK back to clear the flag. |
|
12321 */ |
|
12322 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY); |
|
12323 return (EBUSY); |
|
12324 } |
|
12325 if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) && |
|
12326 (add_stream == 0)) { |
|
12327 /* nothing to do */ |
|
12328 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
12329 return (EINVAL); |
|
12330 } |
|
12331 if (send_tsn_req && (send_out_req || send_in_req)) { |
|
12332 /* error, can't do that */ |
|
12333 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
12334 return (EINVAL); |
|
12335 } |
|
12336 sctp_alloc_a_chunk(stcb, chk); |
|
12337 if (chk == NULL) { |
|
12338 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
12339 return (ENOMEM); |
|
12340 } |
|
12341 chk->copy_by_ref = 0; |
|
12342 chk->rec.chunk_id.id = SCTP_STREAM_RESET; |
|
12343 chk->rec.chunk_id.can_take_data = 0; |
|
12344 chk->asoc = &stcb->asoc; |
|
12345 chk->book_size = sizeof(struct sctp_chunkhdr); |
|
12346 chk->send_size = SCTP_SIZE32(chk->book_size); |
|
12347 chk->book_size_scale = 0; |
|
12348 |
|
12349 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); |
|
12350 if (chk->data == NULL) { |
|
12351 sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED); |
|
12352 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
12353 return (ENOMEM); |
|
12354 } |
|
12355 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); |
|
12356 |
|
12357 /* setup chunk parameters */ |
|
12358 chk->sent = SCTP_DATAGRAM_UNSENT; |
|
12359 chk->snd_count = 0; |
|
12360 if (stcb->asoc.alternate) { |
|
12361 chk->whoTo = stcb->asoc.alternate; |
|
12362 } else { |
|
12363 chk->whoTo = stcb->asoc.primary_destination; |
|
12364 } |
|
12365 atomic_add_int(&chk->whoTo->ref_count, 1); |
|
12366 ch = mtod(chk->data, struct sctp_chunkhdr *); |
|
12367 ch->chunk_type = SCTP_STREAM_RESET; |
|
12368 ch->chunk_flags = 0; |
|
12369 ch->chunk_length = htons(chk->book_size); |
|
12370 SCTP_BUF_LEN(chk->data) = chk->send_size; |
|
12371 |
|
12372 seq = stcb->asoc.str_reset_seq_out; |
|
12373 if (send_out_req) { |
|
12374 sctp_add_stream_reset_out(chk, number_entries, list, |
|
12375 seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1)); |
|
12376 asoc->stream_reset_out_is_outstanding = 1; |
|
12377 seq++; |
|
12378 asoc->stream_reset_outstanding++; |
|
12379 } |
|
12380 if ((add_stream & 1) && |
|
12381 ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) { |
|
12382 /* Need to allocate more */ |
|
12383 struct sctp_stream_out *oldstream; |
|
12384 struct sctp_stream_queue_pending *sp, *nsp; |
|
12385 int i; |
|
12386 |
|
12387 oldstream = stcb->asoc.strmout; |
|
12388 /* get some more */ |
|
12389 SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *, |
|
12390 ((stcb->asoc.streamoutcnt+adding_o) * sizeof(struct sctp_stream_out)), |
|
12391 SCTP_M_STRMO); |
|
12392 if (stcb->asoc.strmout == NULL) { |
|
12393 uint8_t x; |
|
12394 stcb->asoc.strmout = oldstream; |
|
12395 /* Turn off the bit */ |
|
12396 x = add_stream & 0xfe; |
|
12397 add_stream = x; |
|
12398 goto skip_stuff; |
|
12399 } |
|
12400 /* Ok now we proceed with copying the old out stuff and |
|
12401 * initializing the new stuff. |
|
12402 */ |
|
12403 SCTP_TCB_SEND_LOCK(stcb); |
|
12404 stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1); |
|
12405 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { |
|
12406 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); |
|
12407 stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues; |
|
12408 stcb->asoc.strmout[i].next_sequence_send = oldstream[i].next_sequence_send; |
|
12409 stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete; |
|
12410 stcb->asoc.strmout[i].stream_no = i; |
|
12411 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], &oldstream[i]); |
|
12412 /* now anything on those queues? */ |
|
12413 TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) { |
|
12414 TAILQ_REMOVE(&oldstream[i].outqueue, sp, next); |
|
12415 TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next); |
|
12416 } |
|
12417 /* Now move assoc pointers too */ |
|
12418 if (stcb->asoc.last_out_stream == &oldstream[i]) { |
|
12419 stcb->asoc.last_out_stream = &stcb->asoc.strmout[i]; |
|
12420 } |
|
12421 if (stcb->asoc.locked_on_sending == &oldstream[i]) { |
|
12422 stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i]; |
|
12423 } |
|
12424 } |
|
12425 /* now the new streams */ |
|
12426 stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1); |
|
12427 for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) { |
|
12428 TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); |
|
12429 stcb->asoc.strmout[i].chunks_on_queues = 0; |
|
12430 stcb->asoc.strmout[i].next_sequence_send = 0x0; |
|
12431 stcb->asoc.strmout[i].stream_no = i; |
|
12432 stcb->asoc.strmout[i].last_msg_incomplete = 0; |
|
12433 stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL); |
|
12434 } |
|
12435 stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o; |
|
12436 SCTP_FREE(oldstream, SCTP_M_STRMO); |
|
12437 SCTP_TCB_SEND_UNLOCK(stcb); |
|
12438 } |
|
12439 skip_stuff: |
|
12440 if ((add_stream & 1) && (adding_o > 0)) { |
|
12441 asoc->strm_pending_add_size = adding_o; |
|
12442 asoc->peer_req_out = peer_asked; |
|
12443 sctp_add_an_out_stream(chk, seq, adding_o); |
|
12444 seq++; |
|
12445 asoc->stream_reset_outstanding++; |
|
12446 } |
|
12447 if ((add_stream & 2) && (adding_i > 0)) { |
|
12448 sctp_add_an_in_stream(chk, seq, adding_i); |
|
12449 seq++; |
|
12450 asoc->stream_reset_outstanding++; |
|
12451 } |
|
12452 if (send_in_req) { |
|
12453 sctp_add_stream_reset_in(chk, number_entries, list, seq); |
|
12454 seq++; |
|
12455 asoc->stream_reset_outstanding++; |
|
12456 } |
|
12457 if (send_tsn_req) { |
|
12458 sctp_add_stream_reset_tsn(chk, seq); |
|
12459 asoc->stream_reset_outstanding++; |
|
12460 } |
|
12461 asoc->str_reset = chk; |
|
12462 /* insert the chunk for sending */ |
|
12463 TAILQ_INSERT_TAIL(&asoc->control_send_queue, |
|
12464 chk, |
|
12465 sctp_next); |
|
12466 asoc->ctrl_queue_cnt++; |
|
12467 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo); |
|
12468 return (0); |
|
12469 } |
|
12470 |
|
12471 void |
|
12472 sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst, |
|
12473 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause, |
|
12474 #if defined(__FreeBSD__) |
|
12475 uint8_t use_mflowid, uint32_t mflowid, |
|
12476 #endif |
|
12477 uint32_t vrf_id, uint16_t port) |
|
12478 { |
|
12479 /* Don't respond to an ABORT with an ABORT. */ |
|
12480 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) { |
|
12481 if (cause) |
|
12482 sctp_m_freem(cause); |
|
12483 return; |
|
12484 } |
|
12485 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause, |
|
12486 #if defined(__FreeBSD__) |
|
12487 use_mflowid, mflowid, |
|
12488 #endif |
|
12489 vrf_id, port); |
|
12490 return; |
|
12491 } |
|
12492 |
|
12493 void |
|
12494 sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst, |
|
12495 struct sctphdr *sh, uint32_t vtag, struct mbuf *cause, |
|
12496 #if defined(__FreeBSD__) |
|
12497 uint8_t use_mflowid, uint32_t mflowid, |
|
12498 #endif |
|
12499 uint32_t vrf_id, uint16_t port) |
|
12500 { |
|
12501 sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause, |
|
12502 #if defined(__FreeBSD__) |
|
12503 use_mflowid, mflowid, |
|
12504 #endif |
|
12505 vrf_id, port); |
|
12506 return; |
|
12507 } |
|
12508 |
|
12509 static struct mbuf * |
|
12510 sctp_copy_resume(struct uio *uio, |
|
12511 int max_send_len, |
|
12512 #if defined(__FreeBSD__) && __FreeBSD_version > 602000 |
|
12513 int user_marks_eor, |
|
12514 #endif |
|
12515 int *error, |
|
12516 uint32_t *sndout, |
|
12517 struct mbuf **new_tail) |
|
12518 { |
|
12519 #if defined(__Panda__) |
|
12520 struct mbuf *m; |
|
12521 |
|
12522 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0, |
|
12523 (user_marks_eor ? M_EOR : 0)); |
|
12524 if (m == NULL) { |
|
12525 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
12526 *error = ENOMEM; |
|
12527 } else { |
|
12528 *sndout = m_length(m, NULL); |
|
12529 *new_tail = m_last(m); |
|
12530 } |
|
12531 return (m); |
|
12532 #elif defined(__FreeBSD__) && __FreeBSD_version > 602000 |
|
12533 struct mbuf *m; |
|
12534 |
|
12535 m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0, |
|
12536 (M_PKTHDR | (user_marks_eor ? M_EOR : 0))); |
|
12537 if (m == NULL) { |
|
12538 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
12539 *error = ENOMEM; |
|
12540 } else { |
|
12541 *sndout = m_length(m, NULL); |
|
12542 *new_tail = m_last(m); |
|
12543 } |
|
12544 return (m); |
|
12545 #else |
|
12546 int left, cancpy, willcpy; |
|
12547 struct mbuf *m, *head; |
|
12548 |
|
12549 #if defined(__APPLE__) |
|
12550 #if defined(APPLE_LEOPARD) |
|
12551 left = min(uio->uio_resid, max_send_len); |
|
12552 #else |
|
12553 left = min(uio_resid(uio), max_send_len); |
|
12554 #endif |
|
12555 #else |
|
12556 left = min(uio->uio_resid, max_send_len); |
|
12557 #endif |
|
12558 /* Always get a header just in case */ |
|
12559 head = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA); |
|
12560 if (head == NULL) { |
|
12561 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
12562 *error = ENOMEM; |
|
12563 return (NULL); |
|
12564 } |
|
12565 cancpy = M_TRAILINGSPACE(head); |
|
12566 willcpy = min(cancpy, left); |
|
12567 *error = uiomove(mtod(head, caddr_t), willcpy, uio); |
|
12568 if (*error) { |
|
12569 sctp_m_freem(head); |
|
12570 return (NULL); |
|
12571 } |
|
12572 *sndout += willcpy; |
|
12573 left -= willcpy; |
|
12574 SCTP_BUF_LEN(head) = willcpy; |
|
12575 m = head; |
|
12576 *new_tail = head; |
|
12577 while (left > 0) { |
|
12578 /* move in user data */ |
|
12579 SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA); |
|
12580 if (SCTP_BUF_NEXT(m) == NULL) { |
|
12581 sctp_m_freem(head); |
|
12582 *new_tail = NULL; |
|
12583 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
12584 *error = ENOMEM; |
|
12585 return (NULL); |
|
12586 } |
|
12587 m = SCTP_BUF_NEXT(m); |
|
12588 cancpy = M_TRAILINGSPACE(m); |
|
12589 willcpy = min(cancpy, left); |
|
12590 *error = uiomove(mtod(m, caddr_t), willcpy, uio); |
|
12591 if (*error) { |
|
12592 sctp_m_freem(head); |
|
12593 *new_tail = NULL; |
|
12594 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT); |
|
12595 *error = EFAULT; |
|
12596 return (NULL); |
|
12597 } |
|
12598 SCTP_BUF_LEN(m) = willcpy; |
|
12599 left -= willcpy; |
|
12600 *sndout += willcpy; |
|
12601 *new_tail = m; |
|
12602 if (left == 0) { |
|
12603 SCTP_BUF_NEXT(m) = NULL; |
|
12604 } |
|
12605 } |
|
12606 return (head); |
|
12607 #endif |
|
12608 } |
|
12609 |
|
12610 static int |
|
12611 sctp_copy_one(struct sctp_stream_queue_pending *sp, |
|
12612 struct uio *uio, |
|
12613 int resv_upfront) |
|
12614 { |
|
12615 int left; |
|
12616 #if defined(__Panda__) |
|
12617 left = sp->length; |
|
12618 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length, |
|
12619 resv_upfront, 0); |
|
12620 if (sp->data == NULL) { |
|
12621 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
12622 return (ENOMEM); |
|
12623 } |
|
12624 |
|
12625 sp->tail_mbuf = m_last(sp->data); |
|
12626 return (0); |
|
12627 |
|
12628 #elif defined(__FreeBSD__) && __FreeBSD_version > 602000 |
|
12629 left = sp->length; |
|
12630 sp->data = m_uiotombuf(uio, M_WAITOK, sp->length, |
|
12631 resv_upfront, 0); |
|
12632 if (sp->data == NULL) { |
|
12633 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
12634 return (ENOMEM); |
|
12635 } |
|
12636 |
|
12637 sp->tail_mbuf = m_last(sp->data); |
|
12638 return (0); |
|
12639 #else |
|
12640 int cancpy, willcpy, error; |
|
12641 struct mbuf *m, *head; |
|
12642 int cpsz = 0; |
|
12643 |
|
12644 /* First one gets a header */ |
|
12645 left = sp->length; |
|
12646 head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAITOK, 0, MT_DATA); |
|
12647 if (m == NULL) { |
|
12648 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
12649 return (ENOMEM); |
|
12650 } |
|
12651 /*- |
|
12652 * Add this one for m in now, that way if the alloc fails we won't |
|
12653 * have a bad cnt. |
|
12654 */ |
|
12655 SCTP_BUF_RESV_UF(m, resv_upfront); |
|
12656 cancpy = M_TRAILINGSPACE(m); |
|
12657 willcpy = min(cancpy, left); |
|
12658 while (left > 0) { |
|
12659 /* move in user data */ |
|
12660 error = uiomove(mtod(m, caddr_t), willcpy, uio); |
|
12661 if (error) { |
|
12662 sctp_m_freem(head); |
|
12663 return (error); |
|
12664 } |
|
12665 SCTP_BUF_LEN(m) = willcpy; |
|
12666 left -= willcpy; |
|
12667 cpsz += willcpy; |
|
12668 if (left > 0) { |
|
12669 SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA); |
|
12670 if (SCTP_BUF_NEXT(m) == NULL) { |
|
12671 /* |
|
12672 * the head goes back to caller, he can free |
|
12673 * the rest |
|
12674 */ |
|
12675 sctp_m_freem(head); |
|
12676 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
12677 return (ENOMEM); |
|
12678 } |
|
12679 m = SCTP_BUF_NEXT(m); |
|
12680 cancpy = M_TRAILINGSPACE(m); |
|
12681 willcpy = min(cancpy, left); |
|
12682 } else { |
|
12683 sp->tail_mbuf = m; |
|
12684 SCTP_BUF_NEXT(m) = NULL; |
|
12685 } |
|
12686 } |
|
12687 sp->data = head; |
|
12688 sp->length = cpsz; |
|
12689 return (0); |
|
12690 #endif |
|
12691 } |
|
12692 |
|
12693 |
|
12694 |
|
12695 static struct sctp_stream_queue_pending * |
|
12696 sctp_copy_it_in(struct sctp_tcb *stcb, |
|
12697 struct sctp_association *asoc, |
|
12698 struct sctp_sndrcvinfo *srcv, |
|
12699 struct uio *uio, |
|
12700 struct sctp_nets *net, |
|
12701 int max_send_len, |
|
12702 int user_marks_eor, |
|
12703 int *error) |
|
12704 |
|
12705 { |
|
12706 /*- |
|
12707 * This routine must be very careful in its work. Protocol |
|
12708 * processing is up and running so care must be taken to spl...() |
|
12709 * when you need to do something that may effect the stcb/asoc. The |
|
12710 * sb is locked however. When data is copied the protocol processing |
|
12711 * should be enabled since this is a slower operation... |
|
12712 */ |
|
12713 struct sctp_stream_queue_pending *sp = NULL; |
|
12714 int resv_in_first; |
|
12715 |
|
12716 *error = 0; |
|
12717 /* Now can we send this? */ |
|
12718 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) || |
|
12719 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) || |
|
12720 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) || |
|
12721 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) { |
|
12722 /* got data while shutting down */ |
|
12723 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); |
|
12724 *error = ECONNRESET; |
|
12725 goto out_now; |
|
12726 } |
|
12727 sctp_alloc_a_strmoq(stcb, sp); |
|
12728 if (sp == NULL) { |
|
12729 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
12730 *error = ENOMEM; |
|
12731 goto out_now; |
|
12732 } |
|
12733 sp->act_flags = 0; |
|
12734 sp->sender_all_done = 0; |
|
12735 sp->sinfo_flags = srcv->sinfo_flags; |
|
12736 sp->timetolive = srcv->sinfo_timetolive; |
|
12737 sp->ppid = srcv->sinfo_ppid; |
|
12738 sp->context = srcv->sinfo_context; |
|
12739 (void)SCTP_GETTIME_TIMEVAL(&sp->ts); |
|
12740 |
|
12741 sp->stream = srcv->sinfo_stream; |
|
12742 #if defined(__APPLE__) |
|
12743 #if defined(APPLE_LEOPARD) |
|
12744 sp->length = min(uio->uio_resid, max_send_len); |
|
12745 #else |
|
12746 sp->length = min(uio_resid(uio), max_send_len); |
|
12747 #endif |
|
12748 #else |
|
12749 sp->length = min(uio->uio_resid, max_send_len); |
|
12750 #endif |
|
12751 #if defined(__APPLE__) |
|
12752 #if defined(APPLE_LEOPARD) |
|
12753 if ((sp->length == (uint32_t)uio->uio_resid) && |
|
12754 #else |
|
12755 if ((sp->length == (uint32_t)uio_resid(uio)) && |
|
12756 #endif |
|
12757 #else |
|
12758 if ((sp->length == (uint32_t)uio->uio_resid) && |
|
12759 #endif |
|
12760 ((user_marks_eor == 0) || |
|
12761 (srcv->sinfo_flags & SCTP_EOF) || |
|
12762 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) { |
|
12763 sp->msg_is_complete = 1; |
|
12764 } else { |
|
12765 sp->msg_is_complete = 0; |
|
12766 } |
|
12767 sp->sender_all_done = 0; |
|
12768 sp->some_taken = 0; |
|
12769 sp->put_last_out = 0; |
|
12770 resv_in_first = sizeof(struct sctp_data_chunk); |
|
12771 sp->data = sp->tail_mbuf = NULL; |
|
12772 if (sp->length == 0) { |
|
12773 *error = 0; |
|
12774 goto skip_copy; |
|
12775 } |
|
12776 if (srcv->sinfo_keynumber_valid) { |
|
12777 sp->auth_keyid = srcv->sinfo_keynumber; |
|
12778 } else { |
|
12779 sp->auth_keyid = stcb->asoc.authinfo.active_keyid; |
|
12780 } |
|
12781 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) { |
|
12782 sctp_auth_key_acquire(stcb, sp->auth_keyid); |
|
12783 sp->holds_key_ref = 1; |
|
12784 } |
|
12785 #if defined(__APPLE__) |
|
12786 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(stcb->sctp_ep), 0); |
|
12787 #endif |
|
12788 *error = sctp_copy_one(sp, uio, resv_in_first); |
|
12789 #if defined(__APPLE__) |
|
12790 SCTP_SOCKET_LOCK(SCTP_INP_SO(stcb->sctp_ep), 0); |
|
12791 #endif |
|
12792 skip_copy: |
|
12793 if (*error) { |
|
12794 sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED); |
|
12795 sp = NULL; |
|
12796 } else { |
|
12797 if (sp->sinfo_flags & SCTP_ADDR_OVER) { |
|
12798 sp->net = net; |
|
12799 atomic_add_int(&sp->net->ref_count, 1); |
|
12800 } else { |
|
12801 sp->net = NULL; |
|
12802 } |
|
12803 sctp_set_prsctp_policy(sp); |
|
12804 } |
|
12805 out_now: |
|
12806 return (sp); |
|
12807 } |
|
12808 |
|
12809 |
|
/*
 * sctp_sosend() - protocol send entry point called from the socket layer.
 *
 * Pulls an SCTP_SNDRCV cmsg (if present) out of the control chain to use
 * as explicit per-send info, rewrites a v4-mapped IPv6 destination into a
 * plain sockaddr_in (when both INET and INET6 are compiled in), and then
 * forwards everything to sctp_lower_sosend(), which does the real work.
 *
 * Returns 0 on success or the errno value from sctp_lower_sosend().
 *
 * Platform notes (visible below): on __Panda__ data/control arrive as
 * pakhandles and the control mbuf chain is extracted here; on __APPLE__
 * the socket lock is taken for the duration of the call and the proc is
 * obtained via current_proc().
 */
12810 int

12811 sctp_sosend(struct socket *so,

12812 struct sockaddr *addr,

12813 struct uio *uio,

12814 #ifdef __Panda__

12815 pakhandle_type top,

12816 pakhandle_type icontrol,

12817 #else

12818 struct mbuf *top,

12819 struct mbuf *control,

12820 #endif

12821 #if defined(__APPLE__) || defined(__Panda__)

12822 int flags

12823 #else

12824 int flags,

12825 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000

12826 struct thread *p

12827 #elif defined(__Windows__)

12828 PKTHREAD p

12829 #else

12830 #if defined(__Userspace__)

12831 /*

12832 * proc is a dummy in __Userspace__ and will not be passed

12833 * to sctp_lower_sosend

12834 */

12835 #endif

12836 struct proc *p

12837 #endif

12838 #endif

12839 )

12840 {

12841 #ifdef __Panda__

12842 struct mbuf *control = NULL;

12843 #endif

12844 #if defined(__APPLE__)

12845 struct proc *p = current_proc();

12846 #endif

12847 int error, use_sndinfo = 0;

12848 struct sctp_sndrcvinfo sndrcvninfo;

12849 struct sockaddr *addr_to_use;

12850 #if defined(INET) && defined(INET6)

      /* Stack copy used when a v4-mapped IPv6 address is rewritten to IPv4. */
12851 struct sockaddr_in sin;

12852 #endif

12853 

      /* Hold the socket lock across the whole send on Apple platforms. */
12854 #if defined(__APPLE__)

12855 SCTP_SOCKET_LOCK(so, 1);

12856 #endif

12857 #ifdef __Panda__

12858 control = SCTP_HEADER_TO_CHAIN(icontrol);

12859 #endif

12860 if (control) {

12861 /* process cmsg snd/rcv info (maybe an assoc-id) */

12862 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,

12863 sizeof(sndrcvninfo))) {

12864 /* got one */

12865 use_sndinfo = 1;

12866 }

12867 }

12868 addr_to_use = addr;

12869 #if defined(INET) && defined(INET6)

      /* A v4-mapped IPv6 destination is converted to a plain IPv4 sockaddr. */
12870 if ((addr) && (addr->sa_family == AF_INET6)) {

12871 struct sockaddr_in6 *sin6;

12872 

12873 sin6 = (struct sockaddr_in6 *)addr;

12874 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {

12875 in6_sin6_2_sin(&sin, sin6);

12876 addr_to_use = (struct sockaddr *)&sin;

12877 }

12878 }

12879 #endif

      /* Hand off to the workhorse; pass the cmsg info only if one was found. */
12880 error = sctp_lower_sosend(so, addr_to_use, uio, top,

12881 #ifdef __Panda__

12882 icontrol,

12883 #else

12884 control,

12885 #endif

12886 flags,

12887 use_sndinfo ? &sndrcvninfo: NULL

12888 #if !(defined(__Panda__) || defined(__Userspace__))

12889 , p

12890 #endif

12891 );

12892 #if defined(__APPLE__)

12893 SCTP_SOCKET_UNLOCK(so, 1);

12894 #endif

12895 return (error);

12896 }
|
12897 |
|
12898 |
|
12899 int |
|
12900 sctp_lower_sosend(struct socket *so, |
|
12901 struct sockaddr *addr, |
|
12902 struct uio *uio, |
|
12903 #ifdef __Panda__ |
|
12904 pakhandle_type i_pak, |
|
12905 pakhandle_type i_control, |
|
12906 #else |
|
12907 struct mbuf *i_pak, |
|
12908 struct mbuf *control, |
|
12909 #endif |
|
12910 int flags, |
|
12911 struct sctp_sndrcvinfo *srcv |
|
12912 #if !(defined( __Panda__) || defined(__Userspace__)) |
|
12913 , |
|
12914 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000 |
|
12915 struct thread *p |
|
12916 #elif defined(__Windows__) |
|
12917 PKTHREAD p |
|
12918 #else |
|
12919 struct proc *p |
|
12920 #endif |
|
12921 #endif |
|
12922 ) |
|
12923 { |
|
12924 unsigned int sndlen = 0, max_len; |
|
12925 int error, len; |
|
12926 struct mbuf *top = NULL; |
|
12927 #ifdef __Panda__ |
|
12928 struct mbuf *control = NULL; |
|
12929 #endif |
|
12930 int queue_only = 0, queue_only_for_init = 0; |
|
12931 int free_cnt_applied = 0; |
|
12932 int un_sent; |
|
12933 int now_filled = 0; |
|
12934 unsigned int inqueue_bytes = 0; |
|
12935 struct sctp_block_entry be; |
|
12936 struct sctp_inpcb *inp; |
|
12937 struct sctp_tcb *stcb = NULL; |
|
12938 struct timeval now; |
|
12939 struct sctp_nets *net; |
|
12940 struct sctp_association *asoc; |
|
12941 struct sctp_inpcb *t_inp; |
|
12942 int user_marks_eor; |
|
12943 int create_lock_applied = 0; |
|
12944 int nagle_applies = 0; |
|
12945 int some_on_control = 0; |
|
12946 int got_all_of_the_send = 0; |
|
12947 int hold_tcblock = 0; |
|
12948 int non_blocking = 0; |
|
12949 uint32_t local_add_more, local_soresv = 0; |
|
12950 uint16_t port; |
|
12951 uint16_t sinfo_flags; |
|
12952 sctp_assoc_t sinfo_assoc_id; |
|
12953 |
|
12954 error = 0; |
|
12955 net = NULL; |
|
12956 stcb = NULL; |
|
12957 asoc = NULL; |
|
12958 |
|
12959 #if defined(__APPLE__) |
|
12960 sctp_lock_assert(so); |
|
12961 #endif |
|
12962 t_inp = inp = (struct sctp_inpcb *)so->so_pcb; |
|
12963 if (inp == NULL) { |
|
12964 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
12965 error = EINVAL; |
|
12966 if (i_pak) { |
|
12967 SCTP_RELEASE_PKT(i_pak); |
|
12968 } |
|
12969 return (error); |
|
12970 } |
|
12971 if ((uio == NULL) && (i_pak == NULL)) { |
|
12972 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
12973 return (EINVAL); |
|
12974 } |
|
12975 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); |
|
12976 atomic_add_int(&inp->total_sends, 1); |
|
12977 if (uio) { |
|
12978 #if defined(__APPLE__) |
|
12979 #if defined(APPLE_LEOPARD) |
|
12980 if (uio->uio_resid < 0) { |
|
12981 #else |
|
12982 if (uio_resid(uio) < 0) { |
|
12983 #endif |
|
12984 #else |
|
12985 if (uio->uio_resid < 0) { |
|
12986 #endif |
|
12987 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
12988 return (EINVAL); |
|
12989 } |
|
12990 #if defined(__APPLE__) |
|
12991 #if defined(APPLE_LEOPARD) |
|
12992 sndlen = uio->uio_resid; |
|
12993 #else |
|
12994 sndlen = uio_resid(uio); |
|
12995 #endif |
|
12996 #else |
|
12997 sndlen = uio->uio_resid; |
|
12998 #endif |
|
12999 } else { |
|
13000 top = SCTP_HEADER_TO_CHAIN(i_pak); |
|
13001 #ifdef __Panda__ |
|
13002 /*- |
|
13003 * app len indicates the datalen, dgsize for cases |
|
13004 * of SCTP_EOF/ABORT will not have the right len |
|
13005 */ |
|
13006 sndlen = SCTP_APP_DATA_LEN(i_pak); |
|
13007 /*- |
|
13008 * Set the particle len also to zero to match |
|
13009 * up with app len. We only have one particle |
|
13010 * if app len is zero for Panda. This is ensured |
|
13011 * in the socket lib |
|
13012 */ |
|
13013 if (sndlen == 0) { |
|
13014 SCTP_BUF_LEN(top) = 0; |
|
13015 } |
|
13016 /*- |
|
13017 * We delink the chain from header, but keep |
|
13018 * the header around as we will need it in |
|
13019 * EAGAIN case |
|
13020 */ |
|
13021 SCTP_DETACH_HEADER_FROM_CHAIN(i_pak); |
|
13022 #else |
|
13023 sndlen = SCTP_HEADER_LEN(i_pak); |
|
13024 #endif |
|
13025 } |
|
13026 SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n", |
|
13027 (void *)addr, |
|
13028 sndlen); |
|
13029 #ifdef __Panda__ |
|
13030 if (i_control) { |
|
13031 control = SCTP_HEADER_TO_CHAIN(i_control); |
|
13032 } |
|
13033 #endif |
|
13034 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && |
|
13035 (inp->sctp_socket->so_qlimit)) { |
|
13036 /* The listener can NOT send */ |
|
13037 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN); |
|
13038 error = ENOTCONN; |
|
13039 goto out_unlocked; |
|
13040 } |
|
13041 /** |
|
13042 * Pre-screen address, if one is given the sin-len |
|
13043 * must be set correctly! |
|
13044 */ |
|
13045 if (addr) { |
|
13046 union sctp_sockstore *raddr = (union sctp_sockstore *)addr; |
|
13047 switch (raddr->sa.sa_family) { |
|
13048 #ifdef INET |
|
13049 case AF_INET: |
|
13050 #ifdef HAVE_SIN_LEN |
|
13051 if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) { |
|
13052 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
13053 error = EINVAL; |
|
13054 goto out_unlocked; |
|
13055 } |
|
13056 #endif |
|
13057 port = raddr->sin.sin_port; |
|
13058 break; |
|
13059 #endif |
|
13060 #ifdef INET6 |
|
13061 case AF_INET6: |
|
13062 #ifdef HAVE_SIN6_LEN |
|
13063 if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) { |
|
13064 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
13065 error = EINVAL; |
|
13066 goto out_unlocked; |
|
13067 } |
|
13068 #endif |
|
13069 port = raddr->sin6.sin6_port; |
|
13070 break; |
|
13071 #endif |
|
13072 #if defined(__Userspace__) |
|
13073 case AF_CONN: |
|
13074 #ifdef HAVE_SCONN_LEN |
|
13075 if (raddr->sconn.sconn_len != sizeof(struct sockaddr_conn)) { |
|
13076 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
13077 error = EINVAL; |
|
13078 goto out_unlocked; |
|
13079 } |
|
13080 #endif |
|
13081 port = raddr->sconn.sconn_port; |
|
13082 break; |
|
13083 #endif |
|
13084 default: |
|
13085 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT); |
|
13086 error = EAFNOSUPPORT; |
|
13087 goto out_unlocked; |
|
13088 } |
|
13089 } else |
|
13090 port = 0; |
|
13091 |
|
13092 if (srcv) { |
|
13093 sinfo_flags = srcv->sinfo_flags; |
|
13094 sinfo_assoc_id = srcv->sinfo_assoc_id; |
|
13095 if (INVALID_SINFO_FLAG(sinfo_flags) || |
|
13096 PR_SCTP_INVALID_POLICY(sinfo_flags)) { |
|
13097 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
13098 error = EINVAL; |
|
13099 goto out_unlocked; |
|
13100 } |
|
13101 if (srcv->sinfo_flags) |
|
13102 SCTP_STAT_INCR(sctps_sends_with_flags); |
|
13103 } else { |
|
13104 sinfo_flags = inp->def_send.sinfo_flags; |
|
13105 sinfo_assoc_id = inp->def_send.sinfo_assoc_id; |
|
13106 } |
|
13107 if (sinfo_flags & SCTP_SENDALL) { |
|
13108 /* its a sendall */ |
|
13109 error = sctp_sendall(inp, uio, top, srcv); |
|
13110 top = NULL; |
|
13111 goto out_unlocked; |
|
13112 } |
|
13113 if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) { |
|
13114 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
13115 error = EINVAL; |
|
13116 goto out_unlocked; |
|
13117 } |
|
13118 /* now we must find the assoc */ |
|
13119 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) || |
|
13120 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { |
|
13121 SCTP_INP_RLOCK(inp); |
|
13122 stcb = LIST_FIRST(&inp->sctp_asoc_list); |
|
13123 if (stcb) { |
|
13124 SCTP_TCB_LOCK(stcb); |
|
13125 hold_tcblock = 1; |
|
13126 } |
|
13127 SCTP_INP_RUNLOCK(inp); |
|
13128 } else if (sinfo_assoc_id) { |
|
13129 stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0); |
|
13130 } else if (addr) { |
|
13131 /*- |
|
13132 * Since we did not use findep we must |
|
13133 * increment it, and if we don't find a tcb |
|
13134 * decrement it. |
|
13135 */ |
|
13136 SCTP_INP_WLOCK(inp); |
|
13137 SCTP_INP_INCR_REF(inp); |
|
13138 SCTP_INP_WUNLOCK(inp); |
|
13139 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL); |
|
13140 if (stcb == NULL) { |
|
13141 SCTP_INP_WLOCK(inp); |
|
13142 SCTP_INP_DECR_REF(inp); |
|
13143 SCTP_INP_WUNLOCK(inp); |
|
13144 } else { |
|
13145 hold_tcblock = 1; |
|
13146 } |
|
13147 } |
|
13148 if ((stcb == NULL) && (addr)) { |
|
13149 /* Possible implicit send? */ |
|
13150 SCTP_ASOC_CREATE_LOCK(inp); |
|
13151 create_lock_applied = 1; |
|
13152 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || |
|
13153 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { |
|
13154 /* Should I really unlock ? */ |
|
13155 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
13156 error = EINVAL; |
|
13157 goto out_unlocked; |
|
13158 |
|
13159 } |
|
13160 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && |
|
13161 (addr->sa_family == AF_INET6)) { |
|
13162 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
13163 error = EINVAL; |
|
13164 goto out_unlocked; |
|
13165 } |
|
13166 SCTP_INP_WLOCK(inp); |
|
13167 SCTP_INP_INCR_REF(inp); |
|
13168 SCTP_INP_WUNLOCK(inp); |
|
13169 /* With the lock applied look again */ |
|
13170 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL); |
|
13171 if ((stcb == NULL) && (control != NULL) && (port > 0)) { |
|
13172 stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error); |
|
13173 } |
|
13174 if (stcb == NULL) { |
|
13175 SCTP_INP_WLOCK(inp); |
|
13176 SCTP_INP_DECR_REF(inp); |
|
13177 SCTP_INP_WUNLOCK(inp); |
|
13178 } else { |
|
13179 hold_tcblock = 1; |
|
13180 } |
|
13181 if (error) { |
|
13182 goto out_unlocked; |
|
13183 } |
|
13184 if (t_inp != inp) { |
|
13185 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN); |
|
13186 error = ENOTCONN; |
|
13187 goto out_unlocked; |
|
13188 } |
|
13189 } |
|
13190 if (stcb == NULL) { |
|
13191 if (addr == NULL) { |
|
13192 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT); |
|
13193 error = ENOENT; |
|
13194 goto out_unlocked; |
|
13195 } else { |
|
13196 /* We must go ahead and start the INIT process */ |
|
13197 uint32_t vrf_id; |
|
13198 |
|
13199 if ((sinfo_flags & SCTP_ABORT) || |
|
13200 ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) { |
|
13201 /*- |
|
13202 * User asks to abort a non-existant assoc, |
|
13203 * or EOF a non-existant assoc with no data |
|
13204 */ |
|
13205 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT); |
|
13206 error = ENOENT; |
|
13207 goto out_unlocked; |
|
13208 } |
|
13209 /* get an asoc/stcb struct */ |
|
13210 vrf_id = inp->def_vrf_id; |
|
13211 #ifdef INVARIANTS |
|
13212 if (create_lock_applied == 0) { |
|
13213 panic("Error, should hold create lock and I don't?"); |
|
13214 } |
|
13215 #endif |
|
13216 stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, |
|
13217 #if !(defined( __Panda__) || defined(__Userspace__)) |
|
13218 p |
|
13219 #else |
|
13220 (struct proc *)NULL |
|
13221 #endif |
|
13222 ); |
|
13223 if (stcb == NULL) { |
|
13224 /* Error is setup for us in the call */ |
|
13225 goto out_unlocked; |
|
13226 } |
|
13227 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { |
|
13228 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; |
|
13229 /* Set the connected flag so we can queue data */ |
|
13230 soisconnecting(so); |
|
13231 } |
|
13232 hold_tcblock = 1; |
|
13233 if (create_lock_applied) { |
|
13234 SCTP_ASOC_CREATE_UNLOCK(inp); |
|
13235 create_lock_applied = 0; |
|
13236 } else { |
|
13237 SCTP_PRINTF("Huh-3? create lock should have been on??\n"); |
|
13238 } |
|
13239 /* Turn on queue only flag to prevent data from being sent */ |
|
13240 queue_only = 1; |
|
13241 asoc = &stcb->asoc; |
|
13242 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT); |
|
13243 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); |
|
13244 |
|
13245 /* initialize authentication params for the assoc */ |
|
13246 sctp_initialize_auth_params(inp, stcb); |
|
13247 |
|
13248 if (control) { |
|
13249 if (sctp_process_cmsgs_for_init(stcb, control, &error)) { |
|
13250 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_7); |
|
13251 hold_tcblock = 0; |
|
13252 stcb = NULL; |
|
13253 goto out_unlocked; |
|
13254 } |
|
13255 } |
|
13256 /* out with the INIT */ |
|
13257 queue_only_for_init = 1; |
|
13258 /*- |
|
13259 * we may want to dig in after this call and adjust the MTU |
|
13260 * value. It defaulted to 1500 (constant) but the ro |
|
13261 * structure may now have an update and thus we may need to |
|
13262 * change it BEFORE we append the message. |
|
13263 */ |
|
13264 } |
|
13265 } else |
|
13266 asoc = &stcb->asoc; |
|
13267 if (srcv == NULL) |
|
13268 srcv = (struct sctp_sndrcvinfo *)&asoc->def_send; |
|
13269 if (srcv->sinfo_flags & SCTP_ADDR_OVER) { |
|
13270 if (addr) |
|
13271 net = sctp_findnet(stcb, addr); |
|
13272 else |
|
13273 net = NULL; |
|
13274 if ((net == NULL) || |
|
13275 ((port != 0) && (port != stcb->rport))) { |
|
13276 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
13277 error = EINVAL; |
|
13278 goto out_unlocked; |
|
13279 } |
|
13280 } else { |
|
13281 if (stcb->asoc.alternate) { |
|
13282 net = stcb->asoc.alternate; |
|
13283 } else { |
|
13284 net = stcb->asoc.primary_destination; |
|
13285 } |
|
13286 } |
|
13287 atomic_add_int(&stcb->total_sends, 1); |
|
13288 /* Keep the stcb from being freed under our feet */ |
|
13289 atomic_add_int(&asoc->refcnt, 1); |
|
13290 free_cnt_applied = 1; |
|
13291 |
|
13292 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) { |
|
13293 if (sndlen > asoc->smallest_mtu) { |
|
13294 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE); |
|
13295 error = EMSGSIZE; |
|
13296 goto out_unlocked; |
|
13297 } |
|
13298 } |
|
13299 #if defined(__Userspace__) |
|
13300 if (inp->recv_callback) { |
|
13301 non_blocking = 1; |
|
13302 } |
|
13303 #else |
|
13304 if (SCTP_SO_IS_NBIO(so) |
|
13305 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000 |
|
13306 || (flags & MSG_NBIO) |
|
13307 #endif |
|
13308 ) { |
|
13309 non_blocking = 1; |
|
13310 } |
|
13311 #endif |
|
13312 /* would we block? */ |
|
13313 if (non_blocking) { |
|
13314 if (hold_tcblock == 0) { |
|
13315 SCTP_TCB_LOCK(stcb); |
|
13316 hold_tcblock = 1; |
|
13317 } |
|
13318 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); |
|
13319 if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) || |
|
13320 (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { |
|
13321 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK); |
|
13322 if (sndlen > SCTP_SB_LIMIT_SND(so)) |
|
13323 error = EMSGSIZE; |
|
13324 else |
|
13325 error = EWOULDBLOCK; |
|
13326 goto out_unlocked; |
|
13327 } |
|
13328 stcb->asoc.sb_send_resv += sndlen; |
|
13329 SCTP_TCB_UNLOCK(stcb); |
|
13330 hold_tcblock = 0; |
|
13331 } else { |
|
13332 atomic_add_int(&stcb->asoc.sb_send_resv, sndlen); |
|
13333 } |
|
13334 local_soresv = sndlen; |
|
13335 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { |
|
13336 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); |
|
13337 error = ECONNRESET; |
|
13338 goto out_unlocked; |
|
13339 } |
|
13340 if (create_lock_applied) { |
|
13341 SCTP_ASOC_CREATE_UNLOCK(inp); |
|
13342 create_lock_applied = 0; |
|
13343 } |
|
13344 if (asoc->stream_reset_outstanding) { |
|
13345 /* |
|
13346 * Can't queue any data while stream reset is underway. |
|
13347 */ |
|
13348 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN); |
|
13349 error = EAGAIN; |
|
13350 goto out_unlocked; |
|
13351 } |
|
13352 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || |
|
13353 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { |
|
13354 queue_only = 1; |
|
13355 } |
|
13356 /* we are now done with all control */ |
|
13357 if (control) { |
|
13358 sctp_m_freem(control); |
|
13359 control = NULL; |
|
13360 } |
|
13361 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) || |
|
13362 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) || |
|
13363 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) || |
|
13364 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) { |
|
13365 if (srcv->sinfo_flags & SCTP_ABORT) { |
|
13366 ; |
|
13367 } else { |
|
13368 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); |
|
13369 error = ECONNRESET; |
|
13370 goto out_unlocked; |
|
13371 } |
|
13372 } |
|
13373 /* Ok, we will attempt a msgsnd :> */ |
|
13374 #if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)) |
|
13375 if (p) { |
|
13376 #if defined(__FreeBSD__) && __FreeBSD_version >= 603000 |
|
13377 p->td_ru.ru_msgsnd++; |
|
13378 #elif defined(__FreeBSD__) && __FreeBSD_version >= 500000 |
|
13379 p->td_proc->p_stats->p_ru.ru_msgsnd++; |
|
13380 #else |
|
13381 p->p_stats->p_ru.ru_msgsnd++; |
|
13382 #endif |
|
13383 } |
|
13384 #endif |
|
13385 /* Are we aborting? */ |
|
13386 if (srcv->sinfo_flags & SCTP_ABORT) { |
|
13387 struct mbuf *mm; |
|
13388 int tot_demand, tot_out = 0, max_out; |
|
13389 |
|
13390 SCTP_STAT_INCR(sctps_sends_with_abort); |
|
13391 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || |
|
13392 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { |
|
13393 /* It has to be up before we abort */ |
|
13394 /* how big is the user initiated abort? */ |
|
13395 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
13396 error = EINVAL; |
|
13397 goto out; |
|
13398 } |
|
13399 if (hold_tcblock) { |
|
13400 SCTP_TCB_UNLOCK(stcb); |
|
13401 hold_tcblock = 0; |
|
13402 } |
|
13403 if (top) { |
|
13404 struct mbuf *cntm = NULL; |
|
13405 |
|
13406 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA); |
|
13407 if (sndlen != 0) { |
|
13408 for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) { |
|
13409 tot_out += SCTP_BUF_LEN(cntm); |
|
13410 } |
|
13411 } |
|
13412 } else { |
|
13413 /* Must fit in a MTU */ |
|
13414 tot_out = sndlen; |
|
13415 tot_demand = (tot_out + sizeof(struct sctp_paramhdr)); |
|
13416 if (tot_demand > SCTP_DEFAULT_ADD_MORE) { |
|
13417 /* To big */ |
|
13418 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE); |
|
13419 error = EMSGSIZE; |
|
13420 goto out; |
|
13421 } |
|
13422 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAITOK, 1, MT_DATA); |
|
13423 } |
|
13424 if (mm == NULL) { |
|
13425 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM); |
|
13426 error = ENOMEM; |
|
13427 goto out; |
|
13428 } |
|
13429 max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr); |
|
13430 max_out -= sizeof(struct sctp_abort_msg); |
|
13431 if (tot_out > max_out) { |
|
13432 tot_out = max_out; |
|
13433 } |
|
13434 if (mm) { |
|
13435 struct sctp_paramhdr *ph; |
|
13436 |
|
13437 /* now move forward the data pointer */ |
|
13438 ph = mtod(mm, struct sctp_paramhdr *); |
|
13439 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); |
|
13440 ph->param_length = htons(sizeof(struct sctp_paramhdr) + tot_out); |
|
13441 ph++; |
|
13442 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr); |
|
13443 if (top == NULL) { |
|
13444 #if defined(__APPLE__) |
|
13445 SCTP_SOCKET_UNLOCK(so, 0); |
|
13446 #endif |
|
13447 error = uiomove((caddr_t)ph, (int)tot_out, uio); |
|
13448 #if defined(__APPLE__) |
|
13449 SCTP_SOCKET_LOCK(so, 0); |
|
13450 #endif |
|
13451 if (error) { |
|
13452 /*- |
|
13453 * Here if we can't get his data we |
|
13454 * still abort we just don't get to |
|
13455 * send the users note :-0 |
|
13456 */ |
|
13457 sctp_m_freem(mm); |
|
13458 mm = NULL; |
|
13459 } |
|
13460 } else { |
|
13461 if (sndlen != 0) { |
|
13462 SCTP_BUF_NEXT(mm) = top; |
|
13463 } |
|
13464 } |
|
13465 } |
|
13466 if (hold_tcblock == 0) { |
|
13467 SCTP_TCB_LOCK(stcb); |
|
13468 } |
|
13469 atomic_add_int(&stcb->asoc.refcnt, -1); |
|
13470 free_cnt_applied = 0; |
|
13471 /* release this lock, otherwise we hang on ourselves */ |
|
13472 sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED); |
|
13473 /* now relock the stcb so everything is sane */ |
|
13474 hold_tcblock = 0; |
|
13475 stcb = NULL; |
|
13476 /* In this case top is already chained to mm |
|
13477 * avoid double free, since we free it below if |
|
13478 * top != NULL and driver would free it after sending |
|
13479 * the packet out |
|
13480 */ |
|
13481 if (sndlen != 0) { |
|
13482 top = NULL; |
|
13483 } |
|
13484 goto out_unlocked; |
|
13485 } |
|
13486 /* Calculate the maximum we can send */ |
|
13487 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); |
|
13488 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) { |
|
13489 if (non_blocking) { |
|
13490 /* we already checked for non-blocking above. */ |
|
13491 max_len = sndlen; |
|
13492 } else { |
|
13493 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; |
|
13494 } |
|
13495 } else { |
|
13496 max_len = 0; |
|
13497 } |
|
13498 if (hold_tcblock) { |
|
13499 SCTP_TCB_UNLOCK(stcb); |
|
13500 hold_tcblock = 0; |
|
13501 } |
|
13502 /* Is the stream no. valid? */ |
|
13503 if (srcv->sinfo_stream >= asoc->streamoutcnt) { |
|
13504 /* Invalid stream number */ |
|
13505 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
13506 error = EINVAL; |
|
13507 goto out_unlocked; |
|
13508 } |
|
13509 if (asoc->strmout == NULL) { |
|
13510 /* huh? software error */ |
|
13511 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT); |
|
13512 error = EFAULT; |
|
13513 goto out_unlocked; |
|
13514 } |
|
13515 |
|
13516 /* Unless E_EOR mode is on, we must make a send FIT in one call. */ |
|
13517 if ((user_marks_eor == 0) && |
|
13518 (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) { |
|
13519 /* It will NEVER fit */ |
|
13520 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE); |
|
13521 error = EMSGSIZE; |
|
13522 goto out_unlocked; |
|
13523 } |
|
13524 if ((uio == NULL) && user_marks_eor) { |
|
13525 /*- |
|
13526 * We do not support eeor mode for |
|
13527 * sending with mbuf chains (like sendfile). |
|
13528 */ |
|
13529 SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
13530 error = EINVAL; |
|
13531 goto out_unlocked; |
|
13532 } |
|
13533 |
|
13534 if (user_marks_eor) { |
|
13535 local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold)); |
|
13536 } else { |
|
13537 /*- |
|
13538 * For non-eeor the whole message must fit in |
|
13539 * the socket send buffer. |
|
13540 */ |
|
13541 local_add_more = sndlen; |
|
13542 } |
|
13543 len = 0; |
|
13544 if (non_blocking) { |
|
13545 goto skip_preblock; |
|
13546 } |
|
13547 if (((max_len <= local_add_more) && |
|
13548 (SCTP_SB_LIMIT_SND(so) >= local_add_more)) || |
|
13549 (max_len == 0) || |
|
13550 ((stcb->asoc.chunks_on_out_queue+stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { |
|
13551 /* No room right now ! */ |
|
13552 SOCKBUF_LOCK(&so->so_snd); |
|
13553 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); |
|
13554 while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) || |
|
13555 ((stcb->asoc.stream_queue_cnt+stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { |
|
13556 SCTPDBG(SCTP_DEBUG_OUTPUT1,"pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n", |
|
13557 (unsigned int)SCTP_SB_LIMIT_SND(so), |
|
13558 inqueue_bytes, |
|
13559 local_add_more, |
|
13560 stcb->asoc.stream_queue_cnt, |
|
13561 stcb->asoc.chunks_on_out_queue, |
|
13562 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)); |
|
13563 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { |
|
13564 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen); |
|
13565 } |
|
13566 be.error = 0; |
|
13567 #if !defined(__Panda__) && !defined(__Windows__) |
|
13568 stcb->block_entry = &be; |
|
13569 #endif |
|
13570 error = sbwait(&so->so_snd); |
|
13571 stcb->block_entry = NULL; |
|
13572 if (error || so->so_error || be.error) { |
|
13573 if (error == 0) { |
|
13574 if (so->so_error) |
|
13575 error = so->so_error; |
|
13576 if (be.error) { |
|
13577 error = be.error; |
|
13578 } |
|
13579 } |
|
13580 SOCKBUF_UNLOCK(&so->so_snd); |
|
13581 goto out_unlocked; |
|
13582 } |
|
13583 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { |
|
13584 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, |
|
13585 asoc, stcb->asoc.total_output_queue_size); |
|
13586 } |
|
13587 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { |
|
13588 goto out_unlocked; |
|
13589 } |
|
13590 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); |
|
13591 } |
|
13592 if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) { |
|
13593 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; |
|
13594 } else { |
|
13595 max_len = 0; |
|
13596 } |
|
13597 SOCKBUF_UNLOCK(&so->so_snd); |
|
13598 } |
|
13599 |
|
13600 skip_preblock: |
|
13601 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { |
|
13602 goto out_unlocked; |
|
13603 } |
|
13604 #if defined(__APPLE__) |
|
13605 error = sblock(&so->so_snd, SBLOCKWAIT(flags)); |
|
13606 #endif |
|
13607 /* sndlen covers for mbuf case |
|
13608 * uio_resid covers for the non-mbuf case |
|
13609 * NOTE: uio will be null when top/mbuf is passed |
|
13610 */ |
|
13611 if (sndlen == 0) { |
|
13612 if (srcv->sinfo_flags & SCTP_EOF) { |
|
13613 got_all_of_the_send = 1; |
|
13614 goto dataless_eof; |
|
13615 } else { |
|
13616 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
13617 error = EINVAL; |
|
13618 goto out; |
|
13619 } |
|
13620 } |
|
13621 if (top == NULL) { |
|
13622 struct sctp_stream_queue_pending *sp; |
|
13623 struct sctp_stream_out *strm; |
|
13624 uint32_t sndout; |
|
13625 |
|
13626 SCTP_TCB_SEND_LOCK(stcb); |
|
13627 if ((asoc->stream_locked) && |
|
13628 (asoc->stream_locked_on != srcv->sinfo_stream)) { |
|
13629 SCTP_TCB_SEND_UNLOCK(stcb); |
|
13630 SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); |
|
13631 error = EINVAL; |
|
13632 goto out; |
|
13633 } |
|
13634 SCTP_TCB_SEND_UNLOCK(stcb); |
|
13635 |
|
13636 strm = &stcb->asoc.strmout[srcv->sinfo_stream]; |
|
13637 if (strm->last_msg_incomplete == 0) { |
|
13638 do_a_copy_in: |
|
13639 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error); |
|
13640 if ((sp == NULL) || (error)) { |
|
13641 goto out; |
|
13642 } |
|
13643 SCTP_TCB_SEND_LOCK(stcb); |
|
13644 if (sp->msg_is_complete) { |
|
13645 strm->last_msg_incomplete = 0; |
|
13646 asoc->stream_locked = 0; |
|
13647 } else { |
|
13648 /* Just got locked to this guy in |
|
13649 * case of an interrupt. |
|
13650 */ |
|
13651 strm->last_msg_incomplete = 1; |
|
13652 asoc->stream_locked = 1; |
|
13653 asoc->stream_locked_on = srcv->sinfo_stream; |
|
13654 sp->sender_all_done = 0; |
|
13655 } |
|
13656 sctp_snd_sb_alloc(stcb, sp->length); |
|
13657 atomic_add_int(&asoc->stream_queue_cnt, 1); |
|
13658 if (srcv->sinfo_flags & SCTP_UNORDERED) { |
|
13659 SCTP_STAT_INCR(sctps_sends_with_unord); |
|
13660 } |
|
13661 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next); |
|
13662 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1); |
|
13663 SCTP_TCB_SEND_UNLOCK(stcb); |
|
13664 } else { |
|
13665 SCTP_TCB_SEND_LOCK(stcb); |
|
13666 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead); |
|
13667 SCTP_TCB_SEND_UNLOCK(stcb); |
|
13668 if (sp == NULL) { |
|
13669 /* ???? Huh ??? last msg is gone */ |
|
13670 #ifdef INVARIANTS |
|
13671 panic("Warning: Last msg marked incomplete, yet nothing left?"); |
|
13672 #else |
|
13673 SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n"); |
|
13674 strm->last_msg_incomplete = 0; |
|
13675 #endif |
|
13676 goto do_a_copy_in; |
|
13677 |
|
13678 } |
|
13679 } |
|
13680 #if defined(__APPLE__) |
|
13681 #if defined(APPLE_LEOPARD) |
|
13682 while (uio->uio_resid > 0) { |
|
13683 #else |
|
13684 while (uio_resid(uio) > 0) { |
|
13685 #endif |
|
13686 #else |
|
13687 while (uio->uio_resid > 0) { |
|
13688 #endif |
|
13689 /* How much room do we have? */ |
|
13690 struct mbuf *new_tail, *mm; |
|
13691 |
|
13692 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size) |
|
13693 max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size; |
|
13694 else |
|
13695 max_len = 0; |
|
13696 |
|
13697 if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) || |
|
13698 (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) || |
|
13699 #if defined(__APPLE__) |
|
13700 #if defined(APPLE_LEOPARD) |
|
13701 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) { |
|
13702 #else |
|
13703 (uio_resid(uio) && (uio_resid(uio) <= (int)max_len))) { |
|
13704 #endif |
|
13705 #else |
|
13706 (uio->uio_resid && (uio->uio_resid <= (int)max_len))) { |
|
13707 #endif |
|
13708 sndout = 0; |
|
13709 new_tail = NULL; |
|
13710 if (hold_tcblock) { |
|
13711 SCTP_TCB_UNLOCK(stcb); |
|
13712 hold_tcblock = 0; |
|
13713 } |
|
13714 #if defined(__APPLE__) |
|
13715 SCTP_SOCKET_UNLOCK(so, 0); |
|
13716 #endif |
|
13717 #if defined(__FreeBSD__) && __FreeBSD_version > 602000 |
|
13718 mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail); |
|
13719 #else |
|
13720 mm = sctp_copy_resume(uio, max_len, &error, &sndout, &new_tail); |
|
13721 #endif |
|
13722 #if defined(__APPLE__) |
|
13723 SCTP_SOCKET_LOCK(so, 0); |
|
13724 #endif |
|
13725 if ((mm == NULL) || error) { |
|
13726 if (mm) { |
|
13727 sctp_m_freem(mm); |
|
13728 } |
|
13729 goto out; |
|
13730 } |
|
13731 /* Update the mbuf and count */ |
|
13732 SCTP_TCB_SEND_LOCK(stcb); |
|
13733 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { |
|
13734 /* we need to get out. |
|
13735 * Peer probably aborted. |
|
13736 */ |
|
13737 sctp_m_freem(mm); |
|
13738 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) { |
|
13739 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); |
|
13740 error = ECONNRESET; |
|
13741 } |
|
13742 SCTP_TCB_SEND_UNLOCK(stcb); |
|
13743 goto out; |
|
13744 } |
|
13745 if (sp->tail_mbuf) { |
|
13746 /* tack it to the end */ |
|
13747 SCTP_BUF_NEXT(sp->tail_mbuf) = mm; |
|
13748 sp->tail_mbuf = new_tail; |
|
13749 } else { |
|
13750 /* A stolen mbuf */ |
|
13751 sp->data = mm; |
|
13752 sp->tail_mbuf = new_tail; |
|
13753 } |
|
13754 sctp_snd_sb_alloc(stcb, sndout); |
|
13755 atomic_add_int(&sp->length,sndout); |
|
13756 len += sndout; |
|
13757 |
|
13758 /* Did we reach EOR? */ |
|
13759 #if defined(__APPLE__) |
|
13760 #if defined(APPLE_LEOPARD) |
|
13761 if ((uio->uio_resid == 0) && |
|
13762 #else |
|
13763 if ((uio_resid(uio) == 0) && |
|
13764 #endif |
|
13765 #else |
|
13766 if ((uio->uio_resid == 0) && |
|
13767 #endif |
|
13768 ((user_marks_eor == 0) || |
|
13769 (srcv->sinfo_flags & SCTP_EOF) || |
|
13770 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) { |
|
13771 sp->msg_is_complete = 1; |
|
13772 } else { |
|
13773 sp->msg_is_complete = 0; |
|
13774 } |
|
13775 SCTP_TCB_SEND_UNLOCK(stcb); |
|
13776 } |
|
13777 #if defined(__APPLE__) |
|
13778 #if defined(APPLE_LEOPARD) |
|
13779 if (uio->uio_resid == 0) { |
|
13780 #else |
|
13781 if (uio_resid(uio) == 0) { |
|
13782 #endif |
|
13783 #else |
|
13784 if (uio->uio_resid == 0) { |
|
13785 #endif |
|
13786 /* got it all? */ |
|
13787 continue; |
|
13788 } |
|
13789 /* PR-SCTP? */ |
|
13790 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) { |
|
13791 /* This is ugly but we must assure locking order */ |
|
13792 if (hold_tcblock == 0) { |
|
13793 SCTP_TCB_LOCK(stcb); |
|
13794 hold_tcblock = 1; |
|
13795 } |
|
13796 sctp_prune_prsctp(stcb, asoc, srcv, sndlen); |
|
13797 inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); |
|
13798 if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size) |
|
13799 max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; |
|
13800 else |
|
13801 max_len = 0; |
|
13802 if (max_len > 0) { |
|
13803 continue; |
|
13804 } |
|
13805 SCTP_TCB_UNLOCK(stcb); |
|
13806 hold_tcblock = 0; |
|
13807 } |
|
13808 /* wait for space now */ |
|
13809 if (non_blocking) { |
|
13810 /* Non-blocking io in place out */ |
|
13811 goto skip_out_eof; |
|
13812 } |
|
13813 /* What about the INIT, send it maybe */ |
|
13814 if (queue_only_for_init) { |
|
13815 if (hold_tcblock == 0) { |
|
13816 SCTP_TCB_LOCK(stcb); |
|
13817 hold_tcblock = 1; |
|
13818 } |
|
13819 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) { |
|
13820 /* a collision took us forward? */ |
|
13821 queue_only = 0; |
|
13822 } else { |
|
13823 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); |
|
13824 SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT); |
|
13825 queue_only = 1; |
|
13826 } |
|
13827 } |
|
13828 if ((net->flight_size > net->cwnd) && |
|
13829 (asoc->sctp_cmt_on_off == 0)) { |
|
13830 SCTP_STAT_INCR(sctps_send_cwnd_avoid); |
|
13831 queue_only = 1; |
|
13832 } else if (asoc->ifp_had_enobuf) { |
|
13833 SCTP_STAT_INCR(sctps_ifnomemqueued); |
|
13834 if (net->flight_size > (2 * net->mtu)) { |
|
13835 queue_only = 1; |
|
13836 } |
|
13837 asoc->ifp_had_enobuf = 0; |
|
13838 } |
|
13839 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + |
|
13840 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk))); |
|
13841 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && |
|
13842 (stcb->asoc.total_flight > 0) && |
|
13843 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) && |
|
13844 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) { |
|
13845 |
|
13846 /*- |
|
13847 * Ok, Nagle is set on and we have data outstanding. |
|
13848 * Don't send anything and let SACKs drive out the |
|
13849 * data unless wen have a "full" segment to send. |
|
13850 */ |
|
13851 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { |
|
13852 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); |
|
13853 } |
|
13854 SCTP_STAT_INCR(sctps_naglequeued); |
|
13855 nagle_applies = 1; |
|
13856 } else { |
|
13857 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { |
|
13858 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) |
|
13859 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); |
|
13860 } |
|
13861 SCTP_STAT_INCR(sctps_naglesent); |
|
13862 nagle_applies = 0; |
|
13863 } |
|
13864 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { |
|
13865 |
|
13866 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, |
|
13867 nagle_applies, un_sent); |
|
13868 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size, |
|
13869 stcb->asoc.total_flight, |
|
13870 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count); |
|
13871 } |
|
13872 if (queue_only_for_init) |
|
13873 queue_only_for_init = 0; |
|
13874 if ((queue_only == 0) && (nagle_applies == 0)) { |
|
13875 /*- |
|
13876 * need to start chunk output |
|
13877 * before blocking.. note that if |
|
13878 * a lock is already applied, then |
|
13879 * the input via the net is happening |
|
13880 * and I don't need to start output :-D |
|
13881 */ |
|
13882 if (hold_tcblock == 0) { |
|
13883 if (SCTP_TCB_TRYLOCK(stcb)) { |
|
13884 hold_tcblock = 1; |
|
13885 sctp_chunk_output(inp, |
|
13886 stcb, |
|
13887 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); |
|
13888 } |
|
13889 } else { |
|
13890 sctp_chunk_output(inp, |
|
13891 stcb, |
|
13892 SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); |
|
13893 } |
|
13894 if (hold_tcblock == 1) { |
|
13895 SCTP_TCB_UNLOCK(stcb); |
|
13896 hold_tcblock = 0; |
|
13897 } |
|
13898 } |
|
13899 SOCKBUF_LOCK(&so->so_snd); |
|
13900 /*- |
|
13901 * This is a bit strange, but I think it will |
|
13902 * work. The total_output_queue_size is locked and |
|
13903 * protected by the TCB_LOCK, which we just released. |
|
13904 * There is a race that can occur between releasing it |
|
13905 * above, and me getting the socket lock, where sacks |
|
13906 * come in but we have not put the SB_WAIT on the |
|
13907 * so_snd buffer to get the wakeup. After the LOCK |
|
13908 * is applied the sack_processing will also need to |
|
13909 * LOCK the so->so_snd to do the actual sowwakeup(). So |
|
13910 * once we have the socket buffer lock if we recheck the |
|
13911 * size we KNOW we will get to sleep safely with the |
|
13912 * wakeup flag in place. |
|
13913 */ |
|
13914 if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size + |
|
13915 min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) { |
|
13916 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { |
|
13917 #if defined(__APPLE__) |
|
13918 #if defined(APPLE_LEOPARD) |
|
13919 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, |
|
13920 asoc, uio->uio_resid); |
|
13921 #else |
|
13922 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, |
|
13923 asoc, uio_resid(uio)); |
|
13924 #endif |
|
13925 #else |
|
13926 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, |
|
13927 asoc, uio->uio_resid); |
|
13928 #endif |
|
13929 } |
|
13930 be.error = 0; |
|
13931 #if !defined(__Panda__) && !defined(__Windows__) |
|
13932 stcb->block_entry = &be; |
|
13933 #endif |
|
13934 #if defined(__APPLE__) |
|
13935 sbunlock(&so->so_snd, 1); |
|
13936 #endif |
|
13937 error = sbwait(&so->so_snd); |
|
13938 stcb->block_entry = NULL; |
|
13939 |
|
13940 if (error || so->so_error || be.error) { |
|
13941 if (error == 0) { |
|
13942 if (so->so_error) |
|
13943 error = so->so_error; |
|
13944 if (be.error) { |
|
13945 error = be.error; |
|
13946 } |
|
13947 } |
|
13948 SOCKBUF_UNLOCK(&so->so_snd); |
|
13949 goto out_unlocked; |
|
13950 } |
|
13951 |
|
13952 #if defined(__APPLE__) |
|
13953 error = sblock(&so->so_snd, SBLOCKWAIT(flags)); |
|
13954 #endif |
|
13955 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { |
|
13956 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, |
|
13957 asoc, stcb->asoc.total_output_queue_size); |
|
13958 } |
|
13959 } |
|
13960 SOCKBUF_UNLOCK(&so->so_snd); |
|
13961 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { |
|
13962 goto out_unlocked; |
|
13963 } |
|
13964 } |
|
13965 SCTP_TCB_SEND_LOCK(stcb); |
|
13966 if (sp) { |
|
13967 if (sp->msg_is_complete == 0) { |
|
13968 strm->last_msg_incomplete = 1; |
|
13969 asoc->stream_locked = 1; |
|
13970 asoc->stream_locked_on = srcv->sinfo_stream; |
|
13971 } else { |
|
13972 sp->sender_all_done = 1; |
|
13973 strm->last_msg_incomplete = 0; |
|
13974 asoc->stream_locked = 0; |
|
13975 } |
|
13976 } else { |
|
13977 SCTP_PRINTF("Huh no sp TSNH?\n"); |
|
13978 strm->last_msg_incomplete = 0; |
|
13979 asoc->stream_locked = 0; |
|
13980 } |
|
13981 SCTP_TCB_SEND_UNLOCK(stcb); |
|
13982 #if defined(__APPLE__) |
|
13983 #if defined(APPLE_LEOPARD) |
|
13984 if (uio->uio_resid == 0) { |
|
13985 #else |
|
13986 if (uio_resid(uio) == 0) { |
|
13987 #endif |
|
13988 #else |
|
13989 if (uio->uio_resid == 0) { |
|
13990 #endif |
|
13991 got_all_of_the_send = 1; |
|
13992 } |
|
13993 } else { |
|
13994 /* We send in a 0, since we do NOT have any locks */ |
|
13995 error = sctp_msg_append(stcb, net, top, srcv, 0); |
|
13996 top = NULL; |
|
13997 if (srcv->sinfo_flags & SCTP_EOF) { |
|
13998 /* |
|
13999 * This should only happen for Panda for the mbuf |
|
14000 * send case, which does NOT yet support EEOR mode. |
|
14001 * Thus, we can just set this flag to do the proper |
|
14002 * EOF handling. |
|
14003 */ |
|
14004 got_all_of_the_send = 1; |
|
14005 } |
|
14006 } |
|
14007 if (error) { |
|
14008 goto out; |
|
14009 } |
|
14010 dataless_eof: |
|
14011 /* EOF thing ? */ |
|
14012 if ((srcv->sinfo_flags & SCTP_EOF) && |
|
14013 (got_all_of_the_send == 1)) { |
|
14014 int cnt; |
|
14015 SCTP_STAT_INCR(sctps_sends_with_eof); |
|
14016 error = 0; |
|
14017 if (hold_tcblock == 0) { |
|
14018 SCTP_TCB_LOCK(stcb); |
|
14019 hold_tcblock = 1; |
|
14020 } |
|
14021 cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED); |
|
14022 if (TAILQ_EMPTY(&asoc->send_queue) && |
|
14023 TAILQ_EMPTY(&asoc->sent_queue) && |
|
14024 (cnt == 0)) { |
|
14025 if (asoc->locked_on_sending) { |
|
14026 goto abort_anyway; |
|
14027 } |
|
14028 /* there is nothing queued to send, so I'm done... */ |
|
14029 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && |
|
14030 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && |
|
14031 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { |
|
14032 struct sctp_nets *netp; |
|
14033 |
|
14034 /* only send SHUTDOWN the first time through */ |
|
14035 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { |
|
14036 SCTP_STAT_DECR_GAUGE32(sctps_currestab); |
|
14037 } |
|
14038 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); |
|
14039 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); |
|
14040 sctp_stop_timers_for_shutdown(stcb); |
|
14041 if (stcb->asoc.alternate) { |
|
14042 netp = stcb->asoc.alternate; |
|
14043 } else { |
|
14044 netp = stcb->asoc.primary_destination; |
|
14045 } |
|
14046 sctp_send_shutdown(stcb, netp); |
|
14047 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, |
|
14048 netp); |
|
14049 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, |
|
14050 asoc->primary_destination); |
|
14051 } |
|
14052 } else { |
|
14053 /*- |
|
14054 * we still got (or just got) data to send, so set |
|
14055 * SHUTDOWN_PENDING |
|
14056 */ |
|
14057 /*- |
|
14058 * XXX sockets draft says that SCTP_EOF should be |
|
14059 * sent with no data. currently, we will allow user |
|
14060 * data to be sent first and move to |
|
14061 * SHUTDOWN-PENDING |
|
14062 */ |
|
14063 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && |
|
14064 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && |
|
14065 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { |
|
14066 if (hold_tcblock == 0) { |
|
14067 SCTP_TCB_LOCK(stcb); |
|
14068 hold_tcblock = 1; |
|
14069 } |
|
14070 if (asoc->locked_on_sending) { |
|
14071 /* Locked to send out the data */ |
|
14072 struct sctp_stream_queue_pending *sp; |
|
14073 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); |
|
14074 if (sp) { |
|
14075 if ((sp->length == 0) && (sp->msg_is_complete == 0)) |
|
14076 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; |
|
14077 } |
|
14078 } |
|
14079 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; |
|
14080 if (TAILQ_EMPTY(&asoc->send_queue) && |
|
14081 TAILQ_EMPTY(&asoc->sent_queue) && |
|
14082 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { |
|
14083 abort_anyway: |
|
14084 if (free_cnt_applied) { |
|
14085 atomic_add_int(&stcb->asoc.refcnt, -1); |
|
14086 free_cnt_applied = 0; |
|
14087 } |
|
14088 sctp_abort_an_association(stcb->sctp_ep, stcb, |
|
14089 NULL, SCTP_SO_LOCKED); |
|
14090 /* now relock the stcb so everything is sane */ |
|
14091 hold_tcblock = 0; |
|
14092 stcb = NULL; |
|
14093 goto out; |
|
14094 } |
|
14095 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, |
|
14096 asoc->primary_destination); |
|
14097 sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY); |
|
14098 } |
|
14099 } |
|
14100 } |
|
14101 skip_out_eof: |
|
14102 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { |
|
14103 some_on_control = 1; |
|
14104 } |
|
14105 if (queue_only_for_init) { |
|
14106 if (hold_tcblock == 0) { |
|
14107 SCTP_TCB_LOCK(stcb); |
|
14108 hold_tcblock = 1; |
|
14109 } |
|
14110 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) { |
|
14111 /* a collision took us forward? */ |
|
14112 queue_only = 0; |
|
14113 } else { |
|
14114 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); |
|
14115 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT); |
|
14116 queue_only = 1; |
|
14117 } |
|
14118 } |
|
14119 if ((net->flight_size > net->cwnd) && |
|
14120 (stcb->asoc.sctp_cmt_on_off == 0)) { |
|
14121 SCTP_STAT_INCR(sctps_send_cwnd_avoid); |
|
14122 queue_only = 1; |
|
14123 } else if (asoc->ifp_had_enobuf) { |
|
14124 SCTP_STAT_INCR(sctps_ifnomemqueued); |
|
14125 if (net->flight_size > (2 * net->mtu)) { |
|
14126 queue_only = 1; |
|
14127 } |
|
14128 asoc->ifp_had_enobuf = 0; |
|
14129 } |
|
14130 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + |
|
14131 (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk))); |
|
14132 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && |
|
14133 (stcb->asoc.total_flight > 0) && |
|
14134 (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) && |
|
14135 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) { |
|
14136 /*- |
|
14137 * Ok, Nagle is set on and we have data outstanding. |
|
14138 * Don't send anything and let SACKs drive out the |
|
14139 * data unless wen have a "full" segment to send. |
|
14140 */ |
|
14141 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { |
|
14142 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); |
|
14143 } |
|
14144 SCTP_STAT_INCR(sctps_naglequeued); |
|
14145 nagle_applies = 1; |
|
14146 } else { |
|
14147 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { |
|
14148 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) |
|
14149 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); |
|
14150 } |
|
14151 SCTP_STAT_INCR(sctps_naglesent); |
|
14152 nagle_applies = 0; |
|
14153 } |
|
14154 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { |
|
14155 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, |
|
14156 nagle_applies, un_sent); |
|
14157 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size, |
|
14158 stcb->asoc.total_flight, |
|
14159 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count); |
|
14160 } |
|
14161 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) { |
|
14162 /* we can attempt to send too. */ |
|
14163 if (hold_tcblock == 0) { |
|
14164 /* If there is activity recv'ing sacks no need to send */ |
|
14165 if (SCTP_TCB_TRYLOCK(stcb)) { |
|
14166 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); |
|
14167 hold_tcblock = 1; |
|
14168 } |
|
14169 } else { |
|
14170 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); |
|
14171 } |
|
14172 } else if ((queue_only == 0) && |
|
14173 (stcb->asoc.peers_rwnd == 0) && |
|
14174 (stcb->asoc.total_flight == 0)) { |
|
14175 /* We get to have a probe outstanding */ |
|
14176 if (hold_tcblock == 0) { |
|
14177 hold_tcblock = 1; |
|
14178 SCTP_TCB_LOCK(stcb); |
|
14179 } |
|
14180 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); |
|
14181 } else if (some_on_control) { |
|
14182 int num_out, reason, frag_point; |
|
14183 |
|
14184 /* Here we do control only */ |
|
14185 if (hold_tcblock == 0) { |
|
14186 hold_tcblock = 1; |
|
14187 SCTP_TCB_LOCK(stcb); |
|
14188 } |
|
14189 frag_point = sctp_get_frag_point(stcb, &stcb->asoc); |
|
14190 (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out, |
|
14191 &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED); |
|
14192 } |
|
14193 SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n", |
|
14194 queue_only, stcb->asoc.peers_rwnd, un_sent, |
|
14195 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue, |
|
14196 stcb->asoc.total_output_queue_size, error); |
|
14197 |
|
14198 out: |
|
14199 #if defined(__APPLE__) |
|
14200 sbunlock(&so->so_snd, 1); |
|
14201 #endif |
|
14202 out_unlocked: |
|
14203 |
|
14204 if (local_soresv && stcb) { |
|
14205 atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen); |
|
14206 } |
|
14207 if (create_lock_applied) { |
|
14208 SCTP_ASOC_CREATE_UNLOCK(inp); |
|
14209 } |
|
14210 if ((stcb) && hold_tcblock) { |
|
14211 SCTP_TCB_UNLOCK(stcb); |
|
14212 } |
|
14213 if (stcb && free_cnt_applied) { |
|
14214 atomic_add_int(&stcb->asoc.refcnt, -1); |
|
14215 } |
|
14216 #ifdef INVARIANTS |
|
14217 #if !defined(__APPLE__) |
|
14218 if (stcb) { |
|
14219 if (mtx_owned(&stcb->tcb_mtx)) { |
|
14220 panic("Leaving with tcb mtx owned?"); |
|
14221 } |
|
14222 if (mtx_owned(&stcb->tcb_send_mtx)) { |
|
14223 panic("Leaving with tcb send mtx owned?"); |
|
14224 } |
|
14225 } |
|
14226 #endif |
|
14227 #endif |
|
14228 #ifdef __Panda__ |
|
14229 /* |
|
14230 * Handle the EAGAIN/ENOMEM cases to reattach the pak header |
|
14231 * to particle when pak is passed in, so that caller |
|
14232 * can try again with this pak |
|
14233 * |
|
14234 * NOTE: For other cases, including success case, |
|
14235 * we simply want to return the header back to free |
|
14236 * pool |
|
14237 */ |
|
14238 if (top) { |
|
14239 if ((error == EAGAIN) || (error == ENOMEM)) { |
|
14240 SCTP_ATTACH_CHAIN(i_pak, top, sndlen); |
|
14241 top = NULL; |
|
14242 } else { |
|
14243 (void)SCTP_RELEASE_HEADER(i_pak); |
|
14244 } |
|
14245 } else { |
|
14246 /* This is to handle cases when top has |
|
14247 * been reset to NULL but pak might not |
|
14248 * be freed |
|
14249 */ |
|
14250 if (i_pak) { |
|
14251 (void)SCTP_RELEASE_HEADER(i_pak); |
|
14252 } |
|
14253 } |
|
14254 #endif |
|
14255 #ifdef INVARIANTS |
|
14256 if (inp) { |
|
14257 sctp_validate_no_locks(inp); |
|
14258 } else { |
|
14259 SCTP_PRINTF("Warning - inp is NULL so cant validate locks\n"); |
|
14260 } |
|
14261 #endif |
|
14262 if (top) { |
|
14263 sctp_m_freem(top); |
|
14264 } |
|
14265 if (control) { |
|
14266 sctp_m_freem(control); |
|
14267 } |
|
14268 return (error); |
|
14269 } |
|
14270 |
|
14271 |
|
14272 /* |
|
14273 * generate an AUTHentication chunk, if required |
|
14274 */ |
|
14275 struct mbuf * |
|
14276 sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end, |
|
14277 struct sctp_auth_chunk **auth_ret, uint32_t * offset, |
|
14278 struct sctp_tcb *stcb, uint8_t chunk) |
|
14279 { |
|
14280 struct mbuf *m_auth; |
|
14281 struct sctp_auth_chunk *auth; |
|
14282 int chunk_len; |
|
14283 struct mbuf *cn; |
|
14284 |
|
14285 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) || |
|
14286 (stcb == NULL)) |
|
14287 return (m); |
|
14288 |
|
14289 /* sysctl disabled auth? */ |
|
14290 if (SCTP_BASE_SYSCTL(sctp_auth_disable)) |
|
14291 return (m); |
|
14292 |
|
14293 /* peer doesn't do auth... */ |
|
14294 if (!stcb->asoc.peer_supports_auth) { |
|
14295 return (m); |
|
14296 } |
|
14297 /* does the requested chunk require auth? */ |
|
14298 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) { |
|
14299 return (m); |
|
14300 } |
|
14301 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER); |
|
14302 if (m_auth == NULL) { |
|
14303 /* no mbuf's */ |
|
14304 return (m); |
|
14305 } |
|
14306 /* reserve some space if this will be the first mbuf */ |
|
14307 if (m == NULL) |
|
14308 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD); |
|
14309 /* fill in the AUTH chunk details */ |
|
14310 auth = mtod(m_auth, struct sctp_auth_chunk *); |
|
14311 bzero(auth, sizeof(*auth)); |
|
14312 auth->ch.chunk_type = SCTP_AUTHENTICATION; |
|
14313 auth->ch.chunk_flags = 0; |
|
14314 chunk_len = sizeof(*auth) + |
|
14315 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id); |
|
14316 auth->ch.chunk_length = htons(chunk_len); |
|
14317 auth->hmac_id = htons(stcb->asoc.peer_hmac_id); |
|
14318 /* key id and hmac digest will be computed and filled in upon send */ |
|
14319 |
|
14320 /* save the offset where the auth was inserted into the chain */ |
|
14321 *offset = 0; |
|
14322 for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) { |
|
14323 *offset += SCTP_BUF_LEN(cn); |
|
14324 } |
|
14325 |
|
14326 /* update length and return pointer to the auth chunk */ |
|
14327 SCTP_BUF_LEN(m_auth) = chunk_len; |
|
14328 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0); |
|
14329 if (auth_ret != NULL) |
|
14330 *auth_ret = auth; |
|
14331 |
|
14332 return (m); |
|
14333 } |
|
14334 |
|
14335 #if defined(__FreeBSD__) || defined(__APPLE__) |
|
14336 #ifdef INET6 |
|
/*
 * Check whether the next-hop gateway of route 'ro' is a router that
 * advertised the on-link prefix covering IPv6 source address 'src6'.
 *
 * Returns 1 when the route's gateway matches one of the advertising
 * routers for the matching (non-detached) ND prefix; 0 otherwise,
 * including when there is no route, the address is not AF_INET6, or no
 * prefix entry covers the address.
 */
int
sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
{
	struct nd_prefix *pfx = NULL;
	struct nd_pfxrouter *pfxrtr = NULL;
	struct sockaddr_in6 gw6;

	if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
		return (0);

	/* get prefix entry of address */
	LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
		if (pfx->ndpr_stateflags & NDPRF_DETACHED)
			continue;
		if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
		    &src6->sin6_addr, &pfx->ndpr_mask))
			break;
	}
	/* no prefix entry in the prefix list */
	/* (pfx is NULL here exactly when the loop ran to completion) */
	if (pfx == NULL) {
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
		return (0);
	}

	SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);

	/* search installed gateway from prefix entry */
	LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
		/* build a sockaddr_in6 for this advertising router */
		memset(&gw6, 0, sizeof(struct sockaddr_in6));
		gw6.sin6_family = AF_INET6;
#ifdef HAVE_SIN6_LEN
		gw6.sin6_len = sizeof(struct sockaddr_in6);
#endif
		memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
		    sizeof(struct in6_addr));
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
		SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
		if (sctp_cmpaddr((struct sockaddr *)&gw6,
		    ro->ro_rt->rt_gateway)) {
			SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
			return (1);
		}
	}
	SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
	return (0);
}
|
14387 #endif |
|
14388 |
|
14389 int |
|
14390 sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro) |
|
14391 { |
|
14392 #ifdef INET |
|
14393 struct sockaddr_in *sin, *mask; |
|
14394 struct ifaddr *ifa; |
|
14395 struct in_addr srcnetaddr, gwnetaddr; |
|
14396 |
|
14397 if (ro == NULL || ro->ro_rt == NULL || |
|
14398 sifa->address.sa.sa_family != AF_INET) { |
|
14399 return (0); |
|
14400 } |
|
14401 ifa = (struct ifaddr *)sifa->ifa; |
|
14402 mask = (struct sockaddr_in *)(ifa->ifa_netmask); |
|
14403 sin = (struct sockaddr_in *)&sifa->address.sin; |
|
14404 srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr); |
|
14405 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is "); |
|
14406 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa); |
|
14407 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr); |
|
14408 |
|
14409 sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway; |
|
14410 gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr); |
|
14411 SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is "); |
|
14412 SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway); |
|
14413 SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr); |
|
14414 if (srcnetaddr.s_addr == gwnetaddr.s_addr) { |
|
14415 return (1); |
|
14416 } |
|
14417 #endif |
|
14418 return (0); |
|
14419 } |
|
14420 #elif defined(__Userspace__) |
|
14421 /* TODO __Userspace__ versions of sctp_vXsrc_match_nexthop(). */ |
|
int
sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
{
	/* __Userspace__ stub: next-hop matching not implemented; never matches. */
	return (0);
}
|
int
sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
{
	/* __Userspace__ stub: next-hop matching not implemented; never matches. */
	return (0);
}
|
14432 |
|
14433 #endif |