1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/netwerk/sctp/src/netinet/sctp_output.c Wed Dec 31 06:09:35 2014 +0100 1.3 @@ -0,0 +1,14433 @@ 1.4 +/*- 1.5 + * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 1.6 + * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 1.7 + * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 1.8 + * 1.9 + * Redistribution and use in source and binary forms, with or without 1.10 + * modification, are permitted provided that the following conditions are met: 1.11 + * 1.12 + * a) Redistributions of source code must retain the above copyright notice, 1.13 + * this list of conditions and the following disclaimer. 1.14 + * 1.15 + * b) Redistributions in binary form must reproduce the above copyright 1.16 + * notice, this list of conditions and the following disclaimer in 1.17 + * the documentation and/or other materials provided with the distribution. 1.18 + * 1.19 + * c) Neither the name of Cisco Systems, Inc. nor the names of its 1.20 + * contributors may be used to endorse or promote products derived 1.21 + * from this software without specific prior written permission. 1.22 + * 1.23 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 1.24 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 1.25 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 1.26 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 1.27 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 1.28 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 1.29 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 1.30 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 1.31 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 1.32 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 1.33 + * THE POSSIBILITY OF SUCH DAMAGE. 
1.34 + */ 1.35 + 1.36 +#ifdef __FreeBSD__ 1.37 +#include <sys/cdefs.h> 1.38 +__FBSDID("$FreeBSD: head/sys/netinet/sctp_output.c 262252 2014-02-20 20:14:43Z tuexen $"); 1.39 +#endif 1.40 + 1.41 +#include <netinet/sctp_os.h> 1.42 +#ifdef __FreeBSD__ 1.43 +#include <sys/proc.h> 1.44 +#endif 1.45 +#include <netinet/sctp_var.h> 1.46 +#include <netinet/sctp_sysctl.h> 1.47 +#include <netinet/sctp_header.h> 1.48 +#include <netinet/sctp_pcb.h> 1.49 +#include <netinet/sctputil.h> 1.50 +#include <netinet/sctp_output.h> 1.51 +#include <netinet/sctp_uio.h> 1.52 +#include <netinet/sctputil.h> 1.53 +#include <netinet/sctp_auth.h> 1.54 +#include <netinet/sctp_timer.h> 1.55 +#include <netinet/sctp_asconf.h> 1.56 +#include <netinet/sctp_indata.h> 1.57 +#include <netinet/sctp_bsd_addr.h> 1.58 +#include <netinet/sctp_input.h> 1.59 +#include <netinet/sctp_crc32.h> 1.60 +#if defined(__Userspace_os_Linux) 1.61 +#define __FAVOR_BSD /* (on Ubuntu at least) enables UDP header field names like BSD in RFC 768 */ 1.62 +#endif 1.63 +#if !defined(__Userspace_os_Windows) 1.64 +#include <netinet/udp.h> 1.65 +#endif 1.66 +#if defined(__APPLE__) 1.67 +#include <netinet/in.h> 1.68 +#endif 1.69 +#if defined(__FreeBSD__) 1.70 +#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 1.71 +#include <netinet/udp_var.h> 1.72 +#endif 1.73 +#include <machine/in_cksum.h> 1.74 +#endif 1.75 +#if defined(__Userspace__) && defined(INET6) 1.76 +#include <netinet6/sctp6_var.h> 1.77 +#endif 1.78 + 1.79 +#if defined(__APPLE__) 1.80 +#define APPLE_FILE_NO 3 1.81 +#endif 1.82 + 1.83 +#if defined(__APPLE__) 1.84 +#if !(defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)) 1.85 +#define SCTP_MAX_LINKHDR 16 1.86 +#endif 1.87 +#endif 1.88 + 1.89 +#define SCTP_MAX_GAPS_INARRAY 4 1.90 +struct sack_track { 1.91 + uint8_t right_edge; /* mergable on the right edge */ 1.92 + uint8_t left_edge; /* mergable on the left edge */ 1.93 + uint8_t num_entries; 1.94 + uint8_t spare; 1.95 + struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY]; 1.96 +}; 1.97 + 1.98 +struct sack_track sack_array[256] = { 1.99 + {0, 0, 0, 0, /* 0x00 */ 1.100 + {{0, 0}, 1.101 + {0, 0}, 1.102 + {0, 0}, 1.103 + {0, 0} 1.104 + } 1.105 + }, 1.106 + {1, 0, 1, 0, /* 0x01 */ 1.107 + {{0, 0}, 1.108 + {0, 0}, 1.109 + {0, 0}, 1.110 + {0, 0} 1.111 + } 1.112 + }, 1.113 + {0, 0, 1, 0, /* 0x02 */ 1.114 + {{1, 1}, 1.115 + {0, 0}, 1.116 + {0, 0}, 1.117 + {0, 0} 1.118 + } 1.119 + }, 1.120 + {1, 0, 1, 0, /* 0x03 */ 1.121 + {{0, 1}, 1.122 + {0, 0}, 1.123 + {0, 0}, 1.124 + {0, 0} 1.125 + } 1.126 + }, 1.127 + {0, 0, 1, 0, /* 0x04 */ 1.128 + {{2, 2}, 1.129 + {0, 0}, 1.130 + {0, 0}, 1.131 + {0, 0} 1.132 + } 1.133 + }, 1.134 + {1, 0, 2, 0, /* 0x05 */ 1.135 + {{0, 0}, 1.136 + {2, 2}, 1.137 + {0, 0}, 1.138 + {0, 0} 1.139 + } 1.140 + }, 1.141 + {0, 0, 1, 0, /* 0x06 */ 1.142 + {{1, 2}, 1.143 + {0, 0}, 1.144 + {0, 0}, 1.145 + {0, 0} 1.146 + } 1.147 + }, 1.148 + {1, 0, 1, 0, /* 0x07 */ 1.149 + {{0, 2}, 1.150 + {0, 0}, 1.151 + {0, 0}, 1.152 + {0, 0} 1.153 + } 1.154 + }, 1.155 + {0, 0, 1, 0, /* 0x08 */ 1.156 + {{3, 3}, 1.157 + {0, 0}, 1.158 + {0, 0}, 1.159 + {0, 0} 1.160 + } 1.161 + }, 1.162 + {1, 0, 2, 0, /* 0x09 */ 1.163 + {{0, 0}, 1.164 + {3, 3}, 1.165 + {0, 0}, 1.166 + {0, 0} 1.167 + } 1.168 + }, 1.169 + {0, 0, 2, 0, /* 0x0a */ 1.170 + {{1, 1}, 1.171 + {3, 3}, 1.172 + {0, 0}, 1.173 + {0, 0} 1.174 + } 1.175 + }, 1.176 + {1, 0, 2, 0, /* 0x0b */ 1.177 + {{0, 1}, 1.178 + {3, 3}, 1.179 + {0, 0}, 1.180 + {0, 0} 1.181 + } 1.182 + }, 1.183 + {0, 0, 1, 0, /* 0x0c */ 1.184 + {{2, 3}, 1.185 + {0, 0}, 1.186 + {0, 0}, 
1.187 + {0, 0} 1.188 + } 1.189 + }, 1.190 + {1, 0, 2, 0, /* 0x0d */ 1.191 + {{0, 0}, 1.192 + {2, 3}, 1.193 + {0, 0}, 1.194 + {0, 0} 1.195 + } 1.196 + }, 1.197 + {0, 0, 1, 0, /* 0x0e */ 1.198 + {{1, 3}, 1.199 + {0, 0}, 1.200 + {0, 0}, 1.201 + {0, 0} 1.202 + } 1.203 + }, 1.204 + {1, 0, 1, 0, /* 0x0f */ 1.205 + {{0, 3}, 1.206 + {0, 0}, 1.207 + {0, 0}, 1.208 + {0, 0} 1.209 + } 1.210 + }, 1.211 + {0, 0, 1, 0, /* 0x10 */ 1.212 + {{4, 4}, 1.213 + {0, 0}, 1.214 + {0, 0}, 1.215 + {0, 0} 1.216 + } 1.217 + }, 1.218 + {1, 0, 2, 0, /* 0x11 */ 1.219 + {{0, 0}, 1.220 + {4, 4}, 1.221 + {0, 0}, 1.222 + {0, 0} 1.223 + } 1.224 + }, 1.225 + {0, 0, 2, 0, /* 0x12 */ 1.226 + {{1, 1}, 1.227 + {4, 4}, 1.228 + {0, 0}, 1.229 + {0, 0} 1.230 + } 1.231 + }, 1.232 + {1, 0, 2, 0, /* 0x13 */ 1.233 + {{0, 1}, 1.234 + {4, 4}, 1.235 + {0, 0}, 1.236 + {0, 0} 1.237 + } 1.238 + }, 1.239 + {0, 0, 2, 0, /* 0x14 */ 1.240 + {{2, 2}, 1.241 + {4, 4}, 1.242 + {0, 0}, 1.243 + {0, 0} 1.244 + } 1.245 + }, 1.246 + {1, 0, 3, 0, /* 0x15 */ 1.247 + {{0, 0}, 1.248 + {2, 2}, 1.249 + {4, 4}, 1.250 + {0, 0} 1.251 + } 1.252 + }, 1.253 + {0, 0, 2, 0, /* 0x16 */ 1.254 + {{1, 2}, 1.255 + {4, 4}, 1.256 + {0, 0}, 1.257 + {0, 0} 1.258 + } 1.259 + }, 1.260 + {1, 0, 2, 0, /* 0x17 */ 1.261 + {{0, 2}, 1.262 + {4, 4}, 1.263 + {0, 0}, 1.264 + {0, 0} 1.265 + } 1.266 + }, 1.267 + {0, 0, 1, 0, /* 0x18 */ 1.268 + {{3, 4}, 1.269 + {0, 0}, 1.270 + {0, 0}, 1.271 + {0, 0} 1.272 + } 1.273 + }, 1.274 + {1, 0, 2, 0, /* 0x19 */ 1.275 + {{0, 0}, 1.276 + {3, 4}, 1.277 + {0, 0}, 1.278 + {0, 0} 1.279 + } 1.280 + }, 1.281 + {0, 0, 2, 0, /* 0x1a */ 1.282 + {{1, 1}, 1.283 + {3, 4}, 1.284 + {0, 0}, 1.285 + {0, 0} 1.286 + } 1.287 + }, 1.288 + {1, 0, 2, 0, /* 0x1b */ 1.289 + {{0, 1}, 1.290 + {3, 4}, 1.291 + {0, 0}, 1.292 + {0, 0} 1.293 + } 1.294 + }, 1.295 + {0, 0, 1, 0, /* 0x1c */ 1.296 + {{2, 4}, 1.297 + {0, 0}, 1.298 + {0, 0}, 1.299 + {0, 0} 1.300 + } 1.301 + }, 1.302 + {1, 0, 2, 0, /* 0x1d */ 1.303 + {{0, 0}, 1.304 + {2, 4}, 1.305 + {0, 0}, 1.306 + {0, 0} 1.307 + } 1.308 + }, 1.309 + {0, 0, 1, 0, /* 0x1e */ 1.310 + {{1, 4}, 1.311 + {0, 0}, 1.312 + {0, 0}, 1.313 + {0, 0} 1.314 + } 1.315 + }, 1.316 + {1, 0, 1, 0, /* 0x1f */ 1.317 + {{0, 4}, 1.318 + {0, 0}, 1.319 + {0, 0}, 1.320 + {0, 0} 1.321 + } 1.322 + }, 1.323 + {0, 0, 1, 0, /* 0x20 */ 1.324 + {{5, 5}, 1.325 + {0, 0}, 1.326 + {0, 0}, 1.327 + {0, 0} 1.328 + } 1.329 + }, 1.330 + {1, 0, 2, 0, /* 0x21 */ 1.331 + {{0, 0}, 1.332 + {5, 5}, 1.333 + {0, 0}, 1.334 + {0, 0} 1.335 + } 1.336 + }, 1.337 + {0, 0, 2, 0, /* 0x22 */ 1.338 + {{1, 1}, 1.339 + {5, 5}, 1.340 + {0, 0}, 1.341 + {0, 0} 1.342 + } 1.343 + }, 1.344 + {1, 0, 2, 0, /* 0x23 */ 1.345 + {{0, 1}, 1.346 + {5, 5}, 1.347 + {0, 0}, 1.348 + {0, 0} 1.349 + } 1.350 + }, 1.351 + {0, 0, 2, 0, /* 0x24 */ 1.352 + {{2, 2}, 1.353 + {5, 5}, 1.354 + {0, 0}, 1.355 + {0, 0} 1.356 + } 1.357 + }, 1.358 + {1, 0, 3, 0, /* 0x25 */ 1.359 + {{0, 0}, 1.360 + {2, 2}, 1.361 + {5, 5}, 1.362 + {0, 0} 1.363 + } 1.364 + }, 1.365 + {0, 0, 2, 0, /* 0x26 */ 1.366 + {{1, 2}, 1.367 + {5, 5}, 1.368 + {0, 0}, 1.369 + {0, 0} 1.370 + } 1.371 + }, 1.372 + {1, 0, 2, 0, /* 0x27 */ 1.373 + {{0, 2}, 1.374 + {5, 5}, 1.375 + {0, 0}, 1.376 + {0, 0} 1.377 + } 1.378 + }, 1.379 + {0, 0, 2, 0, /* 0x28 */ 1.380 + {{3, 3}, 1.381 + {5, 5}, 1.382 + {0, 0}, 1.383 + {0, 0} 1.384 + } 1.385 + }, 1.386 + {1, 0, 3, 0, /* 0x29 */ 1.387 + {{0, 0}, 1.388 + {3, 3}, 1.389 + {5, 5}, 1.390 + {0, 0} 1.391 + } 1.392 + }, 1.393 + {0, 0, 3, 0, /* 0x2a */ 1.394 + {{1, 1}, 1.395 + {3, 3}, 1.396 + {5, 5}, 1.397 + {0, 0} 1.398 + } 1.399 + }, 1.400 + 
{1, 0, 3, 0, /* 0x2b */ 1.401 + {{0, 1}, 1.402 + {3, 3}, 1.403 + {5, 5}, 1.404 + {0, 0} 1.405 + } 1.406 + }, 1.407 + {0, 0, 2, 0, /* 0x2c */ 1.408 + {{2, 3}, 1.409 + {5, 5}, 1.410 + {0, 0}, 1.411 + {0, 0} 1.412 + } 1.413 + }, 1.414 + {1, 0, 3, 0, /* 0x2d */ 1.415 + {{0, 0}, 1.416 + {2, 3}, 1.417 + {5, 5}, 1.418 + {0, 0} 1.419 + } 1.420 + }, 1.421 + {0, 0, 2, 0, /* 0x2e */ 1.422 + {{1, 3}, 1.423 + {5, 5}, 1.424 + {0, 0}, 1.425 + {0, 0} 1.426 + } 1.427 + }, 1.428 + {1, 0, 2, 0, /* 0x2f */ 1.429 + {{0, 3}, 1.430 + {5, 5}, 1.431 + {0, 0}, 1.432 + {0, 0} 1.433 + } 1.434 + }, 1.435 + {0, 0, 1, 0, /* 0x30 */ 1.436 + {{4, 5}, 1.437 + {0, 0}, 1.438 + {0, 0}, 1.439 + {0, 0} 1.440 + } 1.441 + }, 1.442 + {1, 0, 2, 0, /* 0x31 */ 1.443 + {{0, 0}, 1.444 + {4, 5}, 1.445 + {0, 0}, 1.446 + {0, 0} 1.447 + } 1.448 + }, 1.449 + {0, 0, 2, 0, /* 0x32 */ 1.450 + {{1, 1}, 1.451 + {4, 5}, 1.452 + {0, 0}, 1.453 + {0, 0} 1.454 + } 1.455 + }, 1.456 + {1, 0, 2, 0, /* 0x33 */ 1.457 + {{0, 1}, 1.458 + {4, 5}, 1.459 + {0, 0}, 1.460 + {0, 0} 1.461 + } 1.462 + }, 1.463 + {0, 0, 2, 0, /* 0x34 */ 1.464 + {{2, 2}, 1.465 + {4, 5}, 1.466 + {0, 0}, 1.467 + {0, 0} 1.468 + } 1.469 + }, 1.470 + {1, 0, 3, 0, /* 0x35 */ 1.471 + {{0, 0}, 1.472 + {2, 2}, 1.473 + {4, 5}, 1.474 + {0, 0} 1.475 + } 1.476 + }, 1.477 + {0, 0, 2, 0, /* 0x36 */ 1.478 + {{1, 2}, 1.479 + {4, 5}, 1.480 + {0, 0}, 1.481 + {0, 0} 1.482 + } 1.483 + }, 1.484 + {1, 0, 2, 0, /* 0x37 */ 1.485 + {{0, 2}, 1.486 + {4, 5}, 1.487 + {0, 0}, 1.488 + {0, 0} 1.489 + } 1.490 + }, 1.491 + {0, 0, 1, 0, /* 0x38 */ 1.492 + {{3, 5}, 1.493 + {0, 0}, 1.494 + {0, 0}, 1.495 + {0, 0} 1.496 + } 1.497 + }, 1.498 + {1, 0, 2, 0, /* 0x39 */ 1.499 + {{0, 0}, 1.500 + {3, 5}, 1.501 + {0, 0}, 1.502 + {0, 0} 1.503 + } 1.504 + }, 1.505 + {0, 0, 2, 0, /* 0x3a */ 1.506 + {{1, 1}, 1.507 + {3, 5}, 1.508 + {0, 0}, 1.509 + {0, 0} 1.510 + } 1.511 + }, 1.512 + {1, 0, 2, 0, /* 0x3b */ 1.513 + {{0, 1}, 1.514 + {3, 5}, 1.515 + {0, 0}, 1.516 + {0, 0} 1.517 + } 1.518 + }, 1.519 + {0, 0, 1, 0, /* 0x3c */ 1.520 + {{2, 5}, 1.521 + {0, 0}, 1.522 + {0, 0}, 1.523 + {0, 0} 1.524 + } 1.525 + }, 1.526 + {1, 0, 2, 0, /* 0x3d */ 1.527 + {{0, 0}, 1.528 + {2, 5}, 1.529 + {0, 0}, 1.530 + {0, 0} 1.531 + } 1.532 + }, 1.533 + {0, 0, 1, 0, /* 0x3e */ 1.534 + {{1, 5}, 1.535 + {0, 0}, 1.536 + {0, 0}, 1.537 + {0, 0} 1.538 + } 1.539 + }, 1.540 + {1, 0, 1, 0, /* 0x3f */ 1.541 + {{0, 5}, 1.542 + {0, 0}, 1.543 + {0, 0}, 1.544 + {0, 0} 1.545 + } 1.546 + }, 1.547 + {0, 0, 1, 0, /* 0x40 */ 1.548 + {{6, 6}, 1.549 + {0, 0}, 1.550 + {0, 0}, 1.551 + {0, 0} 1.552 + } 1.553 + }, 1.554 + {1, 0, 2, 0, /* 0x41 */ 1.555 + {{0, 0}, 1.556 + {6, 6}, 1.557 + {0, 0}, 1.558 + {0, 0} 1.559 + } 1.560 + }, 1.561 + {0, 0, 2, 0, /* 0x42 */ 1.562 + {{1, 1}, 1.563 + {6, 6}, 1.564 + {0, 0}, 1.565 + {0, 0} 1.566 + } 1.567 + }, 1.568 + {1, 0, 2, 0, /* 0x43 */ 1.569 + {{0, 1}, 1.570 + {6, 6}, 1.571 + {0, 0}, 1.572 + {0, 0} 1.573 + } 1.574 + }, 1.575 + {0, 0, 2, 0, /* 0x44 */ 1.576 + {{2, 2}, 1.577 + {6, 6}, 1.578 + {0, 0}, 1.579 + {0, 0} 1.580 + } 1.581 + }, 1.582 + {1, 0, 3, 0, /* 0x45 */ 1.583 + {{0, 0}, 1.584 + {2, 2}, 1.585 + {6, 6}, 1.586 + {0, 0} 1.587 + } 1.588 + }, 1.589 + {0, 0, 2, 0, /* 0x46 */ 1.590 + {{1, 2}, 1.591 + {6, 6}, 1.592 + {0, 0}, 1.593 + {0, 0} 1.594 + } 1.595 + }, 1.596 + {1, 0, 2, 0, /* 0x47 */ 1.597 + {{0, 2}, 1.598 + {6, 6}, 1.599 + {0, 0}, 1.600 + {0, 0} 1.601 + } 1.602 + }, 1.603 + {0, 0, 2, 0, /* 0x48 */ 1.604 + {{3, 3}, 1.605 + {6, 6}, 1.606 + {0, 0}, 1.607 + {0, 0} 1.608 + } 1.609 + }, 1.610 + {1, 0, 3, 0, /* 0x49 */ 1.611 + {{0, 0}, 
1.612 + {3, 3}, 1.613 + {6, 6}, 1.614 + {0, 0} 1.615 + } 1.616 + }, 1.617 + {0, 0, 3, 0, /* 0x4a */ 1.618 + {{1, 1}, 1.619 + {3, 3}, 1.620 + {6, 6}, 1.621 + {0, 0} 1.622 + } 1.623 + }, 1.624 + {1, 0, 3, 0, /* 0x4b */ 1.625 + {{0, 1}, 1.626 + {3, 3}, 1.627 + {6, 6}, 1.628 + {0, 0} 1.629 + } 1.630 + }, 1.631 + {0, 0, 2, 0, /* 0x4c */ 1.632 + {{2, 3}, 1.633 + {6, 6}, 1.634 + {0, 0}, 1.635 + {0, 0} 1.636 + } 1.637 + }, 1.638 + {1, 0, 3, 0, /* 0x4d */ 1.639 + {{0, 0}, 1.640 + {2, 3}, 1.641 + {6, 6}, 1.642 + {0, 0} 1.643 + } 1.644 + }, 1.645 + {0, 0, 2, 0, /* 0x4e */ 1.646 + {{1, 3}, 1.647 + {6, 6}, 1.648 + {0, 0}, 1.649 + {0, 0} 1.650 + } 1.651 + }, 1.652 + {1, 0, 2, 0, /* 0x4f */ 1.653 + {{0, 3}, 1.654 + {6, 6}, 1.655 + {0, 0}, 1.656 + {0, 0} 1.657 + } 1.658 + }, 1.659 + {0, 0, 2, 0, /* 0x50 */ 1.660 + {{4, 4}, 1.661 + {6, 6}, 1.662 + {0, 0}, 1.663 + {0, 0} 1.664 + } 1.665 + }, 1.666 + {1, 0, 3, 0, /* 0x51 */ 1.667 + {{0, 0}, 1.668 + {4, 4}, 1.669 + {6, 6}, 1.670 + {0, 0} 1.671 + } 1.672 + }, 1.673 + {0, 0, 3, 0, /* 0x52 */ 1.674 + {{1, 1}, 1.675 + {4, 4}, 1.676 + {6, 6}, 1.677 + {0, 0} 1.678 + } 1.679 + }, 1.680 + {1, 0, 3, 0, /* 0x53 */ 1.681 + {{0, 1}, 1.682 + {4, 4}, 1.683 + {6, 6}, 1.684 + {0, 0} 1.685 + } 1.686 + }, 1.687 + {0, 0, 3, 0, /* 0x54 */ 1.688 + {{2, 2}, 1.689 + {4, 4}, 1.690 + {6, 6}, 1.691 + {0, 0} 1.692 + } 1.693 + }, 1.694 + {1, 0, 4, 0, /* 0x55 */ 1.695 + {{0, 0}, 1.696 + {2, 2}, 1.697 + {4, 4}, 1.698 + {6, 6} 1.699 + } 1.700 + }, 1.701 + {0, 0, 3, 0, /* 0x56 */ 1.702 + {{1, 2}, 1.703 + {4, 4}, 1.704 + {6, 6}, 1.705 + {0, 0} 1.706 + } 1.707 + }, 1.708 + {1, 0, 3, 0, /* 0x57 */ 1.709 + {{0, 2}, 1.710 + {4, 4}, 1.711 + {6, 6}, 1.712 + {0, 0} 1.713 + } 1.714 + }, 1.715 + {0, 0, 2, 0, /* 0x58 */ 1.716 + {{3, 4}, 1.717 + {6, 6}, 1.718 + {0, 0}, 1.719 + {0, 0} 1.720 + } 1.721 + }, 1.722 + {1, 0, 3, 0, /* 0x59 */ 1.723 + {{0, 0}, 1.724 + {3, 4}, 1.725 + {6, 6}, 1.726 + {0, 0} 1.727 + } 1.728 + }, 1.729 + {0, 0, 3, 0, /* 0x5a */ 1.730 + {{1, 1}, 1.731 + {3, 4}, 1.732 + {6, 6}, 1.733 + {0, 0} 1.734 + } 1.735 + }, 1.736 + {1, 0, 3, 0, /* 0x5b */ 1.737 + {{0, 1}, 1.738 + {3, 4}, 1.739 + {6, 6}, 1.740 + {0, 0} 1.741 + } 1.742 + }, 1.743 + {0, 0, 2, 0, /* 0x5c */ 1.744 + {{2, 4}, 1.745 + {6, 6}, 1.746 + {0, 0}, 1.747 + {0, 0} 1.748 + } 1.749 + }, 1.750 + {1, 0, 3, 0, /* 0x5d */ 1.751 + {{0, 0}, 1.752 + {2, 4}, 1.753 + {6, 6}, 1.754 + {0, 0} 1.755 + } 1.756 + }, 1.757 + {0, 0, 2, 0, /* 0x5e */ 1.758 + {{1, 4}, 1.759 + {6, 6}, 1.760 + {0, 0}, 1.761 + {0, 0} 1.762 + } 1.763 + }, 1.764 + {1, 0, 2, 0, /* 0x5f */ 1.765 + {{0, 4}, 1.766 + {6, 6}, 1.767 + {0, 0}, 1.768 + {0, 0} 1.769 + } 1.770 + }, 1.771 + {0, 0, 1, 0, /* 0x60 */ 1.772 + {{5, 6}, 1.773 + {0, 0}, 1.774 + {0, 0}, 1.775 + {0, 0} 1.776 + } 1.777 + }, 1.778 + {1, 0, 2, 0, /* 0x61 */ 1.779 + {{0, 0}, 1.780 + {5, 6}, 1.781 + {0, 0}, 1.782 + {0, 0} 1.783 + } 1.784 + }, 1.785 + {0, 0, 2, 0, /* 0x62 */ 1.786 + {{1, 1}, 1.787 + {5, 6}, 1.788 + {0, 0}, 1.789 + {0, 0} 1.790 + } 1.791 + }, 1.792 + {1, 0, 2, 0, /* 0x63 */ 1.793 + {{0, 1}, 1.794 + {5, 6}, 1.795 + {0, 0}, 1.796 + {0, 0} 1.797 + } 1.798 + }, 1.799 + {0, 0, 2, 0, /* 0x64 */ 1.800 + {{2, 2}, 1.801 + {5, 6}, 1.802 + {0, 0}, 1.803 + {0, 0} 1.804 + } 1.805 + }, 1.806 + {1, 0, 3, 0, /* 0x65 */ 1.807 + {{0, 0}, 1.808 + {2, 2}, 1.809 + {5, 6}, 1.810 + {0, 0} 1.811 + } 1.812 + }, 1.813 + {0, 0, 2, 0, /* 0x66 */ 1.814 + {{1, 2}, 1.815 + {5, 6}, 1.816 + {0, 0}, 1.817 + {0, 0} 1.818 + } 1.819 + }, 1.820 + {1, 0, 2, 0, /* 0x67 */ 1.821 + {{0, 2}, 1.822 + {5, 6}, 1.823 + {0, 0}, 1.824 + {0, 
0} 1.825 + } 1.826 + }, 1.827 + {0, 0, 2, 0, /* 0x68 */ 1.828 + {{3, 3}, 1.829 + {5, 6}, 1.830 + {0, 0}, 1.831 + {0, 0} 1.832 + } 1.833 + }, 1.834 + {1, 0, 3, 0, /* 0x69 */ 1.835 + {{0, 0}, 1.836 + {3, 3}, 1.837 + {5, 6}, 1.838 + {0, 0} 1.839 + } 1.840 + }, 1.841 + {0, 0, 3, 0, /* 0x6a */ 1.842 + {{1, 1}, 1.843 + {3, 3}, 1.844 + {5, 6}, 1.845 + {0, 0} 1.846 + } 1.847 + }, 1.848 + {1, 0, 3, 0, /* 0x6b */ 1.849 + {{0, 1}, 1.850 + {3, 3}, 1.851 + {5, 6}, 1.852 + {0, 0} 1.853 + } 1.854 + }, 1.855 + {0, 0, 2, 0, /* 0x6c */ 1.856 + {{2, 3}, 1.857 + {5, 6}, 1.858 + {0, 0}, 1.859 + {0, 0} 1.860 + } 1.861 + }, 1.862 + {1, 0, 3, 0, /* 0x6d */ 1.863 + {{0, 0}, 1.864 + {2, 3}, 1.865 + {5, 6}, 1.866 + {0, 0} 1.867 + } 1.868 + }, 1.869 + {0, 0, 2, 0, /* 0x6e */ 1.870 + {{1, 3}, 1.871 + {5, 6}, 1.872 + {0, 0}, 1.873 + {0, 0} 1.874 + } 1.875 + }, 1.876 + {1, 0, 2, 0, /* 0x6f */ 1.877 + {{0, 3}, 1.878 + {5, 6}, 1.879 + {0, 0}, 1.880 + {0, 0} 1.881 + } 1.882 + }, 1.883 + {0, 0, 1, 0, /* 0x70 */ 1.884 + {{4, 6}, 1.885 + {0, 0}, 1.886 + {0, 0}, 1.887 + {0, 0} 1.888 + } 1.889 + }, 1.890 + {1, 0, 2, 0, /* 0x71 */ 1.891 + {{0, 0}, 1.892 + {4, 6}, 1.893 + {0, 0}, 1.894 + {0, 0} 1.895 + } 1.896 + }, 1.897 + {0, 0, 2, 0, /* 0x72 */ 1.898 + {{1, 1}, 1.899 + {4, 6}, 1.900 + {0, 0}, 1.901 + {0, 0} 1.902 + } 1.903 + }, 1.904 + {1, 0, 2, 0, /* 0x73 */ 1.905 + {{0, 1}, 1.906 + {4, 6}, 1.907 + {0, 0}, 1.908 + {0, 0} 1.909 + } 1.910 + }, 1.911 + {0, 0, 2, 0, /* 0x74 */ 1.912 + {{2, 2}, 1.913 + {4, 6}, 1.914 + {0, 0}, 1.915 + {0, 0} 1.916 + } 1.917 + }, 1.918 + {1, 0, 3, 0, /* 0x75 */ 1.919 + {{0, 0}, 1.920 + {2, 2}, 1.921 + {4, 6}, 1.922 + {0, 0} 1.923 + } 1.924 + }, 1.925 + {0, 0, 2, 0, /* 0x76 */ 1.926 + {{1, 2}, 1.927 + {4, 6}, 1.928 + {0, 0}, 1.929 + {0, 0} 1.930 + } 1.931 + }, 1.932 + {1, 0, 2, 0, /* 0x77 */ 1.933 + {{0, 2}, 1.934 + {4, 6}, 1.935 + {0, 0}, 1.936 + {0, 0} 1.937 + } 1.938 + }, 1.939 + {0, 0, 1, 0, /* 0x78 */ 1.940 + {{3, 6}, 1.941 + {0, 0}, 1.942 + {0, 0}, 1.943 + {0, 0} 1.944 + } 1.945 + }, 1.946 + {1, 0, 2, 0, /* 0x79 */ 1.947 + {{0, 0}, 1.948 + {3, 6}, 1.949 + {0, 0}, 1.950 + {0, 0} 1.951 + } 1.952 + }, 1.953 + {0, 0, 2, 0, /* 0x7a */ 1.954 + {{1, 1}, 1.955 + {3, 6}, 1.956 + {0, 0}, 1.957 + {0, 0} 1.958 + } 1.959 + }, 1.960 + {1, 0, 2, 0, /* 0x7b */ 1.961 + {{0, 1}, 1.962 + {3, 6}, 1.963 + {0, 0}, 1.964 + {0, 0} 1.965 + } 1.966 + }, 1.967 + {0, 0, 1, 0, /* 0x7c */ 1.968 + {{2, 6}, 1.969 + {0, 0}, 1.970 + {0, 0}, 1.971 + {0, 0} 1.972 + } 1.973 + }, 1.974 + {1, 0, 2, 0, /* 0x7d */ 1.975 + {{0, 0}, 1.976 + {2, 6}, 1.977 + {0, 0}, 1.978 + {0, 0} 1.979 + } 1.980 + }, 1.981 + {0, 0, 1, 0, /* 0x7e */ 1.982 + {{1, 6}, 1.983 + {0, 0}, 1.984 + {0, 0}, 1.985 + {0, 0} 1.986 + } 1.987 + }, 1.988 + {1, 0, 1, 0, /* 0x7f */ 1.989 + {{0, 6}, 1.990 + {0, 0}, 1.991 + {0, 0}, 1.992 + {0, 0} 1.993 + } 1.994 + }, 1.995 + {0, 1, 1, 0, /* 0x80 */ 1.996 + {{7, 7}, 1.997 + {0, 0}, 1.998 + {0, 0}, 1.999 + {0, 0} 1.1000 + } 1.1001 + }, 1.1002 + {1, 1, 2, 0, /* 0x81 */ 1.1003 + {{0, 0}, 1.1004 + {7, 7}, 1.1005 + {0, 0}, 1.1006 + {0, 0} 1.1007 + } 1.1008 + }, 1.1009 + {0, 1, 2, 0, /* 0x82 */ 1.1010 + {{1, 1}, 1.1011 + {7, 7}, 1.1012 + {0, 0}, 1.1013 + {0, 0} 1.1014 + } 1.1015 + }, 1.1016 + {1, 1, 2, 0, /* 0x83 */ 1.1017 + {{0, 1}, 1.1018 + {7, 7}, 1.1019 + {0, 0}, 1.1020 + {0, 0} 1.1021 + } 1.1022 + }, 1.1023 + {0, 1, 2, 0, /* 0x84 */ 1.1024 + {{2, 2}, 1.1025 + {7, 7}, 1.1026 + {0, 0}, 1.1027 + {0, 0} 1.1028 + } 1.1029 + }, 1.1030 + {1, 1, 3, 0, /* 0x85 */ 1.1031 + {{0, 0}, 1.1032 + {2, 2}, 1.1033 + {7, 7}, 1.1034 + {0, 0} 1.1035 
+ } 1.1036 + }, 1.1037 + {0, 1, 2, 0, /* 0x86 */ 1.1038 + {{1, 2}, 1.1039 + {7, 7}, 1.1040 + {0, 0}, 1.1041 + {0, 0} 1.1042 + } 1.1043 + }, 1.1044 + {1, 1, 2, 0, /* 0x87 */ 1.1045 + {{0, 2}, 1.1046 + {7, 7}, 1.1047 + {0, 0}, 1.1048 + {0, 0} 1.1049 + } 1.1050 + }, 1.1051 + {0, 1, 2, 0, /* 0x88 */ 1.1052 + {{3, 3}, 1.1053 + {7, 7}, 1.1054 + {0, 0}, 1.1055 + {0, 0} 1.1056 + } 1.1057 + }, 1.1058 + {1, 1, 3, 0, /* 0x89 */ 1.1059 + {{0, 0}, 1.1060 + {3, 3}, 1.1061 + {7, 7}, 1.1062 + {0, 0} 1.1063 + } 1.1064 + }, 1.1065 + {0, 1, 3, 0, /* 0x8a */ 1.1066 + {{1, 1}, 1.1067 + {3, 3}, 1.1068 + {7, 7}, 1.1069 + {0, 0} 1.1070 + } 1.1071 + }, 1.1072 + {1, 1, 3, 0, /* 0x8b */ 1.1073 + {{0, 1}, 1.1074 + {3, 3}, 1.1075 + {7, 7}, 1.1076 + {0, 0} 1.1077 + } 1.1078 + }, 1.1079 + {0, 1, 2, 0, /* 0x8c */ 1.1080 + {{2, 3}, 1.1081 + {7, 7}, 1.1082 + {0, 0}, 1.1083 + {0, 0} 1.1084 + } 1.1085 + }, 1.1086 + {1, 1, 3, 0, /* 0x8d */ 1.1087 + {{0, 0}, 1.1088 + {2, 3}, 1.1089 + {7, 7}, 1.1090 + {0, 0} 1.1091 + } 1.1092 + }, 1.1093 + {0, 1, 2, 0, /* 0x8e */ 1.1094 + {{1, 3}, 1.1095 + {7, 7}, 1.1096 + {0, 0}, 1.1097 + {0, 0} 1.1098 + } 1.1099 + }, 1.1100 + {1, 1, 2, 0, /* 0x8f */ 1.1101 + {{0, 3}, 1.1102 + {7, 7}, 1.1103 + {0, 0}, 1.1104 + {0, 0} 1.1105 + } 1.1106 + }, 1.1107 + {0, 1, 2, 0, /* 0x90 */ 1.1108 + {{4, 4}, 1.1109 + {7, 7}, 1.1110 + {0, 0}, 1.1111 + {0, 0} 1.1112 + } 1.1113 + }, 1.1114 + {1, 1, 3, 0, /* 0x91 */ 1.1115 + {{0, 0}, 1.1116 + {4, 4}, 1.1117 + {7, 7}, 1.1118 + {0, 0} 1.1119 + } 1.1120 + }, 1.1121 + {0, 1, 3, 0, /* 0x92 */ 1.1122 + {{1, 1}, 1.1123 + {4, 4}, 1.1124 + {7, 7}, 1.1125 + {0, 0} 1.1126 + } 1.1127 + }, 1.1128 + {1, 1, 3, 0, /* 0x93 */ 1.1129 + {{0, 1}, 1.1130 + {4, 4}, 1.1131 + {7, 7}, 1.1132 + {0, 0} 1.1133 + } 1.1134 + }, 1.1135 + {0, 1, 3, 0, /* 0x94 */ 1.1136 + {{2, 2}, 1.1137 + {4, 4}, 1.1138 + {7, 7}, 1.1139 + {0, 0} 1.1140 + } 1.1141 + }, 1.1142 + {1, 1, 4, 0, /* 0x95 */ 1.1143 + {{0, 0}, 1.1144 + {2, 2}, 1.1145 + {4, 4}, 1.1146 + {7, 7} 1.1147 + } 1.1148 + }, 1.1149 + {0, 1, 3, 0, /* 0x96 */ 1.1150 + {{1, 2}, 1.1151 + {4, 4}, 1.1152 + {7, 7}, 1.1153 + {0, 0} 1.1154 + } 1.1155 + }, 1.1156 + {1, 1, 3, 0, /* 0x97 */ 1.1157 + {{0, 2}, 1.1158 + {4, 4}, 1.1159 + {7, 7}, 1.1160 + {0, 0} 1.1161 + } 1.1162 + }, 1.1163 + {0, 1, 2, 0, /* 0x98 */ 1.1164 + {{3, 4}, 1.1165 + {7, 7}, 1.1166 + {0, 0}, 1.1167 + {0, 0} 1.1168 + } 1.1169 + }, 1.1170 + {1, 1, 3, 0, /* 0x99 */ 1.1171 + {{0, 0}, 1.1172 + {3, 4}, 1.1173 + {7, 7}, 1.1174 + {0, 0} 1.1175 + } 1.1176 + }, 1.1177 + {0, 1, 3, 0, /* 0x9a */ 1.1178 + {{1, 1}, 1.1179 + {3, 4}, 1.1180 + {7, 7}, 1.1181 + {0, 0} 1.1182 + } 1.1183 + }, 1.1184 + {1, 1, 3, 0, /* 0x9b */ 1.1185 + {{0, 1}, 1.1186 + {3, 4}, 1.1187 + {7, 7}, 1.1188 + {0, 0} 1.1189 + } 1.1190 + }, 1.1191 + {0, 1, 2, 0, /* 0x9c */ 1.1192 + {{2, 4}, 1.1193 + {7, 7}, 1.1194 + {0, 0}, 1.1195 + {0, 0} 1.1196 + } 1.1197 + }, 1.1198 + {1, 1, 3, 0, /* 0x9d */ 1.1199 + {{0, 0}, 1.1200 + {2, 4}, 1.1201 + {7, 7}, 1.1202 + {0, 0} 1.1203 + } 1.1204 + }, 1.1205 + {0, 1, 2, 0, /* 0x9e */ 1.1206 + {{1, 4}, 1.1207 + {7, 7}, 1.1208 + {0, 0}, 1.1209 + {0, 0} 1.1210 + } 1.1211 + }, 1.1212 + {1, 1, 2, 0, /* 0x9f */ 1.1213 + {{0, 4}, 1.1214 + {7, 7}, 1.1215 + {0, 0}, 1.1216 + {0, 0} 1.1217 + } 1.1218 + }, 1.1219 + {0, 1, 2, 0, /* 0xa0 */ 1.1220 + {{5, 5}, 1.1221 + {7, 7}, 1.1222 + {0, 0}, 1.1223 + {0, 0} 1.1224 + } 1.1225 + }, 1.1226 + {1, 1, 3, 0, /* 0xa1 */ 1.1227 + {{0, 0}, 1.1228 + {5, 5}, 1.1229 + {7, 7}, 1.1230 + {0, 0} 1.1231 + } 1.1232 + }, 1.1233 + {0, 1, 3, 0, /* 0xa2 */ 1.1234 + {{1, 1}, 1.1235 + {5, 
5}, 1.1236 + {7, 7}, 1.1237 + {0, 0} 1.1238 + } 1.1239 + }, 1.1240 + {1, 1, 3, 0, /* 0xa3 */ 1.1241 + {{0, 1}, 1.1242 + {5, 5}, 1.1243 + {7, 7}, 1.1244 + {0, 0} 1.1245 + } 1.1246 + }, 1.1247 + {0, 1, 3, 0, /* 0xa4 */ 1.1248 + {{2, 2}, 1.1249 + {5, 5}, 1.1250 + {7, 7}, 1.1251 + {0, 0} 1.1252 + } 1.1253 + }, 1.1254 + {1, 1, 4, 0, /* 0xa5 */ 1.1255 + {{0, 0}, 1.1256 + {2, 2}, 1.1257 + {5, 5}, 1.1258 + {7, 7} 1.1259 + } 1.1260 + }, 1.1261 + {0, 1, 3, 0, /* 0xa6 */ 1.1262 + {{1, 2}, 1.1263 + {5, 5}, 1.1264 + {7, 7}, 1.1265 + {0, 0} 1.1266 + } 1.1267 + }, 1.1268 + {1, 1, 3, 0, /* 0xa7 */ 1.1269 + {{0, 2}, 1.1270 + {5, 5}, 1.1271 + {7, 7}, 1.1272 + {0, 0} 1.1273 + } 1.1274 + }, 1.1275 + {0, 1, 3, 0, /* 0xa8 */ 1.1276 + {{3, 3}, 1.1277 + {5, 5}, 1.1278 + {7, 7}, 1.1279 + {0, 0} 1.1280 + } 1.1281 + }, 1.1282 + {1, 1, 4, 0, /* 0xa9 */ 1.1283 + {{0, 0}, 1.1284 + {3, 3}, 1.1285 + {5, 5}, 1.1286 + {7, 7} 1.1287 + } 1.1288 + }, 1.1289 + {0, 1, 4, 0, /* 0xaa */ 1.1290 + {{1, 1}, 1.1291 + {3, 3}, 1.1292 + {5, 5}, 1.1293 + {7, 7} 1.1294 + } 1.1295 + }, 1.1296 + {1, 1, 4, 0, /* 0xab */ 1.1297 + {{0, 1}, 1.1298 + {3, 3}, 1.1299 + {5, 5}, 1.1300 + {7, 7} 1.1301 + } 1.1302 + }, 1.1303 + {0, 1, 3, 0, /* 0xac */ 1.1304 + {{2, 3}, 1.1305 + {5, 5}, 1.1306 + {7, 7}, 1.1307 + {0, 0} 1.1308 + } 1.1309 + }, 1.1310 + {1, 1, 4, 0, /* 0xad */ 1.1311 + {{0, 0}, 1.1312 + {2, 3}, 1.1313 + {5, 5}, 1.1314 + {7, 7} 1.1315 + } 1.1316 + }, 1.1317 + {0, 1, 3, 0, /* 0xae */ 1.1318 + {{1, 3}, 1.1319 + {5, 5}, 1.1320 + {7, 7}, 1.1321 + {0, 0} 1.1322 + } 1.1323 + }, 1.1324 + {1, 1, 3, 0, /* 0xaf */ 1.1325 + {{0, 3}, 1.1326 + {5, 5}, 1.1327 + {7, 7}, 1.1328 + {0, 0} 1.1329 + } 1.1330 + }, 1.1331 + {0, 1, 2, 0, /* 0xb0 */ 1.1332 + {{4, 5}, 1.1333 + {7, 7}, 1.1334 + {0, 0}, 1.1335 + {0, 0} 1.1336 + } 1.1337 + }, 1.1338 + {1, 1, 3, 0, /* 0xb1 */ 1.1339 + {{0, 0}, 1.1340 + {4, 5}, 1.1341 + {7, 7}, 1.1342 + {0, 0} 1.1343 + } 1.1344 + }, 1.1345 + {0, 1, 3, 0, /* 0xb2 */ 1.1346 + {{1, 1}, 1.1347 + {4, 5}, 1.1348 + {7, 7}, 1.1349 + {0, 0} 1.1350 + } 1.1351 + }, 1.1352 + {1, 1, 3, 0, /* 0xb3 */ 1.1353 + {{0, 1}, 1.1354 + {4, 5}, 1.1355 + {7, 7}, 1.1356 + {0, 0} 1.1357 + } 1.1358 + }, 1.1359 + {0, 1, 3, 0, /* 0xb4 */ 1.1360 + {{2, 2}, 1.1361 + {4, 5}, 1.1362 + {7, 7}, 1.1363 + {0, 0} 1.1364 + } 1.1365 + }, 1.1366 + {1, 1, 4, 0, /* 0xb5 */ 1.1367 + {{0, 0}, 1.1368 + {2, 2}, 1.1369 + {4, 5}, 1.1370 + {7, 7} 1.1371 + } 1.1372 + }, 1.1373 + {0, 1, 3, 0, /* 0xb6 */ 1.1374 + {{1, 2}, 1.1375 + {4, 5}, 1.1376 + {7, 7}, 1.1377 + {0, 0} 1.1378 + } 1.1379 + }, 1.1380 + {1, 1, 3, 0, /* 0xb7 */ 1.1381 + {{0, 2}, 1.1382 + {4, 5}, 1.1383 + {7, 7}, 1.1384 + {0, 0} 1.1385 + } 1.1386 + }, 1.1387 + {0, 1, 2, 0, /* 0xb8 */ 1.1388 + {{3, 5}, 1.1389 + {7, 7}, 1.1390 + {0, 0}, 1.1391 + {0, 0} 1.1392 + } 1.1393 + }, 1.1394 + {1, 1, 3, 0, /* 0xb9 */ 1.1395 + {{0, 0}, 1.1396 + {3, 5}, 1.1397 + {7, 7}, 1.1398 + {0, 0} 1.1399 + } 1.1400 + }, 1.1401 + {0, 1, 3, 0, /* 0xba */ 1.1402 + {{1, 1}, 1.1403 + {3, 5}, 1.1404 + {7, 7}, 1.1405 + {0, 0} 1.1406 + } 1.1407 + }, 1.1408 + {1, 1, 3, 0, /* 0xbb */ 1.1409 + {{0, 1}, 1.1410 + {3, 5}, 1.1411 + {7, 7}, 1.1412 + {0, 0} 1.1413 + } 1.1414 + }, 1.1415 + {0, 1, 2, 0, /* 0xbc */ 1.1416 + {{2, 5}, 1.1417 + {7, 7}, 1.1418 + {0, 0}, 1.1419 + {0, 0} 1.1420 + } 1.1421 + }, 1.1422 + {1, 1, 3, 0, /* 0xbd */ 1.1423 + {{0, 0}, 1.1424 + {2, 5}, 1.1425 + {7, 7}, 1.1426 + {0, 0} 1.1427 + } 1.1428 + }, 1.1429 + {0, 1, 2, 0, /* 0xbe */ 1.1430 + {{1, 5}, 1.1431 + {7, 7}, 1.1432 + {0, 0}, 1.1433 + {0, 0} 1.1434 + } 1.1435 + }, 1.1436 + {1, 1, 2, 0, 
/* 0xbf */ 1.1437 + {{0, 5}, 1.1438 + {7, 7}, 1.1439 + {0, 0}, 1.1440 + {0, 0} 1.1441 + } 1.1442 + }, 1.1443 + {0, 1, 1, 0, /* 0xc0 */ 1.1444 + {{6, 7}, 1.1445 + {0, 0}, 1.1446 + {0, 0}, 1.1447 + {0, 0} 1.1448 + } 1.1449 + }, 1.1450 + {1, 1, 2, 0, /* 0xc1 */ 1.1451 + {{0, 0}, 1.1452 + {6, 7}, 1.1453 + {0, 0}, 1.1454 + {0, 0} 1.1455 + } 1.1456 + }, 1.1457 + {0, 1, 2, 0, /* 0xc2 */ 1.1458 + {{1, 1}, 1.1459 + {6, 7}, 1.1460 + {0, 0}, 1.1461 + {0, 0} 1.1462 + } 1.1463 + }, 1.1464 + {1, 1, 2, 0, /* 0xc3 */ 1.1465 + {{0, 1}, 1.1466 + {6, 7}, 1.1467 + {0, 0}, 1.1468 + {0, 0} 1.1469 + } 1.1470 + }, 1.1471 + {0, 1, 2, 0, /* 0xc4 */ 1.1472 + {{2, 2}, 1.1473 + {6, 7}, 1.1474 + {0, 0}, 1.1475 + {0, 0} 1.1476 + } 1.1477 + }, 1.1478 + {1, 1, 3, 0, /* 0xc5 */ 1.1479 + {{0, 0}, 1.1480 + {2, 2}, 1.1481 + {6, 7}, 1.1482 + {0, 0} 1.1483 + } 1.1484 + }, 1.1485 + {0, 1, 2, 0, /* 0xc6 */ 1.1486 + {{1, 2}, 1.1487 + {6, 7}, 1.1488 + {0, 0}, 1.1489 + {0, 0} 1.1490 + } 1.1491 + }, 1.1492 + {1, 1, 2, 0, /* 0xc7 */ 1.1493 + {{0, 2}, 1.1494 + {6, 7}, 1.1495 + {0, 0}, 1.1496 + {0, 0} 1.1497 + } 1.1498 + }, 1.1499 + {0, 1, 2, 0, /* 0xc8 */ 1.1500 + {{3, 3}, 1.1501 + {6, 7}, 1.1502 + {0, 0}, 1.1503 + {0, 0} 1.1504 + } 1.1505 + }, 1.1506 + {1, 1, 3, 0, /* 0xc9 */ 1.1507 + {{0, 0}, 1.1508 + {3, 3}, 1.1509 + {6, 7}, 1.1510 + {0, 0} 1.1511 + } 1.1512 + }, 1.1513 + {0, 1, 3, 0, /* 0xca */ 1.1514 + {{1, 1}, 1.1515 + {3, 3}, 1.1516 + {6, 7}, 1.1517 + {0, 0} 1.1518 + } 1.1519 + }, 1.1520 + {1, 1, 3, 0, /* 0xcb */ 1.1521 + {{0, 1}, 1.1522 + {3, 3}, 1.1523 + {6, 7}, 1.1524 + {0, 0} 1.1525 + } 1.1526 + }, 1.1527 + {0, 1, 2, 0, /* 0xcc */ 1.1528 + {{2, 3}, 1.1529 + {6, 7}, 1.1530 + {0, 0}, 1.1531 + {0, 0} 1.1532 + } 1.1533 + }, 1.1534 + {1, 1, 3, 0, /* 0xcd */ 1.1535 + {{0, 0}, 1.1536 + {2, 3}, 1.1537 + {6, 7}, 1.1538 + {0, 0} 1.1539 + } 1.1540 + }, 1.1541 + {0, 1, 2, 0, /* 0xce */ 1.1542 + {{1, 3}, 1.1543 + {6, 7}, 1.1544 + {0, 0}, 1.1545 + {0, 0} 1.1546 + } 1.1547 + }, 1.1548 + {1, 1, 2, 0, /* 0xcf */ 1.1549 + {{0, 3}, 1.1550 + {6, 7}, 1.1551 + {0, 0}, 1.1552 + {0, 0} 1.1553 + } 1.1554 + }, 1.1555 + {0, 1, 2, 0, /* 0xd0 */ 1.1556 + {{4, 4}, 1.1557 + {6, 7}, 1.1558 + {0, 0}, 1.1559 + {0, 0} 1.1560 + } 1.1561 + }, 1.1562 + {1, 1, 3, 0, /* 0xd1 */ 1.1563 + {{0, 0}, 1.1564 + {4, 4}, 1.1565 + {6, 7}, 1.1566 + {0, 0} 1.1567 + } 1.1568 + }, 1.1569 + {0, 1, 3, 0, /* 0xd2 */ 1.1570 + {{1, 1}, 1.1571 + {4, 4}, 1.1572 + {6, 7}, 1.1573 + {0, 0} 1.1574 + } 1.1575 + }, 1.1576 + {1, 1, 3, 0, /* 0xd3 */ 1.1577 + {{0, 1}, 1.1578 + {4, 4}, 1.1579 + {6, 7}, 1.1580 + {0, 0} 1.1581 + } 1.1582 + }, 1.1583 + {0, 1, 3, 0, /* 0xd4 */ 1.1584 + {{2, 2}, 1.1585 + {4, 4}, 1.1586 + {6, 7}, 1.1587 + {0, 0} 1.1588 + } 1.1589 + }, 1.1590 + {1, 1, 4, 0, /* 0xd5 */ 1.1591 + {{0, 0}, 1.1592 + {2, 2}, 1.1593 + {4, 4}, 1.1594 + {6, 7} 1.1595 + } 1.1596 + }, 1.1597 + {0, 1, 3, 0, /* 0xd6 */ 1.1598 + {{1, 2}, 1.1599 + {4, 4}, 1.1600 + {6, 7}, 1.1601 + {0, 0} 1.1602 + } 1.1603 + }, 1.1604 + {1, 1, 3, 0, /* 0xd7 */ 1.1605 + {{0, 2}, 1.1606 + {4, 4}, 1.1607 + {6, 7}, 1.1608 + {0, 0} 1.1609 + } 1.1610 + }, 1.1611 + {0, 1, 2, 0, /* 0xd8 */ 1.1612 + {{3, 4}, 1.1613 + {6, 7}, 1.1614 + {0, 0}, 1.1615 + {0, 0} 1.1616 + } 1.1617 + }, 1.1618 + {1, 1, 3, 0, /* 0xd9 */ 1.1619 + {{0, 0}, 1.1620 + {3, 4}, 1.1621 + {6, 7}, 1.1622 + {0, 0} 1.1623 + } 1.1624 + }, 1.1625 + {0, 1, 3, 0, /* 0xda */ 1.1626 + {{1, 1}, 1.1627 + {3, 4}, 1.1628 + {6, 7}, 1.1629 + {0, 0} 1.1630 + } 1.1631 + }, 1.1632 + {1, 1, 3, 0, /* 0xdb */ 1.1633 + {{0, 1}, 1.1634 + {3, 4}, 1.1635 + {6, 7}, 1.1636 + {0, 0} 
1.1637 + } 1.1638 + }, 1.1639 + {0, 1, 2, 0, /* 0xdc */ 1.1640 + {{2, 4}, 1.1641 + {6, 7}, 1.1642 + {0, 0}, 1.1643 + {0, 0} 1.1644 + } 1.1645 + }, 1.1646 + {1, 1, 3, 0, /* 0xdd */ 1.1647 + {{0, 0}, 1.1648 + {2, 4}, 1.1649 + {6, 7}, 1.1650 + {0, 0} 1.1651 + } 1.1652 + }, 1.1653 + {0, 1, 2, 0, /* 0xde */ 1.1654 + {{1, 4}, 1.1655 + {6, 7}, 1.1656 + {0, 0}, 1.1657 + {0, 0} 1.1658 + } 1.1659 + }, 1.1660 + {1, 1, 2, 0, /* 0xdf */ 1.1661 + {{0, 4}, 1.1662 + {6, 7}, 1.1663 + {0, 0}, 1.1664 + {0, 0} 1.1665 + } 1.1666 + }, 1.1667 + {0, 1, 1, 0, /* 0xe0 */ 1.1668 + {{5, 7}, 1.1669 + {0, 0}, 1.1670 + {0, 0}, 1.1671 + {0, 0} 1.1672 + } 1.1673 + }, 1.1674 + {1, 1, 2, 0, /* 0xe1 */ 1.1675 + {{0, 0}, 1.1676 + {5, 7}, 1.1677 + {0, 0}, 1.1678 + {0, 0} 1.1679 + } 1.1680 + }, 1.1681 + {0, 1, 2, 0, /* 0xe2 */ 1.1682 + {{1, 1}, 1.1683 + {5, 7}, 1.1684 + {0, 0}, 1.1685 + {0, 0} 1.1686 + } 1.1687 + }, 1.1688 + {1, 1, 2, 0, /* 0xe3 */ 1.1689 + {{0, 1}, 1.1690 + {5, 7}, 1.1691 + {0, 0}, 1.1692 + {0, 0} 1.1693 + } 1.1694 + }, 1.1695 + {0, 1, 2, 0, /* 0xe4 */ 1.1696 + {{2, 2}, 1.1697 + {5, 7}, 1.1698 + {0, 0}, 1.1699 + {0, 0} 1.1700 + } 1.1701 + }, 1.1702 + {1, 1, 3, 0, /* 0xe5 */ 1.1703 + {{0, 0}, 1.1704 + {2, 2}, 1.1705 + {5, 7}, 1.1706 + {0, 0} 1.1707 + } 1.1708 + }, 1.1709 + {0, 1, 2, 0, /* 0xe6 */ 1.1710 + {{1, 2}, 1.1711 + {5, 7}, 1.1712 + {0, 0}, 1.1713 + {0, 0} 1.1714 + } 1.1715 + }, 1.1716 + {1, 1, 2, 0, /* 0xe7 */ 1.1717 + {{0, 2}, 1.1718 + {5, 7}, 1.1719 + {0, 0}, 1.1720 + {0, 0} 1.1721 + } 1.1722 + }, 1.1723 + {0, 1, 2, 0, /* 0xe8 */ 1.1724 + {{3, 3}, 1.1725 + {5, 7}, 1.1726 + {0, 0}, 1.1727 + {0, 0} 1.1728 + } 1.1729 + }, 1.1730 + {1, 1, 3, 0, /* 0xe9 */ 1.1731 + {{0, 0}, 1.1732 + {3, 3}, 1.1733 + {5, 7}, 1.1734 + {0, 0} 1.1735 + } 1.1736 + }, 1.1737 + {0, 1, 3, 0, /* 0xea */ 1.1738 + {{1, 1}, 1.1739 + {3, 3}, 1.1740 + {5, 7}, 1.1741 + {0, 0} 1.1742 + } 1.1743 + }, 1.1744 + {1, 1, 3, 0, /* 0xeb */ 1.1745 + {{0, 1}, 1.1746 + {3, 3}, 1.1747 + {5, 7}, 1.1748 + {0, 0} 1.1749 + } 1.1750 + }, 1.1751 + {0, 1, 2, 0, /* 0xec */ 1.1752 + {{2, 3}, 1.1753 + {5, 7}, 1.1754 + {0, 0}, 1.1755 + {0, 0} 1.1756 + } 1.1757 + }, 1.1758 + {1, 1, 3, 0, /* 0xed */ 1.1759 + {{0, 0}, 1.1760 + {2, 3}, 1.1761 + {5, 7}, 1.1762 + {0, 0} 1.1763 + } 1.1764 + }, 1.1765 + {0, 1, 2, 0, /* 0xee */ 1.1766 + {{1, 3}, 1.1767 + {5, 7}, 1.1768 + {0, 0}, 1.1769 + {0, 0} 1.1770 + } 1.1771 + }, 1.1772 + {1, 1, 2, 0, /* 0xef */ 1.1773 + {{0, 3}, 1.1774 + {5, 7}, 1.1775 + {0, 0}, 1.1776 + {0, 0} 1.1777 + } 1.1778 + }, 1.1779 + {0, 1, 1, 0, /* 0xf0 */ 1.1780 + {{4, 7}, 1.1781 + {0, 0}, 1.1782 + {0, 0}, 1.1783 + {0, 0} 1.1784 + } 1.1785 + }, 1.1786 + {1, 1, 2, 0, /* 0xf1 */ 1.1787 + {{0, 0}, 1.1788 + {4, 7}, 1.1789 + {0, 0}, 1.1790 + {0, 0} 1.1791 + } 1.1792 + }, 1.1793 + {0, 1, 2, 0, /* 0xf2 */ 1.1794 + {{1, 1}, 1.1795 + {4, 7}, 1.1796 + {0, 0}, 1.1797 + {0, 0} 1.1798 + } 1.1799 + }, 1.1800 + {1, 1, 2, 0, /* 0xf3 */ 1.1801 + {{0, 1}, 1.1802 + {4, 7}, 1.1803 + {0, 0}, 1.1804 + {0, 0} 1.1805 + } 1.1806 + }, 1.1807 + {0, 1, 2, 0, /* 0xf4 */ 1.1808 + {{2, 2}, 1.1809 + {4, 7}, 1.1810 + {0, 0}, 1.1811 + {0, 0} 1.1812 + } 1.1813 + }, 1.1814 + {1, 1, 3, 0, /* 0xf5 */ 1.1815 + {{0, 0}, 1.1816 + {2, 2}, 1.1817 + {4, 7}, 1.1818 + {0, 0} 1.1819 + } 1.1820 + }, 1.1821 + {0, 1, 2, 0, /* 0xf6 */ 1.1822 + {{1, 2}, 1.1823 + {4, 7}, 1.1824 + {0, 0}, 1.1825 + {0, 0} 1.1826 + } 1.1827 + }, 1.1828 + {1, 1, 2, 0, /* 0xf7 */ 1.1829 + {{0, 2}, 1.1830 + {4, 7}, 1.1831 + {0, 0}, 1.1832 + {0, 0} 1.1833 + } 1.1834 + }, 1.1835 + {0, 1, 1, 0, /* 0xf8 */ 1.1836 + {{3, 7}, 1.1837 + 
{0, 0}, 1.1838 + {0, 0}, 1.1839 + {0, 0} 1.1840 + } 1.1841 + }, 1.1842 + {1, 1, 2, 0, /* 0xf9 */ 1.1843 + {{0, 0}, 1.1844 + {3, 7}, 1.1845 + {0, 0}, 1.1846 + {0, 0} 1.1847 + } 1.1848 + }, 1.1849 + {0, 1, 2, 0, /* 0xfa */ 1.1850 + {{1, 1}, 1.1851 + {3, 7}, 1.1852 + {0, 0}, 1.1853 + {0, 0} 1.1854 + } 1.1855 + }, 1.1856 + {1, 1, 2, 0, /* 0xfb */ 1.1857 + {{0, 1}, 1.1858 + {3, 7}, 1.1859 + {0, 0}, 1.1860 + {0, 0} 1.1861 + } 1.1862 + }, 1.1863 + {0, 1, 1, 0, /* 0xfc */ 1.1864 + {{2, 7}, 1.1865 + {0, 0}, 1.1866 + {0, 0}, 1.1867 + {0, 0} 1.1868 + } 1.1869 + }, 1.1870 + {1, 1, 2, 0, /* 0xfd */ 1.1871 + {{0, 0}, 1.1872 + {2, 7}, 1.1873 + {0, 0}, 1.1874 + {0, 0} 1.1875 + } 1.1876 + }, 1.1877 + {0, 1, 1, 0, /* 0xfe */ 1.1878 + {{1, 7}, 1.1879 + {0, 0}, 1.1880 + {0, 0}, 1.1881 + {0, 0} 1.1882 + } 1.1883 + }, 1.1884 + {1, 1, 1, 0, /* 0xff */ 1.1885 + {{0, 7}, 1.1886 + {0, 0}, 1.1887 + {0, 0}, 1.1888 + {0, 0} 1.1889 + } 1.1890 + } 1.1891 +}; 1.1892 + 1.1893 + 1.1894 +int 1.1895 +sctp_is_address_in_scope(struct sctp_ifa *ifa, 1.1896 + struct sctp_scoping *scope, 1.1897 + int do_update) 1.1898 +{ 1.1899 + if ((scope->loopback_scope == 0) && 1.1900 + (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) { 1.1901 + /* 1.1902 + * skip loopback if not in scope * 1.1903 + */ 1.1904 + return (0); 1.1905 + } 1.1906 + switch (ifa->address.sa.sa_family) { 1.1907 +#ifdef INET 1.1908 + case AF_INET: 1.1909 + if (scope->ipv4_addr_legal) { 1.1910 + struct sockaddr_in *sin; 1.1911 + 1.1912 + sin = (struct sockaddr_in *)&ifa->address.sin; 1.1913 + if (sin->sin_addr.s_addr == 0) { 1.1914 + /* not in scope , unspecified */ 1.1915 + return (0); 1.1916 + } 1.1917 + if ((scope->ipv4_local_scope == 0) && 1.1918 + (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 1.1919 + /* private address not in scope */ 1.1920 + return (0); 1.1921 + } 1.1922 + } else { 1.1923 + return (0); 1.1924 + } 1.1925 + break; 1.1926 +#endif 1.1927 +#ifdef INET6 1.1928 + case AF_INET6: 1.1929 + if (scope->ipv6_addr_legal) { 1.1930 + struct sockaddr_in6 *sin6; 1.1931 + 1.1932 +#if !defined(__Panda__) 1.1933 + /* Must update the flags, bummer, which 1.1934 + * means any IFA locks must now be applied HERE <-> 1.1935 + */ 1.1936 + if (do_update) { 1.1937 + sctp_gather_internal_ifa_flags(ifa); 1.1938 + } 1.1939 +#endif 1.1940 + if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { 1.1941 + return (0); 1.1942 + } 1.1943 + /* ok to use deprecated addresses? 
*/ 1.1944 + sin6 = (struct sockaddr_in6 *)&ifa->address.sin6; 1.1945 + if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 1.1946 + /* skip unspecifed addresses */ 1.1947 + return (0); 1.1948 + } 1.1949 + if ( /* (local_scope == 0) && */ 1.1950 + (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) { 1.1951 + return (0); 1.1952 + } 1.1953 + if ((scope->site_scope == 0) && 1.1954 + (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 1.1955 + return (0); 1.1956 + } 1.1957 + } else { 1.1958 + return (0); 1.1959 + } 1.1960 + break; 1.1961 +#endif 1.1962 +#if defined(__Userspace__) 1.1963 + case AF_CONN: 1.1964 + if (!scope->conn_addr_legal) { 1.1965 + return (0); 1.1966 + } 1.1967 + break; 1.1968 +#endif 1.1969 + default: 1.1970 + return (0); 1.1971 + } 1.1972 + return (1); 1.1973 +} 1.1974 + 1.1975 +static struct mbuf * 1.1976 +sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len) 1.1977 +{ 1.1978 +#if defined(INET) || defined(INET6) 1.1979 + struct sctp_paramhdr *parmh; 1.1980 + struct mbuf *mret; 1.1981 + uint16_t plen; 1.1982 +#endif 1.1983 + 1.1984 + switch (ifa->address.sa.sa_family) { 1.1985 +#ifdef INET 1.1986 + case AF_INET: 1.1987 + plen = (uint16_t)sizeof(struct sctp_ipv4addr_param); 1.1988 + break; 1.1989 +#endif 1.1990 +#ifdef INET6 1.1991 + case AF_INET6: 1.1992 + plen = (uint16_t)sizeof(struct sctp_ipv6addr_param); 1.1993 + break; 1.1994 +#endif 1.1995 + default: 1.1996 + return (m); 1.1997 + } 1.1998 +#if defined(INET) || defined(INET6) 1.1999 + if (M_TRAILINGSPACE(m) >= plen) { 1.2000 + /* easy side we just drop it on the end */ 1.2001 + parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m))); 1.2002 + mret = m; 1.2003 + } else { 1.2004 + /* Need more space */ 1.2005 + mret = m; 1.2006 + while (SCTP_BUF_NEXT(mret) != NULL) { 1.2007 + mret = SCTP_BUF_NEXT(mret); 1.2008 + } 1.2009 + SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA); 1.2010 + if (SCTP_BUF_NEXT(mret) == NULL) { 1.2011 + /* We are hosed, can't add more addresses */ 1.2012 + return (m); 1.2013 + } 1.2014 + mret = SCTP_BUF_NEXT(mret); 1.2015 + parmh = mtod(mret, struct sctp_paramhdr *); 1.2016 + } 1.2017 + /* now add the parameter */ 1.2018 + switch (ifa->address.sa.sa_family) { 1.2019 +#ifdef INET 1.2020 + case AF_INET: 1.2021 + { 1.2022 + struct sctp_ipv4addr_param *ipv4p; 1.2023 + struct sockaddr_in *sin; 1.2024 + 1.2025 + sin = (struct sockaddr_in *)&ifa->address.sin; 1.2026 + ipv4p = (struct sctp_ipv4addr_param *)parmh; 1.2027 + parmh->param_type = htons(SCTP_IPV4_ADDRESS); 1.2028 + parmh->param_length = htons(plen); 1.2029 + ipv4p->addr = sin->sin_addr.s_addr; 1.2030 + SCTP_BUF_LEN(mret) += plen; 1.2031 + break; 1.2032 + } 1.2033 +#endif 1.2034 +#ifdef INET6 1.2035 + case AF_INET6: 1.2036 + { 1.2037 + struct sctp_ipv6addr_param *ipv6p; 1.2038 + struct sockaddr_in6 *sin6; 1.2039 + 1.2040 + sin6 = (struct sockaddr_in6 *)&ifa->address.sin6; 1.2041 + ipv6p = (struct sctp_ipv6addr_param *)parmh; 1.2042 + parmh->param_type = htons(SCTP_IPV6_ADDRESS); 1.2043 + parmh->param_length = htons(plen); 1.2044 + memcpy(ipv6p->addr, &sin6->sin6_addr, 1.2045 + sizeof(ipv6p->addr)); 1.2046 +#if defined(SCTP_EMBEDDED_V6_SCOPE) 1.2047 + /* clear embedded scope in the address */ 1.2048 + in6_clearscope((struct in6_addr *)ipv6p->addr); 1.2049 +#endif 1.2050 + SCTP_BUF_LEN(mret) += plen; 1.2051 + break; 1.2052 + } 1.2053 +#endif 1.2054 + default: 1.2055 + return (m); 1.2056 + } 1.2057 + if (len != NULL) { 1.2058 + *len += plen; 1.2059 + } 1.2060 + return (mret); 1.2061 +#endif 1.2062 +} 1.2063 + 
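As an aside: sctp_add_addr_to_mbuf() above appends one address parameter TLV to the INIT/INIT-ACK chunk being built. The minimal sketch below, with ex_-prefixed stand-in names invented here rather than taken from sctp_header.h, writes the same 8-byte IPv4 Address parameter (RFC 4960 parameter type 5) into a flat buffer instead of an mbuf chain, purely to show the wire layout.

#include <stdint.h>
#include <string.h>
#include <netinet/in.h>

#define EX_SCTP_IPV4_ADDRESS 5          /* RFC 4960 IPv4 Address parameter type */

struct ex_paramhdr {                    /* generic parameter TLV header */
	uint16_t param_type;            /* network byte order */
	uint16_t param_length;          /* network byte order, includes header */
};

struct ex_ipv4addr_param {              /* 8-byte IPv4 Address parameter */
	struct ex_paramhdr ph;
	uint32_t addr;                  /* IPv4 address in network byte order */
};

/* Append one IPv4 Address parameter at offset *len and advance *len,
 * mirroring the "*len += plen" bookkeeping done by the mbuf version. */
static void
ex_add_v4_param(uint8_t *buf, uint16_t *len, struct in_addr a)
{
	struct ex_ipv4addr_param p;

	p.ph.param_type = htons(EX_SCTP_IPV4_ADDRESS);
	p.ph.param_length = htons((uint16_t)sizeof(p));
	p.addr = a.s_addr;              /* s_addr is already network byte order */
	memcpy(buf + *len, &p, sizeof(p));
	*len += (uint16_t)sizeof(p);
}

A caller building an INIT would invoke such a helper once per advertised address, which is essentially what sctp_add_addresses_to_i_ia() below does through sctp_add_addr_to_mbuf().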
1.2064 + 1.2065 +struct mbuf * 1.2066 +sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1.2067 + struct sctp_scoping *scope, 1.2068 + struct mbuf *m_at, int cnt_inits_to, 1.2069 + uint16_t *padding_len, uint16_t *chunk_len) 1.2070 +{ 1.2071 + struct sctp_vrf *vrf = NULL; 1.2072 + int cnt, limit_out = 0, total_count; 1.2073 + uint32_t vrf_id; 1.2074 + 1.2075 + vrf_id = inp->def_vrf_id; 1.2076 + SCTP_IPI_ADDR_RLOCK(); 1.2077 + vrf = sctp_find_vrf(vrf_id); 1.2078 + if (vrf == NULL) { 1.2079 + SCTP_IPI_ADDR_RUNLOCK(); 1.2080 + return (m_at); 1.2081 + } 1.2082 + if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1.2083 + struct sctp_ifa *sctp_ifap; 1.2084 + struct sctp_ifn *sctp_ifnp; 1.2085 + 1.2086 + cnt = cnt_inits_to; 1.2087 + if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) { 1.2088 + limit_out = 1; 1.2089 + cnt = SCTP_ADDRESS_LIMIT; 1.2090 + goto skip_count; 1.2091 + } 1.2092 + LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) { 1.2093 + if ((scope->loopback_scope == 0) && 1.2094 + SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) { 1.2095 + /* 1.2096 + * Skip loopback devices if loopback_scope 1.2097 + * not set 1.2098 + */ 1.2099 + continue; 1.2100 + } 1.2101 + LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) { 1.2102 + if (sctp_is_addr_restricted(stcb, sctp_ifap)) { 1.2103 + continue; 1.2104 + } 1.2105 +#if defined(__Userspace__) 1.2106 + if (sctp_ifap->address.sa.sa_family == AF_CONN) { 1.2107 + continue; 1.2108 + } 1.2109 +#endif 1.2110 + if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) { 1.2111 + continue; 1.2112 + } 1.2113 + cnt++; 1.2114 + if (cnt > SCTP_ADDRESS_LIMIT) { 1.2115 + break; 1.2116 + } 1.2117 + } 1.2118 + if (cnt > SCTP_ADDRESS_LIMIT) { 1.2119 + break; 1.2120 + } 1.2121 + } 1.2122 + skip_count: 1.2123 + if (cnt > 1) { 1.2124 + total_count = 0; 1.2125 + LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) { 1.2126 + cnt = 0; 1.2127 + if ((scope->loopback_scope == 0) && 1.2128 + SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) { 1.2129 + /* 1.2130 + * Skip loopback devices if 1.2131 + * loopback_scope not set 1.2132 + */ 1.2133 + continue; 1.2134 + } 1.2135 + LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) { 1.2136 + if (sctp_is_addr_restricted(stcb, sctp_ifap)) { 1.2137 + continue; 1.2138 + } 1.2139 +#if defined(__Userspace__) 1.2140 + if (sctp_ifap->address.sa.sa_family == AF_CONN) { 1.2141 + continue; 1.2142 + } 1.2143 +#endif 1.2144 + if (sctp_is_address_in_scope(sctp_ifap, 1.2145 + scope, 0) == 0) { 1.2146 + continue; 1.2147 + } 1.2148 + if ((chunk_len != NULL) && 1.2149 + (padding_len != NULL) && 1.2150 + (*padding_len > 0)) { 1.2151 + memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len); 1.2152 + SCTP_BUF_LEN(m_at) += *padding_len; 1.2153 + *chunk_len += *padding_len; 1.2154 + *padding_len = 0; 1.2155 + } 1.2156 + m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len); 1.2157 + if (limit_out) { 1.2158 + cnt++; 1.2159 + total_count++; 1.2160 + if (cnt >= 2) { 1.2161 + /* two from each address */ 1.2162 + break; 1.2163 + } 1.2164 + if (total_count > SCTP_ADDRESS_LIMIT) { 1.2165 + /* No more addresses */ 1.2166 + break; 1.2167 + } 1.2168 + } 1.2169 + } 1.2170 + } 1.2171 + } 1.2172 + } else { 1.2173 + struct sctp_laddr *laddr; 1.2174 + 1.2175 + cnt = cnt_inits_to; 1.2176 + /* First, how many ? */ 1.2177 + LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1.2178 + if (laddr->ifa == NULL) { 1.2179 + continue; 1.2180 + } 1.2181 + if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) 1.2182 + /* Address being deleted by the system, dont 1.2183 + * list. 
1.2184 + */ 1.2185 + continue; 1.2186 + if (laddr->action == SCTP_DEL_IP_ADDRESS) { 1.2187 + /* Address being deleted on this ep 1.2188 + * don't list. 1.2189 + */ 1.2190 + continue; 1.2191 + } 1.2192 +#if defined(__Userspace__) 1.2193 + if (laddr->ifa->address.sa.sa_family == AF_CONN) { 1.2194 + continue; 1.2195 + } 1.2196 +#endif 1.2197 + if (sctp_is_address_in_scope(laddr->ifa, 1.2198 + scope, 1) == 0) { 1.2199 + continue; 1.2200 + } 1.2201 + cnt++; 1.2202 + } 1.2203 + /* 1.2204 + * To get through a NAT we only list addresses if we have 1.2205 + * more than one. That way if you just bind a single address 1.2206 + * we let the source of the init dictate our address. 1.2207 + */ 1.2208 + if (cnt > 1) { 1.2209 + cnt = cnt_inits_to; 1.2210 + LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1.2211 + if (laddr->ifa == NULL) { 1.2212 + continue; 1.2213 + } 1.2214 + if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) { 1.2215 + continue; 1.2216 + } 1.2217 +#if defined(__Userspace__) 1.2218 + if (laddr->ifa->address.sa.sa_family == AF_CONN) { 1.2219 + continue; 1.2220 + } 1.2221 +#endif 1.2222 + if (sctp_is_address_in_scope(laddr->ifa, 1.2223 + scope, 0) == 0) { 1.2224 + continue; 1.2225 + } 1.2226 + if ((chunk_len != NULL) && 1.2227 + (padding_len != NULL) && 1.2228 + (*padding_len > 0)) { 1.2229 + memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len); 1.2230 + SCTP_BUF_LEN(m_at) += *padding_len; 1.2231 + *chunk_len += *padding_len; 1.2232 + *padding_len = 0; 1.2233 + } 1.2234 + m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len); 1.2235 + cnt++; 1.2236 + if (cnt >= SCTP_ADDRESS_LIMIT) { 1.2237 + break; 1.2238 + } 1.2239 + } 1.2240 + } 1.2241 + } 1.2242 + SCTP_IPI_ADDR_RUNLOCK(); 1.2243 + return (m_at); 1.2244 +} 1.2245 + 1.2246 +static struct sctp_ifa * 1.2247 +sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa, 1.2248 + uint8_t dest_is_loop, 1.2249 + uint8_t dest_is_priv, 1.2250 + sa_family_t fam) 1.2251 +{ 1.2252 + uint8_t dest_is_global = 0; 1.2253 + /* dest_is_priv is true if destination is a private address */ 1.2254 + /* dest_is_loop is true if destination is a loopback address */ 1.2255 + 1.2256 + /** 1.2257 + * Here we determine if it's a preferred address. A preferred address 1.2258 + * means it is the same scope or higher scope than the destination.
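 * (In short: a source address is preferred only when its scope matches the
 * destination's; the lone exception in the table below is an IPv4 loopback
 * destination, which any IPv4 source may serve.)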
1.2259 + * L = loopback, P = private, G = global 1.2260 + * ----------------------------------------- 1.2261 + * src | dest | result 1.2262 + * ---------------------------------------- 1.2263 + * L | L | yes 1.2264 + * ----------------------------------------- 1.2265 + * P | L | yes-v4 no-v6 1.2266 + * ----------------------------------------- 1.2267 + * G | L | yes-v4 no-v6 1.2268 + * ----------------------------------------- 1.2269 + * L | P | no 1.2270 + * ----------------------------------------- 1.2271 + * P | P | yes 1.2272 + * ----------------------------------------- 1.2273 + * G | P | no 1.2274 + * ----------------------------------------- 1.2275 + * L | G | no 1.2276 + * ----------------------------------------- 1.2277 + * P | G | no 1.2278 + * ----------------------------------------- 1.2279 + * G | G | yes 1.2280 + * ----------------------------------------- 1.2281 + */ 1.2282 + 1.2283 + if (ifa->address.sa.sa_family != fam) { 1.2284 + /* forget mis-matched family */ 1.2285 + return (NULL); 1.2286 + } 1.2287 + if ((dest_is_priv == 0) && (dest_is_loop == 0)) { 1.2288 + dest_is_global = 1; 1.2289 + } 1.2290 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:"); 1.2291 + SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa); 1.2292 + /* Ok the address may be ok */ 1.2293 +#ifdef INET6 1.2294 + if (fam == AF_INET6) { 1.2295 + /* ok to use deprecated addresses? no, let's not! */ 1.2296 + if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { 1.2297 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n"); 1.2298 + return (NULL); 1.2299 + } 1.2300 + if (ifa->src_is_priv && !ifa->src_is_loop) { 1.2301 + if (dest_is_loop) { 1.2302 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n"); 1.2303 + return (NULL); 1.2304 + } 1.2305 + } 1.2306 + if (ifa->src_is_glob) { 1.2307 + if (dest_is_loop) { 1.2308 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n"); 1.2309 + return (NULL); 1.2310 + } 1.2311 + } 1.2312 + } 1.2313 +#endif 1.2314 + /* Now that we know what is what, implement our table. 1.2315 + * This could in theory be done slicker (it used to be), but this 1.2316 + * is straightforward and easier to validate :-) 1.2317 + */ 1.2318 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n", 1.2319 + ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob); 1.2320 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n", 1.2321 + dest_is_loop, dest_is_priv, dest_is_global); 1.2322 + 1.2323 + if ((ifa->src_is_loop) && (dest_is_priv)) { 1.2324 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n"); 1.2325 + return (NULL); 1.2326 + } 1.2327 + if ((ifa->src_is_glob) && (dest_is_priv)) { 1.2328 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n"); 1.2329 + return (NULL); 1.2330 + } 1.2331 + if ((ifa->src_is_loop) && (dest_is_global)) { 1.2332 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n"); 1.2333 + return (NULL); 1.2334 + } 1.2335 + if ((ifa->src_is_priv) && (dest_is_global)) { 1.2336 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n"); 1.2337 + return (NULL); 1.2338 + } 1.2339 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n"); 1.2340 + /* it's a preferred address */ 1.2341 + return (ifa); 1.2342 +} 1.2343 + 1.2344 +static struct sctp_ifa * 1.2345 +sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa, 1.2346 + uint8_t dest_is_loop, 1.2347 + uint8_t dest_is_priv, 1.2348 + sa_family_t fam) 1.2349 +{ 1.2350 + uint8_t dest_is_global = 0; 1.2351 + 1.2352 + /** 1.2353 + * Here we determine if it's an acceptable address.
An acceptable 1.2354 + * address means it is the same scope or higher scope, but we can 1.2355 + * allow for NAT, which means it's ok to have a global dest and a 1.2356 + * private src. 1.2357 + * 1.2358 + * L = loopback, P = private, G = global 1.2359 + * ----------------------------------------- 1.2360 + * src | dest | result 1.2361 + * ----------------------------------------- 1.2362 + * L | L | yes 1.2363 + * ----------------------------------------- 1.2364 + * P | L | yes-v4 no-v6 1.2365 + * ----------------------------------------- 1.2366 + * G | L | yes 1.2367 + * ----------------------------------------- 1.2368 + * L | P | no 1.2369 + * ----------------------------------------- 1.2370 + * P | P | yes 1.2371 + * ----------------------------------------- 1.2372 + * G | P | yes - May not work 1.2373 + * ----------------------------------------- 1.2374 + * L | G | no 1.2375 + * ----------------------------------------- 1.2376 + * P | G | yes - May not work 1.2377 + * ----------------------------------------- 1.2378 + * G | G | yes 1.2379 + * ----------------------------------------- 1.2380 + */ 1.2381 + 1.2382 + if (ifa->address.sa.sa_family != fam) { 1.2383 + /* forget non-matching family */ 1.2384 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n", 1.2385 + ifa->address.sa.sa_family, fam); 1.2386 + return (NULL); 1.2387 + } 1.2388 + /* Ok the address may be ok */ 1.2389 + SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa); 1.2390 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n", 1.2391 + dest_is_loop, dest_is_priv); 1.2392 + if ((dest_is_loop == 0) && (dest_is_priv == 0)) { 1.2393 + dest_is_global = 1; 1.2394 + } 1.2395 +#ifdef INET6 1.2396 + if (fam == AF_INET6) { 1.2397 + /* ok to use deprecated addresses? */ 1.2398 + if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { 1.2399 + return (NULL); 1.2400 + } 1.2401 + if (ifa->src_is_priv) { 1.2402 + /* Special case, linklocal to loop */ 1.2403 + if (dest_is_loop) 1.2404 + return (NULL); 1.2405 + } 1.2406 + } 1.2407 +#endif 1.2408 + /* 1.2409 + * Now that we know what is what, implement our table.
1.2410 + * This could in theory be done slicker (it used to be), but this 1.2411 + * is straightforward and easier to validate :-) 1.2412 + */ 1.2413 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n", 1.2414 + ifa->src_is_loop, 1.2415 + dest_is_priv); 1.2416 + if ((ifa->src_is_loop == 1) && (dest_is_priv)) { 1.2417 + return (NULL); 1.2418 + } 1.2419 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n", 1.2420 + ifa->src_is_loop, 1.2421 + dest_is_global); 1.2422 + if ((ifa->src_is_loop == 1) && (dest_is_global)) { 1.2423 + return (NULL); 1.2424 + } 1.2425 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n"); 1.2426 + /* its an acceptable address */ 1.2427 + return (ifa); 1.2428 +} 1.2429 + 1.2430 +int 1.2431 +sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa) 1.2432 +{ 1.2433 + struct sctp_laddr *laddr; 1.2434 + 1.2435 + if (stcb == NULL) { 1.2436 + /* There are no restrictions, no TCB :-) */ 1.2437 + return (0); 1.2438 + } 1.2439 + LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) { 1.2440 + if (laddr->ifa == NULL) { 1.2441 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", 1.2442 + __FUNCTION__); 1.2443 + continue; 1.2444 + } 1.2445 + if (laddr->ifa == ifa) { 1.2446 + /* Yes it is on the list */ 1.2447 + return (1); 1.2448 + } 1.2449 + } 1.2450 + return (0); 1.2451 +} 1.2452 + 1.2453 + 1.2454 +int 1.2455 +sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa) 1.2456 +{ 1.2457 + struct sctp_laddr *laddr; 1.2458 + 1.2459 + if (ifa == NULL) 1.2460 + return (0); 1.2461 + LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1.2462 + if (laddr->ifa == NULL) { 1.2463 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n", 1.2464 + __FUNCTION__); 1.2465 + continue; 1.2466 + } 1.2467 + if ((laddr->ifa == ifa) && laddr->action == 0) 1.2468 + /* same pointer */ 1.2469 + return (1); 1.2470 + } 1.2471 + return (0); 1.2472 +} 1.2473 + 1.2474 + 1.2475 + 1.2476 +static struct sctp_ifa * 1.2477 +sctp_choose_boundspecific_inp(struct sctp_inpcb *inp, 1.2478 + sctp_route_t *ro, 1.2479 + uint32_t vrf_id, 1.2480 + int non_asoc_addr_ok, 1.2481 + uint8_t dest_is_priv, 1.2482 + uint8_t dest_is_loop, 1.2483 + sa_family_t fam) 1.2484 +{ 1.2485 + struct sctp_laddr *laddr, *starting_point; 1.2486 + void *ifn; 1.2487 + int resettotop = 0; 1.2488 + struct sctp_ifn *sctp_ifn; 1.2489 + struct sctp_ifa *sctp_ifa, *sifa; 1.2490 + struct sctp_vrf *vrf; 1.2491 + uint32_t ifn_index; 1.2492 + 1.2493 + vrf = sctp_find_vrf(vrf_id); 1.2494 + if (vrf == NULL) 1.2495 + return (NULL); 1.2496 + 1.2497 + ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); 1.2498 + ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro); 1.2499 + sctp_ifn = sctp_find_ifn(ifn, ifn_index); 1.2500 + /* 1.2501 + * first question, is the ifn we will emit on in our list, if so, we 1.2502 + * want such an address. Note that we first looked for a 1.2503 + * preferred address. 1.2504 + */ 1.2505 + if (sctp_ifn) { 1.2506 + /* is a preferred one on the interface we route out? 
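 * (scan the routed interface's address list and return the first preferred
 * address that is also bound to this endpoint)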
*/ 1.2507 + LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1.2508 + if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 1.2509 + (non_asoc_addr_ok == 0)) 1.2510 + continue; 1.2511 + sifa = sctp_is_ifa_addr_preferred(sctp_ifa, 1.2512 + dest_is_loop, 1.2513 + dest_is_priv, fam); 1.2514 + if (sifa == NULL) 1.2515 + continue; 1.2516 + if (sctp_is_addr_in_ep(inp, sifa)) { 1.2517 + atomic_add_int(&sifa->refcount, 1); 1.2518 + return (sifa); 1.2519 + } 1.2520 + } 1.2521 + } 1.2522 + /* 1.2523 + * ok, now we now need to find one on the list of the addresses. 1.2524 + * We can't get one on the emitting interface so let's find first 1.2525 + * a preferred one. If not that an acceptable one otherwise... 1.2526 + * we return NULL. 1.2527 + */ 1.2528 + starting_point = inp->next_addr_touse; 1.2529 + once_again: 1.2530 + if (inp->next_addr_touse == NULL) { 1.2531 + inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list); 1.2532 + resettotop = 1; 1.2533 + } 1.2534 + for (laddr = inp->next_addr_touse; laddr; 1.2535 + laddr = LIST_NEXT(laddr, sctp_nxt_addr)) { 1.2536 + if (laddr->ifa == NULL) { 1.2537 + /* address has been removed */ 1.2538 + continue; 1.2539 + } 1.2540 + if (laddr->action == SCTP_DEL_IP_ADDRESS) { 1.2541 + /* address is being deleted */ 1.2542 + continue; 1.2543 + } 1.2544 + sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, 1.2545 + dest_is_priv, fam); 1.2546 + if (sifa == NULL) 1.2547 + continue; 1.2548 + atomic_add_int(&sifa->refcount, 1); 1.2549 + return (sifa); 1.2550 + } 1.2551 + if (resettotop == 0) { 1.2552 + inp->next_addr_touse = NULL; 1.2553 + goto once_again; 1.2554 + } 1.2555 + 1.2556 + inp->next_addr_touse = starting_point; 1.2557 + resettotop = 0; 1.2558 + once_again_too: 1.2559 + if (inp->next_addr_touse == NULL) { 1.2560 + inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list); 1.2561 + resettotop = 1; 1.2562 + } 1.2563 + 1.2564 + /* ok, what about an acceptable address in the inp */ 1.2565 + for (laddr = inp->next_addr_touse; laddr; 1.2566 + laddr = LIST_NEXT(laddr, sctp_nxt_addr)) { 1.2567 + if (laddr->ifa == NULL) { 1.2568 + /* address has been removed */ 1.2569 + continue; 1.2570 + } 1.2571 + if (laddr->action == SCTP_DEL_IP_ADDRESS) { 1.2572 + /* address is being deleted */ 1.2573 + continue; 1.2574 + } 1.2575 + sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop, 1.2576 + dest_is_priv, fam); 1.2577 + if (sifa == NULL) 1.2578 + continue; 1.2579 + atomic_add_int(&sifa->refcount, 1); 1.2580 + return (sifa); 1.2581 + } 1.2582 + if (resettotop == 0) { 1.2583 + inp->next_addr_touse = NULL; 1.2584 + goto once_again_too; 1.2585 + } 1.2586 + 1.2587 + /* 1.2588 + * no address bound can be a source for the destination we are in 1.2589 + * trouble 1.2590 + */ 1.2591 + return (NULL); 1.2592 +} 1.2593 + 1.2594 + 1.2595 + 1.2596 +static struct sctp_ifa * 1.2597 +sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp, 1.2598 + struct sctp_tcb *stcb, 1.2599 + sctp_route_t *ro, 1.2600 + uint32_t vrf_id, 1.2601 + uint8_t dest_is_priv, 1.2602 + uint8_t dest_is_loop, 1.2603 + int non_asoc_addr_ok, 1.2604 + sa_family_t fam) 1.2605 +{ 1.2606 + struct sctp_laddr *laddr, *starting_point; 1.2607 + void *ifn; 1.2608 + struct sctp_ifn *sctp_ifn; 1.2609 + struct sctp_ifa *sctp_ifa, *sifa; 1.2610 + uint8_t start_at_beginning = 0; 1.2611 + struct sctp_vrf *vrf; 1.2612 + uint32_t ifn_index; 1.2613 + 1.2614 + /* 1.2615 + * first question, is the ifn we will emit on in our list, if so, we 1.2616 + * want that one. 
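 * ('In our list' means bound to this endpoint and not on the association's
 * restricted no-no list; a preferred address is tried first, then an
 * acceptable one.)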
1.2617 + */ 1.2618 + vrf = sctp_find_vrf(vrf_id); 1.2619 + if (vrf == NULL) 1.2620 + return (NULL); 1.2621 + 1.2622 + ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); 1.2623 + ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro); 1.2624 + sctp_ifn = sctp_find_ifn( ifn, ifn_index); 1.2625 + 1.2626 + /* 1.2627 + * first question, is the ifn we will emit on in our list? If so, 1.2628 + * we want that one. First we look for a preferred. Second, we go 1.2629 + * for an acceptable. 1.2630 + */ 1.2631 + if (sctp_ifn) { 1.2632 + /* first try for a preferred address on the ep */ 1.2633 + LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1.2634 + if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) 1.2635 + continue; 1.2636 + if (sctp_is_addr_in_ep(inp, sctp_ifa)) { 1.2637 + sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam); 1.2638 + if (sifa == NULL) 1.2639 + continue; 1.2640 + if (((non_asoc_addr_ok == 0) && 1.2641 + (sctp_is_addr_restricted(stcb, sifa))) || 1.2642 + (non_asoc_addr_ok && 1.2643 + (sctp_is_addr_restricted(stcb, sifa)) && 1.2644 + (!sctp_is_addr_pending(stcb, sifa)))) { 1.2645 + /* on the no-no list */ 1.2646 + continue; 1.2647 + } 1.2648 + atomic_add_int(&sifa->refcount, 1); 1.2649 + return (sifa); 1.2650 + } 1.2651 + } 1.2652 + /* next try for an acceptable address on the ep */ 1.2653 + LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1.2654 + if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) 1.2655 + continue; 1.2656 + if (sctp_is_addr_in_ep(inp, sctp_ifa)) { 1.2657 + sifa= sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv,fam); 1.2658 + if (sifa == NULL) 1.2659 + continue; 1.2660 + if (((non_asoc_addr_ok == 0) && 1.2661 + (sctp_is_addr_restricted(stcb, sifa))) || 1.2662 + (non_asoc_addr_ok && 1.2663 + (sctp_is_addr_restricted(stcb, sifa)) && 1.2664 + (!sctp_is_addr_pending(stcb, sifa)))) { 1.2665 + /* on the no-no list */ 1.2666 + continue; 1.2667 + } 1.2668 + atomic_add_int(&sifa->refcount, 1); 1.2669 + return (sifa); 1.2670 + } 1.2671 + } 1.2672 + 1.2673 + } 1.2674 + /* 1.2675 + * if we can't find one like that then we must look at all 1.2676 + * addresses bound to pick one at first preferable then 1.2677 + * secondly acceptable. 
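/*
 * [Editorial sketch -- not part of the patched source.]  The compound
 * condition commented "on the no-no list", which guards every candidate in
 * the loops above and is repeated in every later selection pass, reduces to
 * one rule: a restricted address is rejected, unless the caller passed
 * non_asoc_addr_ok and the address is still pending (its ASCONF add has not
 * completed yet).  A hypothetical helper with exactly that truth table:
 */
static int
addr_usable_for_asoc(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
                     int non_asoc_addr_ok)
{
	if (sctp_is_addr_restricted(stcb, ifa) == 0) {
		return (1);               /* not restricted: always usable */
	}
	if (non_asoc_addr_ok && sctp_is_addr_pending(stcb, ifa)) {
		return (1);               /* restricted, but still being added */
	}
	return (0);                       /* on the no-no list */
}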
1.2678 + */ 1.2679 + starting_point = stcb->asoc.last_used_address; 1.2680 + sctp_from_the_top: 1.2681 + if (stcb->asoc.last_used_address == NULL) { 1.2682 + start_at_beginning = 1; 1.2683 + stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list); 1.2684 + } 1.2685 + /* search beginning with the last used address */ 1.2686 + for (laddr = stcb->asoc.last_used_address; laddr; 1.2687 + laddr = LIST_NEXT(laddr, sctp_nxt_addr)) { 1.2688 + if (laddr->ifa == NULL) { 1.2689 + /* address has been removed */ 1.2690 + continue; 1.2691 + } 1.2692 + if (laddr->action == SCTP_DEL_IP_ADDRESS) { 1.2693 + /* address is being deleted */ 1.2694 + continue; 1.2695 + } 1.2696 + sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam); 1.2697 + if (sifa == NULL) 1.2698 + continue; 1.2699 + if (((non_asoc_addr_ok == 0) && 1.2700 + (sctp_is_addr_restricted(stcb, sifa))) || 1.2701 + (non_asoc_addr_ok && 1.2702 + (sctp_is_addr_restricted(stcb, sifa)) && 1.2703 + (!sctp_is_addr_pending(stcb, sifa)))) { 1.2704 + /* on the no-no list */ 1.2705 + continue; 1.2706 + } 1.2707 + stcb->asoc.last_used_address = laddr; 1.2708 + atomic_add_int(&sifa->refcount, 1); 1.2709 + return (sifa); 1.2710 + } 1.2711 + if (start_at_beginning == 0) { 1.2712 + stcb->asoc.last_used_address = NULL; 1.2713 + goto sctp_from_the_top; 1.2714 + } 1.2715 + /* now try for any higher scope than the destination */ 1.2716 + stcb->asoc.last_used_address = starting_point; 1.2717 + start_at_beginning = 0; 1.2718 + sctp_from_the_top2: 1.2719 + if (stcb->asoc.last_used_address == NULL) { 1.2720 + start_at_beginning = 1; 1.2721 + stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list); 1.2722 + } 1.2723 + /* search beginning with the last used address */ 1.2724 + for (laddr = stcb->asoc.last_used_address; laddr; 1.2725 + laddr = LIST_NEXT(laddr, sctp_nxt_addr)) { 1.2726 + if (laddr->ifa == NULL) { 1.2727 + /* address has been removed */ 1.2728 + continue; 1.2729 + } 1.2730 + if (laddr->action == SCTP_DEL_IP_ADDRESS) { 1.2731 + /* address is being deleted */ 1.2732 + continue; 1.2733 + } 1.2734 + sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop, 1.2735 + dest_is_priv, fam); 1.2736 + if (sifa == NULL) 1.2737 + continue; 1.2738 + if (((non_asoc_addr_ok == 0) && 1.2739 + (sctp_is_addr_restricted(stcb, sifa))) || 1.2740 + (non_asoc_addr_ok && 1.2741 + (sctp_is_addr_restricted(stcb, sifa)) && 1.2742 + (!sctp_is_addr_pending(stcb, sifa)))) { 1.2743 + /* on the no-no list */ 1.2744 + continue; 1.2745 + } 1.2746 + stcb->asoc.last_used_address = laddr; 1.2747 + atomic_add_int(&sifa->refcount, 1); 1.2748 + return (sifa); 1.2749 + } 1.2750 + if (start_at_beginning == 0) { 1.2751 + stcb->asoc.last_used_address = NULL; 1.2752 + goto sctp_from_the_top2; 1.2753 + } 1.2754 + return (NULL); 1.2755 +} 1.2756 + 1.2757 +static struct sctp_ifa * 1.2758 +sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn, 1.2759 + struct sctp_tcb *stcb, 1.2760 + int non_asoc_addr_ok, 1.2761 + uint8_t dest_is_loop, 1.2762 + uint8_t dest_is_priv, 1.2763 + int addr_wanted, 1.2764 + sa_family_t fam, 1.2765 + sctp_route_t *ro 1.2766 + ) 1.2767 +{ 1.2768 + struct sctp_ifa *ifa, *sifa; 1.2769 + int num_eligible_addr = 0; 1.2770 +#ifdef INET6 1.2771 +#ifdef SCTP_EMBEDDED_V6_SCOPE 1.2772 + struct sockaddr_in6 sin6, lsa6; 1.2773 + 1.2774 + if (fam == AF_INET6) { 1.2775 + memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6)); 1.2776 +#ifdef SCTP_KAME 1.2777 + (void)sa6_recoverscope(&sin6); 1.2778 +#else 1.2779 + 
(void)in6_recoverscope(&sin6, &sin6.sin6_addr, NULL); 1.2780 +#endif /* SCTP_KAME */ 1.2781 + } 1.2782 +#endif /* SCTP_EMBEDDED_V6_SCOPE */ 1.2783 +#endif /* INET6 */ 1.2784 + LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) { 1.2785 + if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 1.2786 + (non_asoc_addr_ok == 0)) 1.2787 + continue; 1.2788 + sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop, 1.2789 + dest_is_priv, fam); 1.2790 + if (sifa == NULL) 1.2791 + continue; 1.2792 +#ifdef INET6 1.2793 + if (fam == AF_INET6 && 1.2794 + dest_is_loop && 1.2795 + sifa->src_is_loop && sifa->src_is_priv) { 1.2796 + /* don't allow fe80::1 to be a src on loop ::1, we don't list it 1.2797 + * to the peer so we will get an abort. 1.2798 + */ 1.2799 + continue; 1.2800 + } 1.2801 +#ifdef SCTP_EMBEDDED_V6_SCOPE 1.2802 + if (fam == AF_INET6 && 1.2803 + IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) && 1.2804 + IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) { 1.2805 + /* link-local <-> link-local must belong to the same scope. */ 1.2806 + memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6)); 1.2807 +#ifdef SCTP_KAME 1.2808 + (void)sa6_recoverscope(&lsa6); 1.2809 +#else 1.2810 + (void)in6_recoverscope(&lsa6, &lsa6.sin6_addr, NULL); 1.2811 +#endif /* SCTP_KAME */ 1.2812 + if (sin6.sin6_scope_id != lsa6.sin6_scope_id) { 1.2813 + continue; 1.2814 + } 1.2815 + } 1.2816 +#endif /* SCTP_EMBEDDED_V6_SCOPE */ 1.2817 +#endif /* INET6 */ 1.2818 + 1.2819 +#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__) 1.2820 + /* Check if the IPv6 address matches to next-hop. 1.2821 + In the mobile case, old IPv6 address may be not deleted 1.2822 + from the interface. Then, the interface has previous and 1.2823 + new addresses. We should use one corresponding to the 1.2824 + next-hop. (by micchie) 1.2825 + */ 1.2826 +#ifdef INET6 1.2827 + if (stcb && fam == AF_INET6 && 1.2828 + sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) { 1.2829 + if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro) 1.2830 + == 0) { 1.2831 + continue; 1.2832 + } 1.2833 + } 1.2834 +#endif 1.2835 +#ifdef INET 1.2836 + /* Avoid topologically incorrect IPv4 address */ 1.2837 + if (stcb && fam == AF_INET && 1.2838 + sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) { 1.2839 + if (sctp_v4src_match_nexthop(sifa, ro) == 0) { 1.2840 + continue; 1.2841 + } 1.2842 + } 1.2843 +#endif 1.2844 +#endif 1.2845 + if (stcb) { 1.2846 + if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) { 1.2847 + continue; 1.2848 + } 1.2849 + if (((non_asoc_addr_ok == 0) && 1.2850 + (sctp_is_addr_restricted(stcb, sifa))) || 1.2851 + (non_asoc_addr_ok && 1.2852 + (sctp_is_addr_restricted(stcb, sifa)) && 1.2853 + (!sctp_is_addr_pending(stcb, sifa)))) { 1.2854 + /* 1.2855 + * It is restricted for some reason.. 1.2856 + * probably not yet added. 
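/*
 * [Editorial sketch -- not part of the patched source.]  Two of the IPv6
 * filters above are easy to misread: a link-local source is never paired
 * with a loopback destination (the peer was never told about fe80::1, so an
 * ABORT would follow), and a link-local source may only be used toward a
 * link-local destination on the same link, which is why both addresses are
 * run through sa6_recoverscope() before their sin6_scope_id values are
 * compared.  The second rule as a small stand-alone predicate (both
 * arguments assumed to already carry a recovered sin6_scope_id):
 */
static int
same_link_ok(const struct sockaddr_in6 *src, const struct sockaddr_in6 *dst)
{
	if (IN6_IS_ADDR_LINKLOCAL(&src->sin6_addr) &&
	    IN6_IS_ADDR_LINKLOCAL(&dst->sin6_addr) &&
	    src->sin6_scope_id != dst->sin6_scope_id) {
		return (0);               /* different links: cannot be used */
	}
	return (1);
}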
1.2857 + */ 1.2858 + continue; 1.2859 + } 1.2860 + } 1.2861 + if (num_eligible_addr >= addr_wanted) { 1.2862 + return (sifa); 1.2863 + } 1.2864 + num_eligible_addr++; 1.2865 + } 1.2866 + return (NULL); 1.2867 +} 1.2868 + 1.2869 + 1.2870 +static int 1.2871 +sctp_count_num_preferred_boundall(struct sctp_ifn *ifn, 1.2872 + struct sctp_tcb *stcb, 1.2873 + int non_asoc_addr_ok, 1.2874 + uint8_t dest_is_loop, 1.2875 + uint8_t dest_is_priv, 1.2876 + sa_family_t fam) 1.2877 +{ 1.2878 + struct sctp_ifa *ifa, *sifa; 1.2879 + int num_eligible_addr = 0; 1.2880 + 1.2881 + LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) { 1.2882 + if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 1.2883 + (non_asoc_addr_ok == 0)) { 1.2884 + continue; 1.2885 + } 1.2886 + sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop, 1.2887 + dest_is_priv, fam); 1.2888 + if (sifa == NULL) { 1.2889 + continue; 1.2890 + } 1.2891 + if (stcb) { 1.2892 + if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) { 1.2893 + continue; 1.2894 + } 1.2895 + if (((non_asoc_addr_ok == 0) && 1.2896 + (sctp_is_addr_restricted(stcb, sifa))) || 1.2897 + (non_asoc_addr_ok && 1.2898 + (sctp_is_addr_restricted(stcb, sifa)) && 1.2899 + (!sctp_is_addr_pending(stcb, sifa)))) { 1.2900 + /* 1.2901 + * It is restricted for some reason.. 1.2902 + * probably not yet added. 1.2903 + */ 1.2904 + continue; 1.2905 + } 1.2906 + } 1.2907 + num_eligible_addr++; 1.2908 + } 1.2909 + return (num_eligible_addr); 1.2910 +} 1.2911 + 1.2912 +static struct sctp_ifa * 1.2913 +sctp_choose_boundall(struct sctp_tcb *stcb, 1.2914 + struct sctp_nets *net, 1.2915 + sctp_route_t *ro, 1.2916 + uint32_t vrf_id, 1.2917 + uint8_t dest_is_priv, 1.2918 + uint8_t dest_is_loop, 1.2919 + int non_asoc_addr_ok, 1.2920 + sa_family_t fam) 1.2921 +{ 1.2922 + int cur_addr_num = 0, num_preferred = 0; 1.2923 + void *ifn; 1.2924 + struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn; 1.2925 + struct sctp_ifa *sctp_ifa, *sifa; 1.2926 + uint32_t ifn_index; 1.2927 + struct sctp_vrf *vrf; 1.2928 +#ifdef INET 1.2929 + int retried = 0; 1.2930 +#endif 1.2931 + 1.2932 + /*- 1.2933 + * For boundall we can use any address in the association. 1.2934 + * If non_asoc_addr_ok is set we can use any address (at least in 1.2935 + * theory). So we look for preferred addresses first. If we find one, 1.2936 + * we use it. Otherwise we next try to get an address on the 1.2937 + * interface, which we should be able to do (unless non_asoc_addr_ok 1.2938 + * is false and we are routed out that way). In these cases where we 1.2939 + * can't use the address of the interface we go through all the 1.2940 + * ifn's looking for an address we can use and fill that in. Punting 1.2941 + * means we send back address 0, which will probably cause problems 1.2942 + * actually since then IP will fill in the address of the route ifn, 1.2943 + * which means we probably already rejected it.. i.e. here comes an 1.2944 + * abort :-<. 1.2945 + */ 1.2946 + vrf = sctp_find_vrf(vrf_id); 1.2947 + if (vrf == NULL) 1.2948 + return (NULL); 1.2949 + 1.2950 + ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); 1.2951 + ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro); 1.2952 + SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn from route:%p ifn_index:%d\n", ifn, ifn_index); 1.2953 + emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index); 1.2954 + if (sctp_ifn == NULL) { 1.2955 + /* ?? We don't have this guy ?? 
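/*
 * [Editorial sketch -- not part of the patched source.]
 * sctp_select_nth_preferred_addr_from_ifn_boundall() and
 * sctp_count_num_preferred_boundall() above perform the same filtered walk
 * over an interface's address list; the selector returns the survivor whose
 * index reaches addr_wanted, the counter simply tallies all survivors.
 * Keeping the two walks identical is what makes "count N eligible addresses,
 * then ask for index 0..N-1" safe.  The shared skeleton, with a hypothetical
 * filter() standing in for the defer/preferred/scope/restricted checks:
 */
static struct sctp_ifa *
nth_eligible_addr(struct sctp_ifn *ifn, int addr_wanted,
                  int (*filter)(struct sctp_ifa *), int *count_out)
{
	struct sctp_ifa *ifa, *hit = NULL;
	int num_eligible = 0;

	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
		if (!filter(ifa))
			continue;                 /* filtered out: consumes no index */
		if (hit == NULL && num_eligible == addr_wanted)
			hit = ifa;                /* the addr_wanted-th survivor */
		num_eligible++;
	}
	if (count_out != NULL)
		*count_out = num_eligible;        /* what the counting variant returns */
	return (hit);
}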
*/ 1.2956 + SCTPDBG(SCTP_DEBUG_OUTPUT2,"No ifn emit interface?\n"); 1.2957 + goto bound_all_plan_b; 1.2958 + } 1.2959 + SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn_index:%d name:%s is emit interface\n", 1.2960 + ifn_index, sctp_ifn->ifn_name); 1.2961 + 1.2962 + if (net) { 1.2963 + cur_addr_num = net->indx_of_eligible_next_to_use; 1.2964 + } 1.2965 + num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, 1.2966 + stcb, 1.2967 + non_asoc_addr_ok, 1.2968 + dest_is_loop, 1.2969 + dest_is_priv, fam); 1.2970 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n", 1.2971 + num_preferred, sctp_ifn->ifn_name); 1.2972 + if (num_preferred == 0) { 1.2973 + /* 1.2974 + * no eligible addresses, we must use some other interface 1.2975 + * address if we can find one. 1.2976 + */ 1.2977 + goto bound_all_plan_b; 1.2978 + } 1.2979 + /* 1.2980 + * Ok we have num_eligible_addr set with how many we can use, this 1.2981 + * may vary from call to call due to addresses being deprecated 1.2982 + * etc.. 1.2983 + */ 1.2984 + if (cur_addr_num >= num_preferred) { 1.2985 + cur_addr_num = 0; 1.2986 + } 1.2987 + /* 1.2988 + * select the nth address from the list (where cur_addr_num is the 1.2989 + * nth) and 0 is the first one, 1 is the second one etc... 1.2990 + */ 1.2991 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num); 1.2992 + 1.2993 + sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop, 1.2994 + dest_is_priv, cur_addr_num, fam, ro); 1.2995 + 1.2996 + /* if sctp_ifa is NULL something changed??, fall to plan b. */ 1.2997 + if (sctp_ifa) { 1.2998 + atomic_add_int(&sctp_ifa->refcount, 1); 1.2999 + if (net) { 1.3000 + /* save off where the next one we will want */ 1.3001 + net->indx_of_eligible_next_to_use = cur_addr_num + 1; 1.3002 + } 1.3003 + return (sctp_ifa); 1.3004 + } 1.3005 + /* 1.3006 + * plan_b: Look at all interfaces and find a preferred address. If 1.3007 + * no preferred fall through to plan_c. 1.3008 + */ 1.3009 + bound_all_plan_b: 1.3010 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n"); 1.3011 + LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 1.3012 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n", 1.3013 + sctp_ifn->ifn_name); 1.3014 + if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 1.3015 + /* wrong base scope */ 1.3016 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n"); 1.3017 + continue; 1.3018 + } 1.3019 + if ((sctp_ifn == looked_at) && looked_at) { 1.3020 + /* already looked at this guy */ 1.3021 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n"); 1.3022 + continue; 1.3023 + } 1.3024 + num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, stcb, non_asoc_addr_ok, 1.3025 + dest_is_loop, dest_is_priv, fam); 1.3026 + SCTPDBG(SCTP_DEBUG_OUTPUT2, 1.3027 + "Found ifn:%p %d preferred source addresses\n", 1.3028 + ifn, num_preferred); 1.3029 + if (num_preferred == 0) { 1.3030 + /* None on this interface. */ 1.3031 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefered -- skipping to next\n"); 1.3032 + continue; 1.3033 + } 1.3034 + SCTPDBG(SCTP_DEBUG_OUTPUT2, 1.3035 + "num preferred:%d on interface:%p cur_addr_num:%d\n", 1.3036 + num_preferred, (void *)sctp_ifn, cur_addr_num); 1.3037 + 1.3038 + /* 1.3039 + * Ok we have num_eligible_addr set with how many we can 1.3040 + * use, this may vary from call to call due to addresses 1.3041 + * being deprecated etc.. 
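/*
 * [Editorial sketch -- not part of the patched source.]  Plan A above rotates
 * the source address used for a destination: net->indx_of_eligible_next_to_use
 * names the next preferred address to try, it is clamped back to 0 whenever
 * it runs past the current count (the count can shrink between calls as
 * addresses get deprecated), and after a successful pick the cursor is
 * advanced by one.  The same cursor arithmetic, isolated into a hypothetical
 * helper:
 */
static int
next_preferred_index(int *cursor, int num_preferred)
{
	int use;

	if (num_preferred <= 0) {
		return (-1);              /* nothing eligible on this interface */
	}
	use = *cursor;
	if (use >= num_preferred) {
		use = 0;                  /* list shrank since last time: wrap */
	}
	*cursor = use + 1;                /* next selection starts one further on */
	return (use);
}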
1.3042 + */ 1.3043 + if (cur_addr_num >= num_preferred) { 1.3044 + cur_addr_num = 0; 1.3045 + } 1.3046 + sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop, 1.3047 + dest_is_priv, cur_addr_num, fam, ro); 1.3048 + if (sifa == NULL) 1.3049 + continue; 1.3050 + if (net) { 1.3051 + net->indx_of_eligible_next_to_use = cur_addr_num + 1; 1.3052 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n", 1.3053 + cur_addr_num); 1.3054 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:"); 1.3055 + SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa); 1.3056 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:"); 1.3057 + SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa); 1.3058 + } 1.3059 + atomic_add_int(&sifa->refcount, 1); 1.3060 + return (sifa); 1.3061 + } 1.3062 +#ifdef INET 1.3063 +again_with_private_addresses_allowed: 1.3064 +#endif 1.3065 + /* plan_c: do we have an acceptable address on the emit interface */ 1.3066 + sifa = NULL; 1.3067 + SCTPDBG(SCTP_DEBUG_OUTPUT2,"Trying Plan C: find acceptable on interface\n"); 1.3068 + if (emit_ifn == NULL) { 1.3069 + SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jump to Plan D - no emit_ifn\n"); 1.3070 + goto plan_d; 1.3071 + } 1.3072 + LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) { 1.3073 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa); 1.3074 + if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 1.3075 + (non_asoc_addr_ok == 0)) { 1.3076 + SCTPDBG(SCTP_DEBUG_OUTPUT2,"Defer\n"); 1.3077 + continue; 1.3078 + } 1.3079 + sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, 1.3080 + dest_is_priv, fam); 1.3081 + if (sifa == NULL) { 1.3082 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n"); 1.3083 + continue; 1.3084 + } 1.3085 + if (stcb) { 1.3086 + if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) { 1.3087 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n"); 1.3088 + sifa = NULL; 1.3089 + continue; 1.3090 + } 1.3091 + if (((non_asoc_addr_ok == 0) && 1.3092 + (sctp_is_addr_restricted(stcb, sifa))) || 1.3093 + (non_asoc_addr_ok && 1.3094 + (sctp_is_addr_restricted(stcb, sifa)) && 1.3095 + (!sctp_is_addr_pending(stcb, sifa)))) { 1.3096 + /* 1.3097 + * It is restricted for some 1.3098 + * reason.. probably not yet added. 1.3099 + */ 1.3100 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its resticted\n"); 1.3101 + sifa = NULL; 1.3102 + continue; 1.3103 + } 1.3104 + } else { 1.3105 + SCTP_PRINTF("Stcb is null - no print\n"); 1.3106 + } 1.3107 + atomic_add_int(&sifa->refcount, 1); 1.3108 + goto out; 1.3109 + } 1.3110 + plan_d: 1.3111 + /* 1.3112 + * plan_d: We are in trouble. No preferred address on the emit 1.3113 + * interface. And not even a preferred address on all interfaces. 1.3114 + * Go out and see if we can find an acceptable address somewhere 1.3115 + * amongst all interfaces. 
1.3116 + */ 1.3117 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at); 1.3118 + LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 1.3119 + if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 1.3120 + /* wrong base scope */ 1.3121 + continue; 1.3122 + } 1.3123 + LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1.3124 + if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 1.3125 + (non_asoc_addr_ok == 0)) 1.3126 + continue; 1.3127 + sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, 1.3128 + dest_is_loop, 1.3129 + dest_is_priv, fam); 1.3130 + if (sifa == NULL) 1.3131 + continue; 1.3132 + if (stcb) { 1.3133 + if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) { 1.3134 + sifa = NULL; 1.3135 + continue; 1.3136 + } 1.3137 + if (((non_asoc_addr_ok == 0) && 1.3138 + (sctp_is_addr_restricted(stcb, sifa))) || 1.3139 + (non_asoc_addr_ok && 1.3140 + (sctp_is_addr_restricted(stcb, sifa)) && 1.3141 + (!sctp_is_addr_pending(stcb, sifa)))) { 1.3142 + /* 1.3143 + * It is restricted for some 1.3144 + * reason.. probably not yet added. 1.3145 + */ 1.3146 + sifa = NULL; 1.3147 + continue; 1.3148 + } 1.3149 + } 1.3150 + goto out; 1.3151 + } 1.3152 + } 1.3153 +#ifdef INET 1.3154 + if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) { 1.3155 + stcb->asoc.scope.ipv4_local_scope = 1; 1.3156 + retried = 1; 1.3157 + goto again_with_private_addresses_allowed; 1.3158 + } else if (retried == 1) { 1.3159 + stcb->asoc.scope.ipv4_local_scope = 0; 1.3160 + } 1.3161 +#endif 1.3162 +out: 1.3163 +#ifdef INET 1.3164 + if (sifa) { 1.3165 + if (retried == 1) { 1.3166 + LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 1.3167 + if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 1.3168 + /* wrong base scope */ 1.3169 + continue; 1.3170 + } 1.3171 + LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1.3172 + struct sctp_ifa *tmp_sifa; 1.3173 + 1.3174 + if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && 1.3175 + (non_asoc_addr_ok == 0)) 1.3176 + continue; 1.3177 + tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, 1.3178 + dest_is_loop, 1.3179 + dest_is_priv, fam); 1.3180 + if (tmp_sifa == NULL) { 1.3181 + continue; 1.3182 + } 1.3183 + if (tmp_sifa == sifa) { 1.3184 + continue; 1.3185 + } 1.3186 + if (stcb) { 1.3187 + if (sctp_is_address_in_scope(tmp_sifa, 1.3188 + &stcb->asoc.scope, 0) == 0) { 1.3189 + continue; 1.3190 + } 1.3191 + if (((non_asoc_addr_ok == 0) && 1.3192 + (sctp_is_addr_restricted(stcb, tmp_sifa))) || 1.3193 + (non_asoc_addr_ok && 1.3194 + (sctp_is_addr_restricted(stcb, tmp_sifa)) && 1.3195 + (!sctp_is_addr_pending(stcb, tmp_sifa)))) { 1.3196 + /* 1.3197 + * It is restricted for some 1.3198 + * reason.. probably not yet added. 
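/*
 * [Editorial sketch -- not part of the patched source.]  The INET-only branch
 * above is a last resort: if plans C and D found nothing and the association
 * currently excludes private IPv4 addresses, ipv4_local_scope is switched on
 * and the search is retried from plan C; if even that fails the flag is put
 * back.  When the retry does succeed, the loop just below parks every other
 * acceptable private address on the restricted list so it is not advertised
 * to the peer.  The widen-retry-restore shape in isolation, with a
 * hypothetical scope_flags struct and try_once() standing in for the real
 * scope state and for plans C/D:
 */
struct scope_flags {
	int ipv4_local_scope;             /* stand-in for the association's scope flag */
};

static struct sctp_ifa *
select_with_private_fallback(struct scope_flags *scope,
                             struct sctp_ifa *(*try_once)(void))
{
	struct sctp_ifa *hit;

	hit = try_once();                         /* normal scope rules */
	if (hit != NULL || scope->ipv4_local_scope != 0) {
		return (hit);
	}
	scope->ipv4_local_scope = 1;              /* widen: allow RFC 1918 sources */
	hit = try_once();
	if (hit == NULL) {
		scope->ipv4_local_scope = 0;      /* retry gained nothing: undo */
	}
	return (hit);
}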
1.3199 + */ 1.3200 + continue; 1.3201 + } 1.3202 + } 1.3203 + if ((tmp_sifa->address.sin.sin_family == AF_INET) && 1.3204 + (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) { 1.3205 + sctp_add_local_addr_restricted(stcb, tmp_sifa); 1.3206 + } 1.3207 + } 1.3208 + } 1.3209 + } 1.3210 + atomic_add_int(&sifa->refcount, 1); 1.3211 + } 1.3212 +#endif 1.3213 + return (sifa); 1.3214 +} 1.3215 + 1.3216 + 1.3217 + 1.3218 +/* tcb may be NULL */ 1.3219 +struct sctp_ifa * 1.3220 +sctp_source_address_selection(struct sctp_inpcb *inp, 1.3221 + struct sctp_tcb *stcb, 1.3222 + sctp_route_t *ro, 1.3223 + struct sctp_nets *net, 1.3224 + int non_asoc_addr_ok, uint32_t vrf_id) 1.3225 +{ 1.3226 + struct sctp_ifa *answer; 1.3227 + uint8_t dest_is_priv, dest_is_loop; 1.3228 + sa_family_t fam; 1.3229 +#ifdef INET 1.3230 + struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst; 1.3231 +#endif 1.3232 +#ifdef INET6 1.3233 + struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst; 1.3234 +#endif 1.3235 + 1.3236 + /** 1.3237 + * Rules: - Find the route if needed, cache if I can. - Look at 1.3238 + * interface address in route, Is it in the bound list. If so we 1.3239 + * have the best source. - If not we must rotate amongst the 1.3240 + * addresses. 1.3241 + * 1.3242 + * Cavets and issues 1.3243 + * 1.3244 + * Do we need to pay attention to scope. We can have a private address 1.3245 + * or a global address we are sourcing or sending to. So if we draw 1.3246 + * it out 1.3247 + * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 1.3248 + * For V4 1.3249 + * ------------------------------------------ 1.3250 + * source * dest * result 1.3251 + * ----------------------------------------- 1.3252 + * <a> Private * Global * NAT 1.3253 + * ----------------------------------------- 1.3254 + * <b> Private * Private * No problem 1.3255 + * ----------------------------------------- 1.3256 + * <c> Global * Private * Huh, How will this work? 1.3257 + * ----------------------------------------- 1.3258 + * <d> Global * Global * No Problem 1.3259 + *------------------------------------------ 1.3260 + * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 1.3261 + * For V6 1.3262 + *------------------------------------------ 1.3263 + * source * dest * result 1.3264 + * ----------------------------------------- 1.3265 + * <a> Linklocal * Global * 1.3266 + * ----------------------------------------- 1.3267 + * <b> Linklocal * Linklocal * No problem 1.3268 + * ----------------------------------------- 1.3269 + * <c> Global * Linklocal * Huh, How will this work? 1.3270 + * ----------------------------------------- 1.3271 + * <d> Global * Global * No Problem 1.3272 + *------------------------------------------ 1.3273 + * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz 1.3274 + * 1.3275 + * And then we add to that what happens if there are multiple addresses 1.3276 + * assigned to an interface. Remember the ifa on a ifn is a linked 1.3277 + * list of addresses. So one interface can have more than one IP 1.3278 + * address. What happens if we have both a private and a global 1.3279 + * address? Do we then use context of destination to sort out which 1.3280 + * one is best? And what about NAT's sending P->G may get you a NAT 1.3281 + * translation, or should you select the G thats on the interface in 1.3282 + * preference. 1.3283 + * 1.3284 + * Decisions: 1.3285 + * 1.3286 + * - count the number of addresses on the interface. 1.3287 + * - if it is one, no problem except case <c>. 
1.3288 + * For <a> we will assume a NAT out there. 1.3289 + * - if there are more than one, then we need to worry about scope P 1.3290 + * or G. We should prefer G -> G and P -> P if possible. 1.3291 + * Then as a secondary fall back to mixed types G->P being a last 1.3292 + * ditch one. 1.3293 + * - The above all works for bound all, but bound specific we need to 1.3294 + * use the same concept but instead only consider the bound 1.3295 + * addresses. If the bound set is NOT assigned to the interface then 1.3296 + * we must use rotation amongst the bound addresses.. 1.3297 + */ 1.3298 + if (ro->ro_rt == NULL) { 1.3299 + /* 1.3300 + * Need a route to cache. 1.3301 + */ 1.3302 + SCTP_RTALLOC(ro, vrf_id); 1.3303 + } 1.3304 + if (ro->ro_rt == NULL) { 1.3305 + return (NULL); 1.3306 + } 1.3307 + fam = ro->ro_dst.sa_family; 1.3308 + dest_is_priv = dest_is_loop = 0; 1.3309 + /* Setup our scopes for the destination */ 1.3310 + switch (fam) { 1.3311 +#ifdef INET 1.3312 + case AF_INET: 1.3313 + /* Scope based on outbound address */ 1.3314 + if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) { 1.3315 + dest_is_loop = 1; 1.3316 + if (net != NULL) { 1.3317 + /* mark it as local */ 1.3318 + net->addr_is_local = 1; 1.3319 + } 1.3320 + } else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) { 1.3321 + dest_is_priv = 1; 1.3322 + } 1.3323 + break; 1.3324 +#endif 1.3325 +#ifdef INET6 1.3326 + case AF_INET6: 1.3327 + /* Scope based on outbound address */ 1.3328 +#if defined(__Userspace_os_Windows) 1.3329 + if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) { 1.3330 +#else 1.3331 + if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) || 1.3332 + SCTP_ROUTE_IS_REAL_LOOP(ro)) { 1.3333 +#endif 1.3334 + /* 1.3335 + * If the address is a loopback address, which 1.3336 + * consists of "::1" OR "fe80::1%lo0", we are loopback 1.3337 + * scope. But we don't use dest_is_priv (link local 1.3338 + * addresses). 
1.3339 + */ 1.3340 + dest_is_loop = 1; 1.3341 + if (net != NULL) { 1.3342 + /* mark it as local */ 1.3343 + net->addr_is_local = 1; 1.3344 + } 1.3345 + } else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) { 1.3346 + dest_is_priv = 1; 1.3347 + } 1.3348 + break; 1.3349 +#endif 1.3350 + } 1.3351 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:"); 1.3352 + SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst); 1.3353 + SCTP_IPI_ADDR_RLOCK(); 1.3354 + if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1.3355 + /* 1.3356 + * Bound all case 1.3357 + */ 1.3358 + answer = sctp_choose_boundall(stcb, net, ro, vrf_id, 1.3359 + dest_is_priv, dest_is_loop, 1.3360 + non_asoc_addr_ok, fam); 1.3361 + SCTP_IPI_ADDR_RUNLOCK(); 1.3362 + return (answer); 1.3363 + } 1.3364 + /* 1.3365 + * Subset bound case 1.3366 + */ 1.3367 + if (stcb) { 1.3368 + answer = sctp_choose_boundspecific_stcb(inp, stcb, ro, 1.3369 + vrf_id, dest_is_priv, 1.3370 + dest_is_loop, 1.3371 + non_asoc_addr_ok, fam); 1.3372 + } else { 1.3373 + answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id, 1.3374 + non_asoc_addr_ok, 1.3375 + dest_is_priv, 1.3376 + dest_is_loop, fam); 1.3377 + } 1.3378 + SCTP_IPI_ADDR_RUNLOCK(); 1.3379 + return (answer); 1.3380 +} 1.3381 + 1.3382 +static int 1.3383 +sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize) 1.3384 +{ 1.3385 +#if defined(__Userspace_os_Windows) 1.3386 + WSACMSGHDR cmh; 1.3387 +#else 1.3388 + struct cmsghdr cmh; 1.3389 +#endif 1.3390 + int tlen, at, found; 1.3391 + struct sctp_sndinfo sndinfo; 1.3392 + struct sctp_prinfo prinfo; 1.3393 + struct sctp_authinfo authinfo; 1.3394 + 1.3395 + tlen = SCTP_BUF_LEN(control); 1.3396 + at = 0; 1.3397 + found = 0; 1.3398 + /* 1.3399 + * Independent of how many mbufs, find the c_type inside the control 1.3400 + * structure and copy out the data. 1.3401 + */ 1.3402 + while (at < tlen) { 1.3403 + if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) { 1.3404 + /* There is not enough room for one more. */ 1.3405 + return (found); 1.3406 + } 1.3407 + m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh); 1.3408 + if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) { 1.3409 + /* We dont't have a complete CMSG header. */ 1.3410 + return (found); 1.3411 + } 1.3412 + if (((int)cmh.cmsg_len + at) > tlen) { 1.3413 + /* We don't have the complete CMSG. */ 1.3414 + return (found); 1.3415 + } 1.3416 + if ((cmh.cmsg_level == IPPROTO_SCTP) && 1.3417 + ((c_type == cmh.cmsg_type) || 1.3418 + ((c_type == SCTP_SNDRCV) && 1.3419 + ((cmh.cmsg_type == SCTP_SNDINFO) || 1.3420 + (cmh.cmsg_type == SCTP_PRINFO) || 1.3421 + (cmh.cmsg_type == SCTP_AUTHINFO))))) { 1.3422 + if (c_type == cmh.cmsg_type) { 1.3423 + if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < cpsize) { 1.3424 + return (found); 1.3425 + } 1.3426 + /* It is exactly what we want. Copy it out. 
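/*
 * [Editorial sketch -- not part of the patched source.]  The loop above walks
 * cmsghdr records laid out back to back in the control mbuf: read one header,
 * check that cmsg_len is at least a header and that the record still fits in
 * the remaining bytes, use the payload, then step forward by
 * CMSG_ALIGN(cmsg_len).  The same walk over a flat byte buffer (no mbufs),
 * with a hypothetical visit() callback; it needs <sys/socket.h> for cmsghdr
 * and CMSG_ALIGN and <string.h> for memcpy:
 */
static void
walk_cmsgs(const unsigned char *buf, size_t len,
           void (*visit)(const struct cmsghdr *, const unsigned char *payload))
{
	struct cmsghdr cmh;
	size_t at = 0;

	while (at + CMSG_ALIGN(sizeof(cmh)) <= len) {
		memcpy(&cmh, buf + at, sizeof(cmh));
		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh)) ||
		    at + cmh.cmsg_len > len) {
			return;                   /* truncated or corrupt record: stop */
		}
		visit(&cmh, buf + at + CMSG_ALIGN(sizeof(cmh)));
		at += CMSG_ALIGN(cmh.cmsg_len);
	}
}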
*/ 1.3427 + m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), cpsize, (caddr_t)data); 1.3428 + return (1); 1.3429 + } else { 1.3430 + struct sctp_sndrcvinfo *sndrcvinfo; 1.3431 + 1.3432 + sndrcvinfo = (struct sctp_sndrcvinfo *)data; 1.3433 + if (found == 0) { 1.3434 + if (cpsize < sizeof(struct sctp_sndrcvinfo)) { 1.3435 + return (found); 1.3436 + } 1.3437 + memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo)); 1.3438 + } 1.3439 + switch (cmh.cmsg_type) { 1.3440 + case SCTP_SNDINFO: 1.3441 + if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_sndinfo)) { 1.3442 + return (found); 1.3443 + } 1.3444 + m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo); 1.3445 + sndrcvinfo->sinfo_stream = sndinfo.snd_sid; 1.3446 + sndrcvinfo->sinfo_flags = sndinfo.snd_flags; 1.3447 + sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid; 1.3448 + sndrcvinfo->sinfo_context = sndinfo.snd_context; 1.3449 + sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id; 1.3450 + break; 1.3451 + case SCTP_PRINFO: 1.3452 + if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_prinfo)) { 1.3453 + return (found); 1.3454 + } 1.3455 + m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo); 1.3456 + if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) { 1.3457 + sndrcvinfo->sinfo_timetolive = prinfo.pr_value; 1.3458 + } else { 1.3459 + sndrcvinfo->sinfo_timetolive = 0; 1.3460 + } 1.3461 + sndrcvinfo->sinfo_flags |= prinfo.pr_policy; 1.3462 + break; 1.3463 + case SCTP_AUTHINFO: 1.3464 + if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_authinfo)) { 1.3465 + return (found); 1.3466 + } 1.3467 + m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo); 1.3468 + sndrcvinfo->sinfo_keynumber_valid = 1; 1.3469 + sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber; 1.3470 + break; 1.3471 + default: 1.3472 + return (found); 1.3473 + } 1.3474 + found = 1; 1.3475 + } 1.3476 + } 1.3477 + at += CMSG_ALIGN(cmh.cmsg_len); 1.3478 + } 1.3479 + return (found); 1.3480 +} 1.3481 + 1.3482 +static int 1.3483 +sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error) 1.3484 +{ 1.3485 +#if defined(__Userspace_os_Windows) 1.3486 + WSACMSGHDR cmh; 1.3487 +#else 1.3488 + struct cmsghdr cmh; 1.3489 +#endif 1.3490 + int tlen, at; 1.3491 + struct sctp_initmsg initmsg; 1.3492 +#ifdef INET 1.3493 + struct sockaddr_in sin; 1.3494 +#endif 1.3495 +#ifdef INET6 1.3496 + struct sockaddr_in6 sin6; 1.3497 +#endif 1.3498 + 1.3499 + tlen = SCTP_BUF_LEN(control); 1.3500 + at = 0; 1.3501 + while (at < tlen) { 1.3502 + if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) { 1.3503 + /* There is not enough room for one more. */ 1.3504 + *error = EINVAL; 1.3505 + return (1); 1.3506 + } 1.3507 + m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh); 1.3508 + if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) { 1.3509 + /* We dont't have a complete CMSG header. */ 1.3510 + *error = EINVAL; 1.3511 + return (1); 1.3512 + } 1.3513 + if (((int)cmh.cmsg_len + at) > tlen) { 1.3514 + /* We don't have the complete CMSG. 
*/ 1.3515 + *error = EINVAL; 1.3516 + return (1); 1.3517 + } 1.3518 + if (cmh.cmsg_level == IPPROTO_SCTP) { 1.3519 + switch (cmh.cmsg_type) { 1.3520 + case SCTP_INIT: 1.3521 + if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_initmsg)) { 1.3522 + *error = EINVAL; 1.3523 + return (1); 1.3524 + } 1.3525 + m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg); 1.3526 + if (initmsg.sinit_max_attempts) 1.3527 + stcb->asoc.max_init_times = initmsg.sinit_max_attempts; 1.3528 + if (initmsg.sinit_num_ostreams) 1.3529 + stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams; 1.3530 + if (initmsg.sinit_max_instreams) 1.3531 + stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams; 1.3532 + if (initmsg.sinit_max_init_timeo) 1.3533 + stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo; 1.3534 + if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) { 1.3535 + struct sctp_stream_out *tmp_str; 1.3536 + unsigned int i; 1.3537 + 1.3538 + /* Default is NOT correct */ 1.3539 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n", 1.3540 + stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams); 1.3541 + SCTP_TCB_UNLOCK(stcb); 1.3542 + SCTP_MALLOC(tmp_str, 1.3543 + struct sctp_stream_out *, 1.3544 + (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)), 1.3545 + SCTP_M_STRMO); 1.3546 + SCTP_TCB_LOCK(stcb); 1.3547 + if (tmp_str != NULL) { 1.3548 + SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO); 1.3549 + stcb->asoc.strmout = tmp_str; 1.3550 + stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams; 1.3551 + } else { 1.3552 + stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt; 1.3553 + } 1.3554 + for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1.3555 + TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); 1.3556 + stcb->asoc.strmout[i].chunks_on_queues = 0; 1.3557 + stcb->asoc.strmout[i].next_sequence_send = 0; 1.3558 + stcb->asoc.strmout[i].stream_no = i; 1.3559 + stcb->asoc.strmout[i].last_msg_incomplete = 0; 1.3560 + stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL); 1.3561 + } 1.3562 + } 1.3563 + break; 1.3564 +#ifdef INET 1.3565 + case SCTP_DSTADDRV4: 1.3566 + if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) { 1.3567 + *error = EINVAL; 1.3568 + return (1); 1.3569 + } 1.3570 + memset(&sin, 0, sizeof(struct sockaddr_in)); 1.3571 + sin.sin_family = AF_INET; 1.3572 +#ifdef HAVE_SIN_LEN 1.3573 + sin.sin_len = sizeof(struct sockaddr_in); 1.3574 +#endif 1.3575 + sin.sin_port = stcb->rport; 1.3576 + m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr); 1.3577 + if ((sin.sin_addr.s_addr == INADDR_ANY) || 1.3578 + (sin.sin_addr.s_addr == INADDR_BROADCAST) || 1.3579 + IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) { 1.3580 + *error = EINVAL; 1.3581 + return (1); 1.3582 + } 1.3583 + if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, 1.3584 + SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) { 1.3585 + *error = ENOBUFS; 1.3586 + return (1); 1.3587 + } 1.3588 + break; 1.3589 +#endif 1.3590 +#ifdef INET6 1.3591 + case SCTP_DSTADDRV6: 1.3592 + if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) { 1.3593 + *error = EINVAL; 1.3594 + return (1); 1.3595 + } 1.3596 + memset(&sin6, 0, sizeof(struct sockaddr_in6)); 1.3597 + sin6.sin6_family = AF_INET6; 1.3598 +#ifdef HAVE_SIN6_LEN 1.3599 + sin6.sin6_len = sizeof(struct sockaddr_in6); 1.3600 +#endif 1.3601 + sin6.sin6_port = 
stcb->rport; 1.3602 + m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr); 1.3603 + if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) || 1.3604 + IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) { 1.3605 + *error = EINVAL; 1.3606 + return (1); 1.3607 + } 1.3608 +#ifdef INET 1.3609 + if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) { 1.3610 + in6_sin6_2_sin(&sin, &sin6); 1.3611 + if ((sin.sin_addr.s_addr == INADDR_ANY) || 1.3612 + (sin.sin_addr.s_addr == INADDR_BROADCAST) || 1.3613 + IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) { 1.3614 + *error = EINVAL; 1.3615 + return (1); 1.3616 + } 1.3617 + if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, 1.3618 + SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) { 1.3619 + *error = ENOBUFS; 1.3620 + return (1); 1.3621 + } 1.3622 + } else 1.3623 +#endif 1.3624 + if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL, 1.3625 + SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) { 1.3626 + *error = ENOBUFS; 1.3627 + return (1); 1.3628 + } 1.3629 + break; 1.3630 +#endif 1.3631 + default: 1.3632 + break; 1.3633 + } 1.3634 + } 1.3635 + at += CMSG_ALIGN(cmh.cmsg_len); 1.3636 + } 1.3637 + return (0); 1.3638 +} 1.3639 + 1.3640 +static struct sctp_tcb * 1.3641 +sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p, 1.3642 + uint16_t port, 1.3643 + struct mbuf *control, 1.3644 + struct sctp_nets **net_p, 1.3645 + int *error) 1.3646 +{ 1.3647 +#if defined(__Userspace_os_Windows) 1.3648 + WSACMSGHDR cmh; 1.3649 +#else 1.3650 + struct cmsghdr cmh; 1.3651 +#endif 1.3652 + int tlen, at; 1.3653 + struct sctp_tcb *stcb; 1.3654 + struct sockaddr *addr; 1.3655 +#ifdef INET 1.3656 + struct sockaddr_in sin; 1.3657 +#endif 1.3658 +#ifdef INET6 1.3659 + struct sockaddr_in6 sin6; 1.3660 +#endif 1.3661 + 1.3662 + tlen = SCTP_BUF_LEN(control); 1.3663 + at = 0; 1.3664 + while (at < tlen) { 1.3665 + if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) { 1.3666 + /* There is not enough room for one more. */ 1.3667 + *error = EINVAL; 1.3668 + return (NULL); 1.3669 + } 1.3670 + m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh); 1.3671 + if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) { 1.3672 + /* We dont't have a complete CMSG header. */ 1.3673 + *error = EINVAL; 1.3674 + return (NULL); 1.3675 + } 1.3676 + if (((int)cmh.cmsg_len + at) > tlen) { 1.3677 + /* We don't have the complete CMSG. 
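/*
 * [Editorial sketch -- not part of the patched source.]  The SCTP_DSTADDRV4
 * and SCTP_DSTADDRV6 cases above apply the same sanity rules before calling
 * sctp_add_remote_addr(): wildcard, broadcast and multicast destinations are
 * rejected, and a v4-mapped IPv6 address is first converted to plain IPv4
 * (in6_sin6_2_sin) and then held to the IPv4 rules.  A hypothetical validator
 * collecting those rules (v4-mapped handling reduced to a comment):
 */
static int
dst_addr_is_plausible(const struct sockaddr *sa)
{
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET: {
		const struct sockaddr_in *sin = (const struct sockaddr_in *)sa;

		if ((sin->sin_addr.s_addr == INADDR_ANY) ||
		    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
		    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
			return (0);
		}
		return (1);
	}
#endif
#ifdef INET6
	case AF_INET6: {
		const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sa;

		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
		    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
			return (0);
		}
		/* a v4-mapped address would be converted and re-checked as IPv4 */
		return (1);
	}
#endif
	default:
		return (0);
	}
}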
*/ 1.3678 + *error = EINVAL; 1.3679 + return (NULL); 1.3680 + } 1.3681 + if (cmh.cmsg_level == IPPROTO_SCTP) { 1.3682 + switch (cmh.cmsg_type) { 1.3683 +#ifdef INET 1.3684 + case SCTP_DSTADDRV4: 1.3685 + if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) { 1.3686 + *error = EINVAL; 1.3687 + return (NULL); 1.3688 + } 1.3689 + memset(&sin, 0, sizeof(struct sockaddr_in)); 1.3690 + sin.sin_family = AF_INET; 1.3691 +#ifdef HAVE_SIN_LEN 1.3692 + sin.sin_len = sizeof(struct sockaddr_in); 1.3693 +#endif 1.3694 + sin.sin_port = port; 1.3695 + m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr); 1.3696 + addr = (struct sockaddr *)&sin; 1.3697 + break; 1.3698 +#endif 1.3699 +#ifdef INET6 1.3700 + case SCTP_DSTADDRV6: 1.3701 + if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) { 1.3702 + *error = EINVAL; 1.3703 + return (NULL); 1.3704 + } 1.3705 + memset(&sin6, 0, sizeof(struct sockaddr_in6)); 1.3706 + sin6.sin6_family = AF_INET6; 1.3707 +#ifdef HAVE_SIN6_LEN 1.3708 + sin6.sin6_len = sizeof(struct sockaddr_in6); 1.3709 +#endif 1.3710 + sin6.sin6_port = port; 1.3711 + m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr); 1.3712 +#ifdef INET 1.3713 + if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) { 1.3714 + in6_sin6_2_sin(&sin, &sin6); 1.3715 + addr = (struct sockaddr *)&sin; 1.3716 + } else 1.3717 +#endif 1.3718 + addr = (struct sockaddr *)&sin6; 1.3719 + break; 1.3720 +#endif 1.3721 + default: 1.3722 + addr = NULL; 1.3723 + break; 1.3724 + } 1.3725 + if (addr) { 1.3726 + stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL); 1.3727 + if (stcb != NULL) { 1.3728 + return (stcb); 1.3729 + } 1.3730 + } 1.3731 + } 1.3732 + at += CMSG_ALIGN(cmh.cmsg_len); 1.3733 + } 1.3734 + return (NULL); 1.3735 +} 1.3736 + 1.3737 +static struct mbuf * 1.3738 +sctp_add_cookie(struct mbuf *init, int init_offset, 1.3739 + struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature) 1.3740 +{ 1.3741 + struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret; 1.3742 + struct sctp_state_cookie *stc; 1.3743 + struct sctp_paramhdr *ph; 1.3744 + uint8_t *foo; 1.3745 + int sig_offset; 1.3746 + uint16_t cookie_sz; 1.3747 + 1.3748 + mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) + 1.3749 + sizeof(struct sctp_paramhdr)), 0, 1.3750 + M_NOWAIT, 1, MT_DATA); 1.3751 + if (mret == NULL) { 1.3752 + return (NULL); 1.3753 + } 1.3754 + copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT); 1.3755 + if (copy_init == NULL) { 1.3756 + sctp_m_freem(mret); 1.3757 + return (NULL); 1.3758 + } 1.3759 +#ifdef SCTP_MBUF_LOGGING 1.3760 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 1.3761 + struct mbuf *mat; 1.3762 + 1.3763 + for (mat = copy_init; mat; mat = SCTP_BUF_NEXT(mat)) { 1.3764 + if (SCTP_BUF_IS_EXTENDED(mat)) { 1.3765 + sctp_log_mb(mat, SCTP_MBUF_ICOPY); 1.3766 + } 1.3767 + } 1.3768 + } 1.3769 +#endif 1.3770 + copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL, 1.3771 + M_NOWAIT); 1.3772 + if (copy_initack == NULL) { 1.3773 + sctp_m_freem(mret); 1.3774 + sctp_m_freem(copy_init); 1.3775 + return (NULL); 1.3776 + } 1.3777 +#ifdef SCTP_MBUF_LOGGING 1.3778 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 1.3779 + struct mbuf *mat; 1.3780 + 1.3781 + for (mat = copy_initack; mat; mat = SCTP_BUF_NEXT(mat)) { 1.3782 + if (SCTP_BUF_IS_EXTENDED(mat)) { 1.3783 + 
sctp_log_mb(mat, SCTP_MBUF_ICOPY); 1.3784 + } 1.3785 + } 1.3786 + } 1.3787 +#endif 1.3788 + /* easy side we just drop it on the end */ 1.3789 + ph = mtod(mret, struct sctp_paramhdr *); 1.3790 + SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) + 1.3791 + sizeof(struct sctp_paramhdr); 1.3792 + stc = (struct sctp_state_cookie *)((caddr_t)ph + 1.3793 + sizeof(struct sctp_paramhdr)); 1.3794 + ph->param_type = htons(SCTP_STATE_COOKIE); 1.3795 + ph->param_length = 0; /* fill in at the end */ 1.3796 + /* Fill in the stc cookie data */ 1.3797 + memcpy(stc, stc_in, sizeof(struct sctp_state_cookie)); 1.3798 + 1.3799 + /* tack the INIT and then the INIT-ACK onto the chain */ 1.3800 + cookie_sz = 0; 1.3801 + for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 1.3802 + cookie_sz += SCTP_BUF_LEN(m_at); 1.3803 + if (SCTP_BUF_NEXT(m_at) == NULL) { 1.3804 + SCTP_BUF_NEXT(m_at) = copy_init; 1.3805 + break; 1.3806 + } 1.3807 + } 1.3808 + for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 1.3809 + cookie_sz += SCTP_BUF_LEN(m_at); 1.3810 + if (SCTP_BUF_NEXT(m_at) == NULL) { 1.3811 + SCTP_BUF_NEXT(m_at) = copy_initack; 1.3812 + break; 1.3813 + } 1.3814 + } 1.3815 + for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 1.3816 + cookie_sz += SCTP_BUF_LEN(m_at); 1.3817 + if (SCTP_BUF_NEXT(m_at) == NULL) { 1.3818 + break; 1.3819 + } 1.3820 + } 1.3821 + sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA); 1.3822 + if (sig == NULL) { 1.3823 + /* no space, so free the entire chain */ 1.3824 + sctp_m_freem(mret); 1.3825 + return (NULL); 1.3826 + } 1.3827 + SCTP_BUF_LEN(sig) = 0; 1.3828 + SCTP_BUF_NEXT(m_at) = sig; 1.3829 + sig_offset = 0; 1.3830 + foo = (uint8_t *) (mtod(sig, caddr_t) + sig_offset); 1.3831 + memset(foo, 0, SCTP_SIGNATURE_SIZE); 1.3832 + *signature = foo; 1.3833 + SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE; 1.3834 + cookie_sz += SCTP_SIGNATURE_SIZE; 1.3835 + ph->param_length = htons(cookie_sz); 1.3836 + return (mret); 1.3837 +} 1.3838 + 1.3839 + 1.3840 +static uint8_t 1.3841 +sctp_get_ect(struct sctp_tcb *stcb) 1.3842 +{ 1.3843 + if ((stcb != NULL) && (stcb->asoc.ecn_allowed == 1)) { 1.3844 + return (SCTP_ECT0_BIT); 1.3845 + } else { 1.3846 + return (0); 1.3847 + } 1.3848 +} 1.3849 + 1.3850 +#if defined(INET) || defined(INET6) 1.3851 +static void 1.3852 +sctp_handle_no_route(struct sctp_tcb *stcb, 1.3853 + struct sctp_nets *net, 1.3854 + int so_locked) 1.3855 +{ 1.3856 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n"); 1.3857 + 1.3858 + if (net) { 1.3859 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was "); 1.3860 + SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa); 1.3861 + if (net->dest_state & SCTP_ADDR_CONFIRMED) { 1.3862 + if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) { 1.3863 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net); 1.3864 + sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 1.3865 + stcb, 0, 1.3866 + (void *)net, 1.3867 + so_locked); 1.3868 + net->dest_state &= ~SCTP_ADDR_REACHABLE; 1.3869 + net->dest_state &= ~SCTP_ADDR_PF; 1.3870 + } 1.3871 + } 1.3872 + if (stcb) { 1.3873 + if (net == stcb->asoc.primary_destination) { 1.3874 + /* need a new primary */ 1.3875 + struct sctp_nets *alt; 1.3876 + 1.3877 + alt = sctp_find_alternate_net(stcb, net, 0); 1.3878 + if (alt != net) { 1.3879 + if (stcb->asoc.alternate) { 1.3880 + sctp_free_remote_addr(stcb->asoc.alternate); 1.3881 + } 1.3882 + stcb->asoc.alternate = alt; 1.3883 + atomic_add_int(&stcb->asoc.alternate->ref_count, 1); 1.3884 + if 
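/*
 * [Editorial sketch -- not part of the patched source.]  sctp_add_cookie()
 * above emits a single STATE-COOKIE parameter whose value is the local state
 * cookie followed by a verbatim copy of the peer's INIT, a copy of the
 * INIT-ACK being sent, and a zeroed signature slot that the caller fills in
 * with the HMAC later; param_length is the byte count of that whole mbuf
 * chain.  The layout and length arithmetic written out flat (sizes only, no
 * mbufs), as a hypothetical helper:
 */
static uint16_t
cookie_param_length(uint16_t init_len, uint16_t initack_len)
{
	uint32_t len;

	len  = sizeof(struct sctp_paramhdr);      /* SCTP_STATE_COOKIE type + length */
	len += sizeof(struct sctp_state_cookie);  /* stc_in, copied verbatim */
	len += init_len;                          /* copy of the received INIT */
	len += initack_len;                       /* copy of the INIT-ACK being built */
	len += SCTP_SIGNATURE_SIZE;               /* HMAC slot, zero for now */
	return ((uint16_t)len);                   /* stored via htons() in param_length */
}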
(net->ro._s_addr) { 1.3885 + sctp_free_ifa(net->ro._s_addr); 1.3886 + net->ro._s_addr = NULL; 1.3887 + } 1.3888 + net->src_addr_selected = 0; 1.3889 + } 1.3890 + } 1.3891 + } 1.3892 + } 1.3893 +} 1.3894 +#endif 1.3895 + 1.3896 +static int 1.3897 +sctp_lowlevel_chunk_output(struct sctp_inpcb *inp, 1.3898 + struct sctp_tcb *stcb, /* may be NULL */ 1.3899 + struct sctp_nets *net, 1.3900 + struct sockaddr *to, 1.3901 + struct mbuf *m, 1.3902 + uint32_t auth_offset, 1.3903 + struct sctp_auth_chunk *auth, 1.3904 + uint16_t auth_keyid, 1.3905 + int nofragment_flag, 1.3906 + int ecn_ok, 1.3907 + int out_of_asoc_ok, 1.3908 + uint16_t src_port, 1.3909 + uint16_t dest_port, 1.3910 + uint32_t v_tag, 1.3911 + uint16_t port, 1.3912 + union sctp_sockstore *over_addr, 1.3913 +#if defined(__FreeBSD__) 1.3914 + uint8_t use_mflowid, uint32_t mflowid, 1.3915 +#endif 1.3916 +#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 1.3917 + int so_locked SCTP_UNUSED 1.3918 +#else 1.3919 + int so_locked 1.3920 +#endif 1.3921 + ) 1.3922 +/* nofragment_flag to tell if IP_DF should be set (IPv4 only) */ 1.3923 +{ 1.3924 + /** 1.3925 + * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header 1.3926 + * WITH an SCTPHDR but no IP header, endpoint inp and sa structure: 1.3927 + * - fill in the HMAC digest of any AUTH chunk in the packet. 1.3928 + * - calculate and fill in the SCTP checksum. 1.3929 + * - prepend an IP address header. 1.3930 + * - if boundall use INADDR_ANY. 1.3931 + * - if boundspecific do source address selection. 1.3932 + * - set fragmentation option for ipV4. 1.3933 + * - On return from IP output, check/adjust mtu size of output 1.3934 + * interface and smallest_mtu size as well. 1.3935 + */ 1.3936 + /* Will need ifdefs around this */ 1.3937 +#ifdef __Panda__ 1.3938 + pakhandle_type o_pak; 1.3939 +#endif 1.3940 + struct mbuf *newm; 1.3941 + struct sctphdr *sctphdr; 1.3942 + int packet_length; 1.3943 + int ret; 1.3944 +#if defined(INET) || defined(INET6) 1.3945 + uint32_t vrf_id; 1.3946 +#endif 1.3947 +#if defined(INET) || defined(INET6) 1.3948 +#if !defined(__Panda__) 1.3949 + struct mbuf *o_pak; 1.3950 +#endif 1.3951 + sctp_route_t *ro = NULL; 1.3952 + struct udphdr *udp = NULL; 1.3953 +#endif 1.3954 + uint8_t tos_value; 1.3955 +#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1.3956 + struct socket *so = NULL; 1.3957 +#endif 1.3958 + 1.3959 +#if defined(__APPLE__) 1.3960 + if (so_locked) { 1.3961 + sctp_lock_assert(SCTP_INP_SO(inp)); 1.3962 + SCTP_TCB_LOCK_ASSERT(stcb); 1.3963 + } else { 1.3964 + sctp_unlock_assert(SCTP_INP_SO(inp)); 1.3965 + } 1.3966 +#endif 1.3967 + if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) { 1.3968 + SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT); 1.3969 + sctp_m_freem(m); 1.3970 + return (EFAULT); 1.3971 + } 1.3972 +#if defined(INET) || defined(INET6) 1.3973 + if (stcb) { 1.3974 + vrf_id = stcb->asoc.vrf_id; 1.3975 + } else { 1.3976 + vrf_id = inp->def_vrf_id; 1.3977 + } 1.3978 +#endif 1.3979 + /* fill in the HMAC digest for any AUTH chunk in the packet */ 1.3980 + if ((auth != NULL) && (stcb != NULL)) { 1.3981 + sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid); 1.3982 + } 1.3983 + 1.3984 + if (net) { 1.3985 + tos_value = net->dscp; 1.3986 + } else if (stcb) { 1.3987 + tos_value = stcb->asoc.default_dscp; 1.3988 + } else { 1.3989 + tos_value = inp->sctp_ep.default_dscp; 1.3990 + } 1.3991 + 1.3992 + switch (to->sa_family) { 1.3993 +#ifdef INET 1.3994 + case AF_INET: 1.3995 + { 1.3996 + struct ip *ip = 
NULL; 1.3997 + sctp_route_t iproute; 1.3998 + int len; 1.3999 + 1.4000 + len = sizeof(struct ip) + sizeof(struct sctphdr); 1.4001 + if (port) { 1.4002 + len += sizeof(struct udphdr); 1.4003 + } 1.4004 + newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA); 1.4005 + if (newm == NULL) { 1.4006 + sctp_m_freem(m); 1.4007 + SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.4008 + return (ENOMEM); 1.4009 + } 1.4010 + SCTP_ALIGN_TO_END(newm, len); 1.4011 + SCTP_BUF_LEN(newm) = len; 1.4012 + SCTP_BUF_NEXT(newm) = m; 1.4013 + m = newm; 1.4014 +#if defined(__FreeBSD__) 1.4015 + if (net != NULL) { 1.4016 +#ifdef INVARIANTS 1.4017 + if (net->flowidset == 0) { 1.4018 + panic("Flow ID not set"); 1.4019 + } 1.4020 +#endif 1.4021 + m->m_pkthdr.flowid = net->flowid; 1.4022 + m->m_flags |= M_FLOWID; 1.4023 + } else { 1.4024 + if (use_mflowid != 0) { 1.4025 + m->m_pkthdr.flowid = mflowid; 1.4026 + m->m_flags |= M_FLOWID; 1.4027 + } 1.4028 + } 1.4029 +#endif 1.4030 + packet_length = sctp_calculate_len(m); 1.4031 + ip = mtod(m, struct ip *); 1.4032 + ip->ip_v = IPVERSION; 1.4033 + ip->ip_hl = (sizeof(struct ip) >> 2); 1.4034 + if (tos_value == 0) { 1.4035 + /* 1.4036 + * This means especially, that it is not set at the 1.4037 + * SCTP layer. So use the value from the IP layer. 1.4038 + */ 1.4039 +#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) 1.4040 + tos_value = inp->ip_inp.inp.inp_ip_tos; 1.4041 +#else 1.4042 + tos_value = inp->inp_ip_tos; 1.4043 +#endif 1.4044 + } 1.4045 + tos_value &= 0xfc; 1.4046 + if (ecn_ok) { 1.4047 + tos_value |= sctp_get_ect(stcb); 1.4048 + } 1.4049 + if ((nofragment_flag) && (port == 0)) { 1.4050 +#if defined(__FreeBSD__) 1.4051 +#if __FreeBSD_version >= 1000000 1.4052 + ip->ip_off = htons(IP_DF); 1.4053 +#else 1.4054 + ip->ip_off = IP_DF; 1.4055 +#endif 1.4056 +#elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__) || defined(__Userspace__) 1.4057 + ip->ip_off = IP_DF; 1.4058 +#else 1.4059 + ip->ip_off = htons(IP_DF); 1.4060 +#endif 1.4061 + } else { 1.4062 +#if defined(__FreeBSD__) && __FreeBSD_version >= 1000000 1.4063 + ip->ip_off = htons(0); 1.4064 +#else 1.4065 + ip->ip_off = 0; 1.4066 +#endif 1.4067 + } 1.4068 +#if defined(__FreeBSD__) 1.4069 + /* FreeBSD has a function for ip_id's */ 1.4070 + ip->ip_id = ip_newid(); 1.4071 +#elif defined(RANDOM_IP_ID) 1.4072 + /* Apple has RANDOM_IP_ID switch */ 1.4073 + ip->ip_id = htons(ip_randomid()); 1.4074 +#elif defined(__Userspace__) 1.4075 + ip->ip_id = htons(SCTP_IP_ID(inp)++); 1.4076 +#else 1.4077 + ip->ip_id = SCTP_IP_ID(inp)++; 1.4078 +#endif 1.4079 + 1.4080 +#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) 1.4081 + ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl; 1.4082 +#else 1.4083 + ip->ip_ttl = inp->inp_ip_ttl; 1.4084 +#endif 1.4085 +#if defined(__FreeBSD__) && __FreeBSD_version >= 1000000 1.4086 + ip->ip_len = htons(packet_length); 1.4087 +#else 1.4088 + ip->ip_len = packet_length; 1.4089 +#endif 1.4090 + ip->ip_tos = tos_value; 1.4091 + if (port) { 1.4092 + ip->ip_p = IPPROTO_UDP; 1.4093 + } else { 1.4094 + ip->ip_p = IPPROTO_SCTP; 1.4095 + } 1.4096 + ip->ip_sum = 0; 1.4097 + if (net == NULL) { 1.4098 + ro = &iproute; 1.4099 + memset(&iproute, 0, sizeof(iproute)); 1.4100 +#ifdef HAVE_SA_LEN 1.4101 + memcpy(&ro->ro_dst, to, to->sa_len); 1.4102 +#else 1.4103 + memcpy(&ro->ro_dst, to, sizeof(struct sockaddr_in)); 1.4104 +#endif 1.4105 + } else { 1.4106 + ro = 
(sctp_route_t *)&net->ro; 1.4107 + } 1.4108 + /* Now the address selection part */ 1.4109 + ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr; 1.4110 + 1.4111 + /* call the routine to select the src address */ 1.4112 + if (net && out_of_asoc_ok == 0) { 1.4113 + if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) { 1.4114 + sctp_free_ifa(net->ro._s_addr); 1.4115 + net->ro._s_addr = NULL; 1.4116 + net->src_addr_selected = 0; 1.4117 + if (ro->ro_rt) { 1.4118 + RTFREE(ro->ro_rt); 1.4119 + ro->ro_rt = NULL; 1.4120 + } 1.4121 + } 1.4122 + if (net->src_addr_selected == 0) { 1.4123 + /* Cache the source address */ 1.4124 + net->ro._s_addr = sctp_source_address_selection(inp,stcb, 1.4125 + ro, net, 0, 1.4126 + vrf_id); 1.4127 + net->src_addr_selected = 1; 1.4128 + } 1.4129 + if (net->ro._s_addr == NULL) { 1.4130 + /* No route to host */ 1.4131 + net->src_addr_selected = 0; 1.4132 + sctp_handle_no_route(stcb, net, so_locked); 1.4133 + SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); 1.4134 + sctp_m_freem(m); 1.4135 + return (EHOSTUNREACH); 1.4136 + } 1.4137 + ip->ip_src = net->ro._s_addr->address.sin.sin_addr; 1.4138 + } else { 1.4139 + if (over_addr == NULL) { 1.4140 + struct sctp_ifa *_lsrc; 1.4141 + 1.4142 + _lsrc = sctp_source_address_selection(inp, stcb, ro, 1.4143 + net, 1.4144 + out_of_asoc_ok, 1.4145 + vrf_id); 1.4146 + if (_lsrc == NULL) { 1.4147 + sctp_handle_no_route(stcb, net, so_locked); 1.4148 + SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); 1.4149 + sctp_m_freem(m); 1.4150 + return (EHOSTUNREACH); 1.4151 + } 1.4152 + ip->ip_src = _lsrc->address.sin.sin_addr; 1.4153 + sctp_free_ifa(_lsrc); 1.4154 + } else { 1.4155 + ip->ip_src = over_addr->sin.sin_addr; 1.4156 + SCTP_RTALLOC(ro, vrf_id); 1.4157 + } 1.4158 + } 1.4159 + if (port) { 1.4160 + if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) { 1.4161 + sctp_handle_no_route(stcb, net, so_locked); 1.4162 + SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); 1.4163 + sctp_m_freem(m); 1.4164 + return (EHOSTUNREACH); 1.4165 + } 1.4166 + udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip)); 1.4167 + udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)); 1.4168 + udp->uh_dport = port; 1.4169 + udp->uh_ulen = htons(packet_length - sizeof(struct ip)); 1.4170 +#if !defined(__Windows__) && !defined(__Userspace__) 1.4171 +#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000) 1.4172 + if (V_udp_cksum) { 1.4173 + udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP)); 1.4174 + } else { 1.4175 + udp->uh_sum = 0; 1.4176 + } 1.4177 +#else 1.4178 + udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP)); 1.4179 +#endif 1.4180 +#else 1.4181 + udp->uh_sum = 0; 1.4182 +#endif 1.4183 + sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr)); 1.4184 + } else { 1.4185 + sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip)); 1.4186 + } 1.4187 + 1.4188 + sctphdr->src_port = src_port; 1.4189 + sctphdr->dest_port = dest_port; 1.4190 + sctphdr->v_tag = v_tag; 1.4191 + sctphdr->checksum = 0; 1.4192 + 1.4193 + /* 1.4194 + * If source address selection fails and we find no route 1.4195 + * then the ip_output should fail as well with a 1.4196 + * NO_ROUTE_TO_HOST type error. 
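/*
 * [Editorial sketch -- not part of the patched source.]  With a UDP
 * encapsulation port set, the packet built above is IP | UDP | SCTP: the UDP
 * length covers everything after the IP header, the source port is the
 * sysctl'd local tunneling port, and uh_sum is either the pseudo-header seed
 * (finished later by the stack or NIC) or 0 when UDP checksums are disabled.
 * The field assignments in isolation (BSD-style struct udphdr names; the
 * remote port is assumed to arrive already in network byte order, as it is
 * passed through unchanged above):
 */
static void
fill_encaps_udp(struct udphdr *udp, uint16_t local_port_host,
                uint16_t remote_port_net, uint16_t ip_payload_len)
{
	udp->uh_sport = htons(local_port_host);   /* our tunneling port */
	udp->uh_dport = remote_port_net;          /* peer's encapsulation port */
	udp->uh_ulen  = htons(ip_payload_len);    /* UDP header + SCTP packet */
	udp->uh_sum   = 0;                        /* or the in_pseudo() seed */
}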
We probably should catch 1.4197 + * that somewhere and abort the association right away 1.4198 + * (assuming this is an INIT being sent). 1.4199 + */ 1.4200 + if (ro->ro_rt == NULL) { 1.4201 + /* 1.4202 + * src addr selection failed to find a route (or 1.4203 + * valid source addr), so we can't get there from 1.4204 + * here (yet)! 1.4205 + */ 1.4206 + sctp_handle_no_route(stcb, net, so_locked); 1.4207 + SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); 1.4208 + sctp_m_freem(m); 1.4209 + return (EHOSTUNREACH); 1.4210 + } 1.4211 + if (ro != &iproute) { 1.4212 + memcpy(&iproute, ro, sizeof(*ro)); 1.4213 + } 1.4214 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n", 1.4215 + (uint32_t) (ntohl(ip->ip_src.s_addr))); 1.4216 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n", 1.4217 + (uint32_t)(ntohl(ip->ip_dst.s_addr))); 1.4218 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n", 1.4219 + (void *)ro->ro_rt); 1.4220 + 1.4221 + if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) { 1.4222 + /* failed to prepend data, give up */ 1.4223 + SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.4224 + sctp_m_freem(m); 1.4225 + return (ENOMEM); 1.4226 + } 1.4227 + SCTP_ATTACH_CHAIN(o_pak, m, packet_length); 1.4228 + if (port) { 1.4229 +#if defined(SCTP_WITH_NO_CSUM) 1.4230 + SCTP_STAT_INCR(sctps_sendnocrc); 1.4231 +#else 1.4232 + sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr)); 1.4233 + SCTP_STAT_INCR(sctps_sendswcrc); 1.4234 +#endif 1.4235 +#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000) 1.4236 + if (V_udp_cksum) { 1.4237 + SCTP_ENABLE_UDP_CSUM(o_pak); 1.4238 + } 1.4239 +#else 1.4240 + SCTP_ENABLE_UDP_CSUM(o_pak); 1.4241 +#endif 1.4242 + } else { 1.4243 +#if defined(SCTP_WITH_NO_CSUM) 1.4244 + SCTP_STAT_INCR(sctps_sendnocrc); 1.4245 +#else 1.4246 +#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 1.4247 + m->m_pkthdr.csum_flags = CSUM_SCTP; 1.4248 + m->m_pkthdr.csum_data = 0; 1.4249 + SCTP_STAT_INCR(sctps_sendhwcrc); 1.4250 +#else 1.4251 + if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) && 1.4252 + (stcb) && (stcb->asoc.scope.loopback_scope))) { 1.4253 + sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip)); 1.4254 + SCTP_STAT_INCR(sctps_sendswcrc); 1.4255 + } else { 1.4256 + SCTP_STAT_INCR(sctps_sendnocrc); 1.4257 + } 1.4258 +#endif 1.4259 +#endif 1.4260 + } 1.4261 +#ifdef SCTP_PACKET_LOGGING 1.4262 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) 1.4263 + sctp_packet_log(o_pak); 1.4264 +#endif 1.4265 + /* send it out. 
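/*
 * [Editorial sketch -- not part of the patched source.]  The checksum block
 * above chooses between three ways of providing the SCTP CRC32c (when the
 * build does not disable checksums outright): compute it in software when the
 * packet is UDP-encapsulated, hand it to the NIC via CSUM_SCTP on FreeBSD
 * builds with SCTP offload, or skip it on loopback when the
 * no-checksum-on-loopback sysctl applies.  The decision restated as plain
 * runtime flags (hypothetical parameters replacing the #ifdef ladder):
 */
static void
set_sctp_checksum(struct mbuf *m, struct sctphdr *sh, int udp_encapsulated,
                  int hw_offload_ok, int skip_on_loopback, int ip_hdr_len)
{
	if (udp_encapsulated) {
		/* software CRC32c, starting after the IP and UDP headers */
		sh->checksum = sctp_calculate_cksum(m, ip_hdr_len + sizeof(struct udphdr));
	} else if (hw_offload_ok) {
		m->m_pkthdr.csum_flags = CSUM_SCTP;   /* NIC fills in the CRC */
		m->m_pkthdr.csum_data = 0;
	} else if (!skip_on_loopback) {
		sh->checksum = sctp_calculate_cksum(m, ip_hdr_len);
	}
	/* else: loopback association and the sysctl says no checksum needed */
}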
table id is taken from stcb */ 1.4266 +#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1.4267 + if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) { 1.4268 + so = SCTP_INP_SO(inp); 1.4269 + SCTP_SOCKET_UNLOCK(so, 0); 1.4270 + } 1.4271 +#endif 1.4272 + SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id); 1.4273 +#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1.4274 + if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) { 1.4275 + atomic_add_int(&stcb->asoc.refcnt, 1); 1.4276 + SCTP_TCB_UNLOCK(stcb); 1.4277 + SCTP_SOCKET_LOCK(so, 0); 1.4278 + SCTP_TCB_LOCK(stcb); 1.4279 + atomic_subtract_int(&stcb->asoc.refcnt, 1); 1.4280 + } 1.4281 +#endif 1.4282 + SCTP_STAT_INCR(sctps_sendpackets); 1.4283 + SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 1.4284 + if (ret) 1.4285 + SCTP_STAT_INCR(sctps_senderrors); 1.4286 + 1.4287 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret); 1.4288 + if (net == NULL) { 1.4289 + /* free tempy routes */ 1.4290 +#if defined(__FreeBSD__) && __FreeBSD_version > 901000 1.4291 + RO_RTFREE(ro); 1.4292 +#else 1.4293 + if (ro->ro_rt) { 1.4294 + RTFREE(ro->ro_rt); 1.4295 + ro->ro_rt = NULL; 1.4296 + } 1.4297 +#endif 1.4298 + } else { 1.4299 + /* PMTU check versus smallest asoc MTU goes here */ 1.4300 + if ((ro->ro_rt != NULL) && 1.4301 + (net->ro._s_addr)) { 1.4302 + uint32_t mtu; 1.4303 + mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt); 1.4304 + if (net->port) { 1.4305 + mtu -= sizeof(struct udphdr); 1.4306 + } 1.4307 + if (mtu && (stcb->asoc.smallest_mtu > mtu)) { 1.4308 + sctp_mtu_size_reset(inp, &stcb->asoc, mtu); 1.4309 + net->mtu = mtu; 1.4310 + } 1.4311 + } else if (ro->ro_rt == NULL) { 1.4312 + /* route was freed */ 1.4313 + if (net->ro._s_addr && 1.4314 + net->src_addr_selected) { 1.4315 + sctp_free_ifa(net->ro._s_addr); 1.4316 + net->ro._s_addr = NULL; 1.4317 + } 1.4318 + net->src_addr_selected = 0; 1.4319 + } 1.4320 + } 1.4321 + return (ret); 1.4322 + } 1.4323 +#endif 1.4324 +#ifdef INET6 1.4325 + case AF_INET6: 1.4326 + { 1.4327 + uint32_t flowlabel, flowinfo; 1.4328 + struct ip6_hdr *ip6h; 1.4329 + struct route_in6 ip6route; 1.4330 +#if !(defined(__Panda__) || defined(__Userspace__)) 1.4331 + struct ifnet *ifp; 1.4332 +#endif 1.4333 + struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp; 1.4334 + int prev_scope = 0; 1.4335 +#ifdef SCTP_EMBEDDED_V6_SCOPE 1.4336 + struct sockaddr_in6 lsa6_storage; 1.4337 + int error; 1.4338 +#endif 1.4339 + u_short prev_port = 0; 1.4340 + int len; 1.4341 + 1.4342 + if (net) { 1.4343 + flowlabel = net->flowlabel; 1.4344 + } else if (stcb) { 1.4345 + flowlabel = stcb->asoc.default_flowlabel; 1.4346 + } else { 1.4347 + flowlabel = inp->sctp_ep.default_flowlabel; 1.4348 + } 1.4349 + if (flowlabel == 0) { 1.4350 + /* 1.4351 + * This means especially, that it is not set at the 1.4352 + * SCTP layer. So use the value from the IP layer. 
1.4353 + */ 1.4354 +#if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)) 1.4355 + flowlabel = ntohl(inp->ip_inp.inp.inp_flow); 1.4356 +#else 1.4357 + flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo); 1.4358 +#endif 1.4359 + } 1.4360 + flowlabel &= 0x000fffff; 1.4361 + len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr); 1.4362 + if (port) { 1.4363 + len += sizeof(struct udphdr); 1.4364 + } 1.4365 + newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA); 1.4366 + if (newm == NULL) { 1.4367 + sctp_m_freem(m); 1.4368 + SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.4369 + return (ENOMEM); 1.4370 + } 1.4371 + SCTP_ALIGN_TO_END(newm, len); 1.4372 + SCTP_BUF_LEN(newm) = len; 1.4373 + SCTP_BUF_NEXT(newm) = m; 1.4374 + m = newm; 1.4375 +#if defined(__FreeBSD__) 1.4376 + if (net != NULL) { 1.4377 +#ifdef INVARIANTS 1.4378 + if (net->flowidset == 0) { 1.4379 + panic("Flow ID not set"); 1.4380 + } 1.4381 +#endif 1.4382 + m->m_pkthdr.flowid = net->flowid; 1.4383 + m->m_flags |= M_FLOWID; 1.4384 + } else { 1.4385 + if (use_mflowid != 0) { 1.4386 + m->m_pkthdr.flowid = mflowid; 1.4387 + m->m_flags |= M_FLOWID; 1.4388 + } 1.4389 + } 1.4390 +#endif 1.4391 + packet_length = sctp_calculate_len(m); 1.4392 + 1.4393 + ip6h = mtod(m, struct ip6_hdr *); 1.4394 + /* protect *sin6 from overwrite */ 1.4395 + sin6 = (struct sockaddr_in6 *)to; 1.4396 + tmp = *sin6; 1.4397 + sin6 = &tmp; 1.4398 + 1.4399 +#ifdef SCTP_EMBEDDED_V6_SCOPE 1.4400 + /* KAME hack: embed scopeid */ 1.4401 +#if defined(__APPLE__) 1.4402 +#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) 1.4403 + if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0) 1.4404 +#else 1.4405 + if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0) 1.4406 +#endif 1.4407 +#elif defined(SCTP_KAME) 1.4408 + if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) 1.4409 +#else 1.4410 + if (in6_embedscope(&sin6->sin6_addr, sin6) != 0) 1.4411 +#endif 1.4412 + { 1.4413 + SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.4414 + return (EINVAL); 1.4415 + } 1.4416 +#endif /* SCTP_EMBEDDED_V6_SCOPE */ 1.4417 + if (net == NULL) { 1.4418 + memset(&ip6route, 0, sizeof(ip6route)); 1.4419 + ro = (sctp_route_t *)&ip6route; 1.4420 +#ifdef HAVE_SIN6_LEN 1.4421 + memcpy(&ro->ro_dst, sin6, sin6->sin6_len); 1.4422 +#else 1.4423 + memcpy(&ro->ro_dst, sin6, sizeof(struct sockaddr_in6)); 1.4424 +#endif 1.4425 + } else { 1.4426 + ro = (sctp_route_t *)&net->ro; 1.4427 + } 1.4428 + /* 1.4429 + * We assume here that inp_flow is in host byte order within 1.4430 + * the TCB! 1.4431 + */ 1.4432 + if (tos_value == 0) { 1.4433 + /* 1.4434 + * This means especially, that it is not set at the 1.4435 + * SCTP layer. So use the value from the IP layer. 
1.4436 + */ 1.4437 +#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) 1.4438 +#if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)) 1.4439 + tos_value = (ntohl(inp->ip_inp.inp.inp_flow) >> 20) & 0xff; 1.4440 +#else 1.4441 + tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff; 1.4442 +#endif 1.4443 +#endif 1.4444 + } 1.4445 + tos_value &= 0xfc; 1.4446 + if (ecn_ok) { 1.4447 + tos_value |= sctp_get_ect(stcb); 1.4448 + } 1.4449 + flowinfo = 0x06; 1.4450 + flowinfo <<= 8; 1.4451 + flowinfo |= tos_value; 1.4452 + flowinfo <<= 20; 1.4453 + flowinfo |= flowlabel; 1.4454 + ip6h->ip6_flow = htonl(flowinfo); 1.4455 + if (port) { 1.4456 + ip6h->ip6_nxt = IPPROTO_UDP; 1.4457 + } else { 1.4458 + ip6h->ip6_nxt = IPPROTO_SCTP; 1.4459 + } 1.4460 + ip6h->ip6_plen = (packet_length - sizeof(struct ip6_hdr)); 1.4461 + ip6h->ip6_dst = sin6->sin6_addr; 1.4462 + 1.4463 + /* 1.4464 + * Add SRC address selection here: we can only reuse to a 1.4465 + * limited degree the kame src-addr-sel, since we can try 1.4466 + * their selection but it may not be bound. 1.4467 + */ 1.4468 + bzero(&lsa6_tmp, sizeof(lsa6_tmp)); 1.4469 + lsa6_tmp.sin6_family = AF_INET6; 1.4470 +#ifdef HAVE_SIN6_LEN 1.4471 + lsa6_tmp.sin6_len = sizeof(lsa6_tmp); 1.4472 +#endif 1.4473 + lsa6 = &lsa6_tmp; 1.4474 + if (net && out_of_asoc_ok == 0) { 1.4475 + if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) { 1.4476 + sctp_free_ifa(net->ro._s_addr); 1.4477 + net->ro._s_addr = NULL; 1.4478 + net->src_addr_selected = 0; 1.4479 + if (ro->ro_rt) { 1.4480 + RTFREE(ro->ro_rt); 1.4481 + ro->ro_rt = NULL; 1.4482 + } 1.4483 + } 1.4484 + if (net->src_addr_selected == 0) { 1.4485 +#ifdef SCTP_EMBEDDED_V6_SCOPE 1.4486 + sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 1.4487 + /* KAME hack: embed scopeid */ 1.4488 +#if defined(__APPLE__) 1.4489 +#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) 1.4490 + if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0) 1.4491 +#else 1.4492 + if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0) 1.4493 +#endif 1.4494 +#elif defined(SCTP_KAME) 1.4495 + if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) 1.4496 +#else 1.4497 + if (in6_embedscope(&sin6->sin6_addr, sin6) != 0) 1.4498 +#endif 1.4499 + { 1.4500 + SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.4501 + return (EINVAL); 1.4502 + } 1.4503 +#endif /* SCTP_EMBEDDED_V6_SCOPE */ 1.4504 + /* Cache the source address */ 1.4505 + net->ro._s_addr = sctp_source_address_selection(inp, 1.4506 + stcb, 1.4507 + ro, 1.4508 + net, 1.4509 + 0, 1.4510 + vrf_id); 1.4511 +#ifdef SCTP_EMBEDDED_V6_SCOPE 1.4512 +#ifdef SCTP_KAME 1.4513 + (void)sa6_recoverscope(sin6); 1.4514 +#else 1.4515 + (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL); 1.4516 +#endif /* SCTP_KAME */ 1.4517 +#endif /* SCTP_EMBEDDED_V6_SCOPE */ 1.4518 + net->src_addr_selected = 1; 1.4519 + } 1.4520 + if (net->ro._s_addr == NULL) { 1.4521 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n"); 1.4522 + net->src_addr_selected = 0; 1.4523 + sctp_handle_no_route(stcb, net, so_locked); 1.4524 + SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); 1.4525 + sctp_m_freem(m); 1.4526 + return (EHOSTUNREACH); 1.4527 + } 1.4528 + lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr; 1.4529 + } else { 1.4530 
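Editorial aside, not part of the patch: the IPv6 branch a few lines above assembles ip6_flow by shifting the version nibble, the traffic-class byte (DSCP plus any ECN bits OR'd in via sctp_get_ect()), and the 20-bit flow label into one 32-bit word before byte-swapping it. A minimal standalone sketch of that packing is below; pack_ip6_flow is an illustrative helper name, not a function in this file.

#include <stdint.h>
#include <arpa/inet.h>

/* Pack version/traffic class/flow label the same way the code above does:
 * version 6 in bits 31-28, traffic class in bits 27-20, flow label in 19-0. */
static uint32_t
pack_ip6_flow(uint8_t tos_value, uint32_t flowlabel)
{
	uint32_t flowinfo;

	flowinfo = 0x06;                     /* IP version */
	flowinfo <<= 8;
	flowinfo |= tos_value;               /* traffic class, ECN already folded in */
	flowinfo <<= 20;
	flowinfo |= (flowlabel & 0x000fffff);/* 20-bit flow label */
	return (htonl(flowinfo));            /* ip6_flow is kept in network order */
}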
+#ifdef SCTP_EMBEDDED_V6_SCOPE 1.4531 + sin6 = (struct sockaddr_in6 *)&ro->ro_dst; 1.4532 + /* KAME hack: embed scopeid */ 1.4533 +#if defined(__APPLE__) 1.4534 +#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) 1.4535 + if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0) 1.4536 +#else 1.4537 + if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0) 1.4538 +#endif 1.4539 +#elif defined(SCTP_KAME) 1.4540 + if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) 1.4541 +#else 1.4542 + if (in6_embedscope(&sin6->sin6_addr, sin6) != 0) 1.4543 +#endif 1.4544 + { 1.4545 + SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.4546 + return (EINVAL); 1.4547 + } 1.4548 +#endif /* SCTP_EMBEDDED_V6_SCOPE */ 1.4549 + if (over_addr == NULL) { 1.4550 + struct sctp_ifa *_lsrc; 1.4551 + 1.4552 + _lsrc = sctp_source_address_selection(inp, stcb, ro, 1.4553 + net, 1.4554 + out_of_asoc_ok, 1.4555 + vrf_id); 1.4556 + if (_lsrc == NULL) { 1.4557 + sctp_handle_no_route(stcb, net, so_locked); 1.4558 + SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); 1.4559 + sctp_m_freem(m); 1.4560 + return (EHOSTUNREACH); 1.4561 + } 1.4562 + lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr; 1.4563 + sctp_free_ifa(_lsrc); 1.4564 + } else { 1.4565 + lsa6->sin6_addr = over_addr->sin6.sin6_addr; 1.4566 + SCTP_RTALLOC(ro, vrf_id); 1.4567 + } 1.4568 +#ifdef SCTP_EMBEDDED_V6_SCOPE 1.4569 +#ifdef SCTP_KAME 1.4570 + (void)sa6_recoverscope(sin6); 1.4571 +#else 1.4572 + (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL); 1.4573 +#endif /* SCTP_KAME */ 1.4574 +#endif /* SCTP_EMBEDDED_V6_SCOPE */ 1.4575 + } 1.4576 + lsa6->sin6_port = inp->sctp_lport; 1.4577 + 1.4578 + if (ro->ro_rt == NULL) { 1.4579 + /* 1.4580 + * src addr selection failed to find a route (or 1.4581 + * valid source addr), so we can't get there from 1.4582 + * here! 1.4583 + */ 1.4584 + sctp_handle_no_route(stcb, net, so_locked); 1.4585 + SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); 1.4586 + sctp_m_freem(m); 1.4587 + return (EHOSTUNREACH); 1.4588 + } 1.4589 +#ifndef SCOPEDROUTING 1.4590 +#ifdef SCTP_EMBEDDED_V6_SCOPE 1.4591 + /* 1.4592 + * XXX: sa6 may not have a valid sin6_scope_id in the 1.4593 + * non-SCOPEDROUTING case. 
1.4594 + */ 1.4595 + bzero(&lsa6_storage, sizeof(lsa6_storage)); 1.4596 + lsa6_storage.sin6_family = AF_INET6; 1.4597 +#ifdef HAVE_SIN6_LEN 1.4598 + lsa6_storage.sin6_len = sizeof(lsa6_storage); 1.4599 +#endif 1.4600 +#ifdef SCTP_KAME 1.4601 + lsa6_storage.sin6_addr = lsa6->sin6_addr; 1.4602 + if ((error = sa6_recoverscope(&lsa6_storage)) != 0) { 1.4603 +#else 1.4604 + if ((error = in6_recoverscope(&lsa6_storage, &lsa6->sin6_addr, 1.4605 + NULL)) != 0) { 1.4606 +#endif /* SCTP_KAME */ 1.4607 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error); 1.4608 + sctp_m_freem(m); 1.4609 + return (error); 1.4610 + } 1.4611 + /* XXX */ 1.4612 + lsa6_storage.sin6_addr = lsa6->sin6_addr; 1.4613 + lsa6_storage.sin6_port = inp->sctp_lport; 1.4614 + lsa6 = &lsa6_storage; 1.4615 +#endif /* SCTP_EMBEDDED_V6_SCOPE */ 1.4616 +#endif /* SCOPEDROUTING */ 1.4617 + ip6h->ip6_src = lsa6->sin6_addr; 1.4618 + 1.4619 + if (port) { 1.4620 + if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) { 1.4621 + sctp_handle_no_route(stcb, net, so_locked); 1.4622 + SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH); 1.4623 + sctp_m_freem(m); 1.4624 + return (EHOSTUNREACH); 1.4625 + } 1.4626 + udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr)); 1.4627 + udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)); 1.4628 + udp->uh_dport = port; 1.4629 + udp->uh_ulen = htons(packet_length - sizeof(struct ip6_hdr)); 1.4630 + udp->uh_sum = 0; 1.4631 + sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr)); 1.4632 + } else { 1.4633 + sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr)); 1.4634 + } 1.4635 + 1.4636 + sctphdr->src_port = src_port; 1.4637 + sctphdr->dest_port = dest_port; 1.4638 + sctphdr->v_tag = v_tag; 1.4639 + sctphdr->checksum = 0; 1.4640 + 1.4641 + /* 1.4642 + * We set the hop limit now since there is a good chance 1.4643 + * that our ro pointer is now filled 1.4644 + */ 1.4645 + ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro); 1.4646 +#if !(defined(__Panda__) || defined(__Userspace__)) 1.4647 + ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro); 1.4648 +#endif 1.4649 + 1.4650 +#ifdef SCTP_DEBUG 1.4651 + /* Copy to be sure something bad is not happening */ 1.4652 + sin6->sin6_addr = ip6h->ip6_dst; 1.4653 + lsa6->sin6_addr = ip6h->ip6_src; 1.4654 +#endif 1.4655 + 1.4656 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n"); 1.4657 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: "); 1.4658 + SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6); 1.4659 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: "); 1.4660 + SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6); 1.4661 + if (net) { 1.4662 + sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 1.4663 + /* preserve the port and scope for link local send */ 1.4664 + prev_scope = sin6->sin6_scope_id; 1.4665 + prev_port = sin6->sin6_port; 1.4666 + } 1.4667 + 1.4668 + if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) { 1.4669 + /* failed to prepend data, give up */ 1.4670 + sctp_m_freem(m); 1.4671 + SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.4672 + return (ENOMEM); 1.4673 + } 1.4674 + SCTP_ATTACH_CHAIN(o_pak, m, packet_length); 1.4675 + if (port) { 1.4676 +#if defined(SCTP_WITH_NO_CSUM) 1.4677 + SCTP_STAT_INCR(sctps_sendnocrc); 1.4678 +#else 1.4679 + sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr)); 1.4680 + SCTP_STAT_INCR(sctps_sendswcrc); 1.4681 +#endif 1.4682 +#if defined(__Windows__) 1.4683 + udp->uh_sum = 0; 1.4684 +#elif 
!defined(__Userspace__) 1.4685 + if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) { 1.4686 + udp->uh_sum = 0xffff; 1.4687 + } 1.4688 +#endif 1.4689 + } else { 1.4690 +#if defined(SCTP_WITH_NO_CSUM) 1.4691 + SCTP_STAT_INCR(sctps_sendnocrc); 1.4692 +#else 1.4693 +#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 1.4694 +#if __FreeBSD_version < 900000 1.4695 + sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr)); 1.4696 + SCTP_STAT_INCR(sctps_sendswcrc); 1.4697 +#else 1.4698 +#if __FreeBSD_version > 901000 1.4699 + m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6; 1.4700 +#else 1.4701 + m->m_pkthdr.csum_flags = CSUM_SCTP; 1.4702 +#endif 1.4703 + m->m_pkthdr.csum_data = 0; 1.4704 + SCTP_STAT_INCR(sctps_sendhwcrc); 1.4705 +#endif 1.4706 +#else 1.4707 + if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) && 1.4708 + (stcb) && (stcb->asoc.scope.loopback_scope))) { 1.4709 + sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr)); 1.4710 + SCTP_STAT_INCR(sctps_sendswcrc); 1.4711 + } else { 1.4712 + SCTP_STAT_INCR(sctps_sendnocrc); 1.4713 + } 1.4714 +#endif 1.4715 +#endif 1.4716 + } 1.4717 + /* send it out. table id is taken from stcb */ 1.4718 +#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1.4719 + if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) { 1.4720 + so = SCTP_INP_SO(inp); 1.4721 + SCTP_SOCKET_UNLOCK(so, 0); 1.4722 + } 1.4723 +#endif 1.4724 +#ifdef SCTP_PACKET_LOGGING 1.4725 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) 1.4726 + sctp_packet_log(o_pak); 1.4727 +#endif 1.4728 +#if !(defined(__Panda__) || defined(__Userspace__)) 1.4729 + SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id); 1.4730 +#else 1.4731 + SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, NULL, stcb, vrf_id); 1.4732 +#endif 1.4733 +#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1.4734 + if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) { 1.4735 + atomic_add_int(&stcb->asoc.refcnt, 1); 1.4736 + SCTP_TCB_UNLOCK(stcb); 1.4737 + SCTP_SOCKET_LOCK(so, 0); 1.4738 + SCTP_TCB_LOCK(stcb); 1.4739 + atomic_subtract_int(&stcb->asoc.refcnt, 1); 1.4740 + } 1.4741 +#endif 1.4742 + if (net) { 1.4743 + /* for link local this must be done */ 1.4744 + sin6->sin6_scope_id = prev_scope; 1.4745 + sin6->sin6_port = prev_port; 1.4746 + } 1.4747 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret); 1.4748 + SCTP_STAT_INCR(sctps_sendpackets); 1.4749 + SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 1.4750 + if (ret) { 1.4751 + SCTP_STAT_INCR(sctps_senderrors); 1.4752 + } 1.4753 + if (net == NULL) { 1.4754 + /* Now if we had a temp route free it */ 1.4755 +#if defined(__FreeBSD__) && __FreeBSD_version > 901000 1.4756 + RO_RTFREE(ro); 1.4757 +#else 1.4758 + if (ro->ro_rt) { 1.4759 + RTFREE(ro->ro_rt); 1.4760 + ro->ro_rt = NULL; 1.4761 + } 1.4762 +#endif 1.4763 + } else { 1.4764 + /* PMTU check versus smallest asoc MTU goes here */ 1.4765 + if (ro->ro_rt == NULL) { 1.4766 + /* Route was freed */ 1.4767 + if (net->ro._s_addr && 1.4768 + net->src_addr_selected) { 1.4769 + sctp_free_ifa(net->ro._s_addr); 1.4770 + net->ro._s_addr = NULL; 1.4771 + } 1.4772 + net->src_addr_selected = 0; 1.4773 + } 1.4774 + if ((ro->ro_rt != NULL) && 1.4775 + (net->ro._s_addr)) { 1.4776 + uint32_t mtu; 1.4777 + mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt); 1.4778 + if (mtu && 1.4779 + (stcb->asoc.smallest_mtu > mtu)) { 1.4780 + sctp_mtu_size_reset(inp, 
&stcb->asoc, mtu); 1.4781 + net->mtu = mtu; 1.4782 + if (net->port) { 1.4783 + net->mtu -= sizeof(struct udphdr); 1.4784 + } 1.4785 + } 1.4786 + } 1.4787 +#if !defined(__Panda__) && !defined(__Userspace__) 1.4788 + else if (ifp) { 1.4789 +#if defined(__Windows__) 1.4790 +#define ND_IFINFO(ifp) (ifp) 1.4791 +#define linkmtu if_mtu 1.4792 +#endif 1.4793 + if (ND_IFINFO(ifp)->linkmtu && 1.4794 + (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) { 1.4795 + sctp_mtu_size_reset(inp, 1.4796 + &stcb->asoc, 1.4797 + ND_IFINFO(ifp)->linkmtu); 1.4798 + } 1.4799 + } 1.4800 +#endif 1.4801 + } 1.4802 + return (ret); 1.4803 + } 1.4804 +#endif 1.4805 +#if defined(__Userspace__) 1.4806 + case AF_CONN: 1.4807 + { 1.4808 + char *buffer; 1.4809 + struct sockaddr_conn *sconn; 1.4810 + int len; 1.4811 + 1.4812 + sconn = (struct sockaddr_conn *)to; 1.4813 + len = sizeof(struct sctphdr); 1.4814 + newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA); 1.4815 + if (newm == NULL) { 1.4816 + sctp_m_freem(m); 1.4817 + SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.4818 + return (ENOMEM); 1.4819 + } 1.4820 + SCTP_ALIGN_TO_END(newm, len); 1.4821 + SCTP_BUF_LEN(newm) = len; 1.4822 + SCTP_BUF_NEXT(newm) = m; 1.4823 + m = newm; 1.4824 + packet_length = sctp_calculate_len(m); 1.4825 + sctphdr = mtod(m, struct sctphdr *); 1.4826 + sctphdr->src_port = src_port; 1.4827 + sctphdr->dest_port = dest_port; 1.4828 + sctphdr->v_tag = v_tag; 1.4829 + sctphdr->checksum = 0; 1.4830 +#if defined(SCTP_WITH_NO_CSUM) 1.4831 + SCTP_STAT_INCR(sctps_sendnocrc); 1.4832 +#else 1.4833 + sctphdr->checksum = sctp_calculate_cksum(m, 0); 1.4834 + SCTP_STAT_INCR(sctps_sendswcrc); 1.4835 +#endif 1.4836 + if (tos_value == 0) { 1.4837 + tos_value = inp->ip_inp.inp.inp_ip_tos; 1.4838 + } 1.4839 + tos_value &= 0xfc; 1.4840 + if (ecn_ok) { 1.4841 + tos_value |= sctp_get_ect(stcb); 1.4842 + } 1.4843 + /* Don't alloc/free for each packet */ 1.4844 + if ((buffer = malloc(packet_length)) != NULL) { 1.4845 + m_copydata(m, 0, packet_length, buffer); 1.4846 + ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, packet_length, tos_value, nofragment_flag); 1.4847 + free(buffer); 1.4848 + } else { 1.4849 + ret = ENOMEM; 1.4850 + } 1.4851 + sctp_m_freem(m); 1.4852 + return (ret); 1.4853 + } 1.4854 +#endif 1.4855 + default: 1.4856 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n", 1.4857 + ((struct sockaddr *)to)->sa_family); 1.4858 + sctp_m_freem(m); 1.4859 + SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT); 1.4860 + return (EFAULT); 1.4861 + } 1.4862 +} 1.4863 + 1.4864 + 1.4865 +void 1.4866 +sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked 1.4867 +#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 1.4868 + SCTP_UNUSED 1.4869 +#endif 1.4870 + ) 1.4871 +{ 1.4872 + struct mbuf *m; 1.4873 + struct sctp_nets *net; 1.4874 + struct sctp_init_chunk *init; 1.4875 + struct sctp_supported_addr_param *sup_addr; 1.4876 + struct sctp_adaptation_layer_indication *ali; 1.4877 + struct sctp_supported_chunk_types_param *pr_supported; 1.4878 + struct sctp_paramhdr *ph; 1.4879 + int cnt_inits_to = 0; 1.4880 + int ret; 1.4881 + uint16_t num_ext, chunk_len, padding_len, parameter_len; 1.4882 + 1.4883 +#if defined(__APPLE__) 1.4884 + if (so_locked) { 1.4885 + sctp_lock_assert(SCTP_INP_SO(inp)); 1.4886 + } else { 1.4887 + sctp_unlock_assert(SCTP_INP_SO(inp)); 1.4888 + } 1.4889 +#endif 1.4890 + /* INIT's always go to the primary (and usually ONLY address) */ 1.4891 + net = 
stcb->asoc.primary_destination; 1.4892 + if (net == NULL) { 1.4893 + net = TAILQ_FIRST(&stcb->asoc.nets); 1.4894 + if (net == NULL) { 1.4895 + /* TSNH */ 1.4896 + return; 1.4897 + } 1.4898 + /* we confirm any address we send an INIT to */ 1.4899 + net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 1.4900 + (void)sctp_set_primary_addr(stcb, NULL, net); 1.4901 + } else { 1.4902 + /* we confirm any address we send an INIT to */ 1.4903 + net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 1.4904 + } 1.4905 + SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n"); 1.4906 +#ifdef INET6 1.4907 + if (net->ro._l_addr.sa.sa_family == AF_INET6) { 1.4908 + /* 1.4909 + * special hook, if we are sending to link local it will not 1.4910 + * show up in our private address count. 1.4911 + */ 1.4912 + if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr)) 1.4913 + cnt_inits_to = 1; 1.4914 + } 1.4915 +#endif 1.4916 + if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 1.4917 + /* This case should not happen */ 1.4918 + SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n"); 1.4919 + return; 1.4920 + } 1.4921 + /* start the INIT timer */ 1.4922 + sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net); 1.4923 + 1.4924 + m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA); 1.4925 + if (m == NULL) { 1.4926 + /* No memory, INIT timer will re-attempt. */ 1.4927 + SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n"); 1.4928 + return; 1.4929 + } 1.4930 + chunk_len = (uint16_t)sizeof(struct sctp_init_chunk); 1.4931 + padding_len = 0; 1.4932 + /* 1.4933 + * assume peer supports asconf in order to be able to queue 1.4934 + * local address changes while an INIT is in flight and before 1.4935 + * the assoc is established. 1.4936 + */ 1.4937 + stcb->asoc.peer_supports_asconf = 1; 1.4938 + /* Now lets put the chunk header in place */ 1.4939 + init = mtod(m, struct sctp_init_chunk *); 1.4940 + /* now the chunk header */ 1.4941 + init->ch.chunk_type = SCTP_INITIATION; 1.4942 + init->ch.chunk_flags = 0; 1.4943 + /* fill in later from mbuf we build */ 1.4944 + init->ch.chunk_length = 0; 1.4945 + /* place in my tag */ 1.4946 + init->init.initiate_tag = htonl(stcb->asoc.my_vtag); 1.4947 + /* set up some of the credits. 
*/ 1.4948 + init->init.a_rwnd = htonl(max(inp->sctp_socket?SCTP_SB_LIMIT_RCV(inp->sctp_socket):0, 1.4949 + SCTP_MINIMAL_RWND)); 1.4950 + init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams); 1.4951 + init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams); 1.4952 + init->init.initial_tsn = htonl(stcb->asoc.init_seq_number); 1.4953 + 1.4954 + if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) { 1.4955 + uint8_t i; 1.4956 + 1.4957 + parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); 1.4958 + if (stcb->asoc.scope.ipv4_addr_legal) { 1.4959 + parameter_len += (uint16_t)sizeof(uint16_t); 1.4960 + } 1.4961 + if (stcb->asoc.scope.ipv6_addr_legal) { 1.4962 + parameter_len += (uint16_t)sizeof(uint16_t); 1.4963 + } 1.4964 + sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t) + chunk_len); 1.4965 + sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE); 1.4966 + sup_addr->ph.param_length = htons(parameter_len); 1.4967 + i = 0; 1.4968 + if (stcb->asoc.scope.ipv4_addr_legal) { 1.4969 + sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS); 1.4970 + } 1.4971 + if (stcb->asoc.scope.ipv6_addr_legal) { 1.4972 + sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS); 1.4973 + } 1.4974 + padding_len = 4 - 2 * i; 1.4975 + chunk_len += parameter_len; 1.4976 + } 1.4977 + 1.4978 + /* Adaptation layer indication parameter */ 1.4979 + if (inp->sctp_ep.adaptation_layer_indicator_provided) { 1.4980 + if (padding_len > 0) { 1.4981 + memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 1.4982 + chunk_len += padding_len; 1.4983 + padding_len = 0; 1.4984 + } 1.4985 + parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication); 1.4986 + ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len); 1.4987 + ali->ph.param_type = htons(SCTP_ULP_ADAPTATION); 1.4988 + ali->ph.param_length = htons(parameter_len); 1.4989 + ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator); 1.4990 + chunk_len += parameter_len; 1.4991 + } 1.4992 + 1.4993 + if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) { 1.4994 + /* Add NAT friendly parameter. 
*/ 1.4995 + if (padding_len > 0) { 1.4996 + memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 1.4997 + chunk_len += padding_len; 1.4998 + padding_len = 0; 1.4999 + } 1.5000 + parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); 1.5001 + ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len); 1.5002 + ph->param_type = htons(SCTP_HAS_NAT_SUPPORT); 1.5003 + ph->param_length = htons(parameter_len); 1.5004 + chunk_len += parameter_len; 1.5005 + } 1.5006 + 1.5007 + /* now any cookie time extensions */ 1.5008 + if (stcb->asoc.cookie_preserve_req) { 1.5009 + struct sctp_cookie_perserve_param *cookie_preserve; 1.5010 + 1.5011 + if (padding_len > 0) { 1.5012 + memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 1.5013 + chunk_len += padding_len; 1.5014 + padding_len = 0; 1.5015 + } 1.5016 + parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param); 1.5017 + cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t) + chunk_len); 1.5018 + cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE); 1.5019 + cookie_preserve->ph.param_length = htons(parameter_len); 1.5020 + cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req); 1.5021 + stcb->asoc.cookie_preserve_req = 0; 1.5022 + chunk_len += parameter_len; 1.5023 + } 1.5024 + 1.5025 + /* ECN parameter */ 1.5026 + if (stcb->asoc.ecn_allowed == 1) { 1.5027 + if (padding_len > 0) { 1.5028 + memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 1.5029 + chunk_len += padding_len; 1.5030 + padding_len = 0; 1.5031 + } 1.5032 + parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); 1.5033 + ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len); 1.5034 + ph->param_type = htons(SCTP_ECN_CAPABLE); 1.5035 + ph->param_length = htons(parameter_len); 1.5036 + chunk_len += parameter_len; 1.5037 + } 1.5038 + 1.5039 + /* And now tell the peer we do support PR-SCTP. 
*/ 1.5040 + if (padding_len > 0) { 1.5041 + memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 1.5042 + chunk_len += padding_len; 1.5043 + padding_len = 0; 1.5044 + } 1.5045 + parameter_len = (uint16_t)sizeof(struct sctp_paramhdr); 1.5046 + ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len); 1.5047 + ph->param_type = htons(SCTP_PRSCTP_SUPPORTED); 1.5048 + ph->param_length = htons(parameter_len); 1.5049 + chunk_len += parameter_len; 1.5050 + 1.5051 + /* And now tell the peer we do all the extensions */ 1.5052 + pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len); 1.5053 + pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT); 1.5054 + num_ext = 0; 1.5055 + pr_supported->chunk_types[num_ext++] = SCTP_ASCONF; 1.5056 + pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK; 1.5057 + pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN; 1.5058 + pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED; 1.5059 + pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET; 1.5060 + if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) { 1.5061 + pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION; 1.5062 + } 1.5063 + if (stcb->asoc.sctp_nr_sack_on_off == 1) { 1.5064 + pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK; 1.5065 + } 1.5066 + parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext; 1.5067 + pr_supported->ph.param_length = htons(parameter_len); 1.5068 + padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 1.5069 + chunk_len += parameter_len; 1.5070 + 1.5071 + /* add authentication parameters */ 1.5072 + if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) { 1.5073 + /* attach RANDOM parameter, if available */ 1.5074 + if (stcb->asoc.authinfo.random != NULL) { 1.5075 + struct sctp_auth_random *randp; 1.5076 + 1.5077 + if (padding_len > 0) { 1.5078 + memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 1.5079 + chunk_len += padding_len; 1.5080 + padding_len = 0; 1.5081 + } 1.5082 + randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len); 1.5083 + parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len; 1.5084 + /* random key already contains the header */ 1.5085 + memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len); 1.5086 + padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 1.5087 + chunk_len += parameter_len; 1.5088 + } 1.5089 + /* add HMAC_ALGO parameter */ 1.5090 + if ((stcb->asoc.local_hmacs != NULL) && 1.5091 + (stcb->asoc.local_hmacs->num_algo > 0)) { 1.5092 + struct sctp_auth_hmac_algo *hmacs; 1.5093 + 1.5094 + if (padding_len > 0) { 1.5095 + memset(mtod(m, caddr_t) + chunk_len, 0, padding_len); 1.5096 + chunk_len += padding_len; 1.5097 + padding_len = 0; 1.5098 + } 1.5099 + hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len); 1.5100 + parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) + 1.5101 + stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t)); 1.5102 + hmacs->ph.param_type = htons(SCTP_HMAC_LIST); 1.5103 + hmacs->ph.param_length = htons(parameter_len); 1.5104 + sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids); 1.5105 + padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 1.5106 + chunk_len += parameter_len; 1.5107 + } 1.5108 + /* add CHUNKS parameter */ 1.5109 + if (sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks) > 0) { 1.5110 + struct sctp_auth_chunk_list *chunks; 1.5111 + 1.5112 + if (padding_len > 0) { 1.5113 + memset(mtod(m, caddr_t) + chunk_len, 0, 
padding_len); 1.5114 + chunk_len += padding_len; 1.5115 + padding_len = 0; 1.5116 + } 1.5117 + chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len); 1.5118 + parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) + 1.5119 + sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks)); 1.5120 + chunks->ph.param_type = htons(SCTP_CHUNK_LIST); 1.5121 + chunks->ph.param_length = htons(parameter_len); 1.5122 + sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types); 1.5123 + padding_len = SCTP_SIZE32(parameter_len) - parameter_len; 1.5124 + chunk_len += parameter_len; 1.5125 + } 1.5126 + } 1.5127 + SCTP_BUF_LEN(m) = chunk_len; 1.5128 + 1.5129 + /* now the addresses */ 1.5130 + /* To optimize this we could put the scoping stuff 1.5131 + * into a structure and remove the individual uint8's from 1.5132 + * the assoc structure. Then we could just sifa in the 1.5133 + * address within the stcb. But for now this is a quick 1.5134 + * hack to get the address stuff teased apart. 1.5135 + */ 1.5136 + sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope, m, cnt_inits_to, &padding_len, &chunk_len); 1.5137 + 1.5138 + init->ch.chunk_length = htons(chunk_len); 1.5139 + if (padding_len > 0) { 1.5140 + struct mbuf *m_at, *mp_last; 1.5141 + 1.5142 + mp_last = NULL; 1.5143 + for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 1.5144 + if (SCTP_BUF_NEXT(m_at) == NULL) 1.5145 + mp_last = m_at; 1.5146 + } 1.5147 + if ((mp_last == NULL) || sctp_add_pad_tombuf(mp_last, padding_len)) { 1.5148 + sctp_m_freem(m); 1.5149 + return; 1.5150 + } 1.5151 + } 1.5152 + SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n"); 1.5153 + ret = sctp_lowlevel_chunk_output(inp, stcb, net, 1.5154 + (struct sockaddr *)&net->ro._l_addr, 1.5155 + m, 0, NULL, 0, 0, 0, 0, 1.5156 + inp->sctp_lport, stcb->rport, htonl(0), 1.5157 + net->port, NULL, 1.5158 +#if defined(__FreeBSD__) 1.5159 + 0, 0, 1.5160 +#endif 1.5161 + so_locked); 1.5162 + SCTPDBG(SCTP_DEBUG_OUTPUT4, "lowlevel_output - %d\n", ret); 1.5163 + SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 1.5164 + (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 1.5165 +} 1.5166 + 1.5167 +struct mbuf * 1.5168 +sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt, 1.5169 + int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly) 1.5170 +{ 1.5171 + /* 1.5172 + * Given a mbuf containing an INIT or INIT-ACK with the param_offset 1.5173 + * being equal to the beginning of the params i.e. (iphlen + 1.5174 + * sizeof(struct sctp_init_msg) parse through the parameters to the 1.5175 + * end of the mbuf verifying that all parameters are known. 1.5176 + * 1.5177 + * For unknown parameters build and return a mbuf with 1.5178 + * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop 1.5179 + * processing this chunk stop, and set *abort_processing to 1. 1.5180 + * 1.5181 + * By having param_offset be pre-set to where parameters begin it is 1.5182 + * hoped that this routine may be reused in the future by new 1.5183 + * features. 
1.5184 + */ 1.5185 + struct sctp_paramhdr *phdr, params; 1.5186 + 1.5187 + struct mbuf *mat, *op_err; 1.5188 + char tempbuf[SCTP_PARAM_BUFFER_SIZE]; 1.5189 + int at, limit, pad_needed; 1.5190 + uint16_t ptype, plen, padded_size; 1.5191 + int err_at; 1.5192 + 1.5193 + *abort_processing = 0; 1.5194 + mat = in_initpkt; 1.5195 + err_at = 0; 1.5196 + limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk); 1.5197 + at = param_offset; 1.5198 + op_err = NULL; 1.5199 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n"); 1.5200 + phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); 1.5201 + while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) { 1.5202 + ptype = ntohs(phdr->param_type); 1.5203 + plen = ntohs(phdr->param_length); 1.5204 + if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) { 1.5205 + /* wacked parameter */ 1.5206 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen); 1.5207 + goto invalid_size; 1.5208 + } 1.5209 + limit -= SCTP_SIZE32(plen); 1.5210 + /*- 1.5211 + * All parameters for all chunks that we know/understand are 1.5212 + * listed here. We process them other places and make 1.5213 + * appropriate stop actions per the upper bits. However this 1.5214 + * is the generic routine processor's can call to get back 1.5215 + * an operr.. to either incorporate (init-ack) or send. 1.5216 + */ 1.5217 + padded_size = SCTP_SIZE32(plen); 1.5218 + switch (ptype) { 1.5219 + /* Param's with variable size */ 1.5220 + case SCTP_HEARTBEAT_INFO: 1.5221 + case SCTP_STATE_COOKIE: 1.5222 + case SCTP_UNRECOG_PARAM: 1.5223 + case SCTP_ERROR_CAUSE_IND: 1.5224 + /* ok skip fwd */ 1.5225 + at += padded_size; 1.5226 + break; 1.5227 + /* Param's with variable size within a range */ 1.5228 + case SCTP_CHUNK_LIST: 1.5229 + case SCTP_SUPPORTED_CHUNK_EXT: 1.5230 + if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) { 1.5231 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen); 1.5232 + goto invalid_size; 1.5233 + } 1.5234 + at += padded_size; 1.5235 + break; 1.5236 + case SCTP_SUPPORTED_ADDRTYPE: 1.5237 + if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) { 1.5238 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen); 1.5239 + goto invalid_size; 1.5240 + } 1.5241 + at += padded_size; 1.5242 + break; 1.5243 + case SCTP_RANDOM: 1.5244 + if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) { 1.5245 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen); 1.5246 + goto invalid_size; 1.5247 + } 1.5248 + at += padded_size; 1.5249 + break; 1.5250 + case SCTP_SET_PRIM_ADDR: 1.5251 + case SCTP_DEL_IP_ADDRESS: 1.5252 + case SCTP_ADD_IP_ADDRESS: 1.5253 + if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) && 1.5254 + (padded_size != sizeof(struct sctp_asconf_addr_param))) { 1.5255 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen); 1.5256 + goto invalid_size; 1.5257 + } 1.5258 + at += padded_size; 1.5259 + break; 1.5260 + /* Param's with a fixed size */ 1.5261 + case SCTP_IPV4_ADDRESS: 1.5262 + if (padded_size != sizeof(struct sctp_ipv4addr_param)) { 1.5263 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen); 1.5264 + goto invalid_size; 1.5265 + } 1.5266 + at += padded_size; 1.5267 + break; 1.5268 + case SCTP_IPV6_ADDRESS: 1.5269 + if (padded_size != sizeof(struct sctp_ipv6addr_param)) { 1.5270 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", 
plen); 1.5271 + goto invalid_size; 1.5272 + } 1.5273 + at += padded_size; 1.5274 + break; 1.5275 + case SCTP_COOKIE_PRESERVE: 1.5276 + if (padded_size != sizeof(struct sctp_cookie_perserve_param)) { 1.5277 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen); 1.5278 + goto invalid_size; 1.5279 + } 1.5280 + at += padded_size; 1.5281 + break; 1.5282 + case SCTP_HAS_NAT_SUPPORT: 1.5283 + *nat_friendly = 1; 1.5284 + /* fall through */ 1.5285 + case SCTP_PRSCTP_SUPPORTED: 1.5286 + 1.5287 + if (padded_size != sizeof(struct sctp_paramhdr)) { 1.5288 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen); 1.5289 + goto invalid_size; 1.5290 + } 1.5291 + at += padded_size; 1.5292 + break; 1.5293 + case SCTP_ECN_CAPABLE: 1.5294 + if (padded_size != sizeof(struct sctp_ecn_supported_param)) { 1.5295 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen); 1.5296 + goto invalid_size; 1.5297 + } 1.5298 + at += padded_size; 1.5299 + break; 1.5300 + case SCTP_ULP_ADAPTATION: 1.5301 + if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) { 1.5302 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adapatation %d\n", plen); 1.5303 + goto invalid_size; 1.5304 + } 1.5305 + at += padded_size; 1.5306 + break; 1.5307 + case SCTP_SUCCESS_REPORT: 1.5308 + if (padded_size != sizeof(struct sctp_asconf_paramhdr)) { 1.5309 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen); 1.5310 + goto invalid_size; 1.5311 + } 1.5312 + at += padded_size; 1.5313 + break; 1.5314 + case SCTP_HOSTNAME_ADDRESS: 1.5315 + { 1.5316 + /* We can NOT handle HOST NAME addresses!! */ 1.5317 + int l_len; 1.5318 + 1.5319 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n"); 1.5320 + *abort_processing = 1; 1.5321 + if (op_err == NULL) { 1.5322 + /* Ok need to try to get a mbuf */ 1.5323 +#ifdef INET6 1.5324 + l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 1.5325 +#else 1.5326 + l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 1.5327 +#endif 1.5328 + l_len += plen; 1.5329 + l_len += sizeof(struct sctp_paramhdr); 1.5330 + op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA); 1.5331 + if (op_err) { 1.5332 + SCTP_BUF_LEN(op_err) = 0; 1.5333 + /* 1.5334 + * pre-reserve space for ip and sctp 1.5335 + * header and chunk hdr 1.5336 + */ 1.5337 +#ifdef INET6 1.5338 + SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 1.5339 +#else 1.5340 + SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 1.5341 +#endif 1.5342 + SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 1.5343 + SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 1.5344 + } 1.5345 + } 1.5346 + if (op_err) { 1.5347 + /* If we have space */ 1.5348 + struct sctp_paramhdr s; 1.5349 + 1.5350 + if (err_at % 4) { 1.5351 + uint32_t cpthis = 0; 1.5352 + 1.5353 + pad_needed = 4 - (err_at % 4); 1.5354 + m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 1.5355 + err_at += pad_needed; 1.5356 + } 1.5357 + s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR); 1.5358 + s.param_length = htons(sizeof(s) + plen); 1.5359 + m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 1.5360 + err_at += sizeof(s); 1.5361 + phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf),plen)); 1.5362 + if (phdr == NULL) { 1.5363 + sctp_m_freem(op_err); 1.5364 + /* 1.5365 + * we are out of memory but we still 1.5366 + * need to have a look at what to do 1.5367 + * (the system is in 
trouble 1.5368 + * though). 1.5369 + */ 1.5370 + return (NULL); 1.5371 + } 1.5372 + m_copyback(op_err, err_at, plen, (caddr_t)phdr); 1.5373 + } 1.5374 + return (op_err); 1.5375 + break; 1.5376 + } 1.5377 + default: 1.5378 + /* 1.5379 + * we do not recognize the parameter figure out what 1.5380 + * we do. 1.5381 + */ 1.5382 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype); 1.5383 + if ((ptype & 0x4000) == 0x4000) { 1.5384 + /* Report bit is set?? */ 1.5385 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n"); 1.5386 + if (op_err == NULL) { 1.5387 + int l_len; 1.5388 + /* Ok need to try to get an mbuf */ 1.5389 +#ifdef INET6 1.5390 + l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 1.5391 +#else 1.5392 + l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 1.5393 +#endif 1.5394 + l_len += plen; 1.5395 + l_len += sizeof(struct sctp_paramhdr); 1.5396 + op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA); 1.5397 + if (op_err) { 1.5398 + SCTP_BUF_LEN(op_err) = 0; 1.5399 +#ifdef INET6 1.5400 + SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 1.5401 +#else 1.5402 + SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 1.5403 +#endif 1.5404 + SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 1.5405 + SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 1.5406 + } 1.5407 + } 1.5408 + if (op_err) { 1.5409 + /* If we have space */ 1.5410 + struct sctp_paramhdr s; 1.5411 + 1.5412 + if (err_at % 4) { 1.5413 + uint32_t cpthis = 0; 1.5414 + 1.5415 + pad_needed = 4 - (err_at % 4); 1.5416 + m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 1.5417 + err_at += pad_needed; 1.5418 + } 1.5419 + s.param_type = htons(SCTP_UNRECOG_PARAM); 1.5420 + s.param_length = htons(sizeof(s) + plen); 1.5421 + m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 1.5422 + err_at += sizeof(s); 1.5423 + if (plen > sizeof(tempbuf)) { 1.5424 + plen = sizeof(tempbuf); 1.5425 + } 1.5426 + phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf),plen)); 1.5427 + if (phdr == NULL) { 1.5428 + sctp_m_freem(op_err); 1.5429 + /* 1.5430 + * we are out of memory but 1.5431 + * we still need to have a 1.5432 + * look at what to do (the 1.5433 + * system is in trouble 1.5434 + * though). 
1.5435 + */ 1.5436 + op_err = NULL; 1.5437 + goto more_processing; 1.5438 + } 1.5439 + m_copyback(op_err, err_at, plen, (caddr_t)phdr); 1.5440 + err_at += plen; 1.5441 + } 1.5442 + } 1.5443 + more_processing: 1.5444 + if ((ptype & 0x8000) == 0x0000) { 1.5445 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n"); 1.5446 + return (op_err); 1.5447 + } else { 1.5448 + /* skip this chunk and continue processing */ 1.5449 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n"); 1.5450 + at += SCTP_SIZE32(plen); 1.5451 + } 1.5452 + break; 1.5453 + 1.5454 + } 1.5455 + phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params)); 1.5456 + } 1.5457 + return (op_err); 1.5458 + invalid_size: 1.5459 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n"); 1.5460 + *abort_processing = 1; 1.5461 + if ((op_err == NULL) && phdr) { 1.5462 + int l_len; 1.5463 +#ifdef INET6 1.5464 + l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 1.5465 +#else 1.5466 + l_len = sizeof(struct ip) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 1.5467 +#endif 1.5468 + l_len += (2 * sizeof(struct sctp_paramhdr)); 1.5469 + op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA); 1.5470 + if (op_err) { 1.5471 + SCTP_BUF_LEN(op_err) = 0; 1.5472 +#ifdef INET6 1.5473 + SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 1.5474 +#else 1.5475 + SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 1.5476 +#endif 1.5477 + SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 1.5478 + SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 1.5479 + } 1.5480 + } 1.5481 + if ((op_err) && phdr) { 1.5482 + struct sctp_paramhdr s; 1.5483 + 1.5484 + if (err_at % 4) { 1.5485 + uint32_t cpthis = 0; 1.5486 + 1.5487 + pad_needed = 4 - (err_at % 4); 1.5488 + m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis); 1.5489 + err_at += pad_needed; 1.5490 + } 1.5491 + s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1.5492 + s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr)); 1.5493 + m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s); 1.5494 + err_at += sizeof(s); 1.5495 + /* Only copy back the p-hdr that caused the issue */ 1.5496 + m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr); 1.5497 + } 1.5498 + return (op_err); 1.5499 +} 1.5500 + 1.5501 +static int 1.5502 +sctp_are_there_new_addresses(struct sctp_association *asoc, 1.5503 + struct mbuf *in_initpkt, int offset, struct sockaddr *src) 1.5504 +{ 1.5505 + /* 1.5506 + * Given a INIT packet, look through the packet to verify that there 1.5507 + * are NO new addresses. As we go through the parameters add reports 1.5508 + * of any un-understood parameters that require an error. Also we 1.5509 + * must return (1) to drop the packet if we see a un-understood 1.5510 + * parameter that tells us to drop the chunk. 
1.5511 + */ 1.5512 + struct sockaddr *sa_touse; 1.5513 + struct sockaddr *sa; 1.5514 + struct sctp_paramhdr *phdr, params; 1.5515 + uint16_t ptype, plen; 1.5516 + uint8_t fnd; 1.5517 + struct sctp_nets *net; 1.5518 +#ifdef INET 1.5519 + struct sockaddr_in sin4, *sa4; 1.5520 +#endif 1.5521 +#ifdef INET6 1.5522 + struct sockaddr_in6 sin6, *sa6; 1.5523 +#endif 1.5524 + 1.5525 +#ifdef INET 1.5526 + memset(&sin4, 0, sizeof(sin4)); 1.5527 + sin4.sin_family = AF_INET; 1.5528 +#ifdef HAVE_SIN_LEN 1.5529 + sin4.sin_len = sizeof(sin4); 1.5530 +#endif 1.5531 +#endif 1.5532 +#ifdef INET6 1.5533 + memset(&sin6, 0, sizeof(sin6)); 1.5534 + sin6.sin6_family = AF_INET6; 1.5535 +#ifdef HAVE_SIN6_LEN 1.5536 + sin6.sin6_len = sizeof(sin6); 1.5537 +#endif 1.5538 +#endif 1.5539 + /* First what about the src address of the pkt ? */ 1.5540 + fnd = 0; 1.5541 + TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 1.5542 + sa = (struct sockaddr *)&net->ro._l_addr; 1.5543 + if (sa->sa_family == src->sa_family) { 1.5544 +#ifdef INET 1.5545 + if (sa->sa_family == AF_INET) { 1.5546 + struct sockaddr_in *src4; 1.5547 + 1.5548 + sa4 = (struct sockaddr_in *)sa; 1.5549 + src4 = (struct sockaddr_in *)src; 1.5550 + if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) { 1.5551 + fnd = 1; 1.5552 + break; 1.5553 + } 1.5554 + } 1.5555 +#endif 1.5556 +#ifdef INET6 1.5557 + if (sa->sa_family == AF_INET6) { 1.5558 + struct sockaddr_in6 *src6; 1.5559 + 1.5560 + sa6 = (struct sockaddr_in6 *)sa; 1.5561 + src6 = (struct sockaddr_in6 *)src; 1.5562 + if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) { 1.5563 + fnd = 1; 1.5564 + break; 1.5565 + } 1.5566 + } 1.5567 +#endif 1.5568 + } 1.5569 + } 1.5570 + if (fnd == 0) { 1.5571 + /* New address added! no need to look futher. */ 1.5572 + return (1); 1.5573 + } 1.5574 + /* Ok so far lets munge through the rest of the packet */ 1.5575 + offset += sizeof(struct sctp_init_chunk); 1.5576 + phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params)); 1.5577 + while (phdr) { 1.5578 + sa_touse = NULL; 1.5579 + ptype = ntohs(phdr->param_type); 1.5580 + plen = ntohs(phdr->param_length); 1.5581 + switch (ptype) { 1.5582 +#ifdef INET 1.5583 + case SCTP_IPV4_ADDRESS: 1.5584 + { 1.5585 + struct sctp_ipv4addr_param *p4, p4_buf; 1.5586 + 1.5587 + phdr = sctp_get_next_param(in_initpkt, offset, 1.5588 + (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf)); 1.5589 + if (plen != sizeof(struct sctp_ipv4addr_param) || 1.5590 + phdr == NULL) { 1.5591 + return (1); 1.5592 + } 1.5593 + p4 = (struct sctp_ipv4addr_param *)phdr; 1.5594 + sin4.sin_addr.s_addr = p4->addr; 1.5595 + sa_touse = (struct sockaddr *)&sin4; 1.5596 + break; 1.5597 + } 1.5598 +#endif 1.5599 +#ifdef INET6 1.5600 + case SCTP_IPV6_ADDRESS: 1.5601 + { 1.5602 + struct sctp_ipv6addr_param *p6, p6_buf; 1.5603 + 1.5604 + phdr = sctp_get_next_param(in_initpkt, offset, 1.5605 + (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf)); 1.5606 + if (plen != sizeof(struct sctp_ipv6addr_param) || 1.5607 + phdr == NULL) { 1.5608 + return (1); 1.5609 + } 1.5610 + p6 = (struct sctp_ipv6addr_param *)phdr; 1.5611 + memcpy((caddr_t)&sin6.sin6_addr, p6->addr, 1.5612 + sizeof(p6->addr)); 1.5613 + sa_touse = (struct sockaddr *)&sin6; 1.5614 + break; 1.5615 + } 1.5616 +#endif 1.5617 + default: 1.5618 + sa_touse = NULL; 1.5619 + break; 1.5620 + } 1.5621 + if (sa_touse) { 1.5622 + /* ok, sa_touse points to one to check */ 1.5623 + fnd = 0; 1.5624 + TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 1.5625 + sa = (struct sockaddr *)&net->ro._l_addr; 1.5626 + if (sa->sa_family != sa_touse->sa_family) { 
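Editorial aside, not part of the patch: sctp_arethere_unrecognized_parameters() above and the address scan in this function both walk the INIT's parameters as TLVs, advancing by the parameter length rounded up to a 32-bit boundary (SCTP_SIZE32). A minimal sketch of that walk follows, assuming a flat buffer instead of an mbuf chain; param_hdr, PAD32 and walk_params are illustrative names, the real code uses sctp_get_next_param() and struct sctp_paramhdr.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

struct param_hdr { uint16_t type; uint16_t length; }; /* mirrors struct sctp_paramhdr */

#define PAD32(x) (((x) + 3) & ~3)   /* same rounding SCTP_SIZE32() performs */

/* Step through TLV parameters in a flat buffer, stopping on a malformed
 * length, and advance by value length plus padding each iteration. */
static void
walk_params(const uint8_t *buf, size_t len)
{
	size_t at = 0;
	struct param_hdr ph;

	while (at + sizeof(ph) <= len) {
		memcpy(&ph, buf + at, sizeof(ph));
		uint16_t plen = ntohs(ph.length);
		if (plen < sizeof(ph) || at + plen > len)
			break;               /* wacked/truncated parameter, stop */
		/* dispatch on ntohs(ph.type) here */
		at += PAD32(plen);           /* header + value + pad to 32 bits */
	}
}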
1.5627 + continue; 1.5628 + } 1.5629 +#ifdef INET 1.5630 + if (sa->sa_family == AF_INET) { 1.5631 + sa4 = (struct sockaddr_in *)sa; 1.5632 + if (sa4->sin_addr.s_addr == 1.5633 + sin4.sin_addr.s_addr) { 1.5634 + fnd = 1; 1.5635 + break; 1.5636 + } 1.5637 + } 1.5638 +#endif 1.5639 +#ifdef INET6 1.5640 + if (sa->sa_family == AF_INET6) { 1.5641 + sa6 = (struct sockaddr_in6 *)sa; 1.5642 + if (SCTP6_ARE_ADDR_EQUAL( 1.5643 + sa6, &sin6)) { 1.5644 + fnd = 1; 1.5645 + break; 1.5646 + } 1.5647 + } 1.5648 +#endif 1.5649 + } 1.5650 + if (!fnd) { 1.5651 + /* New addr added! no need to look further */ 1.5652 + return (1); 1.5653 + } 1.5654 + } 1.5655 + offset += SCTP_SIZE32(plen); 1.5656 + phdr = sctp_get_next_param(in_initpkt, offset, ¶ms, sizeof(params)); 1.5657 + } 1.5658 + return (0); 1.5659 +} 1.5660 + 1.5661 +/* 1.5662 + * Given a MBUF chain that was sent into us containing an INIT. Build a 1.5663 + * INIT-ACK with COOKIE and send back. We assume that the in_initpkt has done 1.5664 + * a pullup to include IPv6/4header, SCTP header and initial part of INIT 1.5665 + * message (i.e. the struct sctp_init_msg). 1.5666 + */ 1.5667 +void 1.5668 +sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1.5669 + struct mbuf *init_pkt, int iphlen, int offset, 1.5670 + struct sockaddr *src, struct sockaddr *dst, 1.5671 + struct sctphdr *sh, struct sctp_init_chunk *init_chk, 1.5672 +#if defined(__FreeBSD__) 1.5673 + uint8_t use_mflowid, uint32_t mflowid, 1.5674 +#endif 1.5675 + uint32_t vrf_id, uint16_t port, int hold_inp_lock) 1.5676 +{ 1.5677 + struct sctp_association *asoc; 1.5678 + struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *mp_last; 1.5679 + struct sctp_init_ack_chunk *initack; 1.5680 + struct sctp_adaptation_layer_indication *ali; 1.5681 + struct sctp_ecn_supported_param *ecn; 1.5682 + struct sctp_prsctp_supported_param *prsctp; 1.5683 + struct sctp_supported_chunk_types_param *pr_supported; 1.5684 + union sctp_sockstore *over_addr; 1.5685 +#ifdef INET 1.5686 + struct sockaddr_in *dst4 = (struct sockaddr_in *)dst; 1.5687 + struct sockaddr_in *src4 = (struct sockaddr_in *)src; 1.5688 + struct sockaddr_in *sin; 1.5689 +#endif 1.5690 +#ifdef INET6 1.5691 + struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst; 1.5692 + struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src; 1.5693 + struct sockaddr_in6 *sin6; 1.5694 +#endif 1.5695 +#if defined(__Userspace__) 1.5696 + struct sockaddr_conn *dstconn = (struct sockaddr_conn *)dst; 1.5697 + struct sockaddr_conn *srcconn = (struct sockaddr_conn *)src; 1.5698 + struct sockaddr_conn *sconn; 1.5699 +#endif 1.5700 + struct sockaddr *to; 1.5701 + struct sctp_state_cookie stc; 1.5702 + struct sctp_nets *net = NULL; 1.5703 + uint8_t *signature = NULL; 1.5704 + int cnt_inits_to = 0; 1.5705 + uint16_t his_limit, i_want; 1.5706 + int abort_flag, padval; 1.5707 + int num_ext; 1.5708 + int p_len; 1.5709 + int nat_friendly = 0; 1.5710 + struct socket *so; 1.5711 + 1.5712 + if (stcb) { 1.5713 + asoc = &stcb->asoc; 1.5714 + } else { 1.5715 + asoc = NULL; 1.5716 + } 1.5717 + mp_last = NULL; 1.5718 + if ((asoc != NULL) && 1.5719 + (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) && 1.5720 + (sctp_are_there_new_addresses(asoc, init_pkt, offset, src))) { 1.5721 + /* new addresses, out of here in non-cookie-wait states */ 1.5722 + /* 1.5723 + * Send a ABORT, we don't add the new address error clause 1.5724 + * though we even set the T bit and copy in the 0 tag.. this 1.5725 + * looks no different than if no listener was present. 
1.5726 + */ 1.5727 + sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, NULL, 1.5728 +#if defined(__FreeBSD__) 1.5729 + use_mflowid, mflowid, 1.5730 +#endif 1.5731 + vrf_id, port); 1.5732 + return; 1.5733 + } 1.5734 + abort_flag = 0; 1.5735 + op_err = sctp_arethere_unrecognized_parameters(init_pkt, 1.5736 + (offset + sizeof(struct sctp_init_chunk)), 1.5737 + &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly); 1.5738 + if (abort_flag) { 1.5739 + do_a_abort: 1.5740 + sctp_send_abort(init_pkt, iphlen, src, dst, sh, 1.5741 + init_chk->init.initiate_tag, op_err, 1.5742 +#if defined(__FreeBSD__) 1.5743 + use_mflowid, mflowid, 1.5744 +#endif 1.5745 + vrf_id, port); 1.5746 + return; 1.5747 + } 1.5748 + m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 1.5749 + if (m == NULL) { 1.5750 + /* No memory, INIT timer will re-attempt. */ 1.5751 + if (op_err) 1.5752 + sctp_m_freem(op_err); 1.5753 + return; 1.5754 + } 1.5755 + SCTP_BUF_LEN(m) = sizeof(struct sctp_init_chunk); 1.5756 + 1.5757 + /* 1.5758 + * We might not overwrite the identification[] completely and on 1.5759 + * some platforms time_entered will contain some padding. 1.5760 + * Therefore zero out the cookie to avoid putting 1.5761 + * uninitialized memory on the wire. 1.5762 + */ 1.5763 + memset(&stc, 0, sizeof(struct sctp_state_cookie)); 1.5764 + 1.5765 + /* the time I built cookie */ 1.5766 + (void)SCTP_GETTIME_TIMEVAL(&stc.time_entered); 1.5767 + 1.5768 + /* populate any tie tags */ 1.5769 + if (asoc != NULL) { 1.5770 + /* unlock before tag selections */ 1.5771 + stc.tie_tag_my_vtag = asoc->my_vtag_nonce; 1.5772 + stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce; 1.5773 + stc.cookie_life = asoc->cookie_life; 1.5774 + net = asoc->primary_destination; 1.5775 + } else { 1.5776 + stc.tie_tag_my_vtag = 0; 1.5777 + stc.tie_tag_peer_vtag = 0; 1.5778 + /* life I will award this cookie */ 1.5779 + stc.cookie_life = inp->sctp_ep.def_cookie_life; 1.5780 + } 1.5781 + 1.5782 + /* copy in the ports for later check */ 1.5783 + stc.myport = sh->dest_port; 1.5784 + stc.peerport = sh->src_port; 1.5785 + 1.5786 + /* 1.5787 + * If we wanted to honor cookie life extentions, we would add to 1.5788 + * stc.cookie_life. 
For now we should NOT honor any extension 1.5789 + */ 1.5790 + stc.site_scope = stc.local_scope = stc.loopback_scope = 0; 1.5791 + if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1.5792 + stc.ipv6_addr_legal = 1; 1.5793 + if (SCTP_IPV6_V6ONLY(inp)) { 1.5794 + stc.ipv4_addr_legal = 0; 1.5795 + } else { 1.5796 + stc.ipv4_addr_legal = 1; 1.5797 + } 1.5798 +#if defined(__Userspace__) 1.5799 + stc.conn_addr_legal = 0; 1.5800 +#endif 1.5801 + } else { 1.5802 + stc.ipv6_addr_legal = 0; 1.5803 +#if defined(__Userspace__) 1.5804 + if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) { 1.5805 + stc.conn_addr_legal = 1; 1.5806 + stc.ipv4_addr_legal = 0; 1.5807 + } else { 1.5808 + stc.conn_addr_legal = 0; 1.5809 + stc.ipv4_addr_legal = 1; 1.5810 + } 1.5811 +#else 1.5812 + stc.ipv4_addr_legal = 1; 1.5813 +#endif 1.5814 + } 1.5815 +#ifdef SCTP_DONT_DO_PRIVADDR_SCOPE 1.5816 + stc.ipv4_scope = 1; 1.5817 +#else 1.5818 + stc.ipv4_scope = 0; 1.5819 +#endif 1.5820 + if (net == NULL) { 1.5821 + to = src; 1.5822 + switch (dst->sa_family) { 1.5823 +#ifdef INET 1.5824 + case AF_INET: 1.5825 + { 1.5826 + /* lookup address */ 1.5827 + stc.address[0] = src4->sin_addr.s_addr; 1.5828 + stc.address[1] = 0; 1.5829 + stc.address[2] = 0; 1.5830 + stc.address[3] = 0; 1.5831 + stc.addr_type = SCTP_IPV4_ADDRESS; 1.5832 + /* local from address */ 1.5833 + stc.laddress[0] = dst4->sin_addr.s_addr; 1.5834 + stc.laddress[1] = 0; 1.5835 + stc.laddress[2] = 0; 1.5836 + stc.laddress[3] = 0; 1.5837 + stc.laddr_type = SCTP_IPV4_ADDRESS; 1.5838 + /* scope_id is only for v6 */ 1.5839 + stc.scope_id = 0; 1.5840 +#ifndef SCTP_DONT_DO_PRIVADDR_SCOPE 1.5841 + if (IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) { 1.5842 + stc.ipv4_scope = 1; 1.5843 + } 1.5844 +#else 1.5845 + stc.ipv4_scope = 1; 1.5846 +#endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */ 1.5847 + /* Must use the address in this case */ 1.5848 + if (sctp_is_address_on_local_host(src, vrf_id)) { 1.5849 + stc.loopback_scope = 1; 1.5850 + stc.ipv4_scope = 1; 1.5851 + stc.site_scope = 1; 1.5852 + stc.local_scope = 0; 1.5853 + } 1.5854 + break; 1.5855 + } 1.5856 +#endif 1.5857 +#ifdef INET6 1.5858 + case AF_INET6: 1.5859 + { 1.5860 + stc.addr_type = SCTP_IPV6_ADDRESS; 1.5861 + memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr)); 1.5862 +#if defined(__FreeBSD__) && (((__FreeBSD_version < 900000) && (__FreeBSD_version >= 804000)) || (__FreeBSD_version > 900000)) 1.5863 + stc.scope_id = in6_getscope(&src6->sin6_addr); 1.5864 +#else 1.5865 + stc.scope_id = 0; 1.5866 +#endif 1.5867 + if (sctp_is_address_on_local_host(src, vrf_id)) { 1.5868 + stc.loopback_scope = 1; 1.5869 + stc.local_scope = 0; 1.5870 + stc.site_scope = 1; 1.5871 + stc.ipv4_scope = 1; 1.5872 + } else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr)) { 1.5873 + /* 1.5874 + * If the new destination is a LINK_LOCAL we 1.5875 + * must have common both site and local 1.5876 + * scope. Don't set local scope though since 1.5877 + * we must depend on the source to be added 1.5878 + * implicitly. We cannot assure just because 1.5879 + * we share one link that all links are 1.5880 + * common. 1.5881 + */ 1.5882 +#if defined(__APPLE__) 1.5883 + /* Mac OS X currently doesn't have in6_getscope() */ 1.5884 + stc.scope_id = src6->sin6_addr.s6_addr16[1]; 1.5885 +#endif 1.5886 + stc.local_scope = 0; 1.5887 + stc.site_scope = 1; 1.5888 + stc.ipv4_scope = 1; 1.5889 + /* 1.5890 + * we start counting for the private address 1.5891 + * stuff at 1. since the link local we 1.5892 + * source from won't show up in our scoped 1.5893 + * count. 
1.5894 + */ 1.5895 + cnt_inits_to = 1; 1.5896 + /* pull out the scope_id from incoming pkt */ 1.5897 + } else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr)) { 1.5898 + /* 1.5899 + * If the new destination is SITE_LOCAL then 1.5900 + * we must have site scope in common. 1.5901 + */ 1.5902 + stc.site_scope = 1; 1.5903 + } 1.5904 + memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr)); 1.5905 + stc.laddr_type = SCTP_IPV6_ADDRESS; 1.5906 + break; 1.5907 + } 1.5908 +#endif 1.5909 +#if defined(__Userspace__) 1.5910 + case AF_CONN: 1.5911 + { 1.5912 + /* lookup address */ 1.5913 + stc.address[0] = 0; 1.5914 + stc.address[1] = 0; 1.5915 + stc.address[2] = 0; 1.5916 + stc.address[3] = 0; 1.5917 + memcpy(&stc.address, &srcconn->sconn_addr, sizeof(void *)); 1.5918 + stc.addr_type = SCTP_CONN_ADDRESS; 1.5919 + /* local from address */ 1.5920 + stc.laddress[0] = 0; 1.5921 + stc.laddress[1] = 0; 1.5922 + stc.laddress[2] = 0; 1.5923 + stc.laddress[3] = 0; 1.5924 + memcpy(&stc.laddress, &dstconn->sconn_addr, sizeof(void *)); 1.5925 + stc.laddr_type = SCTP_CONN_ADDRESS; 1.5926 + /* scope_id is only for v6 */ 1.5927 + stc.scope_id = 0; 1.5928 + break; 1.5929 + } 1.5930 +#endif 1.5931 + default: 1.5932 + /* TSNH */ 1.5933 + goto do_a_abort; 1.5934 + break; 1.5935 + } 1.5936 + } else { 1.5937 + /* set the scope per the existing tcb */ 1.5938 + 1.5939 +#ifdef INET6 1.5940 + struct sctp_nets *lnet; 1.5941 +#endif 1.5942 + 1.5943 + stc.loopback_scope = asoc->scope.loopback_scope; 1.5944 + stc.ipv4_scope = asoc->scope.ipv4_local_scope; 1.5945 + stc.site_scope = asoc->scope.site_scope; 1.5946 + stc.local_scope = asoc->scope.local_scope; 1.5947 +#ifdef INET6 1.5948 + /* Why do we not consider IPv4 LL addresses? */ 1.5949 + TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { 1.5950 + if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) { 1.5951 + if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) { 1.5952 + /* 1.5953 + * if we have a LL address, start 1.5954 + * counting at 1. 1.5955 + */ 1.5956 + cnt_inits_to = 1; 1.5957 + } 1.5958 + } 1.5959 + } 1.5960 +#endif 1.5961 + /* use the net pointer */ 1.5962 + to = (struct sockaddr *)&net->ro._l_addr; 1.5963 + switch (to->sa_family) { 1.5964 +#ifdef INET 1.5965 + case AF_INET: 1.5966 + sin = (struct sockaddr_in *)to; 1.5967 + stc.address[0] = sin->sin_addr.s_addr; 1.5968 + stc.address[1] = 0; 1.5969 + stc.address[2] = 0; 1.5970 + stc.address[3] = 0; 1.5971 + stc.addr_type = SCTP_IPV4_ADDRESS; 1.5972 + if (net->src_addr_selected == 0) { 1.5973 + /* 1.5974 + * strange case here, the INIT should have 1.5975 + * did the selection. 
1.5976 + */ 1.5977 + net->ro._s_addr = sctp_source_address_selection(inp, 1.5978 + stcb, (sctp_route_t *)&net->ro, 1.5979 + net, 0, vrf_id); 1.5980 + if (net->ro._s_addr == NULL) 1.5981 + return; 1.5982 + 1.5983 + net->src_addr_selected = 1; 1.5984 + 1.5985 + } 1.5986 + stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr; 1.5987 + stc.laddress[1] = 0; 1.5988 + stc.laddress[2] = 0; 1.5989 + stc.laddress[3] = 0; 1.5990 + stc.laddr_type = SCTP_IPV4_ADDRESS; 1.5991 + /* scope_id is only for v6 */ 1.5992 + stc.scope_id = 0; 1.5993 + break; 1.5994 +#endif 1.5995 +#ifdef INET6 1.5996 + case AF_INET6: 1.5997 + sin6 = (struct sockaddr_in6 *)to; 1.5998 + memcpy(&stc.address, &sin6->sin6_addr, 1.5999 + sizeof(struct in6_addr)); 1.6000 + stc.addr_type = SCTP_IPV6_ADDRESS; 1.6001 + stc.scope_id = sin6->sin6_scope_id; 1.6002 + if (net->src_addr_selected == 0) { 1.6003 + /* 1.6004 + * strange case here, the INIT should have 1.6005 + * done the selection. 1.6006 + */ 1.6007 + net->ro._s_addr = sctp_source_address_selection(inp, 1.6008 + stcb, (sctp_route_t *)&net->ro, 1.6009 + net, 0, vrf_id); 1.6010 + if (net->ro._s_addr == NULL) 1.6011 + return; 1.6012 + 1.6013 + net->src_addr_selected = 1; 1.6014 + } 1.6015 + memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr, 1.6016 + sizeof(struct in6_addr)); 1.6017 + stc.laddr_type = SCTP_IPV6_ADDRESS; 1.6018 + break; 1.6019 +#endif 1.6020 +#if defined(__Userspace__) 1.6021 + case AF_CONN: 1.6022 + sconn = (struct sockaddr_conn *)to; 1.6023 + stc.address[0] = 0; 1.6024 + stc.address[1] = 0; 1.6025 + stc.address[2] = 0; 1.6026 + stc.address[3] = 0; 1.6027 + memcpy(&stc.address, &sconn->sconn_addr, sizeof(void *)); 1.6028 + stc.addr_type = SCTP_CONN_ADDRESS; 1.6029 + stc.laddress[0] = 0; 1.6030 + stc.laddress[1] = 0; 1.6031 + stc.laddress[2] = 0; 1.6032 + stc.laddress[3] = 0; 1.6033 + memcpy(&stc.laddress, &sconn->sconn_addr, sizeof(void *)); 1.6034 + stc.laddr_type = SCTP_CONN_ADDRESS; 1.6035 + stc.scope_id = 0; 1.6036 + break; 1.6037 +#endif 1.6038 + } 1.6039 + } 1.6040 + /* Now lets put the SCTP header in place */ 1.6041 + initack = mtod(m, struct sctp_init_ack_chunk *); 1.6042 + /* Save it off for quick ref */ 1.6043 + stc.peers_vtag = init_chk->init.initiate_tag; 1.6044 + /* who are we */ 1.6045 + memcpy(stc.identification, SCTP_VERSION_STRING, 1.6046 + min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification))); 1.6047 + memset(stc.reserved, 0, SCTP_RESERVE_SPACE); 1.6048 + /* now the chunk header */ 1.6049 + initack->ch.chunk_type = SCTP_INITIATION_ACK; 1.6050 + initack->ch.chunk_flags = 0; 1.6051 + /* fill in later from mbuf we build */ 1.6052 + initack->ch.chunk_length = 0; 1.6053 + /* place in my tag */ 1.6054 + if ((asoc != NULL) && 1.6055 + ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 1.6056 + (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) || 1.6057 + (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) { 1.6058 + /* re-use the v-tags and init-seq here */ 1.6059 + initack->init.initiate_tag = htonl(asoc->my_vtag); 1.6060 + initack->init.initial_tsn = htonl(asoc->init_seq_number); 1.6061 + } else { 1.6062 + uint32_t vtag, itsn; 1.6063 + if (hold_inp_lock) { 1.6064 + SCTP_INP_INCR_REF(inp); 1.6065 + SCTP_INP_RUNLOCK(inp); 1.6066 + } 1.6067 + if (asoc) { 1.6068 + atomic_add_int(&asoc->refcnt, 1); 1.6069 + SCTP_TCB_UNLOCK(stcb); 1.6070 + new_tag: 1.6071 + vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1); 1.6072 + if ((asoc->peer_supports_nat) && (vtag == asoc->my_vtag)) { 1.6073 + /* Got a duplicate vtag on some guy 
behind a nat 1.6074 + * make sure we don't use it. 1.6075 + */ 1.6076 + goto new_tag; 1.6077 + } 1.6078 + initack->init.initiate_tag = htonl(vtag); 1.6079 + /* get a TSN to use too */ 1.6080 + itsn = sctp_select_initial_TSN(&inp->sctp_ep); 1.6081 + initack->init.initial_tsn = htonl(itsn); 1.6082 + SCTP_TCB_LOCK(stcb); 1.6083 + atomic_add_int(&asoc->refcnt, -1); 1.6084 + } else { 1.6085 + vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1); 1.6086 + initack->init.initiate_tag = htonl(vtag); 1.6087 + /* get a TSN to use too */ 1.6088 + initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep)); 1.6089 + } 1.6090 + if (hold_inp_lock) { 1.6091 + SCTP_INP_RLOCK(inp); 1.6092 + SCTP_INP_DECR_REF(inp); 1.6093 + } 1.6094 + } 1.6095 + /* save away my tag to */ 1.6096 + stc.my_vtag = initack->init.initiate_tag; 1.6097 + 1.6098 + /* set up some of the credits. */ 1.6099 + so = inp->sctp_socket; 1.6100 + if (so == NULL) { 1.6101 + /* memory problem */ 1.6102 + sctp_m_freem(m); 1.6103 + return; 1.6104 + } else { 1.6105 + initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND)); 1.6106 + } 1.6107 + /* set what I want */ 1.6108 + his_limit = ntohs(init_chk->init.num_inbound_streams); 1.6109 + /* choose what I want */ 1.6110 + if (asoc != NULL) { 1.6111 + if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) { 1.6112 + i_want = asoc->streamoutcnt; 1.6113 + } else { 1.6114 + i_want = inp->sctp_ep.pre_open_stream_count; 1.6115 + } 1.6116 + } else { 1.6117 + i_want = inp->sctp_ep.pre_open_stream_count; 1.6118 + } 1.6119 + if (his_limit < i_want) { 1.6120 + /* I Want more :< */ 1.6121 + initack->init.num_outbound_streams = init_chk->init.num_inbound_streams; 1.6122 + } else { 1.6123 + /* I can have what I want :> */ 1.6124 + initack->init.num_outbound_streams = htons(i_want); 1.6125 + } 1.6126 + /* tell him his limit. 
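The number of inbound streams we advertise comes from our own endpoint limit (max_open_streams_intome), independent of how many outbound streams the peer requested.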
*/ 1.6127 + initack->init.num_inbound_streams = 1.6128 + htons(inp->sctp_ep.max_open_streams_intome); 1.6129 + 1.6130 + /* adaptation layer indication parameter */ 1.6131 + if (inp->sctp_ep.adaptation_layer_indicator_provided) { 1.6132 + ali = (struct sctp_adaptation_layer_indication *)((caddr_t)initack + sizeof(*initack)); 1.6133 + ali->ph.param_type = htons(SCTP_ULP_ADAPTATION); 1.6134 + ali->ph.param_length = htons(sizeof(*ali)); 1.6135 + ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator); 1.6136 + SCTP_BUF_LEN(m) += sizeof(*ali); 1.6137 + ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali + sizeof(*ali)); 1.6138 + } else { 1.6139 + ecn = (struct sctp_ecn_supported_param *)((caddr_t)initack + sizeof(*initack)); 1.6140 + } 1.6141 + 1.6142 + /* ECN parameter */ 1.6143 + if (((asoc != NULL) && (asoc->ecn_allowed == 1)) || 1.6144 + (inp->sctp_ecn_enable == 1)) { 1.6145 + ecn->ph.param_type = htons(SCTP_ECN_CAPABLE); 1.6146 + ecn->ph.param_length = htons(sizeof(*ecn)); 1.6147 + SCTP_BUF_LEN(m) += sizeof(*ecn); 1.6148 + 1.6149 + prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn + 1.6150 + sizeof(*ecn)); 1.6151 + } else { 1.6152 + prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn); 1.6153 + } 1.6154 + /* And now tell the peer we do pr-sctp */ 1.6155 + prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED); 1.6156 + prsctp->ph.param_length = htons(sizeof(*prsctp)); 1.6157 + SCTP_BUF_LEN(m) += sizeof(*prsctp); 1.6158 + if (nat_friendly) { 1.6159 + /* Add NAT friendly parameter */ 1.6160 + struct sctp_paramhdr *ph; 1.6161 + 1.6162 + ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m)); 1.6163 + ph->param_type = htons(SCTP_HAS_NAT_SUPPORT); 1.6164 + ph->param_length = htons(sizeof(struct sctp_paramhdr)); 1.6165 + SCTP_BUF_LEN(m) += sizeof(struct sctp_paramhdr); 1.6166 + } 1.6167 + /* And now tell the peer we do all the extensions */ 1.6168 + pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m)); 1.6169 + pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT); 1.6170 + num_ext = 0; 1.6171 + pr_supported->chunk_types[num_ext++] = SCTP_ASCONF; 1.6172 + pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK; 1.6173 + pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN; 1.6174 + pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED; 1.6175 + pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET; 1.6176 + if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) 1.6177 + pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION; 1.6178 + if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off)) 1.6179 + pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK; 1.6180 + p_len = sizeof(*pr_supported) + num_ext; 1.6181 + pr_supported->ph.param_length = htons(p_len); 1.6182 + bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len); 1.6183 + SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 1.6184 + 1.6185 + /* add authentication parameters */ 1.6186 + if (!SCTP_BASE_SYSCTL(sctp_auth_disable)) { 1.6187 + struct sctp_auth_random *randp; 1.6188 + struct sctp_auth_hmac_algo *hmacs; 1.6189 + struct sctp_auth_chunk_list *chunks; 1.6190 + uint16_t random_len; 1.6191 + 1.6192 + /* generate and add RANDOM parameter */ 1.6193 + random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT; 1.6194 + randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m)); 1.6195 + randp->ph.param_type = htons(SCTP_RANDOM); 1.6196 + p_len = sizeof(*randp) + random_len; 1.6197 + randp->ph.param_length = htons(p_len); 1.6198 + 
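/* Fill the RANDOM parameter body with random bytes; together with the HMAC-ALGO and CHUNKS parameters added below, this advertises SCTP-AUTH (RFC 4895) support to the peer. */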
SCTP_READ_RANDOM(randp->random_data, random_len); 1.6199 + /* zero out any padding required */ 1.6200 + bzero((caddr_t)randp + p_len, SCTP_SIZE32(p_len) - p_len); 1.6201 + SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 1.6202 + 1.6203 + /* add HMAC_ALGO parameter */ 1.6204 + hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m)); 1.6205 + p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs, 1.6206 + (uint8_t *) hmacs->hmac_ids); 1.6207 + if (p_len > 0) { 1.6208 + p_len += sizeof(*hmacs); 1.6209 + hmacs->ph.param_type = htons(SCTP_HMAC_LIST); 1.6210 + hmacs->ph.param_length = htons(p_len); 1.6211 + /* zero out any padding required */ 1.6212 + bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len); 1.6213 + SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 1.6214 + } 1.6215 + /* add CHUNKS parameter */ 1.6216 + chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + SCTP_BUF_LEN(m)); 1.6217 + p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks, 1.6218 + chunks->chunk_types); 1.6219 + if (p_len > 0) { 1.6220 + p_len += sizeof(*chunks); 1.6221 + chunks->ph.param_type = htons(SCTP_CHUNK_LIST); 1.6222 + chunks->ph.param_length = htons(p_len); 1.6223 + /* zero out any padding required */ 1.6224 + bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len); 1.6225 + SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len); 1.6226 + } 1.6227 + } 1.6228 + m_at = m; 1.6229 + /* now the addresses */ 1.6230 + { 1.6231 + struct sctp_scoping scp; 1.6232 + /* To optimize this we could put the scoping stuff 1.6233 + * into a structure and remove the individual uint8's from 1.6234 + * the stc structure. Then we could just sifa in the 1.6235 + * address within the stc.. but for now this is a quick 1.6236 + * hack to get the address stuff teased apart. 1.6237 + */ 1.6238 + scp.ipv4_addr_legal = stc.ipv4_addr_legal; 1.6239 + scp.ipv6_addr_legal = stc.ipv6_addr_legal; 1.6240 +#if defined(__Userspace__) 1.6241 + scp.conn_addr_legal = stc.conn_addr_legal; 1.6242 +#endif 1.6243 + scp.loopback_scope = stc.loopback_scope; 1.6244 + scp.ipv4_local_scope = stc.ipv4_scope; 1.6245 + scp.local_scope = stc.local_scope; 1.6246 + scp.site_scope = stc.site_scope; 1.6247 + m_at = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_at, cnt_inits_to, NULL, NULL); 1.6248 + } 1.6249 + 1.6250 + /* tack on the operational error if present */ 1.6251 + if (op_err) { 1.6252 + struct mbuf *ol; 1.6253 + int llen; 1.6254 + llen = 0; 1.6255 + ol = op_err; 1.6256 + 1.6257 + while (ol) { 1.6258 + llen += SCTP_BUF_LEN(ol); 1.6259 + ol = SCTP_BUF_NEXT(ol); 1.6260 + } 1.6261 + if (llen % 4) { 1.6262 + /* must add a pad to the param */ 1.6263 + uint32_t cpthis = 0; 1.6264 + int padlen; 1.6265 + 1.6266 + padlen = 4 - (llen % 4); 1.6267 + m_copyback(op_err, llen, padlen, (caddr_t)&cpthis); 1.6268 + } 1.6269 + while (SCTP_BUF_NEXT(m_at) != NULL) { 1.6270 + m_at = SCTP_BUF_NEXT(m_at); 1.6271 + } 1.6272 + SCTP_BUF_NEXT(m_at) = op_err; 1.6273 + while (SCTP_BUF_NEXT(m_at) != NULL) { 1.6274 + m_at = SCTP_BUF_NEXT(m_at); 1.6275 + } 1.6276 + } 1.6277 + /* pre-calulate the size and update pkt header and chunk header */ 1.6278 + p_len = 0; 1.6279 + for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) { 1.6280 + p_len += SCTP_BUF_LEN(m_tmp); 1.6281 + if (SCTP_BUF_NEXT(m_tmp) == NULL) { 1.6282 + /* m_tmp should now point to last one */ 1.6283 + break; 1.6284 + } 1.6285 + } 1.6286 + 1.6287 + /* Now we must build a cookie */ 1.6288 + m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature); 1.6289 + if (m_cookie == NULL) { 1.6290 + /* memory 
problem */ 1.6291 + sctp_m_freem(m); 1.6292 + return; 1.6293 + } 1.6294 + /* Now append the cookie to the end and update the space/size */ 1.6295 + SCTP_BUF_NEXT(m_tmp) = m_cookie; 1.6296 + 1.6297 + for (m_tmp = m_cookie; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) { 1.6298 + p_len += SCTP_BUF_LEN(m_tmp); 1.6299 + if (SCTP_BUF_NEXT(m_tmp) == NULL) { 1.6300 + /* m_tmp should now point to last one */ 1.6301 + mp_last = m_tmp; 1.6302 + break; 1.6303 + } 1.6304 + } 1.6305 + /* Place in the size, but we don't include 1.6306 + * the last pad (if any) in the INIT-ACK. 1.6307 + */ 1.6308 + initack->ch.chunk_length = htons(p_len); 1.6309 + 1.6310 + /* Time to sign the cookie, we don't sign over the cookie 1.6311 + * signature though thus we set trailer. 1.6312 + */ 1.6313 + (void)sctp_hmac_m(SCTP_HMAC, 1.6314 + (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)], 1.6315 + SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr), 1.6316 + (uint8_t *)signature, SCTP_SIGNATURE_SIZE); 1.6317 + /* 1.6318 + * We sifa 0 here to NOT set IP_DF if its IPv4, we ignore the return 1.6319 + * here since the timer will drive a retranmission. 1.6320 + */ 1.6321 + padval = p_len % 4; 1.6322 + if ((padval) && (mp_last)) { 1.6323 + /* see my previous comments on mp_last */ 1.6324 + if (sctp_add_pad_tombuf(mp_last, (4 - padval))) { 1.6325 + /* Houston we have a problem, no space */ 1.6326 + sctp_m_freem(m); 1.6327 + return; 1.6328 + } 1.6329 + } 1.6330 + if (stc.loopback_scope) { 1.6331 + over_addr = (union sctp_sockstore *)dst; 1.6332 + } else { 1.6333 + over_addr = NULL; 1.6334 + } 1.6335 + 1.6336 + (void)sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0, 1.6337 + 0, 0, 1.6338 + inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag, 1.6339 + port, over_addr, 1.6340 +#if defined(__FreeBSD__) 1.6341 + use_mflowid, mflowid, 1.6342 +#endif 1.6343 + SCTP_SO_NOT_LOCKED); 1.6344 + SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 1.6345 +} 1.6346 + 1.6347 + 1.6348 +static void 1.6349 +sctp_prune_prsctp(struct sctp_tcb *stcb, 1.6350 + struct sctp_association *asoc, 1.6351 + struct sctp_sndrcvinfo *srcv, 1.6352 + int dataout) 1.6353 +{ 1.6354 + int freed_spc = 0; 1.6355 + struct sctp_tmit_chunk *chk, *nchk; 1.6356 + 1.6357 + SCTP_TCB_LOCK_ASSERT(stcb); 1.6358 + if ((asoc->peer_supports_prsctp) && 1.6359 + (asoc->sent_queue_cnt_removeable > 0)) { 1.6360 + TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 1.6361 + /* 1.6362 + * Look for chunks marked with the PR_SCTP flag AND 1.6363 + * the buffer space flag. If the one being sent is 1.6364 + * equal or greater priority then purge the old one 1.6365 + * and free some space. 1.6366 + */ 1.6367 + if (PR_SCTP_BUF_ENABLED(chk->flags)) { 1.6368 + /* 1.6369 + * This one is PR-SCTP AND buffer space 1.6370 + * limited type 1.6371 + */ 1.6372 + if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) { 1.6373 + /* 1.6374 + * Lower numbers equates to higher 1.6375 + * priority so if the one we are 1.6376 + * looking at has a larger or equal 1.6377 + * priority we want to drop the data 1.6378 + * and NOT retransmit it. 
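Note that for the buffer-limited policy, timetodrop.tv_sec stores the priority value itself rather than a time; see sctp_set_prsctp_policy().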
1.6379 + */ 1.6380 + if (chk->data) { 1.6381 + /* 1.6382 + * We release the book_size 1.6383 + * if the mbuf is here 1.6384 + */ 1.6385 + int ret_spc; 1.6386 + uint8_t sent; 1.6387 + 1.6388 + if (chk->sent > SCTP_DATAGRAM_UNSENT) 1.6389 + sent = 1; 1.6390 + else 1.6391 + sent = 0; 1.6392 + ret_spc = sctp_release_pr_sctp_chunk(stcb, chk, 1.6393 + sent, 1.6394 + SCTP_SO_LOCKED); 1.6395 + freed_spc += ret_spc; 1.6396 + if (freed_spc >= dataout) { 1.6397 + return; 1.6398 + } 1.6399 + } /* if chunk was present */ 1.6400 + } /* if of sufficent priority */ 1.6401 + } /* if chunk has enabled */ 1.6402 + } /* tailqforeach */ 1.6403 + 1.6404 + TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 1.6405 + /* Here we must move to the sent queue and mark */ 1.6406 + if (PR_SCTP_BUF_ENABLED(chk->flags)) { 1.6407 + if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) { 1.6408 + if (chk->data) { 1.6409 + /* 1.6410 + * We release the book_size 1.6411 + * if the mbuf is here 1.6412 + */ 1.6413 + int ret_spc; 1.6414 + 1.6415 + ret_spc = sctp_release_pr_sctp_chunk(stcb, chk, 1.6416 + 0, SCTP_SO_LOCKED); 1.6417 + 1.6418 + freed_spc += ret_spc; 1.6419 + if (freed_spc >= dataout) { 1.6420 + return; 1.6421 + } 1.6422 + } /* end if chk->data */ 1.6423 + } /* end if right class */ 1.6424 + } /* end if chk pr-sctp */ 1.6425 + } /* tailqforeachsafe (chk) */ 1.6426 + } /* if enabled in asoc */ 1.6427 +} 1.6428 + 1.6429 +int 1.6430 +sctp_get_frag_point(struct sctp_tcb *stcb, 1.6431 + struct sctp_association *asoc) 1.6432 +{ 1.6433 + int siz, ovh; 1.6434 + 1.6435 + /* 1.6436 + * For endpoints that have both v6 and v4 addresses we must reserve 1.6437 + * room for the ipv6 header, for those that are only dealing with V4 1.6438 + * we use a larger frag point. 1.6439 + */ 1.6440 + if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1.6441 + ovh = SCTP_MED_OVERHEAD; 1.6442 + } else { 1.6443 + ovh = SCTP_MED_V4_OVERHEAD; 1.6444 + } 1.6445 + 1.6446 + if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu) 1.6447 + siz = asoc->smallest_mtu - ovh; 1.6448 + else 1.6449 + siz = (stcb->asoc.sctp_frag_point - ovh); 1.6450 + /* 1.6451 + * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) { 1.6452 + */ 1.6453 + /* A data chunk MUST fit in a cluster */ 1.6454 + /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */ 1.6455 + /* } */ 1.6456 + 1.6457 + /* adjust for an AUTH chunk if DATA requires auth */ 1.6458 + if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) 1.6459 + siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 1.6460 + 1.6461 + if (siz % 4) { 1.6462 + /* make it an even word boundary please */ 1.6463 + siz -= (siz % 4); 1.6464 + } 1.6465 + return (siz); 1.6466 +} 1.6467 + 1.6468 +static void 1.6469 +sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp) 1.6470 +{ 1.6471 + /* 1.6472 + * We assume that the user wants PR_SCTP_TTL if the user 1.6473 + * provides a positive lifetime but does not specify any 1.6474 + * PR_SCTP policy. 1.6475 + */ 1.6476 + if (PR_SCTP_ENABLED(sp->sinfo_flags)) { 1.6477 + sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags); 1.6478 + } else if (sp->timetolive > 0) { 1.6479 + sp->sinfo_flags |= SCTP_PR_SCTP_TTL; 1.6480 + sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags); 1.6481 + } else { 1.6482 + return; 1.6483 + } 1.6484 + switch (PR_SCTP_POLICY(sp->sinfo_flags)) { 1.6485 + case CHUNK_FLAGS_PR_SCTP_BUF: 1.6486 + /* 1.6487 + * Time to live is a priority stored in tv_sec when 1.6488 + * doing the buffer drop thing. 
1.6489 + */ 1.6490 + sp->ts.tv_sec = sp->timetolive; 1.6491 + sp->ts.tv_usec = 0; 1.6492 + break; 1.6493 + case CHUNK_FLAGS_PR_SCTP_TTL: 1.6494 + { 1.6495 + struct timeval tv; 1.6496 + (void)SCTP_GETTIME_TIMEVAL(&sp->ts); 1.6497 + tv.tv_sec = sp->timetolive / 1000; 1.6498 + tv.tv_usec = (sp->timetolive * 1000) % 1000000; 1.6499 + /* TODO sctp_constants.h needs alternative time macros when 1.6500 + * _KERNEL is undefined. 1.6501 + */ 1.6502 +#ifndef __FreeBSD__ 1.6503 + timeradd(&sp->ts, &tv, &sp->ts); 1.6504 +#else 1.6505 + timevaladd(&sp->ts, &tv); 1.6506 +#endif 1.6507 + } 1.6508 + break; 1.6509 + case CHUNK_FLAGS_PR_SCTP_RTX: 1.6510 + /* 1.6511 + * Time to live is a the number or retransmissions 1.6512 + * stored in tv_sec. 1.6513 + */ 1.6514 + sp->ts.tv_sec = sp->timetolive; 1.6515 + sp->ts.tv_usec = 0; 1.6516 + break; 1.6517 + default: 1.6518 + SCTPDBG(SCTP_DEBUG_USRREQ1, 1.6519 + "Unknown PR_SCTP policy %u.\n", 1.6520 + PR_SCTP_POLICY(sp->sinfo_flags)); 1.6521 + break; 1.6522 + } 1.6523 +} 1.6524 + 1.6525 +static int 1.6526 +sctp_msg_append(struct sctp_tcb *stcb, 1.6527 + struct sctp_nets *net, 1.6528 + struct mbuf *m, 1.6529 + struct sctp_sndrcvinfo *srcv, int hold_stcb_lock) 1.6530 +{ 1.6531 + int error = 0; 1.6532 + struct mbuf *at; 1.6533 + struct sctp_stream_queue_pending *sp = NULL; 1.6534 + struct sctp_stream_out *strm; 1.6535 + 1.6536 + /* Given an mbuf chain, put it 1.6537 + * into the association send queue and 1.6538 + * place it on the wheel 1.6539 + */ 1.6540 + if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) { 1.6541 + /* Invalid stream number */ 1.6542 + SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.6543 + error = EINVAL; 1.6544 + goto out_now; 1.6545 + } 1.6546 + if ((stcb->asoc.stream_locked) && 1.6547 + (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) { 1.6548 + SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.6549 + error = EINVAL; 1.6550 + goto out_now; 1.6551 + } 1.6552 + strm = &stcb->asoc.strmout[srcv->sinfo_stream]; 1.6553 + /* Now can we send this? 
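Once a SHUTDOWN sequence has been started or is pending, no new user data is accepted on the association.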
*/ 1.6554 + if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) || 1.6555 + (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 1.6556 + (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) || 1.6557 + (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) { 1.6558 + /* got data while shutting down */ 1.6559 + SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); 1.6560 + error = ECONNRESET; 1.6561 + goto out_now; 1.6562 + } 1.6563 + sctp_alloc_a_strmoq(stcb, sp); 1.6564 + if (sp == NULL) { 1.6565 + SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.6566 + error = ENOMEM; 1.6567 + goto out_now; 1.6568 + } 1.6569 + sp->sinfo_flags = srcv->sinfo_flags; 1.6570 + sp->timetolive = srcv->sinfo_timetolive; 1.6571 + sp->ppid = srcv->sinfo_ppid; 1.6572 + sp->context = srcv->sinfo_context; 1.6573 + if (sp->sinfo_flags & SCTP_ADDR_OVER) { 1.6574 + sp->net = net; 1.6575 + atomic_add_int(&sp->net->ref_count, 1); 1.6576 + } else { 1.6577 + sp->net = NULL; 1.6578 + } 1.6579 + (void)SCTP_GETTIME_TIMEVAL(&sp->ts); 1.6580 + sp->stream = srcv->sinfo_stream; 1.6581 + sp->msg_is_complete = 1; 1.6582 + sp->sender_all_done = 1; 1.6583 + sp->some_taken = 0; 1.6584 + sp->data = m; 1.6585 + sp->tail_mbuf = NULL; 1.6586 + sctp_set_prsctp_policy(sp); 1.6587 + /* We could in theory (for sendall) sifa the length 1.6588 + * in, but we would still have to hunt through the 1.6589 + * chain since we need to setup the tail_mbuf 1.6590 + */ 1.6591 + sp->length = 0; 1.6592 + for (at = m; at; at = SCTP_BUF_NEXT(at)) { 1.6593 + if (SCTP_BUF_NEXT(at) == NULL) 1.6594 + sp->tail_mbuf = at; 1.6595 + sp->length += SCTP_BUF_LEN(at); 1.6596 + } 1.6597 + if (srcv->sinfo_keynumber_valid) { 1.6598 + sp->auth_keyid = srcv->sinfo_keynumber; 1.6599 + } else { 1.6600 + sp->auth_keyid = stcb->asoc.authinfo.active_keyid; 1.6601 + } 1.6602 + if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) { 1.6603 + sctp_auth_key_acquire(stcb, sp->auth_keyid); 1.6604 + sp->holds_key_ref = 1; 1.6605 + } 1.6606 + if (hold_stcb_lock == 0) { 1.6607 + SCTP_TCB_SEND_LOCK(stcb); 1.6608 + } 1.6609 + sctp_snd_sb_alloc(stcb, sp->length); 1.6610 + atomic_add_int(&stcb->asoc.stream_queue_cnt, 1); 1.6611 + TAILQ_INSERT_TAIL(&strm->outqueue, sp, next); 1.6612 + stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1); 1.6613 + m = NULL; 1.6614 + if (hold_stcb_lock == 0) { 1.6615 + SCTP_TCB_SEND_UNLOCK(stcb); 1.6616 + } 1.6617 +out_now: 1.6618 + if (m) { 1.6619 + sctp_m_freem(m); 1.6620 + } 1.6621 + return (error); 1.6622 +} 1.6623 + 1.6624 + 1.6625 +static struct mbuf * 1.6626 +sctp_copy_mbufchain(struct mbuf *clonechain, 1.6627 + struct mbuf *outchain, 1.6628 + struct mbuf **endofchain, 1.6629 + int can_take_mbuf, 1.6630 + int sizeofcpy, 1.6631 + uint8_t copy_by_ref) 1.6632 +{ 1.6633 + struct mbuf *m; 1.6634 + struct mbuf *appendchain; 1.6635 + caddr_t cp; 1.6636 + int len; 1.6637 + 1.6638 + if (endofchain == NULL) { 1.6639 + /* error */ 1.6640 + error_out: 1.6641 + if (outchain) 1.6642 + sctp_m_freem(outchain); 1.6643 + return (NULL); 1.6644 + } 1.6645 + if (can_take_mbuf) { 1.6646 + appendchain = clonechain; 1.6647 + } else { 1.6648 + if (!copy_by_ref && 1.6649 +#if defined(__Panda__) 1.6650 + 0 1.6651 +#else 1.6652 + (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN))) 1.6653 +#endif 1.6654 + ) { 1.6655 + /* Its not in a cluster */ 1.6656 + if (*endofchain == NULL) { 1.6657 + /* lets get a mbuf cluster */ 1.6658 + if 
(outchain == NULL) { 1.6659 + /* This is the general case */ 1.6660 + new_mbuf: 1.6661 + outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER); 1.6662 + if (outchain == NULL) { 1.6663 + goto error_out; 1.6664 + } 1.6665 + SCTP_BUF_LEN(outchain) = 0; 1.6666 + *endofchain = outchain; 1.6667 + /* get the prepend space */ 1.6668 + SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV+4)); 1.6669 + } else { 1.6670 + /* We really should not get a NULL in endofchain */ 1.6671 + /* find end */ 1.6672 + m = outchain; 1.6673 + while (m) { 1.6674 + if (SCTP_BUF_NEXT(m) == NULL) { 1.6675 + *endofchain = m; 1.6676 + break; 1.6677 + } 1.6678 + m = SCTP_BUF_NEXT(m); 1.6679 + } 1.6680 + /* sanity */ 1.6681 + if (*endofchain == NULL) { 1.6682 + /* huh, TSNH XXX maybe we should panic */ 1.6683 + sctp_m_freem(outchain); 1.6684 + goto new_mbuf; 1.6685 + } 1.6686 + } 1.6687 + /* get the new end of length */ 1.6688 + len = M_TRAILINGSPACE(*endofchain); 1.6689 + } else { 1.6690 + /* how much is left at the end? */ 1.6691 + len = M_TRAILINGSPACE(*endofchain); 1.6692 + } 1.6693 + /* Find the end of the data, for appending */ 1.6694 + cp = (mtod((*endofchain), caddr_t) + SCTP_BUF_LEN((*endofchain))); 1.6695 + 1.6696 + /* Now lets copy it out */ 1.6697 + if (len >= sizeofcpy) { 1.6698 + /* It all fits, copy it in */ 1.6699 + m_copydata(clonechain, 0, sizeofcpy, cp); 1.6700 + SCTP_BUF_LEN((*endofchain)) += sizeofcpy; 1.6701 + } else { 1.6702 + /* fill up the end of the chain */ 1.6703 + if (len > 0) { 1.6704 + m_copydata(clonechain, 0, len, cp); 1.6705 + SCTP_BUF_LEN((*endofchain)) += len; 1.6706 + /* now we need another one */ 1.6707 + sizeofcpy -= len; 1.6708 + } 1.6709 + m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER); 1.6710 + if (m == NULL) { 1.6711 + /* We failed */ 1.6712 + goto error_out; 1.6713 + } 1.6714 + SCTP_BUF_NEXT((*endofchain)) = m; 1.6715 + *endofchain = m; 1.6716 + cp = mtod((*endofchain), caddr_t); 1.6717 + m_copydata(clonechain, len, sizeofcpy, cp); 1.6718 + SCTP_BUF_LEN((*endofchain)) += sizeofcpy; 1.6719 + } 1.6720 + return (outchain); 1.6721 + } else { 1.6722 + /* copy the old fashion way */ 1.6723 + appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT); 1.6724 +#ifdef SCTP_MBUF_LOGGING 1.6725 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 1.6726 + struct mbuf *mat; 1.6727 + 1.6728 + for (mat = appendchain; mat; mat = SCTP_BUF_NEXT(mat)) { 1.6729 + if (SCTP_BUF_IS_EXTENDED(mat)) { 1.6730 + sctp_log_mb(mat, SCTP_MBUF_ICOPY); 1.6731 + } 1.6732 + } 1.6733 + } 1.6734 +#endif 1.6735 + } 1.6736 + } 1.6737 + if (appendchain == NULL) { 1.6738 + /* error */ 1.6739 + if (outchain) 1.6740 + sctp_m_freem(outchain); 1.6741 + return (NULL); 1.6742 + } 1.6743 + if (outchain) { 1.6744 + /* tack on to the end */ 1.6745 + if (*endofchain != NULL) { 1.6746 + SCTP_BUF_NEXT(((*endofchain))) = appendchain; 1.6747 + } else { 1.6748 + m = outchain; 1.6749 + while (m) { 1.6750 + if (SCTP_BUF_NEXT(m) == NULL) { 1.6751 + SCTP_BUF_NEXT(m) = appendchain; 1.6752 + break; 1.6753 + } 1.6754 + m = SCTP_BUF_NEXT(m); 1.6755 + } 1.6756 + } 1.6757 + /* 1.6758 + * save off the end and update the end-chain 1.6759 + * postion 1.6760 + */ 1.6761 + m = appendchain; 1.6762 + while (m) { 1.6763 + if (SCTP_BUF_NEXT(m) == NULL) { 1.6764 + *endofchain = m; 1.6765 + break; 1.6766 + } 1.6767 + m = SCTP_BUF_NEXT(m); 1.6768 + } 1.6769 + return (outchain); 1.6770 + } else { 1.6771 + /* save off the end and update the end-chain postion */ 1.6772 + m = appendchain; 1.6773 + while (m) { 
1.6774 + if (SCTP_BUF_NEXT(m) == NULL) { 1.6775 + *endofchain = m; 1.6776 + break; 1.6777 + } 1.6778 + m = SCTP_BUF_NEXT(m); 1.6779 + } 1.6780 + return (appendchain); 1.6781 + } 1.6782 +} 1.6783 + 1.6784 +static int 1.6785 +sctp_med_chunk_output(struct sctp_inpcb *inp, 1.6786 + struct sctp_tcb *stcb, 1.6787 + struct sctp_association *asoc, 1.6788 + int *num_out, 1.6789 + int *reason_code, 1.6790 + int control_only, int from_where, 1.6791 + struct timeval *now, int *now_filled, int frag_point, int so_locked 1.6792 +#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 1.6793 + SCTP_UNUSED 1.6794 +#endif 1.6795 + ); 1.6796 + 1.6797 +static void 1.6798 +sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr, 1.6799 + uint32_t val SCTP_UNUSED) 1.6800 +{ 1.6801 + struct sctp_copy_all *ca; 1.6802 + struct mbuf *m; 1.6803 + int ret = 0; 1.6804 + int added_control = 0; 1.6805 + int un_sent, do_chunk_output = 1; 1.6806 + struct sctp_association *asoc; 1.6807 + struct sctp_nets *net; 1.6808 + 1.6809 + ca = (struct sctp_copy_all *)ptr; 1.6810 + if (ca->m == NULL) { 1.6811 + return; 1.6812 + } 1.6813 + if (ca->inp != inp) { 1.6814 + /* TSNH */ 1.6815 + return; 1.6816 + } 1.6817 + if (ca->sndlen > 0) { 1.6818 + m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT); 1.6819 + if (m == NULL) { 1.6820 + /* can't copy so we are done */ 1.6821 + ca->cnt_failed++; 1.6822 + return; 1.6823 + } 1.6824 +#ifdef SCTP_MBUF_LOGGING 1.6825 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 1.6826 + struct mbuf *mat; 1.6827 + 1.6828 + for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) { 1.6829 + if (SCTP_BUF_IS_EXTENDED(mat)) { 1.6830 + sctp_log_mb(mat, SCTP_MBUF_ICOPY); 1.6831 + } 1.6832 + } 1.6833 + } 1.6834 +#endif 1.6835 + } else { 1.6836 + m = NULL; 1.6837 + } 1.6838 + SCTP_TCB_LOCK_ASSERT(stcb); 1.6839 + if (stcb->asoc.alternate) { 1.6840 + net = stcb->asoc.alternate; 1.6841 + } else { 1.6842 + net = stcb->asoc.primary_destination; 1.6843 + } 1.6844 + if (ca->sndrcv.sinfo_flags & SCTP_ABORT) { 1.6845 + /* Abort this assoc with m as the user defined reason */ 1.6846 + if (m != NULL) { 1.6847 + SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT); 1.6848 + } else { 1.6849 + m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 1.6850 + 0, M_NOWAIT, 1, MT_DATA); 1.6851 + SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr); 1.6852 + } 1.6853 + if (m != NULL) { 1.6854 + struct sctp_paramhdr *ph; 1.6855 + 1.6856 + ph = mtod(m, struct sctp_paramhdr *); 1.6857 + ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 1.6858 + ph->param_length = htons(sizeof(struct sctp_paramhdr) + ca->sndlen); 1.6859 + } 1.6860 + /* We add one here to keep the assoc from 1.6861 + * dis-appearing on us. 1.6862 + */ 1.6863 + atomic_add_int(&stcb->asoc.refcnt, 1); 1.6864 + sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED); 1.6865 + /* sctp_abort_an_association calls sctp_free_asoc() 1.6866 + * free association will NOT free it since we 1.6867 + * incremented the refcnt .. we do this to prevent 1.6868 + * it being freed and things getting tricky since 1.6869 + * we could end up (from free_asoc) calling inpcb_free 1.6870 + * which would get a recursive lock call to the 1.6871 + * iterator lock.. But as a consequence of that the 1.6872 + * stcb will return to us un-locked.. since free_asoc 1.6873 + * returns with either no TCB or the TCB unlocked, we 1.6874 + * must relock.. 
to unlock in the iterator timer :-0 1.6875 + */ 1.6876 + SCTP_TCB_LOCK(stcb); 1.6877 + atomic_add_int(&stcb->asoc.refcnt, -1); 1.6878 + goto no_chunk_output; 1.6879 + } else { 1.6880 + if (m) { 1.6881 + ret = sctp_msg_append(stcb, net, m, 1.6882 + &ca->sndrcv, 1); 1.6883 + } 1.6884 + asoc = &stcb->asoc; 1.6885 + if (ca->sndrcv.sinfo_flags & SCTP_EOF) { 1.6886 + /* shutdown this assoc */ 1.6887 + int cnt; 1.6888 + cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED); 1.6889 + 1.6890 + if (TAILQ_EMPTY(&asoc->send_queue) && 1.6891 + TAILQ_EMPTY(&asoc->sent_queue) && 1.6892 + (cnt == 0)) { 1.6893 + if (asoc->locked_on_sending) { 1.6894 + goto abort_anyway; 1.6895 + } 1.6896 + /* there is nothing queued to send, so I'm done... */ 1.6897 + if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 1.6898 + (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 1.6899 + (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 1.6900 + /* only send SHUTDOWN the first time through */ 1.6901 + if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1.6902 + SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1.6903 + } 1.6904 + SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 1.6905 + SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 1.6906 + sctp_stop_timers_for_shutdown(stcb); 1.6907 + sctp_send_shutdown(stcb, net); 1.6908 + sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, 1.6909 + net); 1.6910 + sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 1.6911 + asoc->primary_destination); 1.6912 + added_control = 1; 1.6913 + do_chunk_output = 0; 1.6914 + } 1.6915 + } else { 1.6916 + /* 1.6917 + * we still got (or just got) data to send, so set 1.6918 + * SHUTDOWN_PENDING 1.6919 + */ 1.6920 + /* 1.6921 + * XXX sockets draft says that SCTP_EOF should be 1.6922 + * sent with no data. 
currently, we will allow user 1.6923 + * data to be sent first and move to 1.6924 + * SHUTDOWN-PENDING 1.6925 + */ 1.6926 + if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 1.6927 + (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 1.6928 + (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 1.6929 + if (asoc->locked_on_sending) { 1.6930 + /* Locked to send out the data */ 1.6931 + struct sctp_stream_queue_pending *sp; 1.6932 + sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 1.6933 + if (sp) { 1.6934 + if ((sp->length == 0) && (sp->msg_is_complete == 0)) 1.6935 + asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 1.6936 + } 1.6937 + } 1.6938 + asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 1.6939 + if (TAILQ_EMPTY(&asoc->send_queue) && 1.6940 + TAILQ_EMPTY(&asoc->sent_queue) && 1.6941 + (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 1.6942 + abort_anyway: 1.6943 + atomic_add_int(&stcb->asoc.refcnt, 1); 1.6944 + sctp_abort_an_association(stcb->sctp_ep, stcb, 1.6945 + NULL, SCTP_SO_NOT_LOCKED); 1.6946 + atomic_add_int(&stcb->asoc.refcnt, -1); 1.6947 + goto no_chunk_output; 1.6948 + } 1.6949 + sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 1.6950 + asoc->primary_destination); 1.6951 + } 1.6952 + } 1.6953 + 1.6954 + } 1.6955 + } 1.6956 + un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 1.6957 + (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk))); 1.6958 + 1.6959 + if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 1.6960 + (stcb->asoc.total_flight > 0) && 1.6961 + (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) { 1.6962 + do_chunk_output = 0; 1.6963 + } 1.6964 + if (do_chunk_output) 1.6965 + sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED); 1.6966 + else if (added_control) { 1.6967 + int num_out = 0, reason = 0, now_filled = 0; 1.6968 + struct timeval now; 1.6969 + int frag_point; 1.6970 + frag_point = sctp_get_frag_point(stcb, &stcb->asoc); 1.6971 + (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out, 1.6972 + &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED); 1.6973 + } 1.6974 + no_chunk_output: 1.6975 + if (ret) { 1.6976 + ca->cnt_failed++; 1.6977 + } else { 1.6978 + ca->cnt_sent++; 1.6979 + } 1.6980 +} 1.6981 + 1.6982 +static void 1.6983 +sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED) 1.6984 +{ 1.6985 + struct sctp_copy_all *ca; 1.6986 + 1.6987 + ca = (struct sctp_copy_all *)ptr; 1.6988 + /* 1.6989 + * Do a notify here? Kacheong suggests that the notify be done at 1.6990 + * the send time.. so you would push up a notification if any send 1.6991 + * failed. 
Don't know if this is feasable since the only failures we 1.6992 + * have is "memory" related and if you cannot get an mbuf to send 1.6993 + * the data you surely can't get an mbuf to send up to notify the 1.6994 + * user you can't send the data :-> 1.6995 + */ 1.6996 + 1.6997 + /* now free everything */ 1.6998 + sctp_m_freem(ca->m); 1.6999 + SCTP_FREE(ca, SCTP_M_COPYAL); 1.7000 +} 1.7001 + 1.7002 + 1.7003 +#define MC_ALIGN(m, len) do { \ 1.7004 + SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1)); \ 1.7005 +} while (0) 1.7006 + 1.7007 + 1.7008 + 1.7009 +static struct mbuf * 1.7010 +sctp_copy_out_all(struct uio *uio, int len) 1.7011 +{ 1.7012 + struct mbuf *ret, *at; 1.7013 + int left, willcpy, cancpy, error; 1.7014 + 1.7015 + ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA); 1.7016 + if (ret == NULL) { 1.7017 + /* TSNH */ 1.7018 + return (NULL); 1.7019 + } 1.7020 + left = len; 1.7021 + SCTP_BUF_LEN(ret) = 0; 1.7022 + /* save space for the data chunk header */ 1.7023 + cancpy = M_TRAILINGSPACE(ret); 1.7024 + willcpy = min(cancpy, left); 1.7025 + at = ret; 1.7026 + while (left > 0) { 1.7027 + /* Align data to the end */ 1.7028 + error = uiomove(mtod(at, caddr_t), willcpy, uio); 1.7029 + if (error) { 1.7030 + err_out_now: 1.7031 + sctp_m_freem(at); 1.7032 + return (NULL); 1.7033 + } 1.7034 + SCTP_BUF_LEN(at) = willcpy; 1.7035 + SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0; 1.7036 + left -= willcpy; 1.7037 + if (left > 0) { 1.7038 + SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 1, MT_DATA); 1.7039 + if (SCTP_BUF_NEXT(at) == NULL) { 1.7040 + goto err_out_now; 1.7041 + } 1.7042 + at = SCTP_BUF_NEXT(at); 1.7043 + SCTP_BUF_LEN(at) = 0; 1.7044 + cancpy = M_TRAILINGSPACE(at); 1.7045 + willcpy = min(cancpy, left); 1.7046 + } 1.7047 + } 1.7048 + return (ret); 1.7049 +} 1.7050 + 1.7051 +static int 1.7052 +sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m, 1.7053 + struct sctp_sndrcvinfo *srcv) 1.7054 +{ 1.7055 + int ret; 1.7056 + struct sctp_copy_all *ca; 1.7057 + 1.7058 + SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all), 1.7059 + SCTP_M_COPYAL); 1.7060 + if (ca == NULL) { 1.7061 + sctp_m_freem(m); 1.7062 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.7063 + return (ENOMEM); 1.7064 + } 1.7065 + memset(ca, 0, sizeof(struct sctp_copy_all)); 1.7066 + 1.7067 + ca->inp = inp; 1.7068 + if (srcv) { 1.7069 + memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo)); 1.7070 + } 1.7071 + /* 1.7072 + * take off the sendall flag, it would be bad if we failed to do 1.7073 + * this :-0 1.7074 + */ 1.7075 + ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL; 1.7076 + /* get length and mbuf chain */ 1.7077 + if (uio) { 1.7078 +#if defined(__APPLE__) 1.7079 +#if defined(APPLE_LEOPARD) 1.7080 + ca->sndlen = uio->uio_resid; 1.7081 +#else 1.7082 + ca->sndlen = uio_resid(uio); 1.7083 +#endif 1.7084 +#else 1.7085 + ca->sndlen = uio->uio_resid; 1.7086 +#endif 1.7087 +#if defined(__APPLE__) 1.7088 + SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 0); 1.7089 +#endif 1.7090 + ca->m = sctp_copy_out_all(uio, ca->sndlen); 1.7091 +#if defined(__APPLE__) 1.7092 + SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 0); 1.7093 +#endif 1.7094 + if (ca->m == NULL) { 1.7095 + SCTP_FREE(ca, SCTP_M_COPYAL); 1.7096 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.7097 + return (ENOMEM); 1.7098 + } 1.7099 + } else { 1.7100 + /* Gather the length of the send */ 1.7101 + struct mbuf *mat; 1.7102 + 1.7103 + ca->sndlen = 0; 1.7104 + for (mat = m; 
mat; mat = SCTP_BUF_NEXT(mat)) { 1.7105 + ca->sndlen += SCTP_BUF_LEN(mat); 1.7106 + } 1.7107 + } 1.7108 + ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL, 1.7109 + SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES, 1.7110 + SCTP_ASOC_ANY_STATE, 1.7111 + (void *)ca, 0, 1.7112 + sctp_sendall_completes, inp, 1); 1.7113 + if (ret) { 1.7114 + SCTP_PRINTF("Failed to initiate iterator for sendall\n"); 1.7115 + SCTP_FREE(ca, SCTP_M_COPYAL); 1.7116 + SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT); 1.7117 + return (EFAULT); 1.7118 + } 1.7119 + return (0); 1.7120 +} 1.7121 + 1.7122 + 1.7123 +void 1.7124 +sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc) 1.7125 +{ 1.7126 + struct sctp_tmit_chunk *chk, *nchk; 1.7127 + 1.7128 + TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) { 1.7129 + if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 1.7130 + TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 1.7131 + if (chk->data) { 1.7132 + sctp_m_freem(chk->data); 1.7133 + chk->data = NULL; 1.7134 + } 1.7135 + asoc->ctrl_queue_cnt--; 1.7136 + sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1.7137 + } 1.7138 + } 1.7139 +} 1.7140 + 1.7141 +void 1.7142 +sctp_toss_old_asconf(struct sctp_tcb *stcb) 1.7143 +{ 1.7144 + struct sctp_association *asoc; 1.7145 + struct sctp_tmit_chunk *chk, *nchk; 1.7146 + struct sctp_asconf_chunk *acp; 1.7147 + 1.7148 + asoc = &stcb->asoc; 1.7149 + TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) { 1.7150 + /* find SCTP_ASCONF chunk in queue */ 1.7151 + if (chk->rec.chunk_id.id == SCTP_ASCONF) { 1.7152 + if (chk->data) { 1.7153 + acp = mtod(chk->data, struct sctp_asconf_chunk *); 1.7154 + if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) { 1.7155 + /* Not Acked yet */ 1.7156 + break; 1.7157 + } 1.7158 + } 1.7159 + TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next); 1.7160 + if (chk->data) { 1.7161 + sctp_m_freem(chk->data); 1.7162 + chk->data = NULL; 1.7163 + } 1.7164 + asoc->ctrl_queue_cnt--; 1.7165 + sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1.7166 + } 1.7167 + } 1.7168 +} 1.7169 + 1.7170 + 1.7171 +static void 1.7172 +sctp_clean_up_datalist(struct sctp_tcb *stcb, 1.7173 + struct sctp_association *asoc, 1.7174 + struct sctp_tmit_chunk **data_list, 1.7175 + int bundle_at, 1.7176 + struct sctp_nets *net) 1.7177 +{ 1.7178 + int i; 1.7179 + struct sctp_tmit_chunk *tp1; 1.7180 + 1.7181 + for (i = 0; i < bundle_at; i++) { 1.7182 + /* off of the send queue */ 1.7183 + TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next); 1.7184 + asoc->send_queue_cnt--; 1.7185 + if (i > 0) { 1.7186 + /* 1.7187 + * Any chunk NOT 0 you zap the time chunk 0 gets 1.7188 + * zapped or set based on if a RTO measurment is 1.7189 + * needed. 
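Clearing do_rtt on every chunk after the first means a bundle contributes at most one RTT sample, so bundling cannot skew the RTO estimate.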
1.7190 + */ 1.7191 + data_list[i]->do_rtt = 0; 1.7192 + } 1.7193 + /* record time */ 1.7194 + data_list[i]->sent_rcv_time = net->last_sent_time; 1.7195 + data_list[i]->rec.data.cwnd_at_send = net->cwnd; 1.7196 + data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq; 1.7197 + if (data_list[i]->whoTo == NULL) { 1.7198 + data_list[i]->whoTo = net; 1.7199 + atomic_add_int(&net->ref_count, 1); 1.7200 + } 1.7201 + /* on to the sent queue */ 1.7202 + tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead); 1.7203 + if ((tp1) && SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) { 1.7204 + struct sctp_tmit_chunk *tpp; 1.7205 + 1.7206 + /* need to move back */ 1.7207 + back_up_more: 1.7208 + tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next); 1.7209 + if (tpp == NULL) { 1.7210 + TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next); 1.7211 + goto all_done; 1.7212 + } 1.7213 + tp1 = tpp; 1.7214 + if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, data_list[i]->rec.data.TSN_seq)) { 1.7215 + goto back_up_more; 1.7216 + } 1.7217 + TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next); 1.7218 + } else { 1.7219 + TAILQ_INSERT_TAIL(&asoc->sent_queue, 1.7220 + data_list[i], 1.7221 + sctp_next); 1.7222 + } 1.7223 + all_done: 1.7224 + /* This does not lower until the cum-ack passes it */ 1.7225 + asoc->sent_queue_cnt++; 1.7226 + if ((asoc->peers_rwnd <= 0) && 1.7227 + (asoc->total_flight == 0) && 1.7228 + (bundle_at == 1)) { 1.7229 + /* Mark the chunk as being a window probe */ 1.7230 + SCTP_STAT_INCR(sctps_windowprobed); 1.7231 + } 1.7232 +#ifdef SCTP_AUDITING_ENABLED 1.7233 + sctp_audit_log(0xC2, 3); 1.7234 +#endif 1.7235 + data_list[i]->sent = SCTP_DATAGRAM_SENT; 1.7236 + data_list[i]->snd_count = 1; 1.7237 + data_list[i]->rec.data.chunk_was_revoked = 0; 1.7238 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 1.7239 + sctp_misc_ints(SCTP_FLIGHT_LOG_UP, 1.7240 + data_list[i]->whoTo->flight_size, 1.7241 + data_list[i]->book_size, 1.7242 + (uintptr_t)data_list[i]->whoTo, 1.7243 + data_list[i]->rec.data.TSN_seq); 1.7244 + } 1.7245 + sctp_flight_size_increase(data_list[i]); 1.7246 + sctp_total_flight_increase(stcb, data_list[i]); 1.7247 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 1.7248 + sctp_log_rwnd(SCTP_DECREASE_PEER_RWND, 1.7249 + asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 1.7250 + } 1.7251 + asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd, 1.7252 + (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))); 1.7253 + if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 1.7254 + /* SWS sender side engages */ 1.7255 + asoc->peers_rwnd = 0; 1.7256 + } 1.7257 + } 1.7258 + if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) { 1.7259 + (*asoc->cc_functions.sctp_cwnd_update_packet_transmitted)(stcb, net); 1.7260 + } 1.7261 +} 1.7262 + 1.7263 +static void 1.7264 +sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked 1.7265 +#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 1.7266 + SCTP_UNUSED 1.7267 +#endif 1.7268 +) 1.7269 +{ 1.7270 + struct sctp_tmit_chunk *chk, *nchk; 1.7271 + 1.7272 + TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) { 1.7273 + if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || 1.7274 + (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */ 1.7275 + (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) || 1.7276 + (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) || 1.7277 
+ (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) || 1.7278 + (chk->rec.chunk_id.id == SCTP_SHUTDOWN) || 1.7279 + (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) || 1.7280 + (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) || 1.7281 + (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) || 1.7282 + (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) || 1.7283 + (chk->rec.chunk_id.id == SCTP_ECN_CWR) || 1.7284 + (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) { 1.7285 + /* Stray chunks must be cleaned up */ 1.7286 + clean_up_anyway: 1.7287 + TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 1.7288 + if (chk->data) { 1.7289 + sctp_m_freem(chk->data); 1.7290 + chk->data = NULL; 1.7291 + } 1.7292 + asoc->ctrl_queue_cnt--; 1.7293 + if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) 1.7294 + asoc->fwd_tsn_cnt--; 1.7295 + sctp_free_a_chunk(stcb, chk, so_locked); 1.7296 + } else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) { 1.7297 + /* special handling, we must look into the param */ 1.7298 + if (chk != asoc->str_reset) { 1.7299 + goto clean_up_anyway; 1.7300 + } 1.7301 + } 1.7302 + } 1.7303 +} 1.7304 + 1.7305 + 1.7306 +static int 1.7307 +sctp_can_we_split_this(struct sctp_tcb *stcb, 1.7308 + uint32_t length, 1.7309 + uint32_t goal_mtu, uint32_t frag_point, int eeor_on) 1.7310 +{ 1.7311 + /* Make a decision on if I should split a 1.7312 + * msg into multiple parts. This is only asked of 1.7313 + * incomplete messages. 1.7314 + */ 1.7315 + if (eeor_on) { 1.7316 + /* If we are doing EEOR we need to always send 1.7317 + * it if its the entire thing, since it might 1.7318 + * be all the guy is putting in the hopper. 1.7319 + */ 1.7320 + if (goal_mtu >= length) { 1.7321 + /*- 1.7322 + * If we have data outstanding, 1.7323 + * we get another chance when the sack 1.7324 + * arrives to transmit - wait for more data 1.7325 + */ 1.7326 + if (stcb->asoc.total_flight == 0) { 1.7327 + /* If nothing is in flight, we zero 1.7328 + * the packet counter. 1.7329 + */ 1.7330 + return (length); 1.7331 + } 1.7332 + return (0); 1.7333 + 1.7334 + } else { 1.7335 + /* You can fill the rest */ 1.7336 + return (goal_mtu); 1.7337 + } 1.7338 + } 1.7339 + /*- 1.7340 + * For those strange folk that make the send buffer 1.7341 + * smaller than our fragmentation point, we can't 1.7342 + * get a full msg in so we have to allow splitting. 1.7343 + */ 1.7344 + if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) { 1.7345 + return (length); 1.7346 + } 1.7347 + 1.7348 + if ((length <= goal_mtu) || 1.7349 + ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) { 1.7350 + /* Sub-optimial residual don't split in non-eeor mode. */ 1.7351 + return (0); 1.7352 + } 1.7353 + /* If we reach here length is larger 1.7354 + * than the goal_mtu. Do we wish to split 1.7355 + * it for the sake of packet putting together? 
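We split only when the room available (goal_mtu) is at least the smaller of the configured minimum split point and the fragmentation point; otherwise we move nothing and wait.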
1.7356 + */ 1.7357 + if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) { 1.7358 + /* Its ok to split it */ 1.7359 + return (min(goal_mtu, frag_point)); 1.7360 + } 1.7361 + /* Nope, can't split */ 1.7362 + return (0); 1.7363 + 1.7364 +} 1.7365 + 1.7366 +static uint32_t 1.7367 +sctp_move_to_outqueue(struct sctp_tcb *stcb, 1.7368 + struct sctp_stream_out *strq, 1.7369 + uint32_t goal_mtu, 1.7370 + uint32_t frag_point, 1.7371 + int *locked, 1.7372 + int *giveup, 1.7373 + int eeor_mode, 1.7374 + int *bail, 1.7375 + int so_locked 1.7376 +#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 1.7377 + SCTP_UNUSED 1.7378 +#endif 1.7379 + ) 1.7380 +{ 1.7381 + /* Move from the stream to the send_queue keeping track of the total */ 1.7382 + struct sctp_association *asoc; 1.7383 + struct sctp_stream_queue_pending *sp; 1.7384 + struct sctp_tmit_chunk *chk; 1.7385 + struct sctp_data_chunk *dchkh; 1.7386 + uint32_t to_move, length; 1.7387 + uint8_t rcv_flags = 0; 1.7388 + uint8_t some_taken; 1.7389 + uint8_t send_lock_up = 0; 1.7390 + 1.7391 + SCTP_TCB_LOCK_ASSERT(stcb); 1.7392 + asoc = &stcb->asoc; 1.7393 +one_more_time: 1.7394 + /*sa_ignore FREED_MEMORY*/ 1.7395 + sp = TAILQ_FIRST(&strq->outqueue); 1.7396 + if (sp == NULL) { 1.7397 + *locked = 0; 1.7398 + if (send_lock_up == 0) { 1.7399 + SCTP_TCB_SEND_LOCK(stcb); 1.7400 + send_lock_up = 1; 1.7401 + } 1.7402 + sp = TAILQ_FIRST(&strq->outqueue); 1.7403 + if (sp) { 1.7404 + goto one_more_time; 1.7405 + } 1.7406 + if (strq->last_msg_incomplete) { 1.7407 + SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n", 1.7408 + strq->stream_no, 1.7409 + strq->last_msg_incomplete); 1.7410 + strq->last_msg_incomplete = 0; 1.7411 + } 1.7412 + to_move = 0; 1.7413 + if (send_lock_up) { 1.7414 + SCTP_TCB_SEND_UNLOCK(stcb); 1.7415 + send_lock_up = 0; 1.7416 + } 1.7417 + goto out_of; 1.7418 + } 1.7419 + if ((sp->msg_is_complete) && (sp->length == 0)) { 1.7420 + if (sp->sender_all_done) { 1.7421 + /* We are doing differed cleanup. Last 1.7422 + * time through when we took all the data 1.7423 + * the sender_all_done was not set. 
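Now that the sender has marked itself done, this empty queue entry can be released and we move on to the next message in the stream.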
1.7424 + */ 1.7425 + if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) { 1.7426 + SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n"); 1.7427 + SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n", 1.7428 + sp->sender_all_done, 1.7429 + sp->length, 1.7430 + sp->msg_is_complete, 1.7431 + sp->put_last_out, 1.7432 + send_lock_up); 1.7433 + } 1.7434 + if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up == 0)) { 1.7435 + SCTP_TCB_SEND_LOCK(stcb); 1.7436 + send_lock_up = 1; 1.7437 + } 1.7438 + atomic_subtract_int(&asoc->stream_queue_cnt, 1); 1.7439 + TAILQ_REMOVE(&strq->outqueue, sp, next); 1.7440 + stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up); 1.7441 + if (sp->net) { 1.7442 + sctp_free_remote_addr(sp->net); 1.7443 + sp->net = NULL; 1.7444 + } 1.7445 + if (sp->data) { 1.7446 + sctp_m_freem(sp->data); 1.7447 + sp->data = NULL; 1.7448 + } 1.7449 + sctp_free_a_strmoq(stcb, sp, so_locked); 1.7450 + /* we can't be locked to it */ 1.7451 + *locked = 0; 1.7452 + stcb->asoc.locked_on_sending = NULL; 1.7453 + if (send_lock_up) { 1.7454 + SCTP_TCB_SEND_UNLOCK(stcb); 1.7455 + send_lock_up = 0; 1.7456 + } 1.7457 + /* back to get the next msg */ 1.7458 + goto one_more_time; 1.7459 + } else { 1.7460 + /* sender just finished this but 1.7461 + * still holds a reference 1.7462 + */ 1.7463 + *locked = 1; 1.7464 + *giveup = 1; 1.7465 + to_move = 0; 1.7466 + goto out_of; 1.7467 + } 1.7468 + } else { 1.7469 + /* is there some to get */ 1.7470 + if (sp->length == 0) { 1.7471 + /* no */ 1.7472 + *locked = 1; 1.7473 + *giveup = 1; 1.7474 + to_move = 0; 1.7475 + goto out_of; 1.7476 + } else if (sp->discard_rest) { 1.7477 + if (send_lock_up == 0) { 1.7478 + SCTP_TCB_SEND_LOCK(stcb); 1.7479 + send_lock_up = 1; 1.7480 + } 1.7481 + /* Whack down the size */ 1.7482 + atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length); 1.7483 + if ((stcb->sctp_socket != NULL) && \ 1.7484 + ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1.7485 + (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 1.7486 + atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length); 1.7487 + } 1.7488 + if (sp->data) { 1.7489 + sctp_m_freem(sp->data); 1.7490 + sp->data = NULL; 1.7491 + sp->tail_mbuf = NULL; 1.7492 + } 1.7493 + sp->length = 0; 1.7494 + sp->some_taken = 1; 1.7495 + *locked = 1; 1.7496 + *giveup = 1; 1.7497 + to_move = 0; 1.7498 + goto out_of; 1.7499 + } 1.7500 + } 1.7501 + some_taken = sp->some_taken; 1.7502 + if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1.7503 + sp->msg_is_complete = 1; 1.7504 + } 1.7505 +re_look: 1.7506 + length = sp->length; 1.7507 + if (sp->msg_is_complete) { 1.7508 + /* The message is complete */ 1.7509 + to_move = min(length, frag_point); 1.7510 + if (to_move == length) { 1.7511 + /* All of it fits in the MTU */ 1.7512 + if (sp->some_taken) { 1.7513 + rcv_flags |= SCTP_DATA_LAST_FRAG; 1.7514 + sp->put_last_out = 1; 1.7515 + } else { 1.7516 + rcv_flags |= SCTP_DATA_NOT_FRAG; 1.7517 + sp->put_last_out = 1; 1.7518 + } 1.7519 + } else { 1.7520 + /* Not all of it fits, we fragment */ 1.7521 + if (sp->some_taken == 0) { 1.7522 + rcv_flags |= SCTP_DATA_FIRST_FRAG; 1.7523 + } 1.7524 + sp->some_taken = 1; 1.7525 + } 1.7526 + } else { 1.7527 + to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode); 1.7528 + if (to_move) { 1.7529 + /*- 1.7530 + * We use a snapshot of length in case it 1.7531 + * is expanding during the compare. 
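If we end up taking the entire (still incomplete) message we must grab the send lock first, since the application may still be appending data to it.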
1.7532 + */ 1.7533 + uint32_t llen; 1.7534 + 1.7535 + llen = length; 1.7536 + if (to_move >= llen) { 1.7537 + to_move = llen; 1.7538 + if (send_lock_up == 0) { 1.7539 + /*- 1.7540 + * We are taking all of an incomplete msg 1.7541 + * thus we need a send lock. 1.7542 + */ 1.7543 + SCTP_TCB_SEND_LOCK(stcb); 1.7544 + send_lock_up = 1; 1.7545 + if (sp->msg_is_complete) { 1.7546 + /* the sender finished the msg */ 1.7547 + goto re_look; 1.7548 + } 1.7549 + } 1.7550 + } 1.7551 + if (sp->some_taken == 0) { 1.7552 + rcv_flags |= SCTP_DATA_FIRST_FRAG; 1.7553 + sp->some_taken = 1; 1.7554 + } 1.7555 + } else { 1.7556 + /* Nothing to take. */ 1.7557 + if (sp->some_taken) { 1.7558 + *locked = 1; 1.7559 + } 1.7560 + *giveup = 1; 1.7561 + to_move = 0; 1.7562 + goto out_of; 1.7563 + } 1.7564 + } 1.7565 + 1.7566 + /* If we reach here, we can copy out a chunk */ 1.7567 + sctp_alloc_a_chunk(stcb, chk); 1.7568 + if (chk == NULL) { 1.7569 + /* No chunk memory */ 1.7570 + *giveup = 1; 1.7571 + to_move = 0; 1.7572 + goto out_of; 1.7573 + } 1.7574 + /* Setup for unordered if needed by looking 1.7575 + * at the user sent info flags. 1.7576 + */ 1.7577 + if (sp->sinfo_flags & SCTP_UNORDERED) { 1.7578 + rcv_flags |= SCTP_DATA_UNORDERED; 1.7579 + } 1.7580 + if ((SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && ((sp->sinfo_flags & SCTP_EOF) == SCTP_EOF)) || 1.7581 + ((sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) == SCTP_SACK_IMMEDIATELY)) { 1.7582 + rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY; 1.7583 + } 1.7584 + /* clear out the chunk before setting up */ 1.7585 + memset(chk, 0, sizeof(*chk)); 1.7586 + chk->rec.data.rcv_flags = rcv_flags; 1.7587 + 1.7588 + if (to_move >= length) { 1.7589 + /* we think we can steal the whole thing */ 1.7590 + if ((sp->sender_all_done == 0) && (send_lock_up == 0)) { 1.7591 + SCTP_TCB_SEND_LOCK(stcb); 1.7592 + send_lock_up = 1; 1.7593 + } 1.7594 + if (to_move < sp->length) { 1.7595 + /* bail, it changed */ 1.7596 + goto dont_do_it; 1.7597 + } 1.7598 + chk->data = sp->data; 1.7599 + chk->last_mbuf = sp->tail_mbuf; 1.7600 + /* register the stealing */ 1.7601 + sp->data = sp->tail_mbuf = NULL; 1.7602 + } else { 1.7603 + struct mbuf *m; 1.7604 + dont_do_it: 1.7605 + chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT); 1.7606 + chk->last_mbuf = NULL; 1.7607 + if (chk->data == NULL) { 1.7608 + sp->some_taken = some_taken; 1.7609 + sctp_free_a_chunk(stcb, chk, so_locked); 1.7610 + *bail = 1; 1.7611 + to_move = 0; 1.7612 + goto out_of; 1.7613 + } 1.7614 +#ifdef SCTP_MBUF_LOGGING 1.7615 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 1.7616 + struct mbuf *mat; 1.7617 + 1.7618 + for (mat = chk->data; mat; mat = SCTP_BUF_NEXT(mat)) { 1.7619 + if (SCTP_BUF_IS_EXTENDED(mat)) { 1.7620 + sctp_log_mb(mat, SCTP_MBUF_ICOPY); 1.7621 + } 1.7622 + } 1.7623 + } 1.7624 +#endif 1.7625 + /* Pull off the data */ 1.7626 + m_adj(sp->data, to_move); 1.7627 + /* Now lets work our way down and compact it */ 1.7628 + m = sp->data; 1.7629 + while (m && (SCTP_BUF_LEN(m) == 0)) { 1.7630 + sp->data = SCTP_BUF_NEXT(m); 1.7631 + SCTP_BUF_NEXT(m) = NULL; 1.7632 + if (sp->tail_mbuf == m) { 1.7633 + /*- 1.7634 + * Freeing tail? TSNH since 1.7635 + * we supposedly were taking less 1.7636 + * than the sp->length. 1.7637 + */ 1.7638 +#ifdef INVARIANTS 1.7639 + panic("Huh, freing tail? - TSNH"); 1.7640 +#else 1.7641 + SCTP_PRINTF("Huh, freeing tail? 
- TSNH\n"); 1.7642 + sp->tail_mbuf = sp->data = NULL; 1.7643 + sp->length = 0; 1.7644 +#endif 1.7645 + 1.7646 + } 1.7647 + sctp_m_free(m); 1.7648 + m = sp->data; 1.7649 + } 1.7650 + } 1.7651 + if (SCTP_BUF_IS_EXTENDED(chk->data)) { 1.7652 + chk->copy_by_ref = 1; 1.7653 + } else { 1.7654 + chk->copy_by_ref = 0; 1.7655 + } 1.7656 + /* get last_mbuf and counts of mb useage 1.7657 + * This is ugly but hopefully its only one mbuf. 1.7658 + */ 1.7659 + if (chk->last_mbuf == NULL) { 1.7660 + chk->last_mbuf = chk->data; 1.7661 + while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) { 1.7662 + chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf); 1.7663 + } 1.7664 + } 1.7665 + 1.7666 + if (to_move > length) { 1.7667 + /*- This should not happen either 1.7668 + * since we always lower to_move to the size 1.7669 + * of sp->length if its larger. 1.7670 + */ 1.7671 +#ifdef INVARIANTS 1.7672 + panic("Huh, how can to_move be larger?"); 1.7673 +#else 1.7674 + SCTP_PRINTF("Huh, how can to_move be larger?\n"); 1.7675 + sp->length = 0; 1.7676 +#endif 1.7677 + } else { 1.7678 + atomic_subtract_int(&sp->length, to_move); 1.7679 + } 1.7680 + if (M_LEADINGSPACE(chk->data) < (int)sizeof(struct sctp_data_chunk)) { 1.7681 + /* Not enough room for a chunk header, get some */ 1.7682 + struct mbuf *m; 1.7683 + m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 0, MT_DATA); 1.7684 + if (m == NULL) { 1.7685 + /* 1.7686 + * we're in trouble here. _PREPEND below will free 1.7687 + * all the data if there is no leading space, so we 1.7688 + * must put the data back and restore. 1.7689 + */ 1.7690 + if (send_lock_up == 0) { 1.7691 + SCTP_TCB_SEND_LOCK(stcb); 1.7692 + send_lock_up = 1; 1.7693 + } 1.7694 + if (chk->data == NULL) { 1.7695 + /* unsteal the data */ 1.7696 + sp->data = chk->data; 1.7697 + sp->tail_mbuf = chk->last_mbuf; 1.7698 + } else { 1.7699 + struct mbuf *m_tmp; 1.7700 + /* reassemble the data */ 1.7701 + m_tmp = sp->data; 1.7702 + sp->data = chk->data; 1.7703 + SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp; 1.7704 + } 1.7705 + sp->some_taken = some_taken; 1.7706 + atomic_add_int(&sp->length, to_move); 1.7707 + chk->data = NULL; 1.7708 + *bail = 1; 1.7709 + sctp_free_a_chunk(stcb, chk, so_locked); 1.7710 + to_move = 0; 1.7711 + goto out_of; 1.7712 + } else { 1.7713 + SCTP_BUF_LEN(m) = 0; 1.7714 + SCTP_BUF_NEXT(m) = chk->data; 1.7715 + chk->data = m; 1.7716 + M_ALIGN(chk->data, 4); 1.7717 + } 1.7718 + } 1.7719 + SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_NOWAIT); 1.7720 + if (chk->data == NULL) { 1.7721 + /* HELP, TSNH since we assured it would not above? 
*/ 1.7722 +#ifdef INVARIANTS 1.7723 + panic("prepend failes HELP?"); 1.7724 +#else 1.7725 + SCTP_PRINTF("prepend fails HELP?\n"); 1.7726 + sctp_free_a_chunk(stcb, chk, so_locked); 1.7727 +#endif 1.7728 + *bail = 1; 1.7729 + to_move = 0; 1.7730 + goto out_of; 1.7731 + } 1.7732 + sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk)); 1.7733 + chk->book_size = chk->send_size = (to_move + sizeof(struct sctp_data_chunk)); 1.7734 + chk->book_size_scale = 0; 1.7735 + chk->sent = SCTP_DATAGRAM_UNSENT; 1.7736 + 1.7737 + chk->flags = 0; 1.7738 + chk->asoc = &stcb->asoc; 1.7739 + chk->pad_inplace = 0; 1.7740 + chk->no_fr_allowed = 0; 1.7741 + chk->rec.data.stream_seq = strq->next_sequence_send; 1.7742 + if (rcv_flags & SCTP_DATA_LAST_FRAG) { 1.7743 + strq->next_sequence_send++; 1.7744 + } 1.7745 + chk->rec.data.stream_number = sp->stream; 1.7746 + chk->rec.data.payloadtype = sp->ppid; 1.7747 + chk->rec.data.context = sp->context; 1.7748 + chk->rec.data.doing_fast_retransmit = 0; 1.7749 + 1.7750 + chk->rec.data.timetodrop = sp->ts; 1.7751 + chk->flags = sp->act_flags; 1.7752 + 1.7753 + if (sp->net) { 1.7754 + chk->whoTo = sp->net; 1.7755 + atomic_add_int(&chk->whoTo->ref_count, 1); 1.7756 + } else 1.7757 + chk->whoTo = NULL; 1.7758 + 1.7759 + if (sp->holds_key_ref) { 1.7760 + chk->auth_keyid = sp->auth_keyid; 1.7761 + sctp_auth_key_acquire(stcb, chk->auth_keyid); 1.7762 + chk->holds_key_ref = 1; 1.7763 + } 1.7764 + 1.7765 +#if defined(__FreeBSD__) || defined(__Panda__) 1.7766 + chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1); 1.7767 +#else 1.7768 + chk->rec.data.TSN_seq = asoc->sending_seq++; 1.7769 +#endif 1.7770 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) { 1.7771 + sctp_misc_ints(SCTP_STRMOUT_LOG_SEND, 1.7772 + (uintptr_t)stcb, sp->length, 1.7773 + (uint32_t)((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq), 1.7774 + chk->rec.data.TSN_seq); 1.7775 + } 1.7776 + dchkh = mtod(chk->data, struct sctp_data_chunk *); 1.7777 + /* 1.7778 + * Put the rest of the things in place now. Size was done 1.7779 + * earlier in previous loop prior to padding. 1.7780 + */ 1.7781 + 1.7782 +#ifdef SCTP_ASOCLOG_OF_TSNS 1.7783 + SCTP_TCB_LOCK_ASSERT(stcb); 1.7784 + if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) { 1.7785 + asoc->tsn_out_at = 0; 1.7786 + asoc->tsn_out_wrapped = 1; 1.7787 + } 1.7788 + asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq; 1.7789 + asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number; 1.7790 + asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq; 1.7791 + asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size; 1.7792 + asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags; 1.7793 + asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb; 1.7794 + asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at; 1.7795 + asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2; 1.7796 + asoc->tsn_out_at++; 1.7797 +#endif 1.7798 + 1.7799 + dchkh->ch.chunk_type = SCTP_DATA; 1.7800 + dchkh->ch.chunk_flags = chk->rec.data.rcv_flags; 1.7801 + dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq); 1.7802 + dchkh->dp.stream_id = htons(strq->stream_no); 1.7803 + dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq); 1.7804 + dchkh->dp.protocol_id = chk->rec.data.payloadtype; 1.7805 + dchkh->ch.chunk_length = htons(chk->send_size); 1.7806 + /* Now advance the chk->send_size by the actual pad needed. 
*/ 1.7807 + if (chk->send_size < SCTP_SIZE32(chk->book_size)) { 1.7808 + /* need a pad */ 1.7809 + struct mbuf *lm; 1.7810 + int pads; 1.7811 + 1.7812 + pads = SCTP_SIZE32(chk->book_size) - chk->send_size; 1.7813 + if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) { 1.7814 + chk->pad_inplace = 1; 1.7815 + } 1.7816 + if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) { 1.7817 + /* pad added an mbuf */ 1.7818 + chk->last_mbuf = lm; 1.7819 + } 1.7820 + chk->send_size += pads; 1.7821 + } 1.7822 + if (PR_SCTP_ENABLED(chk->flags)) { 1.7823 + asoc->pr_sctp_cnt++; 1.7824 + } 1.7825 + if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) { 1.7826 + /* All done pull and kill the message */ 1.7827 + atomic_subtract_int(&asoc->stream_queue_cnt, 1); 1.7828 + if (sp->put_last_out == 0) { 1.7829 + SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n"); 1.7830 + SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n", 1.7831 + sp->sender_all_done, 1.7832 + sp->length, 1.7833 + sp->msg_is_complete, 1.7834 + sp->put_last_out, 1.7835 + send_lock_up); 1.7836 + } 1.7837 + if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) { 1.7838 + SCTP_TCB_SEND_LOCK(stcb); 1.7839 + send_lock_up = 1; 1.7840 + } 1.7841 + TAILQ_REMOVE(&strq->outqueue, sp, next); 1.7842 + stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up); 1.7843 + if (sp->net) { 1.7844 + sctp_free_remote_addr(sp->net); 1.7845 + sp->net = NULL; 1.7846 + } 1.7847 + if (sp->data) { 1.7848 + sctp_m_freem(sp->data); 1.7849 + sp->data = NULL; 1.7850 + } 1.7851 + sctp_free_a_strmoq(stcb, sp, so_locked); 1.7852 + 1.7853 + /* we can't be locked to it */ 1.7854 + *locked = 0; 1.7855 + stcb->asoc.locked_on_sending = NULL; 1.7856 + } else { 1.7857 + /* more to go, we are locked */ 1.7858 + *locked = 1; 1.7859 + } 1.7860 + asoc->chunks_on_out_queue++; 1.7861 + strq->chunks_on_queues++; 1.7862 + TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next); 1.7863 + asoc->send_queue_cnt++; 1.7864 +out_of: 1.7865 + if (send_lock_up) { 1.7866 + SCTP_TCB_SEND_UNLOCK(stcb); 1.7867 + } 1.7868 + return (to_move); 1.7869 +} 1.7870 + 1.7871 + 1.7872 +static void 1.7873 +sctp_fill_outqueue(struct sctp_tcb *stcb, 1.7874 + struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked 1.7875 +#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 1.7876 + SCTP_UNUSED 1.7877 +#endif 1.7878 +) 1.7879 +{ 1.7880 + struct sctp_association *asoc; 1.7881 + struct sctp_stream_out *strq; 1.7882 + int goal_mtu, moved_how_much, total_moved = 0, bail = 0; 1.7883 + int locked, giveup; 1.7884 + 1.7885 + SCTP_TCB_LOCK_ASSERT(stcb); 1.7886 + asoc = &stcb->asoc; 1.7887 + switch (net->ro._l_addr.sa.sa_family) { 1.7888 +#ifdef INET 1.7889 + case AF_INET: 1.7890 + goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; 1.7891 + break; 1.7892 +#endif 1.7893 +#ifdef INET6 1.7894 + case AF_INET6: 1.7895 + goal_mtu = net->mtu - SCTP_MIN_OVERHEAD; 1.7896 + break; 1.7897 +#endif 1.7898 +#if defined(__Userspace__) 1.7899 + case AF_CONN: 1.7900 + goal_mtu = net->mtu - sizeof(struct sctphdr); 1.7901 + break; 1.7902 +#endif 1.7903 + default: 1.7904 + /* TSNH */ 1.7905 + goal_mtu = net->mtu; 1.7906 + break; 1.7907 + } 1.7908 + /* Need an allowance for the data chunk header too */ 1.7909 + goal_mtu -= sizeof(struct sctp_data_chunk); 1.7910 + 1.7911 + /* must make even word boundary */ 1.7912 + goal_mtu &= 0xfffffffc; 1.7913 + if (asoc->locked_on_sending) { 1.7914 + /* We are stuck on one stream until the message 
completes. */ 1.7915 + strq = asoc->locked_on_sending; 1.7916 + locked = 1; 1.7917 + } else { 1.7918 + strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc); 1.7919 + locked = 0; 1.7920 + } 1.7921 + while ((goal_mtu > 0) && strq) { 1.7922 + giveup = 0; 1.7923 + bail = 0; 1.7924 + moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, &locked, 1.7925 + &giveup, eeor_mode, &bail, so_locked); 1.7926 + if (moved_how_much) 1.7927 + stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much); 1.7928 + 1.7929 + if (locked) { 1.7930 + asoc->locked_on_sending = strq; 1.7931 + if ((moved_how_much == 0) || (giveup) || bail) 1.7932 + /* no more to move for now */ 1.7933 + break; 1.7934 + } else { 1.7935 + asoc->locked_on_sending = NULL; 1.7936 + if ((giveup) || bail) { 1.7937 + break; 1.7938 + } 1.7939 + strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc); 1.7940 + if (strq == NULL) { 1.7941 + break; 1.7942 + } 1.7943 + } 1.7944 + total_moved += moved_how_much; 1.7945 + goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk)); 1.7946 + goal_mtu &= 0xfffffffc; 1.7947 + } 1.7948 + if (bail) 1.7949 + *quit_now = 1; 1.7950 + 1.7951 + stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc); 1.7952 + 1.7953 + if (total_moved == 0) { 1.7954 + if ((stcb->asoc.sctp_cmt_on_off == 0) && 1.7955 + (net == stcb->asoc.primary_destination)) { 1.7956 + /* ran dry for primary network net */ 1.7957 + SCTP_STAT_INCR(sctps_primary_randry); 1.7958 + } else if (stcb->asoc.sctp_cmt_on_off > 0) { 1.7959 + /* ran dry with CMT on */ 1.7960 + SCTP_STAT_INCR(sctps_cmt_randry); 1.7961 + } 1.7962 + } 1.7963 +} 1.7964 + 1.7965 +void 1.7966 +sctp_fix_ecn_echo(struct sctp_association *asoc) 1.7967 +{ 1.7968 + struct sctp_tmit_chunk *chk; 1.7969 + 1.7970 + TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 1.7971 + if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) { 1.7972 + chk->sent = SCTP_DATAGRAM_UNSENT; 1.7973 + } 1.7974 + } 1.7975 +} 1.7976 + 1.7977 +void 1.7978 +sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net) 1.7979 +{ 1.7980 + struct sctp_association *asoc; 1.7981 + struct sctp_tmit_chunk *chk; 1.7982 + struct sctp_stream_queue_pending *sp; 1.7983 + unsigned int i; 1.7984 + 1.7985 + if (net == NULL) { 1.7986 + return; 1.7987 + } 1.7988 + asoc = &stcb->asoc; 1.7989 + for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1.7990 + TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) { 1.7991 + if (sp->net == net) { 1.7992 + sctp_free_remote_addr(sp->net); 1.7993 + sp->net = NULL; 1.7994 + } 1.7995 + } 1.7996 + } 1.7997 + TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 1.7998 + if (chk->whoTo == net) { 1.7999 + sctp_free_remote_addr(chk->whoTo); 1.8000 + chk->whoTo = NULL; 1.8001 + } 1.8002 + } 1.8003 +} 1.8004 + 1.8005 +int 1.8006 +sctp_med_chunk_output(struct sctp_inpcb *inp, 1.8007 + struct sctp_tcb *stcb, 1.8008 + struct sctp_association *asoc, 1.8009 + int *num_out, 1.8010 + int *reason_code, 1.8011 + int control_only, int from_where, 1.8012 + struct timeval *now, int *now_filled, int frag_point, int so_locked 1.8013 +#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 1.8014 + SCTP_UNUSED 1.8015 +#endif 1.8016 + ) 1.8017 +{ 1.8018 + /** 1.8019 + * Ok this is the generic chunk service queue. we must do the 1.8020 + * following: - Service the stream queue that is next, moving any 1.8021 + * message (note I must get a complete message i.e. 
FIRST/MIDDLE and 1.8022 + * LAST to the out queue in one pass) and assigning TSN's - Check to 1.8023 + * see if the cwnd/rwnd allows any output, if so we go ahead and 1.8024 + * fomulate and send the low level chunks. Making sure to combine 1.8025 + * any control in the control chunk queue also. 1.8026 + */ 1.8027 + struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL; 1.8028 + struct mbuf *outchain, *endoutchain; 1.8029 + struct sctp_tmit_chunk *chk, *nchk; 1.8030 + 1.8031 + /* temp arrays for unlinking */ 1.8032 + struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING]; 1.8033 + int no_fragmentflg, error; 1.8034 + unsigned int max_rwnd_per_dest, max_send_per_dest; 1.8035 + int one_chunk, hbflag, skip_data_for_this_net; 1.8036 + int asconf, cookie, no_out_cnt; 1.8037 + int bundle_at, ctl_cnt, no_data_chunks, eeor_mode; 1.8038 + unsigned int mtu, r_mtu, omtu, mx_mtu, to_out; 1.8039 + int tsns_sent = 0; 1.8040 + uint32_t auth_offset = 0; 1.8041 + struct sctp_auth_chunk *auth = NULL; 1.8042 + uint16_t auth_keyid; 1.8043 + int override_ok = 1; 1.8044 + int skip_fill_up = 0; 1.8045 + int data_auth_reqd = 0; 1.8046 + /* JRS 5/14/07 - Add flag for whether a heartbeat is sent to 1.8047 + the destination. */ 1.8048 + int quit_now = 0; 1.8049 + 1.8050 +#if defined(__APPLE__) 1.8051 + if (so_locked) { 1.8052 + sctp_lock_assert(SCTP_INP_SO(inp)); 1.8053 + } else { 1.8054 + sctp_unlock_assert(SCTP_INP_SO(inp)); 1.8055 + } 1.8056 +#endif 1.8057 + *num_out = 0; 1.8058 + auth_keyid = stcb->asoc.authinfo.active_keyid; 1.8059 + 1.8060 + if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 1.8061 + (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) || 1.8062 + (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) { 1.8063 + eeor_mode = 1; 1.8064 + } else { 1.8065 + eeor_mode = 0; 1.8066 + } 1.8067 + ctl_cnt = no_out_cnt = asconf = cookie = 0; 1.8068 + /* 1.8069 + * First lets prime the pump. For each destination, if there is room 1.8070 + * in the flight size, attempt to pull an MTU's worth out of the 1.8071 + * stream queues into the general send_queue 1.8072 + */ 1.8073 +#ifdef SCTP_AUDITING_ENABLED 1.8074 + sctp_audit_log(0xC2, 2); 1.8075 +#endif 1.8076 + SCTP_TCB_LOCK_ASSERT(stcb); 1.8077 + hbflag = 0; 1.8078 + if ((control_only) || (asoc->stream_reset_outstanding)) 1.8079 + no_data_chunks = 1; 1.8080 + else 1.8081 + no_data_chunks = 0; 1.8082 + 1.8083 + /* Nothing to possible to send? 
*/ 1.8084 + if ((TAILQ_EMPTY(&asoc->control_send_queue) || 1.8085 + (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) && 1.8086 + TAILQ_EMPTY(&asoc->asconf_send_queue) && 1.8087 + TAILQ_EMPTY(&asoc->send_queue) && 1.8088 + stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) { 1.8089 + nothing_to_send: 1.8090 + *reason_code = 9; 1.8091 + return (0); 1.8092 + } 1.8093 + if (asoc->peers_rwnd == 0) { 1.8094 + /* No room in peers rwnd */ 1.8095 + *reason_code = 1; 1.8096 + if (asoc->total_flight > 0) { 1.8097 + /* we are allowed one chunk in flight */ 1.8098 + no_data_chunks = 1; 1.8099 + } 1.8100 + } 1.8101 + if (stcb->asoc.ecn_echo_cnt_onq) { 1.8102 + /* Record where a sack goes, if any */ 1.8103 + if (no_data_chunks && 1.8104 + (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) { 1.8105 + /* Nothing but ECNe to send - we don't do that */ 1.8106 + goto nothing_to_send; 1.8107 + } 1.8108 + TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 1.8109 + if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || 1.8110 + (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) { 1.8111 + sack_goes_to = chk->whoTo; 1.8112 + break; 1.8113 + } 1.8114 + } 1.8115 + } 1.8116 + max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets); 1.8117 + if (stcb->sctp_socket) 1.8118 + max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets; 1.8119 + else 1.8120 + max_send_per_dest = 0; 1.8121 + if (no_data_chunks == 0) { 1.8122 + /* How many non-directed chunks are there? */ 1.8123 + TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 1.8124 + if (chk->whoTo == NULL) { 1.8125 + /* We already have non-directed 1.8126 + * chunks on the queue, no need 1.8127 + * to do a fill-up. 1.8128 + */ 1.8129 + skip_fill_up = 1; 1.8130 + break; 1.8131 + } 1.8132 + } 1.8133 + 1.8134 + } 1.8135 + if ((no_data_chunks == 0) && 1.8136 + (skip_fill_up == 0) && 1.8137 + (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) { 1.8138 + TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 1.8139 + /* 1.8140 + * This for loop we are in takes in 1.8141 + * each net, if its's got space in cwnd and 1.8142 + * has data sent to it (when CMT is off) then it 1.8143 + * calls sctp_fill_outqueue for the net. This gets 1.8144 + * data on the send queue for that network. 1.8145 + * 1.8146 + * In sctp_fill_outqueue TSN's are assigned and 1.8147 + * data is copied out of the stream buffers. Note 1.8148 + * mostly copy by reference (we hope). 
1.8149 + */ 1.8150 + net->window_probe = 0; 1.8151 + if ((net != stcb->asoc.alternate) && 1.8152 + ((net->dest_state & SCTP_ADDR_PF) || 1.8153 + (!(net->dest_state & SCTP_ADDR_REACHABLE)) || 1.8154 + (net->dest_state & SCTP_ADDR_UNCONFIRMED))) { 1.8155 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1.8156 + sctp_log_cwnd(stcb, net, 1, 1.8157 + SCTP_CWND_LOG_FILL_OUTQ_CALLED); 1.8158 + } 1.8159 + continue; 1.8160 + } 1.8161 + if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) && 1.8162 + (net->flight_size == 0)) { 1.8163 + (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net); 1.8164 + } 1.8165 + if (net->flight_size >= net->cwnd) { 1.8166 + /* skip this network, no room - can't fill */ 1.8167 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1.8168 + sctp_log_cwnd(stcb, net, 3, 1.8169 + SCTP_CWND_LOG_FILL_OUTQ_CALLED); 1.8170 + } 1.8171 + continue; 1.8172 + } 1.8173 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1.8174 + sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED); 1.8175 + } 1.8176 + sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked); 1.8177 + if (quit_now) { 1.8178 + /* memory alloc failure */ 1.8179 + no_data_chunks = 1; 1.8180 + break; 1.8181 + } 1.8182 + } 1.8183 + } 1.8184 + /* now service each destination and send out what we can for it */ 1.8185 + /* Nothing to send? */ 1.8186 + if (TAILQ_EMPTY(&asoc->control_send_queue) && 1.8187 + TAILQ_EMPTY(&asoc->asconf_send_queue) && 1.8188 + TAILQ_EMPTY(&asoc->send_queue)) { 1.8189 + *reason_code = 8; 1.8190 + return (0); 1.8191 + } 1.8192 + 1.8193 + if (asoc->sctp_cmt_on_off > 0) { 1.8194 + /* get the last start point */ 1.8195 + start_at = asoc->last_net_cmt_send_started; 1.8196 + if (start_at == NULL) { 1.8197 + /* null so to beginning */ 1.8198 + start_at = TAILQ_FIRST(&asoc->nets); 1.8199 + } else { 1.8200 + start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next); 1.8201 + if (start_at == NULL) { 1.8202 + start_at = TAILQ_FIRST(&asoc->nets); 1.8203 + } 1.8204 + } 1.8205 + asoc->last_net_cmt_send_started = start_at; 1.8206 + } else { 1.8207 + start_at = TAILQ_FIRST(&asoc->nets); 1.8208 + } 1.8209 + TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 1.8210 + if (chk->whoTo == NULL) { 1.8211 + if (asoc->alternate) { 1.8212 + chk->whoTo = asoc->alternate; 1.8213 + } else { 1.8214 + chk->whoTo = asoc->primary_destination; 1.8215 + } 1.8216 + atomic_add_int(&chk->whoTo->ref_count, 1); 1.8217 + } 1.8218 + } 1.8219 + old_start_at = NULL; 1.8220 +again_one_more_time: 1.8221 + for (net = start_at ; net != NULL; net = TAILQ_NEXT(net, sctp_next)) { 1.8222 + /* how much can we send? */ 1.8223 + /* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */ 1.8224 + if (old_start_at && (old_start_at == net)) { 1.8225 + /* through list ocmpletely. */ 1.8226 + break; 1.8227 + } 1.8228 + tsns_sent = 0xa; 1.8229 + if (TAILQ_EMPTY(&asoc->control_send_queue) && 1.8230 + TAILQ_EMPTY(&asoc->asconf_send_queue) && 1.8231 + (net->flight_size >= net->cwnd)) { 1.8232 + /* Nothing on control or asconf and flight is full, we can skip 1.8233 + * even in the CMT case. 
1.8234 + */ 1.8235 + continue; 1.8236 + } 1.8237 + bundle_at = 0; 1.8238 + endoutchain = outchain = NULL; 1.8239 + no_fragmentflg = 1; 1.8240 + one_chunk = 0; 1.8241 + if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { 1.8242 + skip_data_for_this_net = 1; 1.8243 + } else { 1.8244 + skip_data_for_this_net = 0; 1.8245 + } 1.8246 +#if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__) || defined(__APPLE__)) 1.8247 + if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) { 1.8248 + /* 1.8249 + * if we have a route and an ifp check to see if we 1.8250 + * have room to send to this guy 1.8251 + */ 1.8252 + struct ifnet *ifp; 1.8253 + 1.8254 + ifp = net->ro.ro_rt->rt_ifp; 1.8255 + if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) { 1.8256 + SCTP_STAT_INCR(sctps_ifnomemqueued); 1.8257 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { 1.8258 + sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED); 1.8259 + } 1.8260 + continue; 1.8261 + } 1.8262 + } 1.8263 +#endif 1.8264 + switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) { 1.8265 +#ifdef INET 1.8266 + case AF_INET: 1.8267 + mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr)); 1.8268 + break; 1.8269 +#endif 1.8270 +#ifdef INET6 1.8271 + case AF_INET6: 1.8272 + mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)); 1.8273 + break; 1.8274 +#endif 1.8275 +#if defined(__Userspace__) 1.8276 + case AF_CONN: 1.8277 + mtu = net->mtu - sizeof(struct sctphdr); 1.8278 + break; 1.8279 +#endif 1.8280 + default: 1.8281 + /* TSNH */ 1.8282 + mtu = net->mtu; 1.8283 + break; 1.8284 + } 1.8285 + mx_mtu = mtu; 1.8286 + to_out = 0; 1.8287 + if (mtu > asoc->peers_rwnd) { 1.8288 + if (asoc->total_flight > 0) { 1.8289 + /* We have a packet in flight somewhere */ 1.8290 + r_mtu = asoc->peers_rwnd; 1.8291 + } else { 1.8292 + /* We are always allowed to send one MTU out */ 1.8293 + one_chunk = 1; 1.8294 + r_mtu = mtu; 1.8295 + } 1.8296 + } else { 1.8297 + r_mtu = mtu; 1.8298 + } 1.8299 + /************************/ 1.8300 + /* ASCONF transmission */ 1.8301 + /************************/ 1.8302 + /* Now first lets go through the asconf queue */ 1.8303 + TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) { 1.8304 + if (chk->rec.chunk_id.id != SCTP_ASCONF) { 1.8305 + continue; 1.8306 + } 1.8307 + if (chk->whoTo == NULL) { 1.8308 + if (asoc->alternate == NULL) { 1.8309 + if (asoc->primary_destination != net) { 1.8310 + break; 1.8311 + } 1.8312 + } else { 1.8313 + if (asoc->alternate != net) { 1.8314 + break; 1.8315 + } 1.8316 + } 1.8317 + } else { 1.8318 + if (chk->whoTo != net) { 1.8319 + break; 1.8320 + } 1.8321 + } 1.8322 + if (chk->data == NULL) { 1.8323 + break; 1.8324 + } 1.8325 + if (chk->sent != SCTP_DATAGRAM_UNSENT && 1.8326 + chk->sent != SCTP_DATAGRAM_RESEND) { 1.8327 + break; 1.8328 + } 1.8329 + /* 1.8330 + * if no AUTH is yet included and this chunk 1.8331 + * requires it, make sure to account for it. We 1.8332 + * don't apply the size until the AUTH chunk is 1.8333 + * actually added below in case there is no room for 1.8334 + * this chunk. 
NOTE: we overload the use of "omtu" 1.8335 + * here 1.8336 + */ 1.8337 + if ((auth == NULL) && 1.8338 + sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 1.8339 + stcb->asoc.peer_auth_chunks)) { 1.8340 + omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 1.8341 + } else 1.8342 + omtu = 0; 1.8343 + /* Here we do NOT factor the r_mtu */ 1.8344 + if ((chk->send_size < (int)(mtu - omtu)) || 1.8345 + (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { 1.8346 + /* 1.8347 + * We probably should glom the mbuf chain 1.8348 + * from the chk->data for control but the 1.8349 + * problem is it becomes yet one more level 1.8350 + * of tracking to do if for some reason 1.8351 + * output fails. Then I have got to 1.8352 + * reconstruct the merged control chain.. el 1.8353 + * yucko.. for now we take the easy way and 1.8354 + * do the copy 1.8355 + */ 1.8356 + /* 1.8357 + * Add an AUTH chunk, if chunk requires it 1.8358 + * save the offset into the chain for AUTH 1.8359 + */ 1.8360 + if ((auth == NULL) && 1.8361 + (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 1.8362 + stcb->asoc.peer_auth_chunks))) { 1.8363 + outchain = sctp_add_auth_chunk(outchain, 1.8364 + &endoutchain, 1.8365 + &auth, 1.8366 + &auth_offset, 1.8367 + stcb, 1.8368 + chk->rec.chunk_id.id); 1.8369 + SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 1.8370 + } 1.8371 + outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 1.8372 + (int)chk->rec.chunk_id.can_take_data, 1.8373 + chk->send_size, chk->copy_by_ref); 1.8374 + if (outchain == NULL) { 1.8375 + *reason_code = 8; 1.8376 + SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.8377 + return (ENOMEM); 1.8378 + } 1.8379 + SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 1.8380 + /* update our MTU size */ 1.8381 + if (mtu > (chk->send_size + omtu)) 1.8382 + mtu -= (chk->send_size + omtu); 1.8383 + else 1.8384 + mtu = 0; 1.8385 + to_out += (chk->send_size + omtu); 1.8386 + /* Do clear IP_DF ? */ 1.8387 + if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 1.8388 + no_fragmentflg = 0; 1.8389 + } 1.8390 + if (chk->rec.chunk_id.can_take_data) 1.8391 + chk->data = NULL; 1.8392 + /* 1.8393 + * set hb flag since we can 1.8394 + * use these for RTO 1.8395 + */ 1.8396 + hbflag = 1; 1.8397 + asconf = 1; 1.8398 + /* 1.8399 + * should sysctl this: don't 1.8400 + * bundle data with ASCONF 1.8401 + * since it requires AUTH 1.8402 + */ 1.8403 + no_data_chunks = 1; 1.8404 + chk->sent = SCTP_DATAGRAM_SENT; 1.8405 + if (chk->whoTo == NULL) { 1.8406 + chk->whoTo = net; 1.8407 + atomic_add_int(&net->ref_count, 1); 1.8408 + } 1.8409 + chk->snd_count++; 1.8410 + if (mtu == 0) { 1.8411 + /* 1.8412 + * Ok we are out of room but we can 1.8413 + * output without effecting the 1.8414 + * flight size since this little guy 1.8415 + * is a control only packet. 1.8416 + */ 1.8417 + sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net); 1.8418 + /* 1.8419 + * do NOT clear the asconf 1.8420 + * flag as it is used to do 1.8421 + * appropriate source address 1.8422 + * selection. 
1.8423 + */ 1.8424 + if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, 1.8425 + (struct sockaddr *)&net->ro._l_addr, 1.8426 + outchain, auth_offset, auth, 1.8427 + stcb->asoc.authinfo.active_keyid, 1.8428 + no_fragmentflg, 0, asconf, 1.8429 + inp->sctp_lport, stcb->rport, 1.8430 + htonl(stcb->asoc.peer_vtag), 1.8431 + net->port, NULL, 1.8432 +#if defined(__FreeBSD__) 1.8433 + 0, 0, 1.8434 +#endif 1.8435 + so_locked))) { 1.8436 + if (error == ENOBUFS) { 1.8437 + asoc->ifp_had_enobuf = 1; 1.8438 + SCTP_STAT_INCR(sctps_lowlevelerr); 1.8439 + } 1.8440 + if (from_where == 0) { 1.8441 + SCTP_STAT_INCR(sctps_lowlevelerrusr); 1.8442 + } 1.8443 + if (*now_filled == 0) { 1.8444 + (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 1.8445 + *now_filled = 1; 1.8446 + *now = net->last_sent_time; 1.8447 + } else { 1.8448 + net->last_sent_time = *now; 1.8449 + } 1.8450 + hbflag = 0; 1.8451 + /* error, could not output */ 1.8452 + if (error == EHOSTUNREACH) { 1.8453 + /* 1.8454 + * Destination went 1.8455 + * unreachable 1.8456 + * during this send 1.8457 + */ 1.8458 + sctp_move_chunks_from_net(stcb, net); 1.8459 + } 1.8460 + *reason_code = 7; 1.8461 + continue; 1.8462 + } else 1.8463 + asoc->ifp_had_enobuf = 0; 1.8464 + if (*now_filled == 0) { 1.8465 + (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 1.8466 + *now_filled = 1; 1.8467 + *now = net->last_sent_time; 1.8468 + } else { 1.8469 + net->last_sent_time = *now; 1.8470 + } 1.8471 + hbflag = 0; 1.8472 + /* 1.8473 + * increase the number we sent, if a 1.8474 + * cookie is sent we don't tell them 1.8475 + * any was sent out. 1.8476 + */ 1.8477 + outchain = endoutchain = NULL; 1.8478 + auth = NULL; 1.8479 + auth_offset = 0; 1.8480 + if (!no_out_cnt) 1.8481 + *num_out += ctl_cnt; 1.8482 + /* recalc a clean slate and setup */ 1.8483 + switch (net->ro._l_addr.sa.sa_family) { 1.8484 +#ifdef INET 1.8485 + case AF_INET: 1.8486 + mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; 1.8487 + break; 1.8488 +#endif 1.8489 +#ifdef INET6 1.8490 + case AF_INET6: 1.8491 + mtu = net->mtu - SCTP_MIN_OVERHEAD; 1.8492 + break; 1.8493 +#endif 1.8494 +#if defined(__Userspace__) 1.8495 + case AF_CONN: 1.8496 + mtu = net->mtu - sizeof(struct sctphdr); 1.8497 + break; 1.8498 +#endif 1.8499 + default: 1.8500 + /* TSNH */ 1.8501 + mtu = net->mtu; 1.8502 + break; 1.8503 + } 1.8504 + to_out = 0; 1.8505 + no_fragmentflg = 1; 1.8506 + } 1.8507 + } 1.8508 + } 1.8509 + /************************/ 1.8510 + /* Control transmission */ 1.8511 + /************************/ 1.8512 + /* Now first lets go through the control queue */ 1.8513 + TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) { 1.8514 + if ((sack_goes_to) && 1.8515 + (chk->rec.chunk_id.id == SCTP_ECN_ECHO) && 1.8516 + (chk->whoTo != sack_goes_to)) { 1.8517 + /* 1.8518 + * if we have a sack in queue, and we are looking at an 1.8519 + * ecn echo that is NOT queued to where the sack is going.. 
1.8520 + */ 1.8521 + if (chk->whoTo == net) { 1.8522 + /* Don't transmit it to where its going (current net) */ 1.8523 + continue; 1.8524 + } else if (sack_goes_to == net) { 1.8525 + /* But do transmit it to this address */ 1.8526 + goto skip_net_check; 1.8527 + } 1.8528 + } 1.8529 + if (chk->whoTo == NULL) { 1.8530 + if (asoc->alternate == NULL) { 1.8531 + if (asoc->primary_destination != net) { 1.8532 + continue; 1.8533 + } 1.8534 + } else { 1.8535 + if (asoc->alternate != net) { 1.8536 + continue; 1.8537 + } 1.8538 + } 1.8539 + } else { 1.8540 + if (chk->whoTo != net) { 1.8541 + continue; 1.8542 + } 1.8543 + } 1.8544 + skip_net_check: 1.8545 + if (chk->data == NULL) { 1.8546 + continue; 1.8547 + } 1.8548 + if (chk->sent != SCTP_DATAGRAM_UNSENT) { 1.8549 + /* 1.8550 + * It must be unsent. Cookies and ASCONF's 1.8551 + * hang around but there timers will force 1.8552 + * when marked for resend. 1.8553 + */ 1.8554 + continue; 1.8555 + } 1.8556 + /* 1.8557 + * if no AUTH is yet included and this chunk 1.8558 + * requires it, make sure to account for it. We 1.8559 + * don't apply the size until the AUTH chunk is 1.8560 + * actually added below in case there is no room for 1.8561 + * this chunk. NOTE: we overload the use of "omtu" 1.8562 + * here 1.8563 + */ 1.8564 + if ((auth == NULL) && 1.8565 + sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 1.8566 + stcb->asoc.peer_auth_chunks)) { 1.8567 + omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 1.8568 + } else 1.8569 + omtu = 0; 1.8570 + /* Here we do NOT factor the r_mtu */ 1.8571 + if ((chk->send_size <= (int)(mtu - omtu)) || 1.8572 + (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { 1.8573 + /* 1.8574 + * We probably should glom the mbuf chain 1.8575 + * from the chk->data for control but the 1.8576 + * problem is it becomes yet one more level 1.8577 + * of tracking to do if for some reason 1.8578 + * output fails. Then I have got to 1.8579 + * reconstruct the merged control chain.. el 1.8580 + * yucko.. for now we take the easy way and 1.8581 + * do the copy 1.8582 + */ 1.8583 + /* 1.8584 + * Add an AUTH chunk, if chunk requires it 1.8585 + * save the offset into the chain for AUTH 1.8586 + */ 1.8587 + if ((auth == NULL) && 1.8588 + (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 1.8589 + stcb->asoc.peer_auth_chunks))) { 1.8590 + outchain = sctp_add_auth_chunk(outchain, 1.8591 + &endoutchain, 1.8592 + &auth, 1.8593 + &auth_offset, 1.8594 + stcb, 1.8595 + chk->rec.chunk_id.id); 1.8596 + SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 1.8597 + } 1.8598 + outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 1.8599 + (int)chk->rec.chunk_id.can_take_data, 1.8600 + chk->send_size, chk->copy_by_ref); 1.8601 + if (outchain == NULL) { 1.8602 + *reason_code = 8; 1.8603 + SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.8604 + return (ENOMEM); 1.8605 + } 1.8606 + SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 1.8607 + /* update our MTU size */ 1.8608 + if (mtu > (chk->send_size + omtu)) 1.8609 + mtu -= (chk->send_size + omtu); 1.8610 + else 1.8611 + mtu = 0; 1.8612 + to_out += (chk->send_size + omtu); 1.8613 + /* Do clear IP_DF ? 
*/ 1.8614 + if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 1.8615 + no_fragmentflg = 0; 1.8616 + } 1.8617 + if (chk->rec.chunk_id.can_take_data) 1.8618 + chk->data = NULL; 1.8619 + /* Mark things to be removed, if needed */ 1.8620 + if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || 1.8621 + (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */ 1.8622 + (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) || 1.8623 + (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) || 1.8624 + (chk->rec.chunk_id.id == SCTP_SHUTDOWN) || 1.8625 + (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) || 1.8626 + (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) || 1.8627 + (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) || 1.8628 + (chk->rec.chunk_id.id == SCTP_ECN_CWR) || 1.8629 + (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) || 1.8630 + (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) { 1.8631 + if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) { 1.8632 + hbflag = 1; 1.8633 + } 1.8634 + /* remove these chunks at the end */ 1.8635 + if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) || 1.8636 + (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) { 1.8637 + /* turn off the timer */ 1.8638 + if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 1.8639 + sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 1.8640 + inp, stcb, net, SCTP_FROM_SCTP_OUTPUT+SCTP_LOC_1); 1.8641 + } 1.8642 + } 1.8643 + ctl_cnt++; 1.8644 + } else { 1.8645 + /* 1.8646 + * Other chunks, since they have 1.8647 + * timers running (i.e. COOKIE) 1.8648 + * we just "trust" that it 1.8649 + * gets sent or retransmitted. 1.8650 + */ 1.8651 + ctl_cnt++; 1.8652 + if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 1.8653 + cookie = 1; 1.8654 + no_out_cnt = 1; 1.8655 + } else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) { 1.8656 + /* 1.8657 + * Increment ecne send count here 1.8658 + * this means we may be over-zealous in 1.8659 + * our counting if the send fails, but its 1.8660 + * the best place to do it (we used to do 1.8661 + * it in the queue of the chunk, but that did 1.8662 + * not tell how many times it was sent. 1.8663 + */ 1.8664 + SCTP_STAT_INCR(sctps_sendecne); 1.8665 + } 1.8666 + chk->sent = SCTP_DATAGRAM_SENT; 1.8667 + if (chk->whoTo == NULL) { 1.8668 + chk->whoTo = net; 1.8669 + atomic_add_int(&net->ref_count, 1); 1.8670 + } 1.8671 + chk->snd_count++; 1.8672 + } 1.8673 + if (mtu == 0) { 1.8674 + /* 1.8675 + * Ok we are out of room but we can 1.8676 + * output without effecting the 1.8677 + * flight size since this little guy 1.8678 + * is a control only packet. 1.8679 + */ 1.8680 + if (asconf) { 1.8681 + sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net); 1.8682 + /* 1.8683 + * do NOT clear the asconf 1.8684 + * flag as it is used to do 1.8685 + * appropriate source address 1.8686 + * selection. 
1.8687 + */ 1.8688 + } 1.8689 + if (cookie) { 1.8690 + sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net); 1.8691 + cookie = 0; 1.8692 + } 1.8693 + if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, 1.8694 + (struct sockaddr *)&net->ro._l_addr, 1.8695 + outchain, 1.8696 + auth_offset, auth, 1.8697 + stcb->asoc.authinfo.active_keyid, 1.8698 + no_fragmentflg, 0, asconf, 1.8699 + inp->sctp_lport, stcb->rport, 1.8700 + htonl(stcb->asoc.peer_vtag), 1.8701 + net->port, NULL, 1.8702 +#if defined(__FreeBSD__) 1.8703 + 0, 0, 1.8704 +#endif 1.8705 + so_locked))) { 1.8706 + if (error == ENOBUFS) { 1.8707 + asoc->ifp_had_enobuf = 1; 1.8708 + SCTP_STAT_INCR(sctps_lowlevelerr); 1.8709 + } 1.8710 + if (from_where == 0) { 1.8711 + SCTP_STAT_INCR(sctps_lowlevelerrusr); 1.8712 + } 1.8713 + /* error, could not output */ 1.8714 + if (hbflag) { 1.8715 + if (*now_filled == 0) { 1.8716 + (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 1.8717 + *now_filled = 1; 1.8718 + *now = net->last_sent_time; 1.8719 + } else { 1.8720 + net->last_sent_time = *now; 1.8721 + } 1.8722 + hbflag = 0; 1.8723 + } 1.8724 + if (error == EHOSTUNREACH) { 1.8725 + /* 1.8726 + * Destination went 1.8727 + * unreachable 1.8728 + * during this send 1.8729 + */ 1.8730 + sctp_move_chunks_from_net(stcb, net); 1.8731 + } 1.8732 + *reason_code = 7; 1.8733 + continue; 1.8734 + } else 1.8735 + asoc->ifp_had_enobuf = 0; 1.8736 + /* Only HB or ASCONF advances time */ 1.8737 + if (hbflag) { 1.8738 + if (*now_filled == 0) { 1.8739 + (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 1.8740 + *now_filled = 1; 1.8741 + *now = net->last_sent_time; 1.8742 + } else { 1.8743 + net->last_sent_time = *now; 1.8744 + } 1.8745 + hbflag = 0; 1.8746 + } 1.8747 + /* 1.8748 + * increase the number we sent, if a 1.8749 + * cookie is sent we don't tell them 1.8750 + * any was sent out. 1.8751 + */ 1.8752 + outchain = endoutchain = NULL; 1.8753 + auth = NULL; 1.8754 + auth_offset = 0; 1.8755 + if (!no_out_cnt) 1.8756 + *num_out += ctl_cnt; 1.8757 + /* recalc a clean slate and setup */ 1.8758 + switch (net->ro._l_addr.sa.sa_family) { 1.8759 +#ifdef INET 1.8760 + case AF_INET: 1.8761 + mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; 1.8762 + break; 1.8763 +#endif 1.8764 +#ifdef INET6 1.8765 + case AF_INET6: 1.8766 + mtu = net->mtu - SCTP_MIN_OVERHEAD; 1.8767 + break; 1.8768 +#endif 1.8769 +#if defined(__Userspace__) 1.8770 + case AF_CONN: 1.8771 + mtu = net->mtu - sizeof(struct sctphdr); 1.8772 + break; 1.8773 +#endif 1.8774 + default: 1.8775 + /* TSNH */ 1.8776 + mtu = net->mtu; 1.8777 + break; 1.8778 + } 1.8779 + to_out = 0; 1.8780 + no_fragmentflg = 1; 1.8781 + } 1.8782 + } 1.8783 + } 1.8784 + /* JRI: if dest is in PF state, do not send data to it */ 1.8785 + if ((asoc->sctp_cmt_on_off > 0) && 1.8786 + (net != stcb->asoc.alternate) && 1.8787 + (net->dest_state & SCTP_ADDR_PF)) { 1.8788 + goto no_data_fill; 1.8789 + } 1.8790 + if (net->flight_size >= net->cwnd) { 1.8791 + goto no_data_fill; 1.8792 + } 1.8793 + if ((asoc->sctp_cmt_on_off > 0) && 1.8794 + (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) && 1.8795 + (net->flight_size > max_rwnd_per_dest)) { 1.8796 + goto no_data_fill; 1.8797 + } 1.8798 + /* 1.8799 + * We need a specific accounting for the usage of the 1.8800 + * send buffer. We also need to check the number of messages 1.8801 + * per net. For now, this is better than nothing and it 1.8802 + * disabled by default... 
1.8803 + */ 1.8804 + if ((asoc->sctp_cmt_on_off > 0) && 1.8805 + (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) && 1.8806 + (max_send_per_dest > 0) && 1.8807 + (net->flight_size > max_send_per_dest)) { 1.8808 + goto no_data_fill; 1.8809 + } 1.8810 + /*********************/ 1.8811 + /* Data transmission */ 1.8812 + /*********************/ 1.8813 + /* 1.8814 + * if AUTH for DATA is required and no AUTH has been added 1.8815 + * yet, account for this in the mtu now... if no data can be 1.8816 + * bundled, this adjustment won't matter anyways since the 1.8817 + * packet will be going out... 1.8818 + */ 1.8819 + data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, 1.8820 + stcb->asoc.peer_auth_chunks); 1.8821 + if (data_auth_reqd && (auth == NULL)) { 1.8822 + mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 1.8823 + } 1.8824 + /* now lets add any data within the MTU constraints */ 1.8825 + switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) { 1.8826 +#ifdef INET 1.8827 + case AF_INET: 1.8828 + if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr))) 1.8829 + omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr)); 1.8830 + else 1.8831 + omtu = 0; 1.8832 + break; 1.8833 +#endif 1.8834 +#ifdef INET6 1.8835 + case AF_INET6: 1.8836 + if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr))) 1.8837 + omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)); 1.8838 + else 1.8839 + omtu = 0; 1.8840 + break; 1.8841 +#endif 1.8842 +#if defined(__Userspace__) 1.8843 + case AF_CONN: 1.8844 + if (net->mtu > sizeof(struct sctphdr)) { 1.8845 + omtu = net->mtu - sizeof(struct sctphdr); 1.8846 + } else { 1.8847 + omtu = 0; 1.8848 + } 1.8849 + break; 1.8850 +#endif 1.8851 + default: 1.8852 + /* TSNH */ 1.8853 + omtu = 0; 1.8854 + break; 1.8855 + } 1.8856 + if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) && 1.8857 + (skip_data_for_this_net == 0)) || 1.8858 + (cookie)) { 1.8859 + TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 1.8860 + if (no_data_chunks) { 1.8861 + /* let only control go out */ 1.8862 + *reason_code = 1; 1.8863 + break; 1.8864 + } 1.8865 + if (net->flight_size >= net->cwnd) { 1.8866 + /* skip this net, no room for data */ 1.8867 + *reason_code = 2; 1.8868 + break; 1.8869 + } 1.8870 + if ((chk->whoTo != NULL) && 1.8871 + (chk->whoTo != net)) { 1.8872 + /* Don't send the chunk on this net */ 1.8873 + continue; 1.8874 + } 1.8875 + 1.8876 + if (asoc->sctp_cmt_on_off == 0) { 1.8877 + if ((asoc->alternate) && 1.8878 + (asoc->alternate != net) && 1.8879 + (chk->whoTo == NULL)) { 1.8880 + continue; 1.8881 + } else if ((net != asoc->primary_destination) && 1.8882 + (asoc->alternate == NULL) && 1.8883 + (chk->whoTo == NULL)) { 1.8884 + continue; 1.8885 + } 1.8886 + } 1.8887 + if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) { 1.8888 + /*- 1.8889 + * strange, we have a chunk that is 1.8890 + * to big for its destination and 1.8891 + * yet no fragment ok flag. 1.8892 + * Something went wrong when the 1.8893 + * PMTU changed...we did not mark 1.8894 + * this chunk for some reason?? I 1.8895 + * will fix it here by letting IP 1.8896 + * fragment it for now and printing 1.8897 + * a warning. This really should not 1.8898 + * happen ... 
1.8899 + */ 1.8900 + SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n", 1.8901 + chk->send_size, mtu); 1.8902 + chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 1.8903 + } 1.8904 + if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) && 1.8905 + ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) { 1.8906 + struct sctp_data_chunk *dchkh; 1.8907 + 1.8908 + dchkh = mtod(chk->data, struct sctp_data_chunk *); 1.8909 + dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY; 1.8910 + } 1.8911 + if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) || 1.8912 + ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) { 1.8913 + /* ok we will add this one */ 1.8914 + 1.8915 + /* 1.8916 + * Add an AUTH chunk, if chunk 1.8917 + * requires it, save the offset into 1.8918 + * the chain for AUTH 1.8919 + */ 1.8920 + if (data_auth_reqd) { 1.8921 + if (auth == NULL) { 1.8922 + outchain = sctp_add_auth_chunk(outchain, 1.8923 + &endoutchain, 1.8924 + &auth, 1.8925 + &auth_offset, 1.8926 + stcb, 1.8927 + SCTP_DATA); 1.8928 + auth_keyid = chk->auth_keyid; 1.8929 + override_ok = 0; 1.8930 + SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 1.8931 + } else if (override_ok) { 1.8932 + /* use this data's keyid */ 1.8933 + auth_keyid = chk->auth_keyid; 1.8934 + override_ok = 0; 1.8935 + } else if (auth_keyid != chk->auth_keyid) { 1.8936 + /* different keyid, so done bundling */ 1.8937 + break; 1.8938 + } 1.8939 + } 1.8940 + outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0, 1.8941 + chk->send_size, chk->copy_by_ref); 1.8942 + if (outchain == NULL) { 1.8943 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n"); 1.8944 + if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 1.8945 + sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 1.8946 + } 1.8947 + *reason_code = 3; 1.8948 + SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.8949 + return (ENOMEM); 1.8950 + } 1.8951 + /* upate our MTU size */ 1.8952 + /* Do clear IP_DF ? */ 1.8953 + if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 1.8954 + no_fragmentflg = 0; 1.8955 + } 1.8956 + /* unsigned subtraction of mtu */ 1.8957 + if (mtu > chk->send_size) 1.8958 + mtu -= chk->send_size; 1.8959 + else 1.8960 + mtu = 0; 1.8961 + /* unsigned subtraction of r_mtu */ 1.8962 + if (r_mtu > chk->send_size) 1.8963 + r_mtu -= chk->send_size; 1.8964 + else 1.8965 + r_mtu = 0; 1.8966 + 1.8967 + to_out += chk->send_size; 1.8968 + if ((to_out > mx_mtu) && no_fragmentflg) { 1.8969 +#ifdef INVARIANTS 1.8970 + panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out); 1.8971 +#else 1.8972 + SCTP_PRINTF("Exceeding mtu of %d out size is %d\n", 1.8973 + mx_mtu, to_out); 1.8974 +#endif 1.8975 + } 1.8976 + chk->window_probe = 0; 1.8977 + data_list[bundle_at++] = chk; 1.8978 + if (bundle_at >= SCTP_MAX_DATA_BUNDLING) { 1.8979 + break; 1.8980 + } 1.8981 + if (chk->sent == SCTP_DATAGRAM_UNSENT) { 1.8982 + if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) { 1.8983 + SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks); 1.8984 + } else { 1.8985 + SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks); 1.8986 + } 1.8987 + if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) && 1.8988 + ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0)) 1.8989 + /* Count number of user msg's that were fragmented 1.8990 + * we do this by counting when we see a LAST fragment 1.8991 + * only. 
1.8992 + */ 1.8993 + SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs); 1.8994 + } 1.8995 + if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) { 1.8996 + if ((one_chunk) && (stcb->asoc.total_flight == 0)) { 1.8997 + data_list[0]->window_probe = 1; 1.8998 + net->window_probe = 1; 1.8999 + } 1.9000 + break; 1.9001 + } 1.9002 + } else { 1.9003 + /* 1.9004 + * Must be sent in order of the 1.9005 + * TSN's (on a network) 1.9006 + */ 1.9007 + break; 1.9008 + } 1.9009 + } /* for (chunk gather loop for this net) */ 1.9010 + } /* if asoc.state OPEN */ 1.9011 + no_data_fill: 1.9012 + /* Is there something to send for this destination? */ 1.9013 + if (outchain) { 1.9014 + /* We may need to start a control timer or two */ 1.9015 + if (asconf) { 1.9016 + sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, 1.9017 + stcb, net); 1.9018 + /* 1.9019 + * do NOT clear the asconf flag as it is used 1.9020 + * to do appropriate source address selection. 1.9021 + */ 1.9022 + } 1.9023 + if (cookie) { 1.9024 + sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net); 1.9025 + cookie = 0; 1.9026 + } 1.9027 + /* must start a send timer if data is being sent */ 1.9028 + if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) { 1.9029 + /* 1.9030 + * no timer running on this destination 1.9031 + * restart it. 1.9032 + */ 1.9033 + sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 1.9034 + } 1.9035 + /* Now send it, if there is anything to send :> */ 1.9036 + if ((error = sctp_lowlevel_chunk_output(inp, 1.9037 + stcb, 1.9038 + net, 1.9039 + (struct sockaddr *)&net->ro._l_addr, 1.9040 + outchain, 1.9041 + auth_offset, 1.9042 + auth, 1.9043 + auth_keyid, 1.9044 + no_fragmentflg, 1.9045 + bundle_at, 1.9046 + asconf, 1.9047 + inp->sctp_lport, stcb->rport, 1.9048 + htonl(stcb->asoc.peer_vtag), 1.9049 + net->port, NULL, 1.9050 +#if defined(__FreeBSD__) 1.9051 + 0, 0, 1.9052 +#endif 1.9053 + so_locked))) { 1.9054 + /* error, we could not output */ 1.9055 + if (error == ENOBUFS) { 1.9056 + SCTP_STAT_INCR(sctps_lowlevelerr); 1.9057 + asoc->ifp_had_enobuf = 1; 1.9058 + } 1.9059 + if (from_where == 0) { 1.9060 + SCTP_STAT_INCR(sctps_lowlevelerrusr); 1.9061 + } 1.9062 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error); 1.9063 + if (hbflag) { 1.9064 + if (*now_filled == 0) { 1.9065 + (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 1.9066 + *now_filled = 1; 1.9067 + *now = net->last_sent_time; 1.9068 + } else { 1.9069 + net->last_sent_time = *now; 1.9070 + } 1.9071 + hbflag = 0; 1.9072 + } 1.9073 + if (error == EHOSTUNREACH) { 1.9074 + /* 1.9075 + * Destination went unreachable 1.9076 + * during this send 1.9077 + */ 1.9078 + sctp_move_chunks_from_net(stcb, net); 1.9079 + } 1.9080 + *reason_code = 6; 1.9081 + /*- 1.9082 + * I add this line to be paranoid. As far as 1.9083 + * I can tell the continue, takes us back to 1.9084 + * the top of the for, but just to make sure 1.9085 + * I will reset these again here. 1.9086 + */ 1.9087 + ctl_cnt = bundle_at = 0; 1.9088 + continue; /* This takes us back to the for() for the nets. 
*/ 1.9089 + } else { 1.9090 + asoc->ifp_had_enobuf = 0; 1.9091 + } 1.9092 + endoutchain = NULL; 1.9093 + auth = NULL; 1.9094 + auth_offset = 0; 1.9095 + if (bundle_at || hbflag) { 1.9096 + /* For data/asconf and hb set time */ 1.9097 + if (*now_filled == 0) { 1.9098 + (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); 1.9099 + *now_filled = 1; 1.9100 + *now = net->last_sent_time; 1.9101 + } else { 1.9102 + net->last_sent_time = *now; 1.9103 + } 1.9104 + } 1.9105 + if (!no_out_cnt) { 1.9106 + *num_out += (ctl_cnt + bundle_at); 1.9107 + } 1.9108 + if (bundle_at) { 1.9109 + /* setup for a RTO measurement */ 1.9110 + tsns_sent = data_list[0]->rec.data.TSN_seq; 1.9111 + /* fill time if not already filled */ 1.9112 + if (*now_filled == 0) { 1.9113 + (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent); 1.9114 + *now_filled = 1; 1.9115 + *now = asoc->time_last_sent; 1.9116 + } else { 1.9117 + asoc->time_last_sent = *now; 1.9118 + } 1.9119 + if (net->rto_needed) { 1.9120 + data_list[0]->do_rtt = 1; 1.9121 + net->rto_needed = 0; 1.9122 + } 1.9123 + SCTP_STAT_INCR_BY(sctps_senddata, bundle_at); 1.9124 + sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net); 1.9125 + } 1.9126 + if (one_chunk) { 1.9127 + break; 1.9128 + } 1.9129 + } 1.9130 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1.9131 + sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND); 1.9132 + } 1.9133 + } 1.9134 + if (old_start_at == NULL) { 1.9135 + old_start_at = start_at; 1.9136 + start_at = TAILQ_FIRST(&asoc->nets); 1.9137 + if (old_start_at) 1.9138 + goto again_one_more_time; 1.9139 + } 1.9140 + 1.9141 + /* 1.9142 + * At the end there should be no NON timed chunks hanging on this 1.9143 + * queue. 1.9144 + */ 1.9145 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1.9146 + sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND); 1.9147 + } 1.9148 + if ((*num_out == 0) && (*reason_code == 0)) { 1.9149 + *reason_code = 4; 1.9150 + } else { 1.9151 + *reason_code = 5; 1.9152 + } 1.9153 + sctp_clean_up_ctl(stcb, asoc, so_locked); 1.9154 + return (0); 1.9155 +} 1.9156 + 1.9157 +void 1.9158 +sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err) 1.9159 +{ 1.9160 + /*- 1.9161 + * Prepend a OPERATIONAL_ERROR chunk header and put on the end of 1.9162 + * the control chunk queue. 
1.9163 + */ 1.9164 + struct sctp_chunkhdr *hdr; 1.9165 + struct sctp_tmit_chunk *chk; 1.9166 + struct mbuf *mat; 1.9167 + 1.9168 + SCTP_TCB_LOCK_ASSERT(stcb); 1.9169 + sctp_alloc_a_chunk(stcb, chk); 1.9170 + if (chk == NULL) { 1.9171 + /* no memory */ 1.9172 + sctp_m_freem(op_err); 1.9173 + return; 1.9174 + } 1.9175 + chk->copy_by_ref = 0; 1.9176 + SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT); 1.9177 + if (op_err == NULL) { 1.9178 + sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1.9179 + return; 1.9180 + } 1.9181 + chk->send_size = 0; 1.9182 + mat = op_err; 1.9183 + while (mat != NULL) { 1.9184 + chk->send_size += SCTP_BUF_LEN(mat); 1.9185 + mat = SCTP_BUF_NEXT(mat); 1.9186 + } 1.9187 + chk->rec.chunk_id.id = SCTP_OPERATION_ERROR; 1.9188 + chk->rec.chunk_id.can_take_data = 1; 1.9189 + chk->sent = SCTP_DATAGRAM_UNSENT; 1.9190 + chk->snd_count = 0; 1.9191 + chk->flags = 0; 1.9192 + chk->asoc = &stcb->asoc; 1.9193 + chk->data = op_err; 1.9194 + chk->whoTo = NULL; 1.9195 + hdr = mtod(op_err, struct sctp_chunkhdr *); 1.9196 + hdr->chunk_type = SCTP_OPERATION_ERROR; 1.9197 + hdr->chunk_flags = 0; 1.9198 + hdr->chunk_length = htons(chk->send_size); 1.9199 + TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, 1.9200 + chk, 1.9201 + sctp_next); 1.9202 + chk->asoc->ctrl_queue_cnt++; 1.9203 +} 1.9204 + 1.9205 +int 1.9206 +sctp_send_cookie_echo(struct mbuf *m, 1.9207 + int offset, 1.9208 + struct sctp_tcb *stcb, 1.9209 + struct sctp_nets *net) 1.9210 +{ 1.9211 + /*- 1.9212 + * pull out the cookie and put it at the front of the control chunk 1.9213 + * queue. 1.9214 + */ 1.9215 + int at; 1.9216 + struct mbuf *cookie; 1.9217 + struct sctp_paramhdr parm, *phdr; 1.9218 + struct sctp_chunkhdr *hdr; 1.9219 + struct sctp_tmit_chunk *chk; 1.9220 + uint16_t ptype, plen; 1.9221 + 1.9222 + /* First find the cookie in the param area */ 1.9223 + cookie = NULL; 1.9224 + at = offset + sizeof(struct sctp_init_chunk); 1.9225 + 1.9226 + SCTP_TCB_LOCK_ASSERT(stcb); 1.9227 + do { 1.9228 + phdr = sctp_get_next_param(m, at, &parm, sizeof(parm)); 1.9229 + if (phdr == NULL) { 1.9230 + return (-3); 1.9231 + } 1.9232 + ptype = ntohs(phdr->param_type); 1.9233 + plen = ntohs(phdr->param_length); 1.9234 + if (ptype == SCTP_STATE_COOKIE) { 1.9235 + int pad; 1.9236 + 1.9237 + /* found the cookie */ 1.9238 + if ((pad = (plen % 4))) { 1.9239 + plen += 4 - pad; 1.9240 + } 1.9241 + cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT); 1.9242 + if (cookie == NULL) { 1.9243 + /* No memory */ 1.9244 + return (-2); 1.9245 + } 1.9246 +#ifdef SCTP_MBUF_LOGGING 1.9247 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 1.9248 + struct mbuf *mat; 1.9249 + 1.9250 + for (mat = cookie; mat; mat = SCTP_BUF_NEXT(mat)) { 1.9251 + if (SCTP_BUF_IS_EXTENDED(mat)) { 1.9252 + sctp_log_mb(mat, SCTP_MBUF_ICOPY); 1.9253 + } 1.9254 + } 1.9255 + } 1.9256 +#endif 1.9257 + break; 1.9258 + } 1.9259 + at += SCTP_SIZE32(plen); 1.9260 + } while (phdr); 1.9261 + if (cookie == NULL) { 1.9262 + /* Did not find the cookie */ 1.9263 + return (-3); 1.9264 + } 1.9265 + /* ok, we got the cookie lets change it into a cookie echo chunk */ 1.9266 + 1.9267 + /* first the change from param to cookie */ 1.9268 + hdr = mtod(cookie, struct sctp_chunkhdr *); 1.9269 + hdr->chunk_type = SCTP_COOKIE_ECHO; 1.9270 + hdr->chunk_flags = 0; 1.9271 + /* get the chunk stuff now and place it in the FRONT of the queue */ 1.9272 + sctp_alloc_a_chunk(stcb, chk); 1.9273 + if (chk == NULL) { 1.9274 + /* no memory */ 1.9275 + sctp_m_freem(cookie); 1.9276 + return 
(-5); 1.9277 + } 1.9278 + chk->copy_by_ref = 0; 1.9279 + chk->send_size = plen; 1.9280 + chk->rec.chunk_id.id = SCTP_COOKIE_ECHO; 1.9281 + chk->rec.chunk_id.can_take_data = 0; 1.9282 + chk->sent = SCTP_DATAGRAM_UNSENT; 1.9283 + chk->snd_count = 0; 1.9284 + chk->flags = CHUNK_FLAGS_FRAGMENT_OK; 1.9285 + chk->asoc = &stcb->asoc; 1.9286 + chk->data = cookie; 1.9287 + chk->whoTo = net; 1.9288 + atomic_add_int(&chk->whoTo->ref_count, 1); 1.9289 + TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next); 1.9290 + chk->asoc->ctrl_queue_cnt++; 1.9291 + return (0); 1.9292 +} 1.9293 + 1.9294 +void 1.9295 +sctp_send_heartbeat_ack(struct sctp_tcb *stcb, 1.9296 + struct mbuf *m, 1.9297 + int offset, 1.9298 + int chk_length, 1.9299 + struct sctp_nets *net) 1.9300 +{ 1.9301 + /* 1.9302 + * take a HB request and make it into a HB ack and send it. 1.9303 + */ 1.9304 + struct mbuf *outchain; 1.9305 + struct sctp_chunkhdr *chdr; 1.9306 + struct sctp_tmit_chunk *chk; 1.9307 + 1.9308 + 1.9309 + if (net == NULL) 1.9310 + /* must have a net pointer */ 1.9311 + return; 1.9312 + 1.9313 + outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT); 1.9314 + if (outchain == NULL) { 1.9315 + /* gak out of memory */ 1.9316 + return; 1.9317 + } 1.9318 +#ifdef SCTP_MBUF_LOGGING 1.9319 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 1.9320 + struct mbuf *mat; 1.9321 + 1.9322 + for (mat = outchain; mat; mat = SCTP_BUF_NEXT(mat)) { 1.9323 + if (SCTP_BUF_IS_EXTENDED(mat)) { 1.9324 + sctp_log_mb(mat, SCTP_MBUF_ICOPY); 1.9325 + } 1.9326 + } 1.9327 + } 1.9328 +#endif 1.9329 + chdr = mtod(outchain, struct sctp_chunkhdr *); 1.9330 + chdr->chunk_type = SCTP_HEARTBEAT_ACK; 1.9331 + chdr->chunk_flags = 0; 1.9332 + if (chk_length % 4) { 1.9333 + /* need pad */ 1.9334 + uint32_t cpthis = 0; 1.9335 + int padlen; 1.9336 + 1.9337 + padlen = 4 - (chk_length % 4); 1.9338 + m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis); 1.9339 + } 1.9340 + sctp_alloc_a_chunk(stcb, chk); 1.9341 + if (chk == NULL) { 1.9342 + /* no memory */ 1.9343 + sctp_m_freem(outchain); 1.9344 + return; 1.9345 + } 1.9346 + chk->copy_by_ref = 0; 1.9347 + chk->send_size = chk_length; 1.9348 + chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK; 1.9349 + chk->rec.chunk_id.can_take_data = 1; 1.9350 + chk->sent = SCTP_DATAGRAM_UNSENT; 1.9351 + chk->snd_count = 0; 1.9352 + chk->flags = 0; 1.9353 + chk->asoc = &stcb->asoc; 1.9354 + chk->data = outchain; 1.9355 + chk->whoTo = net; 1.9356 + atomic_add_int(&chk->whoTo->ref_count, 1); 1.9357 + TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 1.9358 + chk->asoc->ctrl_queue_cnt++; 1.9359 +} 1.9360 + 1.9361 +void 1.9362 +sctp_send_cookie_ack(struct sctp_tcb *stcb) 1.9363 +{ 1.9364 + /* formulate and queue a cookie-ack back to sender */ 1.9365 + struct mbuf *cookie_ack; 1.9366 + struct sctp_chunkhdr *hdr; 1.9367 + struct sctp_tmit_chunk *chk; 1.9368 + 1.9369 + SCTP_TCB_LOCK_ASSERT(stcb); 1.9370 + 1.9371 + cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER); 1.9372 + if (cookie_ack == NULL) { 1.9373 + /* no mbuf's */ 1.9374 + return; 1.9375 + } 1.9376 + SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD); 1.9377 + sctp_alloc_a_chunk(stcb, chk); 1.9378 + if (chk == NULL) { 1.9379 + /* no memory */ 1.9380 + sctp_m_freem(cookie_ack); 1.9381 + return; 1.9382 + } 1.9383 + chk->copy_by_ref = 0; 1.9384 + chk->send_size = sizeof(struct sctp_chunkhdr); 1.9385 + chk->rec.chunk_id.id = SCTP_COOKIE_ACK; 1.9386 + chk->rec.chunk_id.can_take_data = 1; 1.9387 + 
chk->sent = SCTP_DATAGRAM_UNSENT; 1.9388 + chk->snd_count = 0; 1.9389 + chk->flags = 0; 1.9390 + chk->asoc = &stcb->asoc; 1.9391 + chk->data = cookie_ack; 1.9392 + if (chk->asoc->last_control_chunk_from != NULL) { 1.9393 + chk->whoTo = chk->asoc->last_control_chunk_from; 1.9394 + atomic_add_int(&chk->whoTo->ref_count, 1); 1.9395 + } else { 1.9396 + chk->whoTo = NULL; 1.9397 + } 1.9398 + hdr = mtod(cookie_ack, struct sctp_chunkhdr *); 1.9399 + hdr->chunk_type = SCTP_COOKIE_ACK; 1.9400 + hdr->chunk_flags = 0; 1.9401 + hdr->chunk_length = htons(chk->send_size); 1.9402 + SCTP_BUF_LEN(cookie_ack) = chk->send_size; 1.9403 + TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 1.9404 + chk->asoc->ctrl_queue_cnt++; 1.9405 + return; 1.9406 +} 1.9407 + 1.9408 + 1.9409 +void 1.9410 +sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net) 1.9411 +{ 1.9412 + /* formulate and queue a SHUTDOWN-ACK back to the sender */ 1.9413 + struct mbuf *m_shutdown_ack; 1.9414 + struct sctp_shutdown_ack_chunk *ack_cp; 1.9415 + struct sctp_tmit_chunk *chk; 1.9416 + 1.9417 + m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER); 1.9418 + if (m_shutdown_ack == NULL) { 1.9419 + /* no mbuf's */ 1.9420 + return; 1.9421 + } 1.9422 + SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD); 1.9423 + sctp_alloc_a_chunk(stcb, chk); 1.9424 + if (chk == NULL) { 1.9425 + /* no memory */ 1.9426 + sctp_m_freem(m_shutdown_ack); 1.9427 + return; 1.9428 + } 1.9429 + chk->copy_by_ref = 0; 1.9430 + chk->send_size = sizeof(struct sctp_chunkhdr); 1.9431 + chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK; 1.9432 + chk->rec.chunk_id.can_take_data = 1; 1.9433 + chk->sent = SCTP_DATAGRAM_UNSENT; 1.9434 + chk->snd_count = 0; 1.9435 + chk->flags = 0; 1.9436 + chk->asoc = &stcb->asoc; 1.9437 + chk->data = m_shutdown_ack; 1.9438 + chk->whoTo = net; 1.9439 + if (chk->whoTo) { 1.9440 + atomic_add_int(&chk->whoTo->ref_count, 1); 1.9441 + } 1.9442 + ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *); 1.9443 + ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK; 1.9444 + ack_cp->ch.chunk_flags = 0; 1.9445 + ack_cp->ch.chunk_length = htons(chk->send_size); 1.9446 + SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size; 1.9447 + TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 1.9448 + chk->asoc->ctrl_queue_cnt++; 1.9449 + return; 1.9450 +} 1.9451 + 1.9452 +void 1.9453 +sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net) 1.9454 +{ 1.9455 + /* formulate and queue a SHUTDOWN to the sender */ 1.9456 + struct mbuf *m_shutdown; 1.9457 + struct sctp_shutdown_chunk *shutdown_cp; 1.9458 + struct sctp_tmit_chunk *chk; 1.9459 + 1.9460 + m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER); 1.9461 + if (m_shutdown == NULL) { 1.9462 + /* no mbuf's */ 1.9463 + return; 1.9464 + } 1.9465 + SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD); 1.9466 + sctp_alloc_a_chunk(stcb, chk); 1.9467 + if (chk == NULL) { 1.9468 + /* no memory */ 1.9469 + sctp_m_freem(m_shutdown); 1.9470 + return; 1.9471 + } 1.9472 + chk->copy_by_ref = 0; 1.9473 + chk->send_size = sizeof(struct sctp_shutdown_chunk); 1.9474 + chk->rec.chunk_id.id = SCTP_SHUTDOWN; 1.9475 + chk->rec.chunk_id.can_take_data = 1; 1.9476 + chk->sent = SCTP_DATAGRAM_UNSENT; 1.9477 + chk->snd_count = 0; 1.9478 + chk->flags = 0; 1.9479 + chk->asoc = &stcb->asoc; 1.9480 + chk->data = m_shutdown; 1.9481 + chk->whoTo = net; 1.9482 + if (chk->whoTo) { 1.9483 + atomic_add_int(&chk->whoTo->ref_count, 
1); 1.9484 + } 1.9485 + shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *); 1.9486 + shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN; 1.9487 + shutdown_cp->ch.chunk_flags = 0; 1.9488 + shutdown_cp->ch.chunk_length = htons(chk->send_size); 1.9489 + shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn); 1.9490 + SCTP_BUF_LEN(m_shutdown) = chk->send_size; 1.9491 + TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 1.9492 + chk->asoc->ctrl_queue_cnt++; 1.9493 + return; 1.9494 +} 1.9495 + 1.9496 +void 1.9497 +sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked) 1.9498 +{ 1.9499 + /* 1.9500 + * formulate and queue an ASCONF to the peer. 1.9501 + * ASCONF parameters should be queued on the assoc queue. 1.9502 + */ 1.9503 + struct sctp_tmit_chunk *chk; 1.9504 + struct mbuf *m_asconf; 1.9505 + int len; 1.9506 + 1.9507 + SCTP_TCB_LOCK_ASSERT(stcb); 1.9508 + 1.9509 + if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) && 1.9510 + (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) { 1.9511 + /* can't send a new one if there is one in flight already */ 1.9512 + return; 1.9513 + } 1.9514 + 1.9515 + /* compose an ASCONF chunk, maximum length is PMTU */ 1.9516 + m_asconf = sctp_compose_asconf(stcb, &len, addr_locked); 1.9517 + if (m_asconf == NULL) { 1.9518 + return; 1.9519 + } 1.9520 + 1.9521 + sctp_alloc_a_chunk(stcb, chk); 1.9522 + if (chk == NULL) { 1.9523 + /* no memory */ 1.9524 + sctp_m_freem(m_asconf); 1.9525 + return; 1.9526 + } 1.9527 + 1.9528 + chk->copy_by_ref = 0; 1.9529 + chk->data = m_asconf; 1.9530 + chk->send_size = len; 1.9531 + chk->rec.chunk_id.id = SCTP_ASCONF; 1.9532 + chk->rec.chunk_id.can_take_data = 0; 1.9533 + chk->sent = SCTP_DATAGRAM_UNSENT; 1.9534 + chk->snd_count = 0; 1.9535 + chk->flags = CHUNK_FLAGS_FRAGMENT_OK; 1.9536 + chk->asoc = &stcb->asoc; 1.9537 + chk->whoTo = net; 1.9538 + if (chk->whoTo) { 1.9539 + atomic_add_int(&chk->whoTo->ref_count, 1); 1.9540 + } 1.9541 + TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next); 1.9542 + chk->asoc->ctrl_queue_cnt++; 1.9543 + return; 1.9544 +} 1.9545 + 1.9546 +void 1.9547 +sctp_send_asconf_ack(struct sctp_tcb *stcb) 1.9548 +{ 1.9549 + /* 1.9550 + * formulate and queue a asconf-ack back to sender. 1.9551 + * the asconf-ack must be stored in the tcb. 
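 * The ACKs themselves were built earlier and cached on
 * asoc.asconf_ack_sent; every cached entry that still holds data is
 * copied and queued again below, so a peer that retransmits an ASCONF
 * gets the matching ACK back. If the last ACK already went to the
 * address the request came from, an alternate destination is tried
 * first via sctp_find_alternate_net().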
1.9552 + */ 1.9553 + struct sctp_tmit_chunk *chk; 1.9554 + struct sctp_asconf_ack *ack, *latest_ack; 1.9555 + struct mbuf *m_ack; 1.9556 + struct sctp_nets *net = NULL; 1.9557 + 1.9558 + SCTP_TCB_LOCK_ASSERT(stcb); 1.9559 + /* Get the latest ASCONF-ACK */ 1.9560 + latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead); 1.9561 + if (latest_ack == NULL) { 1.9562 + return; 1.9563 + } 1.9564 + if (latest_ack->last_sent_to != NULL && 1.9565 + latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) { 1.9566 + /* we're doing a retransmission */ 1.9567 + net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0); 1.9568 + if (net == NULL) { 1.9569 + /* no alternate */ 1.9570 + if (stcb->asoc.last_control_chunk_from == NULL) { 1.9571 + if (stcb->asoc.alternate) { 1.9572 + net = stcb->asoc.alternate; 1.9573 + } else { 1.9574 + net = stcb->asoc.primary_destination; 1.9575 + } 1.9576 + } else { 1.9577 + net = stcb->asoc.last_control_chunk_from; 1.9578 + } 1.9579 + } 1.9580 + } else { 1.9581 + /* normal case */ 1.9582 + if (stcb->asoc.last_control_chunk_from == NULL) { 1.9583 + if (stcb->asoc.alternate) { 1.9584 + net = stcb->asoc.alternate; 1.9585 + } else { 1.9586 + net = stcb->asoc.primary_destination; 1.9587 + } 1.9588 + } else { 1.9589 + net = stcb->asoc.last_control_chunk_from; 1.9590 + } 1.9591 + } 1.9592 + latest_ack->last_sent_to = net; 1.9593 + 1.9594 + TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) { 1.9595 + if (ack->data == NULL) { 1.9596 + continue; 1.9597 + } 1.9598 + 1.9599 + /* copy the asconf_ack */ 1.9600 + m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT); 1.9601 + if (m_ack == NULL) { 1.9602 + /* couldn't copy it */ 1.9603 + return; 1.9604 + } 1.9605 +#ifdef SCTP_MBUF_LOGGING 1.9606 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 1.9607 + struct mbuf *mat; 1.9608 + 1.9609 + for (mat = m_ack; mat; mat = SCTP_BUF_NEXT(mat)) { 1.9610 + if (SCTP_BUF_IS_EXTENDED(mat)) { 1.9611 + sctp_log_mb(mat, SCTP_MBUF_ICOPY); 1.9612 + } 1.9613 + } 1.9614 + } 1.9615 +#endif 1.9616 + 1.9617 + sctp_alloc_a_chunk(stcb, chk); 1.9618 + if (chk == NULL) { 1.9619 + /* no memory */ 1.9620 + if (m_ack) 1.9621 + sctp_m_freem(m_ack); 1.9622 + return; 1.9623 + } 1.9624 + chk->copy_by_ref = 0; 1.9625 + 1.9626 + chk->whoTo = net; 1.9627 + if (chk->whoTo) { 1.9628 + atomic_add_int(&chk->whoTo->ref_count, 1); 1.9629 + } 1.9630 + chk->data = m_ack; 1.9631 + chk->send_size = 0; 1.9632 + /* Get size */ 1.9633 + chk->send_size = ack->len; 1.9634 + chk->rec.chunk_id.id = SCTP_ASCONF_ACK; 1.9635 + chk->rec.chunk_id.can_take_data = 1; 1.9636 + chk->sent = SCTP_DATAGRAM_UNSENT; 1.9637 + chk->snd_count = 0; 1.9638 + chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; /* XXX */ 1.9639 + chk->asoc = &stcb->asoc; 1.9640 + 1.9641 + TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next); 1.9642 + chk->asoc->ctrl_queue_cnt++; 1.9643 + } 1.9644 + return; 1.9645 +} 1.9646 + 1.9647 + 1.9648 +static int 1.9649 +sctp_chunk_retransmission(struct sctp_inpcb *inp, 1.9650 + struct sctp_tcb *stcb, 1.9651 + struct sctp_association *asoc, 1.9652 + int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked 1.9653 +#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 1.9654 + SCTP_UNUSED 1.9655 +#endif 1.9656 + ) 1.9657 +{ 1.9658 + /*- 1.9659 + * send out one MTU of retransmission. If fast_retransmit is 1.9660 + * happening we ignore the cwnd. Otherwise we obey the cwnd and 1.9661 + * rwnd. 
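 * (Here cwnd is the per-destination congestion window and rwnd the
 * peer's advertised receive window; both are consulted per chunk in
 * the data loop further down.)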
For a Cookie or Asconf in the control chunk queue we 1.9662 + * retransmit them by themselves. 1.9663 + * 1.9664 + * For data chunks we will pick out the lowest TSN's in the sent_queue 1.9665 + * marked for resend and bundle them all together (up to a MTU of 1.9666 + * destination). The address to send to should have been 1.9667 + * selected/changed where the retransmission was marked (i.e. in FR 1.9668 + * or t3-timeout routines). 1.9669 + */ 1.9670 + struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING]; 1.9671 + struct sctp_tmit_chunk *chk, *fwd; 1.9672 + struct mbuf *m, *endofchain; 1.9673 + struct sctp_nets *net = NULL; 1.9674 + uint32_t tsns_sent = 0; 1.9675 + int no_fragmentflg, bundle_at, cnt_thru; 1.9676 + unsigned int mtu; 1.9677 + int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started; 1.9678 + struct sctp_auth_chunk *auth = NULL; 1.9679 + uint32_t auth_offset = 0; 1.9680 + uint16_t auth_keyid; 1.9681 + int override_ok = 1; 1.9682 + int data_auth_reqd = 0; 1.9683 + uint32_t dmtu = 0; 1.9684 + 1.9685 +#if defined(__APPLE__) 1.9686 + if (so_locked) { 1.9687 + sctp_lock_assert(SCTP_INP_SO(inp)); 1.9688 + } else { 1.9689 + sctp_unlock_assert(SCTP_INP_SO(inp)); 1.9690 + } 1.9691 +#endif 1.9692 + SCTP_TCB_LOCK_ASSERT(stcb); 1.9693 + tmr_started = ctl_cnt = bundle_at = error = 0; 1.9694 + no_fragmentflg = 1; 1.9695 + fwd_tsn = 0; 1.9696 + *cnt_out = 0; 1.9697 + fwd = NULL; 1.9698 + endofchain = m = NULL; 1.9699 + auth_keyid = stcb->asoc.authinfo.active_keyid; 1.9700 +#ifdef SCTP_AUDITING_ENABLED 1.9701 + sctp_audit_log(0xC3, 1); 1.9702 +#endif 1.9703 + if ((TAILQ_EMPTY(&asoc->sent_queue)) && 1.9704 + (TAILQ_EMPTY(&asoc->control_send_queue))) { 1.9705 + SCTPDBG(SCTP_DEBUG_OUTPUT1,"SCTP hits empty queue with cnt set to %d?\n", 1.9706 + asoc->sent_queue_retran_cnt); 1.9707 + asoc->sent_queue_cnt = 0; 1.9708 + asoc->sent_queue_cnt_removeable = 0; 1.9709 + /* send back 0/0 so we enter normal transmission */ 1.9710 + *cnt_out = 0; 1.9711 + return (0); 1.9712 + } 1.9713 + TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 1.9714 + if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) || 1.9715 + (chk->rec.chunk_id.id == SCTP_STREAM_RESET) || 1.9716 + (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) { 1.9717 + if (chk->sent != SCTP_DATAGRAM_RESEND) { 1.9718 + continue; 1.9719 + } 1.9720 + if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) { 1.9721 + if (chk != asoc->str_reset) { 1.9722 + /* 1.9723 + * not eligible for retran if its 1.9724 + * not ours 1.9725 + */ 1.9726 + continue; 1.9727 + } 1.9728 + } 1.9729 + ctl_cnt++; 1.9730 + if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) { 1.9731 + fwd_tsn = 1; 1.9732 + } 1.9733 + /* 1.9734 + * Add an AUTH chunk, if chunk requires it save the 1.9735 + * offset into the chain for AUTH 1.9736 + */ 1.9737 + if ((auth == NULL) && 1.9738 + (sctp_auth_is_required_chunk(chk->rec.chunk_id.id, 1.9739 + stcb->asoc.peer_auth_chunks))) { 1.9740 + m = sctp_add_auth_chunk(m, &endofchain, 1.9741 + &auth, &auth_offset, 1.9742 + stcb, 1.9743 + chk->rec.chunk_id.id); 1.9744 + SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 1.9745 + } 1.9746 + m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref); 1.9747 + break; 1.9748 + } 1.9749 + } 1.9750 + one_chunk = 0; 1.9751 + cnt_thru = 0; 1.9752 + /* do we have control chunks to retransmit? 
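 * (m was assembled by the loop above from the first COOKIE-ECHO,
 * STREAM-RESET or FWD-TSN found marked for resend, plus an AUTH chunk
 * when the peer requires one; if present it goes out as a packet of
 * its own before any data retransmission is considered.)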
*/ 1.9753 + if (m != NULL) { 1.9754 + /* Start a timer no matter if we suceed or fail */ 1.9755 + if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 1.9756 + sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo); 1.9757 + } else if (chk->rec.chunk_id.id == SCTP_ASCONF) 1.9758 + sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo); 1.9759 + chk->snd_count++; /* update our count */ 1.9760 + if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo, 1.9761 + (struct sockaddr *)&chk->whoTo->ro._l_addr, m, 1.9762 + auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1.9763 + no_fragmentflg, 0, 0, 1.9764 + inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), 1.9765 + chk->whoTo->port, NULL, 1.9766 +#if defined(__FreeBSD__) 1.9767 + 0, 0, 1.9768 +#endif 1.9769 + so_locked))) { 1.9770 + SCTP_STAT_INCR(sctps_lowlevelerr); 1.9771 + return (error); 1.9772 + } 1.9773 + endofchain = NULL; 1.9774 + auth = NULL; 1.9775 + auth_offset = 0; 1.9776 + /* 1.9777 + * We don't want to mark the net->sent time here since this 1.9778 + * we use this for HB and retrans cannot measure RTT 1.9779 + */ 1.9780 + /* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */ 1.9781 + *cnt_out += 1; 1.9782 + chk->sent = SCTP_DATAGRAM_SENT; 1.9783 + sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); 1.9784 + if (fwd_tsn == 0) { 1.9785 + return (0); 1.9786 + } else { 1.9787 + /* Clean up the fwd-tsn list */ 1.9788 + sctp_clean_up_ctl(stcb, asoc, so_locked); 1.9789 + return (0); 1.9790 + } 1.9791 + } 1.9792 + /* 1.9793 + * Ok, it is just data retransmission we need to do or that and a 1.9794 + * fwd-tsn with it all. 1.9795 + */ 1.9796 + if (TAILQ_EMPTY(&asoc->sent_queue)) { 1.9797 + return (SCTP_RETRAN_DONE); 1.9798 + } 1.9799 + if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) || 1.9800 + (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) { 1.9801 + /* not yet open, resend the cookie and that is it */ 1.9802 + return (1); 1.9803 + } 1.9804 +#ifdef SCTP_AUDITING_ENABLED 1.9805 + sctp_auditing(20, inp, stcb, NULL); 1.9806 +#endif 1.9807 + data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks); 1.9808 + TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 1.9809 + if (chk->sent != SCTP_DATAGRAM_RESEND) { 1.9810 + /* No, not sent to this net or not ready for rtx */ 1.9811 + continue; 1.9812 + } 1.9813 + if (chk->data == NULL) { 1.9814 + SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n", 1.9815 + chk->rec.data.TSN_seq, chk->snd_count, chk->sent); 1.9816 + continue; 1.9817 + } 1.9818 + if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) && 1.9819 + (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) { 1.9820 + /* Gak, we have exceeded max unlucky retran, abort! 
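 * (The sysctl sctp_max_retran_chunk bounds how often one chunk may be
 * retransmitted before the association is torn down; a value of 0
 * disables the check, as the guard above shows.)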
*/ 1.9821 + SCTP_PRINTF("Gak, chk->snd_count:%d >= max:%d - send abort\n", 1.9822 + chk->snd_count, 1.9823 + SCTP_BASE_SYSCTL(sctp_max_retran_chunk)); 1.9824 + atomic_add_int(&stcb->asoc.refcnt, 1); 1.9825 + sctp_abort_an_association(stcb->sctp_ep, stcb, NULL, so_locked); 1.9826 + SCTP_TCB_LOCK(stcb); 1.9827 + atomic_subtract_int(&stcb->asoc.refcnt, 1); 1.9828 + return (SCTP_RETRAN_EXIT); 1.9829 + } 1.9830 + /* pick up the net */ 1.9831 + net = chk->whoTo; 1.9832 + switch (net->ro._l_addr.sa.sa_family) { 1.9833 +#ifdef INET 1.9834 + case AF_INET: 1.9835 + mtu = net->mtu - SCTP_MIN_V4_OVERHEAD; 1.9836 + break; 1.9837 +#endif 1.9838 +#ifdef INET6 1.9839 + case AF_INET6: 1.9840 + mtu = net->mtu - SCTP_MIN_OVERHEAD; 1.9841 + break; 1.9842 +#endif 1.9843 +#if defined(__Userspace__) 1.9844 + case AF_CONN: 1.9845 + mtu = net->mtu - sizeof(struct sctphdr); 1.9846 + break; 1.9847 +#endif 1.9848 + default: 1.9849 + /* TSNH */ 1.9850 + mtu = net->mtu; 1.9851 + break; 1.9852 + } 1.9853 + 1.9854 + if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) { 1.9855 + /* No room in peers rwnd */ 1.9856 + uint32_t tsn; 1.9857 + 1.9858 + tsn = asoc->last_acked_seq + 1; 1.9859 + if (tsn == chk->rec.data.TSN_seq) { 1.9860 + /* 1.9861 + * we make a special exception for this 1.9862 + * case. The peer has no rwnd but is missing 1.9863 + * the lowest chunk.. which is probably what 1.9864 + * is holding up the rwnd. 1.9865 + */ 1.9866 + goto one_chunk_around; 1.9867 + } 1.9868 + return (1); 1.9869 + } 1.9870 + one_chunk_around: 1.9871 + if (asoc->peers_rwnd < mtu) { 1.9872 + one_chunk = 1; 1.9873 + if ((asoc->peers_rwnd == 0) && 1.9874 + (asoc->total_flight == 0)) { 1.9875 + chk->window_probe = 1; 1.9876 + chk->whoTo->window_probe = 1; 1.9877 + } 1.9878 + } 1.9879 +#ifdef SCTP_AUDITING_ENABLED 1.9880 + sctp_audit_log(0xC3, 2); 1.9881 +#endif 1.9882 + bundle_at = 0; 1.9883 + m = NULL; 1.9884 + net->fast_retran_ip = 0; 1.9885 + if (chk->rec.data.doing_fast_retransmit == 0) { 1.9886 + /* 1.9887 + * if no FR in progress skip destination that have 1.9888 + * flight_size > cwnd. 1.9889 + */ 1.9890 + if (net->flight_size >= net->cwnd) { 1.9891 + continue; 1.9892 + } 1.9893 + } else { 1.9894 + /* 1.9895 + * Mark the destination net to have FR recovery 1.9896 + * limits put on it. 1.9897 + */ 1.9898 + *fr_done = 1; 1.9899 + net->fast_retran_ip = 1; 1.9900 + } 1.9901 + 1.9902 + /* 1.9903 + * if no AUTH is yet included and this chunk requires it, 1.9904 + * make sure to account for it. We don't apply the size 1.9905 + * until the AUTH chunk is actually added below in case 1.9906 + * there is no room for this chunk. 
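 * Put differently, the admission test below is roughly
 *
 *     chk->send_size <= mtu - (AUTH still needed ? auth chunk length : 0)
 *
 * so a chunk is never admitted on the assumption that the AUTH chunk
 * comes for free.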
1.9907 + */ 1.9908 + if (data_auth_reqd && (auth == NULL)) { 1.9909 + dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 1.9910 + } else 1.9911 + dmtu = 0; 1.9912 + 1.9913 + if ((chk->send_size <= (mtu - dmtu)) || 1.9914 + (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) { 1.9915 + /* ok we will add this one */ 1.9916 + if (data_auth_reqd) { 1.9917 + if (auth == NULL) { 1.9918 + m = sctp_add_auth_chunk(m, 1.9919 + &endofchain, 1.9920 + &auth, 1.9921 + &auth_offset, 1.9922 + stcb, 1.9923 + SCTP_DATA); 1.9924 + auth_keyid = chk->auth_keyid; 1.9925 + override_ok = 0; 1.9926 + SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 1.9927 + } else if (override_ok) { 1.9928 + auth_keyid = chk->auth_keyid; 1.9929 + override_ok = 0; 1.9930 + } else if (chk->auth_keyid != auth_keyid) { 1.9931 + /* different keyid, so done bundling */ 1.9932 + break; 1.9933 + } 1.9934 + } 1.9935 + m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref); 1.9936 + if (m == NULL) { 1.9937 + SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.9938 + return (ENOMEM); 1.9939 + } 1.9940 + /* Do clear IP_DF ? */ 1.9941 + if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) { 1.9942 + no_fragmentflg = 0; 1.9943 + } 1.9944 + /* upate our MTU size */ 1.9945 + if (mtu > (chk->send_size + dmtu)) 1.9946 + mtu -= (chk->send_size + dmtu); 1.9947 + else 1.9948 + mtu = 0; 1.9949 + data_list[bundle_at++] = chk; 1.9950 + if (one_chunk && (asoc->total_flight <= 0)) { 1.9951 + SCTP_STAT_INCR(sctps_windowprobed); 1.9952 + } 1.9953 + } 1.9954 + if (one_chunk == 0) { 1.9955 + /* 1.9956 + * now are there anymore forward from chk to pick 1.9957 + * up? 1.9958 + */ 1.9959 + for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) { 1.9960 + if (fwd->sent != SCTP_DATAGRAM_RESEND) { 1.9961 + /* Nope, not for retran */ 1.9962 + continue; 1.9963 + } 1.9964 + if (fwd->whoTo != net) { 1.9965 + /* Nope, not the net in question */ 1.9966 + continue; 1.9967 + } 1.9968 + if (data_auth_reqd && (auth == NULL)) { 1.9969 + dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id); 1.9970 + } else 1.9971 + dmtu = 0; 1.9972 + if (fwd->send_size <= (mtu - dmtu)) { 1.9973 + if (data_auth_reqd) { 1.9974 + if (auth == NULL) { 1.9975 + m = sctp_add_auth_chunk(m, 1.9976 + &endofchain, 1.9977 + &auth, 1.9978 + &auth_offset, 1.9979 + stcb, 1.9980 + SCTP_DATA); 1.9981 + auth_keyid = fwd->auth_keyid; 1.9982 + override_ok = 0; 1.9983 + SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 1.9984 + } else if (override_ok) { 1.9985 + auth_keyid = fwd->auth_keyid; 1.9986 + override_ok = 0; 1.9987 + } else if (fwd->auth_keyid != auth_keyid) { 1.9988 + /* different keyid, so done bundling */ 1.9989 + break; 1.9990 + } 1.9991 + } 1.9992 + m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref); 1.9993 + if (m == NULL) { 1.9994 + SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.9995 + return (ENOMEM); 1.9996 + } 1.9997 + /* Do clear IP_DF ? 
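 * (no_fragmentflg is what later tells sctp_lowlevel_chunk_output()
 * whether to set IP_DF; chunks flagged CHUNK_FLAGS_FRAGMENT_OK clear
 * it so an oversized retransmission may still be fragmented at the IP
 * layer.)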
*/ 1.9998 + if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) { 1.9999 + no_fragmentflg = 0; 1.10000 + } 1.10001 + /* upate our MTU size */ 1.10002 + if (mtu > (fwd->send_size + dmtu)) 1.10003 + mtu -= (fwd->send_size + dmtu); 1.10004 + else 1.10005 + mtu = 0; 1.10006 + data_list[bundle_at++] = fwd; 1.10007 + if (bundle_at >= SCTP_MAX_DATA_BUNDLING) { 1.10008 + break; 1.10009 + } 1.10010 + } else { 1.10011 + /* can't fit so we are done */ 1.10012 + break; 1.10013 + } 1.10014 + } 1.10015 + } 1.10016 + /* Is there something to send for this destination? */ 1.10017 + if (m) { 1.10018 + /* 1.10019 + * No matter if we fail/or suceed we should start a 1.10020 + * timer. A failure is like a lost IP packet :-) 1.10021 + */ 1.10022 + if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 1.10023 + /* 1.10024 + * no timer running on this destination 1.10025 + * restart it. 1.10026 + */ 1.10027 + sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 1.10028 + tmr_started = 1; 1.10029 + } 1.10030 + /* Now lets send it, if there is anything to send :> */ 1.10031 + if ((error = sctp_lowlevel_chunk_output(inp, stcb, net, 1.10032 + (struct sockaddr *)&net->ro._l_addr, m, 1.10033 + auth_offset, auth, auth_keyid, 1.10034 + no_fragmentflg, 0, 0, 1.10035 + inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag), 1.10036 + net->port, NULL, 1.10037 +#if defined(__FreeBSD__) 1.10038 + 0, 0, 1.10039 +#endif 1.10040 + so_locked))) { 1.10041 + /* error, we could not output */ 1.10042 + SCTP_STAT_INCR(sctps_lowlevelerr); 1.10043 + return (error); 1.10044 + } 1.10045 + endofchain = NULL; 1.10046 + auth = NULL; 1.10047 + auth_offset = 0; 1.10048 + /* For HB's */ 1.10049 + /* 1.10050 + * We don't want to mark the net->sent time here 1.10051 + * since this we use this for HB and retrans cannot 1.10052 + * measure RTT 1.10053 + */ 1.10054 + /* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */ 1.10055 + 1.10056 + /* For auto-close */ 1.10057 + cnt_thru++; 1.10058 + if (*now_filled == 0) { 1.10059 + (void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent); 1.10060 + *now = asoc->time_last_sent; 1.10061 + *now_filled = 1; 1.10062 + } else { 1.10063 + asoc->time_last_sent = *now; 1.10064 + } 1.10065 + *cnt_out += bundle_at; 1.10066 +#ifdef SCTP_AUDITING_ENABLED 1.10067 + sctp_audit_log(0xC4, bundle_at); 1.10068 +#endif 1.10069 + if (bundle_at) { 1.10070 + tsns_sent = data_list[0]->rec.data.TSN_seq; 1.10071 + } 1.10072 + for (i = 0; i < bundle_at; i++) { 1.10073 + SCTP_STAT_INCR(sctps_sendretransdata); 1.10074 + data_list[i]->sent = SCTP_DATAGRAM_SENT; 1.10075 + /* 1.10076 + * When we have a revoked data, and we 1.10077 + * retransmit it, then we clear the revoked 1.10078 + * flag since this flag dictates if we 1.10079 + * subtracted from the fs 1.10080 + */ 1.10081 + if (data_list[i]->rec.data.chunk_was_revoked) { 1.10082 + /* Deflate the cwnd */ 1.10083 + data_list[i]->whoTo->cwnd -= data_list[i]->book_size; 1.10084 + data_list[i]->rec.data.chunk_was_revoked = 0; 1.10085 + } 1.10086 + data_list[i]->snd_count++; 1.10087 + sctp_ucount_decr(asoc->sent_queue_retran_cnt); 1.10088 + /* record the time */ 1.10089 + data_list[i]->sent_rcv_time = asoc->time_last_sent; 1.10090 + if (data_list[i]->book_size_scale) { 1.10091 + /* 1.10092 + * need to double the book size on 1.10093 + * this one 1.10094 + */ 1.10095 + data_list[i]->book_size_scale = 0; 1.10096 + /* Since we double the booksize, we must 1.10097 + * also double the output queue size, since this 1.10098 + * get shrunk when we free by this amount. 
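 * (The order matters: total_output_queue_size is grown by the old
 * book_size first and book_size is doubled afterwards, which keeps the
 * queue total in step with the book sizes that are later subtracted
 * when the chunk is freed.)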
1.10099 + */ 1.10100 + atomic_add_int(&((asoc)->total_output_queue_size),data_list[i]->book_size); 1.10101 + data_list[i]->book_size *= 2; 1.10102 + 1.10103 + 1.10104 + } else { 1.10105 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 1.10106 + sctp_log_rwnd(SCTP_DECREASE_PEER_RWND, 1.10107 + asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 1.10108 + } 1.10109 + asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd, 1.10110 + (uint32_t) (data_list[i]->send_size + 1.10111 + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))); 1.10112 + } 1.10113 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 1.10114 + sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND, 1.10115 + data_list[i]->whoTo->flight_size, 1.10116 + data_list[i]->book_size, 1.10117 + (uintptr_t)data_list[i]->whoTo, 1.10118 + data_list[i]->rec.data.TSN_seq); 1.10119 + } 1.10120 + sctp_flight_size_increase(data_list[i]); 1.10121 + sctp_total_flight_increase(stcb, data_list[i]); 1.10122 + if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 1.10123 + /* SWS sender side engages */ 1.10124 + asoc->peers_rwnd = 0; 1.10125 + } 1.10126 + if ((i == 0) && 1.10127 + (data_list[i]->rec.data.doing_fast_retransmit)) { 1.10128 + SCTP_STAT_INCR(sctps_sendfastretrans); 1.10129 + if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) && 1.10130 + (tmr_started == 0)) { 1.10131 + /*- 1.10132 + * ok we just fast-retrans'd 1.10133 + * the lowest TSN, i.e the 1.10134 + * first on the list. In 1.10135 + * this case we want to give 1.10136 + * some more time to get a 1.10137 + * SACK back without a 1.10138 + * t3-expiring. 1.10139 + */ 1.10140 + sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net, 1.10141 + SCTP_FROM_SCTP_OUTPUT+SCTP_LOC_4); 1.10142 + sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 1.10143 + } 1.10144 + } 1.10145 + } 1.10146 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1.10147 + sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND); 1.10148 + } 1.10149 +#ifdef SCTP_AUDITING_ENABLED 1.10150 + sctp_auditing(21, inp, stcb, NULL); 1.10151 +#endif 1.10152 + } else { 1.10153 + /* None will fit */ 1.10154 + return (1); 1.10155 + } 1.10156 + if (asoc->sent_queue_retran_cnt <= 0) { 1.10157 + /* all done we have no more to retran */ 1.10158 + asoc->sent_queue_retran_cnt = 0; 1.10159 + break; 1.10160 + } 1.10161 + if (one_chunk) { 1.10162 + /* No more room in rwnd */ 1.10163 + return (1); 1.10164 + } 1.10165 + /* stop the for loop here. 
we sent out a packet */ 1.10166 + break; 1.10167 + } 1.10168 + return (0); 1.10169 +} 1.10170 + 1.10171 +static void 1.10172 +sctp_timer_validation(struct sctp_inpcb *inp, 1.10173 + struct sctp_tcb *stcb, 1.10174 + struct sctp_association *asoc) 1.10175 +{ 1.10176 + struct sctp_nets *net; 1.10177 + 1.10178 + /* Validate that a timer is running somewhere */ 1.10179 + TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 1.10180 + if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 1.10181 + /* Here is a timer */ 1.10182 + return; 1.10183 + } 1.10184 + } 1.10185 + SCTP_TCB_LOCK_ASSERT(stcb); 1.10186 + /* Gak, we did not have a timer somewhere */ 1.10187 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n"); 1.10188 + if (asoc->alternate) { 1.10189 + sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate); 1.10190 + } else { 1.10191 + sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination); 1.10192 + } 1.10193 + return; 1.10194 +} 1.10195 + 1.10196 +void 1.10197 +sctp_chunk_output (struct sctp_inpcb *inp, 1.10198 + struct sctp_tcb *stcb, 1.10199 + int from_where, 1.10200 + int so_locked 1.10201 +#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 1.10202 + SCTP_UNUSED 1.10203 +#endif 1.10204 + ) 1.10205 +{ 1.10206 + /*- 1.10207 + * Ok this is the generic chunk service queue. we must do the 1.10208 + * following: 1.10209 + * - See if there are retransmits pending, if so we must 1.10210 + * do these first. 1.10211 + * - Service the stream queue that is next, moving any 1.10212 + * message (note I must get a complete message i.e. 1.10213 + * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning 1.10214 + * TSN's 1.10215 + * - Check to see if the cwnd/rwnd allows any output, if so we 1.10216 + * go ahead and fomulate and send the low level chunks. Making sure 1.10217 + * to combine any control in the control chunk queue also. 1.10218 + */ 1.10219 + struct sctp_association *asoc; 1.10220 + struct sctp_nets *net; 1.10221 + int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0; 1.10222 + unsigned int burst_cnt = 0; 1.10223 + struct timeval now; 1.10224 + int now_filled = 0; 1.10225 + int nagle_on; 1.10226 + int frag_point = sctp_get_frag_point(stcb, &stcb->asoc); 1.10227 + int un_sent = 0; 1.10228 + int fr_done; 1.10229 + unsigned int tot_frs = 0; 1.10230 + 1.10231 +#if defined(__APPLE__) 1.10232 + if (so_locked) { 1.10233 + sctp_lock_assert(SCTP_INP_SO(inp)); 1.10234 + } else { 1.10235 + sctp_unlock_assert(SCTP_INP_SO(inp)); 1.10236 + } 1.10237 +#endif 1.10238 + asoc = &stcb->asoc; 1.10239 + /* The Nagle algorithm is only applied when handling a send call. */ 1.10240 + if (from_where == SCTP_OUTPUT_FROM_USR_SEND) { 1.10241 + if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) { 1.10242 + nagle_on = 0; 1.10243 + } else { 1.10244 + nagle_on = 1; 1.10245 + } 1.10246 + } else { 1.10247 + nagle_on = 0; 1.10248 + } 1.10249 + SCTP_TCB_LOCK_ASSERT(stcb); 1.10250 + 1.10251 + un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight); 1.10252 + 1.10253 + if ((un_sent <= 0) && 1.10254 + (TAILQ_EMPTY(&asoc->control_send_queue)) && 1.10255 + (TAILQ_EMPTY(&asoc->asconf_send_queue)) && 1.10256 + (asoc->sent_queue_retran_cnt == 0)) { 1.10257 + /* Nothing to do unless there is something to be sent left */ 1.10258 + return; 1.10259 + } 1.10260 + /* Do we have something to send, data or control AND 1.10261 + * a sack timer running, if so piggy-back the sack. 
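 * Bundling it here saves a separate acknowledgement packet:
 * sctp_send_sack() (defined later in this file) parks the SACK on the
 * control queue so it rides along with whatever goes out below, and
 * the delayed-ack timer can then be stopped.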
1.10262 + */ 1.10263 + if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 1.10264 + sctp_send_sack(stcb, so_locked); 1.10265 + (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 1.10266 + } 1.10267 + while (asoc->sent_queue_retran_cnt) { 1.10268 + /*- 1.10269 + * Ok, it is retransmission time only, we send out only ONE 1.10270 + * packet with a single call off to the retran code. 1.10271 + */ 1.10272 + if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) { 1.10273 + /*- 1.10274 + * Special hook for handling cookiess discarded 1.10275 + * by peer that carried data. Send cookie-ack only 1.10276 + * and then the next call with get the retran's. 1.10277 + */ 1.10278 + (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, 1.10279 + from_where, 1.10280 + &now, &now_filled, frag_point, so_locked); 1.10281 + return; 1.10282 + } else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) { 1.10283 + /* if its not from a HB then do it */ 1.10284 + fr_done = 0; 1.10285 + ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked); 1.10286 + if (fr_done) { 1.10287 + tot_frs++; 1.10288 + } 1.10289 + } else { 1.10290 + /* 1.10291 + * its from any other place, we don't allow retran 1.10292 + * output (only control) 1.10293 + */ 1.10294 + ret = 1; 1.10295 + } 1.10296 + if (ret > 0) { 1.10297 + /* Can't send anymore */ 1.10298 + /*- 1.10299 + * now lets push out control by calling med-level 1.10300 + * output once. this assures that we WILL send HB's 1.10301 + * if queued too. 1.10302 + */ 1.10303 + (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, 1.10304 + from_where, 1.10305 + &now, &now_filled, frag_point, so_locked); 1.10306 +#ifdef SCTP_AUDITING_ENABLED 1.10307 + sctp_auditing(8, inp, stcb, NULL); 1.10308 +#endif 1.10309 + sctp_timer_validation(inp, stcb, asoc); 1.10310 + return; 1.10311 + } 1.10312 + if (ret < 0) { 1.10313 + /*- 1.10314 + * The count was off.. retran is not happening so do 1.10315 + * the normal retransmission. 1.10316 + */ 1.10317 +#ifdef SCTP_AUDITING_ENABLED 1.10318 + sctp_auditing(9, inp, stcb, NULL); 1.10319 +#endif 1.10320 + if (ret == SCTP_RETRAN_EXIT) { 1.10321 + return; 1.10322 + } 1.10323 + break; 1.10324 + } 1.10325 + if (from_where == SCTP_OUTPUT_FROM_T3) { 1.10326 + /* Only one transmission allowed out of a timeout */ 1.10327 +#ifdef SCTP_AUDITING_ENABLED 1.10328 + sctp_auditing(10, inp, stcb, NULL); 1.10329 +#endif 1.10330 + /* Push out any control */ 1.10331 + (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where, 1.10332 + &now, &now_filled, frag_point, so_locked); 1.10333 + return; 1.10334 + } 1.10335 + if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) { 1.10336 + /* Hit FR burst limit */ 1.10337 + return; 1.10338 + } 1.10339 + if ((num_out == 0) && (ret == 0)) { 1.10340 + /* No more retrans to send */ 1.10341 + break; 1.10342 + } 1.10343 + } 1.10344 +#ifdef SCTP_AUDITING_ENABLED 1.10345 + sctp_auditing(12, inp, stcb, NULL); 1.10346 +#endif 1.10347 + /* Check for bad destinations, if they exist move chunks around. */ 1.10348 + TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 1.10349 + if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 1.10350 + /*- 1.10351 + * if possible move things off of this address we 1.10352 + * still may send below due to the dormant state but 1.10353 + * we try to find an alternate address to send to 1.10354 + * and if we have one we move all queued data on the 1.10355 + * out wheel to this alternate address. 
1.10356 + */ 1.10357 + if (net->ref_count > 1) 1.10358 + sctp_move_chunks_from_net(stcb, net); 1.10359 + } else { 1.10360 + /*- 1.10361 + * if ((asoc->sat_network) || (net->addr_is_local)) 1.10362 + * { burst_limit = asoc->max_burst * 1.10363 + * SCTP_SAT_NETWORK_BURST_INCR; } 1.10364 + */ 1.10365 + if (asoc->max_burst > 0) { 1.10366 + if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) { 1.10367 + if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) { 1.10368 + /* JRS - Use the congestion control given in the congestion control module */ 1.10369 + asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst); 1.10370 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { 1.10371 + sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED); 1.10372 + } 1.10373 + SCTP_STAT_INCR(sctps_maxburstqueued); 1.10374 + } 1.10375 + net->fast_retran_ip = 0; 1.10376 + } else { 1.10377 + if (net->flight_size == 0) { 1.10378 + /* Should be decaying the cwnd here */ 1.10379 + ; 1.10380 + } 1.10381 + } 1.10382 + } 1.10383 + } 1.10384 + 1.10385 + } 1.10386 + burst_cnt = 0; 1.10387 + do { 1.10388 + error = sctp_med_chunk_output(inp, stcb, asoc, &num_out, 1.10389 + &reason_code, 0, from_where, 1.10390 + &now, &now_filled, frag_point, so_locked); 1.10391 + if (error) { 1.10392 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error); 1.10393 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { 1.10394 + sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP); 1.10395 + } 1.10396 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1.10397 + sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES); 1.10398 + sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES); 1.10399 + } 1.10400 + break; 1.10401 + } 1.10402 + SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out); 1.10403 + 1.10404 + tot_out += num_out; 1.10405 + burst_cnt++; 1.10406 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1.10407 + sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES); 1.10408 + if (num_out == 0) { 1.10409 + sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES); 1.10410 + } 1.10411 + } 1.10412 + if (nagle_on) { 1.10413 + /* 1.10414 + * When the Nagle algorithm is used, look at how much 1.10415 + * is unsent, then if its smaller than an MTU and we 1.10416 + * have data in flight we stop, except if we are 1.10417 + * handling a fragmented user message. 
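 * (Concretely, the test below holds back a sub-MTU remainder while
 * data is still in flight, unless a message is still locked on
 * sending with explicit EOR off, in which case more of it is expected
 * and we keep going.)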
1.10418 + */ 1.10419 + un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 1.10420 + (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk))); 1.10421 + if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) && 1.10422 + (stcb->asoc.total_flight > 0) && 1.10423 + ((stcb->asoc.locked_on_sending == NULL) || 1.10424 + sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) { 1.10425 + break; 1.10426 + } 1.10427 + } 1.10428 + if (TAILQ_EMPTY(&asoc->control_send_queue) && 1.10429 + TAILQ_EMPTY(&asoc->send_queue) && 1.10430 + stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) { 1.10431 + /* Nothing left to send */ 1.10432 + break; 1.10433 + } 1.10434 + if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) { 1.10435 + /* Nothing left to send */ 1.10436 + break; 1.10437 + } 1.10438 + } while (num_out && 1.10439 + ((asoc->max_burst == 0) || 1.10440 + SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) || 1.10441 + (burst_cnt < asoc->max_burst))); 1.10442 + 1.10443 + if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) { 1.10444 + if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) { 1.10445 + SCTP_STAT_INCR(sctps_maxburstqueued); 1.10446 + asoc->burst_limit_applied = 1; 1.10447 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) { 1.10448 + sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED); 1.10449 + } 1.10450 + } else { 1.10451 + asoc->burst_limit_applied = 0; 1.10452 + } 1.10453 + } 1.10454 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1.10455 + sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES); 1.10456 + } 1.10457 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n", 1.10458 + tot_out); 1.10459 + 1.10460 + /*- 1.10461 + * Now we need to clean up the control chunk chain if a ECNE is on 1.10462 + * it. It must be marked as UNSENT again so next call will continue 1.10463 + * to send it until such time that we get a CWR, to remove it. 
1.10464 + */ 1.10465 + if (stcb->asoc.ecn_echo_cnt_onq) 1.10466 + sctp_fix_ecn_echo(asoc); 1.10467 + return; 1.10468 +} 1.10469 + 1.10470 + 1.10471 +int 1.10472 +sctp_output( 1.10473 + struct sctp_inpcb *inp, 1.10474 +#if defined(__Panda__) 1.10475 + pakhandle_type m, 1.10476 +#else 1.10477 + struct mbuf *m, 1.10478 +#endif 1.10479 + struct sockaddr *addr, 1.10480 +#if defined(__Panda__) 1.10481 + pakhandle_type control, 1.10482 +#else 1.10483 + struct mbuf *control, 1.10484 +#endif 1.10485 +#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 1.10486 + struct thread *p, 1.10487 +#elif defined(__Windows__) 1.10488 + PKTHREAD p, 1.10489 +#else 1.10490 +#if defined(__APPLE__) 1.10491 + struct proc *p SCTP_UNUSED, 1.10492 +#else 1.10493 + struct proc *p, 1.10494 +#endif 1.10495 +#endif 1.10496 + int flags) 1.10497 +{ 1.10498 + if (inp == NULL) { 1.10499 + SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.10500 + return (EINVAL); 1.10501 + } 1.10502 + 1.10503 + if (inp->sctp_socket == NULL) { 1.10504 + SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.10505 + return (EINVAL); 1.10506 + } 1.10507 + return (sctp_sosend(inp->sctp_socket, 1.10508 + addr, 1.10509 + (struct uio *)NULL, 1.10510 + m, 1.10511 + control, 1.10512 +#if defined(__APPLE__) || defined(__Panda__) 1.10513 + flags 1.10514 +#else 1.10515 + flags, p 1.10516 +#endif 1.10517 + )); 1.10518 +} 1.10519 + 1.10520 +void 1.10521 +send_forward_tsn(struct sctp_tcb *stcb, 1.10522 + struct sctp_association *asoc) 1.10523 +{ 1.10524 + struct sctp_tmit_chunk *chk; 1.10525 + struct sctp_forward_tsn_chunk *fwdtsn; 1.10526 + uint32_t advance_peer_ack_point; 1.10527 + 1.10528 + SCTP_TCB_LOCK_ASSERT(stcb); 1.10529 + TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 1.10530 + if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) { 1.10531 + /* mark it to unsent */ 1.10532 + chk->sent = SCTP_DATAGRAM_UNSENT; 1.10533 + chk->snd_count = 0; 1.10534 + /* Do we correct its output location? */ 1.10535 + if (chk->whoTo) { 1.10536 + sctp_free_remote_addr(chk->whoTo); 1.10537 + chk->whoTo = NULL; 1.10538 + } 1.10539 + goto sctp_fill_in_rest; 1.10540 + } 1.10541 + } 1.10542 + /* Ok if we reach here we must build one */ 1.10543 + sctp_alloc_a_chunk(stcb, chk); 1.10544 + if (chk == NULL) { 1.10545 + return; 1.10546 + } 1.10547 + asoc->fwd_tsn_cnt++; 1.10548 + chk->copy_by_ref = 0; 1.10549 + chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN; 1.10550 + chk->rec.chunk_id.can_take_data = 0; 1.10551 + chk->asoc = asoc; 1.10552 + chk->whoTo = NULL; 1.10553 + chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 1.10554 + if (chk->data == NULL) { 1.10555 + sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1.10556 + return; 1.10557 + } 1.10558 + SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 1.10559 + chk->sent = SCTP_DATAGRAM_UNSENT; 1.10560 + chk->snd_count = 0; 1.10561 + TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next); 1.10562 + asoc->ctrl_queue_cnt++; 1.10563 +sctp_fill_in_rest: 1.10564 + /*- 1.10565 + * Here we go through and fill out the part that deals with 1.10566 + * stream/seq of the ones we skip. 
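 * The finished chunk is laid out as one struct sctp_forward_tsn_chunk
 * (header plus new_cumulative_tsn) followed by one struct sctp_strseq
 * (stream, sequence) entry per skipped ordered chunk; unordered chunks
 * are never reported. space_needed below is exactly that sum, trimmed
 * to the smallest path MTU when it would not fit.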
1.10567 + */ 1.10568 + SCTP_BUF_LEN(chk->data) = 0; 1.10569 + { 1.10570 + struct sctp_tmit_chunk *at, *tp1, *last; 1.10571 + struct sctp_strseq *strseq; 1.10572 + unsigned int cnt_of_space, i, ovh; 1.10573 + unsigned int space_needed; 1.10574 + unsigned int cnt_of_skipped = 0; 1.10575 + 1.10576 + TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) { 1.10577 + if ((at->sent != SCTP_FORWARD_TSN_SKIP) && 1.10578 + (at->sent != SCTP_DATAGRAM_NR_ACKED)) { 1.10579 + /* no more to look at */ 1.10580 + break; 1.10581 + } 1.10582 + if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) { 1.10583 + /* We don't report these */ 1.10584 + continue; 1.10585 + } 1.10586 + cnt_of_skipped++; 1.10587 + } 1.10588 + space_needed = (sizeof(struct sctp_forward_tsn_chunk) + 1.10589 + (cnt_of_skipped * sizeof(struct sctp_strseq))); 1.10590 + 1.10591 + cnt_of_space = M_TRAILINGSPACE(chk->data); 1.10592 + 1.10593 + if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1.10594 + ovh = SCTP_MIN_OVERHEAD; 1.10595 + } else { 1.10596 + ovh = SCTP_MIN_V4_OVERHEAD; 1.10597 + } 1.10598 + if (cnt_of_space > (asoc->smallest_mtu - ovh)) { 1.10599 + /* trim to a mtu size */ 1.10600 + cnt_of_space = asoc->smallest_mtu - ovh; 1.10601 + } 1.10602 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 1.10603 + sctp_misc_ints(SCTP_FWD_TSN_CHECK, 1.10604 + 0xff, 0, cnt_of_skipped, 1.10605 + asoc->advanced_peer_ack_point); 1.10606 + 1.10607 + } 1.10608 + advance_peer_ack_point = asoc->advanced_peer_ack_point; 1.10609 + if (cnt_of_space < space_needed) { 1.10610 + /*- 1.10611 + * ok we must trim down the chunk by lowering the 1.10612 + * advance peer ack point. 1.10613 + */ 1.10614 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 1.10615 + sctp_misc_ints(SCTP_FWD_TSN_CHECK, 1.10616 + 0xff, 0xff, cnt_of_space, 1.10617 + space_needed); 1.10618 + } 1.10619 + cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk); 1.10620 + cnt_of_skipped /= sizeof(struct sctp_strseq); 1.10621 + /*- 1.10622 + * Go through and find the TSN that will be the one 1.10623 + * we report. 1.10624 + */ 1.10625 + at = TAILQ_FIRST(&asoc->sent_queue); 1.10626 + if (at != NULL) { 1.10627 + for (i = 0; i < cnt_of_skipped; i++) { 1.10628 + tp1 = TAILQ_NEXT(at, sctp_next); 1.10629 + if (tp1 == NULL) { 1.10630 + break; 1.10631 + } 1.10632 + at = tp1; 1.10633 + } 1.10634 + } 1.10635 + if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 1.10636 + sctp_misc_ints(SCTP_FWD_TSN_CHECK, 1.10637 + 0xff, cnt_of_skipped, at->rec.data.TSN_seq, 1.10638 + asoc->advanced_peer_ack_point); 1.10639 + } 1.10640 + last = at; 1.10641 + /*- 1.10642 + * last now points to last one I can report, update 1.10643 + * peer ack point 1.10644 + */ 1.10645 + if (last) 1.10646 + advance_peer_ack_point = last->rec.data.TSN_seq; 1.10647 + space_needed = sizeof(struct sctp_forward_tsn_chunk) + 1.10648 + cnt_of_skipped * sizeof(struct sctp_strseq); 1.10649 + } 1.10650 + chk->send_size = space_needed; 1.10651 + /* Setup the chunk */ 1.10652 + fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *); 1.10653 + fwdtsn->ch.chunk_length = htons(chk->send_size); 1.10654 + fwdtsn->ch.chunk_flags = 0; 1.10655 + fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN; 1.10656 + fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point); 1.10657 + SCTP_BUF_LEN(chk->data) = chk->send_size; 1.10658 + fwdtsn++; 1.10659 + /*- 1.10660 + * Move pointer to after the fwdtsn and transfer to the 1.10661 + * strseq pointer. 
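 * (fwdtsn++ advances by sizeof(struct sctp_forward_tsn_chunk), so the
 * cast lands on the first stream/sequence slot right after the fixed
 * part of the chunk.)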
1.10662 + */ 1.10663 + strseq = (struct sctp_strseq *)fwdtsn; 1.10664 + /*- 1.10665 + * Now populate the strseq list. This is done blindly 1.10666 + * without pulling out duplicate stream info. This is 1.10667 + * inefficient but won't harm the process since the peer will 1.10668 + * look at these in sequence and will thus release anything. 1.10669 + * It could mean we exceed the PMTU and chop off some that 1.10670 + * we could have included.. but this is unlikely (aka 1432/4 1.10671 + * would mean 300+ stream seq's would have to be reported in 1.10672 + * one FWD-TSN). With a bit of work we can later FIX this to 1.10673 + * optimize and pull out duplicates.. but it does add more 1.10674 + * overhead. So for now... not! 1.10675 + */ 1.10676 + at = TAILQ_FIRST(&asoc->sent_queue); 1.10677 + for (i = 0; i < cnt_of_skipped; i++) { 1.10678 + tp1 = TAILQ_NEXT(at, sctp_next); 1.10679 + if (tp1 == NULL) 1.10680 + break; 1.10681 + if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) { 1.10682 + /* We don't report these */ 1.10683 + i--; 1.10684 + at = tp1; 1.10685 + continue; 1.10686 + } 1.10687 + if (at->rec.data.TSN_seq == advance_peer_ack_point) { 1.10688 + at->rec.data.fwd_tsn_cnt = 0; 1.10689 + } 1.10690 + strseq->stream = ntohs(at->rec.data.stream_number); 1.10691 + strseq->sequence = ntohs(at->rec.data.stream_seq); 1.10692 + strseq++; 1.10693 + at = tp1; 1.10694 + } 1.10695 + } 1.10696 + return; 1.10697 +} 1.10698 + 1.10699 +void 1.10700 +sctp_send_sack(struct sctp_tcb *stcb, int so_locked 1.10701 +#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 1.10702 + SCTP_UNUSED 1.10703 +#endif 1.10704 +) 1.10705 +{ 1.10706 + /*- 1.10707 + * Queue up a SACK or NR-SACK in the control queue. 1.10708 + * We must first check to see if a SACK or NR-SACK is 1.10709 + * somehow on the control queue. 1.10710 + * If so, we will take and remove the old one.
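 * Whether a plain SACK or an NR-SACK is built depends on nr_sack being
 * enabled locally and supported by the peer (see the type selection
 * below). Callers simply invoke, as sctp_chunk_output() does above,
 *
 *     sctp_send_sack(stcb, so_locked);
 *
 * and the resulting chunk sits on the control queue until the next
 * packet goes out.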
1.10711 + */ 1.10712 + struct sctp_association *asoc; 1.10713 + struct sctp_tmit_chunk *chk, *a_chk; 1.10714 + struct sctp_sack_chunk *sack; 1.10715 + struct sctp_nr_sack_chunk *nr_sack; 1.10716 + struct sctp_gap_ack_block *gap_descriptor; 1.10717 + struct sack_track *selector; 1.10718 + int mergeable = 0; 1.10719 + int offset; 1.10720 + caddr_t limit; 1.10721 + uint32_t *dup; 1.10722 + int limit_reached = 0; 1.10723 + unsigned int i, siz, j; 1.10724 + unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space; 1.10725 + int num_dups = 0; 1.10726 + int space_req; 1.10727 + uint32_t highest_tsn; 1.10728 + uint8_t flags; 1.10729 + uint8_t type; 1.10730 + uint8_t tsn_map; 1.10731 + 1.10732 + if ((stcb->asoc.sctp_nr_sack_on_off == 1) && 1.10733 + (stcb->asoc.peer_supports_nr_sack == 1)) { 1.10734 + type = SCTP_NR_SELECTIVE_ACK; 1.10735 + } else { 1.10736 + type = SCTP_SELECTIVE_ACK; 1.10737 + } 1.10738 + a_chk = NULL; 1.10739 + asoc = &stcb->asoc; 1.10740 + SCTP_TCB_LOCK_ASSERT(stcb); 1.10741 + if (asoc->last_data_chunk_from == NULL) { 1.10742 + /* Hmm we never received anything */ 1.10743 + return; 1.10744 + } 1.10745 + sctp_slide_mapping_arrays(stcb); 1.10746 + sctp_set_rwnd(stcb, asoc); 1.10747 + TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 1.10748 + if (chk->rec.chunk_id.id == type) { 1.10749 + /* Hmm, found a sack already on queue, remove it */ 1.10750 + TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 1.10751 + asoc->ctrl_queue_cnt--; 1.10752 + a_chk = chk; 1.10753 + if (a_chk->data) { 1.10754 + sctp_m_freem(a_chk->data); 1.10755 + a_chk->data = NULL; 1.10756 + } 1.10757 + if (a_chk->whoTo) { 1.10758 + sctp_free_remote_addr(a_chk->whoTo); 1.10759 + a_chk->whoTo = NULL; 1.10760 + } 1.10761 + break; 1.10762 + } 1.10763 + } 1.10764 + if (a_chk == NULL) { 1.10765 + sctp_alloc_a_chunk(stcb, a_chk); 1.10766 + if (a_chk == NULL) { 1.10767 + /* No memory so we drop the idea, and set a timer */ 1.10768 + if (stcb->asoc.delayed_ack) { 1.10769 + sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 1.10770 + stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5); 1.10771 + sctp_timer_start(SCTP_TIMER_TYPE_RECV, 1.10772 + stcb->sctp_ep, stcb, NULL); 1.10773 + } else { 1.10774 + stcb->asoc.send_sack = 1; 1.10775 + } 1.10776 + return; 1.10777 + } 1.10778 + a_chk->copy_by_ref = 0; 1.10779 + a_chk->rec.chunk_id.id = type; 1.10780 + a_chk->rec.chunk_id.can_take_data = 1; 1.10781 + } 1.10782 + /* Clear our pkt counts */ 1.10783 + asoc->data_pkts_seen = 0; 1.10784 + 1.10785 + a_chk->asoc = asoc; 1.10786 + a_chk->snd_count = 0; 1.10787 + a_chk->send_size = 0; /* fill in later */ 1.10788 + a_chk->sent = SCTP_DATAGRAM_UNSENT; 1.10789 + a_chk->whoTo = NULL; 1.10790 + 1.10791 + if ((asoc->numduptsns) || 1.10792 + (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE))) { 1.10793 + /*- 1.10794 + * Ok, we have some duplicates or the destination for the 1.10795 + * sack is unreachable, lets see if we can select an 1.10796 + * alternate than asoc->last_data_chunk_from 1.10797 + */ 1.10798 + if ((asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE) && 1.10799 + (asoc->used_alt_onsack > asoc->numnets)) { 1.10800 + /* We used an alt last time, don't this time */ 1.10801 + a_chk->whoTo = NULL; 1.10802 + } else { 1.10803 + asoc->used_alt_onsack++; 1.10804 + a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0); 1.10805 + } 1.10806 + if (a_chk->whoTo == NULL) { 1.10807 + /* Nope, no alternate */ 1.10808 + a_chk->whoTo = asoc->last_data_chunk_from; 1.10809 + 
asoc->used_alt_onsack = 0; 1.10810 + } 1.10811 + } else { 1.10812 + /* 1.10813 + * No duplicates so we use the last place we received data 1.10814 + * from. 1.10815 + */ 1.10816 + asoc->used_alt_onsack = 0; 1.10817 + a_chk->whoTo = asoc->last_data_chunk_from; 1.10818 + } 1.10819 + if (a_chk->whoTo) { 1.10820 + atomic_add_int(&a_chk->whoTo->ref_count, 1); 1.10821 + } 1.10822 + if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) { 1.10823 + highest_tsn = asoc->highest_tsn_inside_map; 1.10824 + } else { 1.10825 + highest_tsn = asoc->highest_tsn_inside_nr_map; 1.10826 + } 1.10827 + if (highest_tsn == asoc->cumulative_tsn) { 1.10828 + /* no gaps */ 1.10829 + if (type == SCTP_SELECTIVE_ACK) { 1.10830 + space_req = sizeof(struct sctp_sack_chunk); 1.10831 + } else { 1.10832 + space_req = sizeof(struct sctp_nr_sack_chunk); 1.10833 + } 1.10834 + } else { 1.10835 + /* gaps get a cluster */ 1.10836 + space_req = MCLBYTES; 1.10837 + } 1.10838 + /* Ok now lets formulate a MBUF with our sack */ 1.10839 + a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA); 1.10840 + if ((a_chk->data == NULL) || 1.10841 + (a_chk->whoTo == NULL)) { 1.10842 + /* rats, no mbuf memory */ 1.10843 + if (a_chk->data) { 1.10844 + /* was a problem with the destination */ 1.10845 + sctp_m_freem(a_chk->data); 1.10846 + a_chk->data = NULL; 1.10847 + } 1.10848 + sctp_free_a_chunk(stcb, a_chk, so_locked); 1.10849 + /* sa_ignore NO_NULL_CHK */ 1.10850 + if (stcb->asoc.delayed_ack) { 1.10851 + sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 1.10852 + stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6); 1.10853 + sctp_timer_start(SCTP_TIMER_TYPE_RECV, 1.10854 + stcb->sctp_ep, stcb, NULL); 1.10855 + } else { 1.10856 + stcb->asoc.send_sack = 1; 1.10857 + } 1.10858 + return; 1.10859 + } 1.10860 + /* ok, lets go through and fill it in */ 1.10861 + SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD); 1.10862 + space = M_TRAILINGSPACE(a_chk->data); 1.10863 + if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) { 1.10864 + space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD); 1.10865 + } 1.10866 + limit = mtod(a_chk->data, caddr_t); 1.10867 + limit += space; 1.10868 + 1.10869 + flags = 0; 1.10870 + 1.10871 + if ((asoc->sctp_cmt_on_off > 0) && 1.10872 + SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 1.10873 + /*- 1.10874 + * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been 1.10875 + * received, then set high bit to 1, else 0. Reset 1.10876 + * pkts_rcvd. 
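 * (The two-packet count is shifted into the upper bits of the SACK
 * chunk flags and then cleared for the next round; a CMT sender uses
 * that hint when weighing missing reports under delayed acks.)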
1.10877 + */ 1.10878 + flags |= (asoc->cmt_dac_pkts_rcvd << 6); 1.10879 + asoc->cmt_dac_pkts_rcvd = 0; 1.10880 + } 1.10881 +#ifdef SCTP_ASOCLOG_OF_TSNS 1.10882 + stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn; 1.10883 + stcb->asoc.cumack_log_atsnt++; 1.10884 + if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) { 1.10885 + stcb->asoc.cumack_log_atsnt = 0; 1.10886 + } 1.10887 +#endif 1.10888 + /* reset the readers interpretation */ 1.10889 + stcb->freed_by_sorcv_sincelast = 0; 1.10890 + 1.10891 + if (type == SCTP_SELECTIVE_ACK) { 1.10892 + sack = mtod(a_chk->data, struct sctp_sack_chunk *); 1.10893 + nr_sack = NULL; 1.10894 + gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk)); 1.10895 + if (highest_tsn > asoc->mapping_array_base_tsn) { 1.10896 + siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8; 1.10897 + } else { 1.10898 + siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8; 1.10899 + } 1.10900 + } else { 1.10901 + sack = NULL; 1.10902 + nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *); 1.10903 + gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk)); 1.10904 + if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) { 1.10905 + siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8; 1.10906 + } else { 1.10907 + siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8; 1.10908 + } 1.10909 + } 1.10910 + 1.10911 + if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) { 1.10912 + offset = 1; 1.10913 + } else { 1.10914 + offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn; 1.10915 + } 1.10916 + if (((type == SCTP_SELECTIVE_ACK) && 1.10917 + SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) || 1.10918 + ((type == SCTP_NR_SELECTIVE_ACK) && 1.10919 + SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) { 1.10920 + /* we have a gap .. maybe */ 1.10921 + for (i = 0; i < siz; i++) { 1.10922 + tsn_map = asoc->mapping_array[i]; 1.10923 + if (type == SCTP_SELECTIVE_ACK) { 1.10924 + tsn_map |= asoc->nr_mapping_array[i]; 1.10925 + } 1.10926 + if (i == 0) { 1.10927 + /* 1.10928 + * Clear all bits corresponding to TSNs 1.10929 + * smaller or equal to the cumulative TSN. 1.10930 + */ 1.10931 + tsn_map &= (~0 << (1 - offset)); 1.10932 + } 1.10933 + selector = &sack_array[tsn_map]; 1.10934 + if (mergeable && selector->right_edge) { 1.10935 + /* 1.10936 + * Backup, left and right edges were ok to 1.10937 + * merge. 
1.10938 + */ 1.10939 + num_gap_blocks--; 1.10940 + gap_descriptor--; 1.10941 + } 1.10942 + if (selector->num_entries == 0) 1.10943 + mergeable = 0; 1.10944 + else { 1.10945 + for (j = 0; j < selector->num_entries; j++) { 1.10946 + if (mergeable && selector->right_edge) { 1.10947 + /* 1.10948 + * do a merge by NOT setting 1.10949 + * the left side 1.10950 + */ 1.10951 + mergeable = 0; 1.10952 + } else { 1.10953 + /* 1.10954 + * no merge, set the left 1.10955 + * side 1.10956 + */ 1.10957 + mergeable = 0; 1.10958 + gap_descriptor->start = htons((selector->gaps[j].start + offset)); 1.10959 + } 1.10960 + gap_descriptor->end = htons((selector->gaps[j].end + offset)); 1.10961 + num_gap_blocks++; 1.10962 + gap_descriptor++; 1.10963 + if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) { 1.10964 + /* no more room */ 1.10965 + limit_reached = 1; 1.10966 + break; 1.10967 + } 1.10968 + } 1.10969 + if (selector->left_edge) { 1.10970 + mergeable = 1; 1.10971 + } 1.10972 + } 1.10973 + if (limit_reached) { 1.10974 + /* Reached the limit stop */ 1.10975 + break; 1.10976 + } 1.10977 + offset += 8; 1.10978 + } 1.10979 + } 1.10980 + if ((type == SCTP_NR_SELECTIVE_ACK) && 1.10981 + (limit_reached == 0)) { 1.10982 + 1.10983 + mergeable = 0; 1.10984 + 1.10985 + if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) { 1.10986 + siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8; 1.10987 + } else { 1.10988 + siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8; 1.10989 + } 1.10990 + 1.10991 + if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) { 1.10992 + offset = 1; 1.10993 + } else { 1.10994 + offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn; 1.10995 + } 1.10996 + if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) { 1.10997 + /* we have a gap .. maybe */ 1.10998 + for (i = 0; i < siz; i++) { 1.10999 + tsn_map = asoc->nr_mapping_array[i]; 1.11000 + if (i == 0) { 1.11001 + /* 1.11002 + * Clear all bits corresponding to TSNs 1.11003 + * smaller or equal to the cumulative TSN. 1.11004 + */ 1.11005 + tsn_map &= (~0 << (1 - offset)); 1.11006 + } 1.11007 + selector = &sack_array[tsn_map]; 1.11008 + if (mergeable && selector->right_edge) { 1.11009 + /* 1.11010 + * Backup, left and right edges were ok to 1.11011 + * merge. 
1.11012 + */ 1.11013 + num_nr_gap_blocks--; 1.11014 + gap_descriptor--; 1.11015 + } 1.11016 + if (selector->num_entries == 0) 1.11017 + mergeable = 0; 1.11018 + else { 1.11019 + for (j = 0; j < selector->num_entries; j++) { 1.11020 + if (mergeable && selector->right_edge) { 1.11021 + /* 1.11022 + * do a merge by NOT setting 1.11023 + * the left side 1.11024 + */ 1.11025 + mergeable = 0; 1.11026 + } else { 1.11027 + /* 1.11028 + * no merge, set the left 1.11029 + * side 1.11030 + */ 1.11031 + mergeable = 0; 1.11032 + gap_descriptor->start = htons((selector->gaps[j].start + offset)); 1.11033 + } 1.11034 + gap_descriptor->end = htons((selector->gaps[j].end + offset)); 1.11035 + num_nr_gap_blocks++; 1.11036 + gap_descriptor++; 1.11037 + if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) { 1.11038 + /* no more room */ 1.11039 + limit_reached = 1; 1.11040 + break; 1.11041 + } 1.11042 + } 1.11043 + if (selector->left_edge) { 1.11044 + mergeable = 1; 1.11045 + } 1.11046 + } 1.11047 + if (limit_reached) { 1.11048 + /* Reached the limit stop */ 1.11049 + break; 1.11050 + } 1.11051 + offset += 8; 1.11052 + } 1.11053 + } 1.11054 + } 1.11055 + /* now we must add any dups we are going to report. */ 1.11056 + if ((limit_reached == 0) && (asoc->numduptsns)) { 1.11057 + dup = (uint32_t *) gap_descriptor; 1.11058 + for (i = 0; i < asoc->numduptsns; i++) { 1.11059 + *dup = htonl(asoc->dup_tsns[i]); 1.11060 + dup++; 1.11061 + num_dups++; 1.11062 + if (((caddr_t)dup + sizeof(uint32_t)) > limit) { 1.11063 + /* no more room */ 1.11064 + break; 1.11065 + } 1.11066 + } 1.11067 + asoc->numduptsns = 0; 1.11068 + } 1.11069 + /* 1.11070 + * now that the chunk is prepared queue it to the control chunk 1.11071 + * queue. 1.11072 + */ 1.11073 + if (type == SCTP_SELECTIVE_ACK) { 1.11074 + a_chk->send_size = sizeof(struct sctp_sack_chunk) + 1.11075 + (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) + 1.11076 + num_dups * sizeof(int32_t); 1.11077 + SCTP_BUF_LEN(a_chk->data) = a_chk->send_size; 1.11078 + sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn); 1.11079 + sack->sack.a_rwnd = htonl(asoc->my_rwnd); 1.11080 + sack->sack.num_gap_ack_blks = htons(num_gap_blocks); 1.11081 + sack->sack.num_dup_tsns = htons(num_dups); 1.11082 + sack->ch.chunk_type = type; 1.11083 + sack->ch.chunk_flags = flags; 1.11084 + sack->ch.chunk_length = htons(a_chk->send_size); 1.11085 + } else { 1.11086 + a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) + 1.11087 + (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) + 1.11088 + num_dups * sizeof(int32_t); 1.11089 + SCTP_BUF_LEN(a_chk->data) = a_chk->send_size; 1.11090 + nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn); 1.11091 + nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd); 1.11092 + nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks); 1.11093 + nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks); 1.11094 + nr_sack->nr_sack.num_dup_tsns = htons(num_dups); 1.11095 + nr_sack->nr_sack.reserved = 0; 1.11096 + nr_sack->ch.chunk_type = type; 1.11097 + nr_sack->ch.chunk_flags = flags; 1.11098 + nr_sack->ch.chunk_length = htons(a_chk->send_size); 1.11099 + } 1.11100 + TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next); 1.11101 + asoc->my_last_reported_rwnd = asoc->my_rwnd; 1.11102 + asoc->ctrl_queue_cnt++; 1.11103 + asoc->send_sack = 0; 1.11104 + SCTP_STAT_INCR(sctps_sendsacks); 1.11105 + return; 1.11106 +} 1.11107 + 1.11108 +void 1.11109 +sctp_send_abort_tcb(struct sctp_tcb *stcb, struct 
mbuf *operr, int so_locked 1.11110 +#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 1.11111 + SCTP_UNUSED 1.11112 +#endif 1.11113 + ) 1.11114 +{ 1.11115 + struct mbuf *m_abort, *m, *m_last; 1.11116 + struct mbuf *m_out, *m_end = NULL; 1.11117 + struct sctp_abort_chunk *abort; 1.11118 + struct sctp_auth_chunk *auth = NULL; 1.11119 + struct sctp_nets *net; 1.11120 + uint32_t vtag; 1.11121 + uint32_t auth_offset = 0; 1.11122 + uint16_t cause_len, chunk_len, padding_len; 1.11123 + 1.11124 +#if defined(__APPLE__) 1.11125 + if (so_locked) { 1.11126 + sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep)); 1.11127 + } else { 1.11128 + sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep)); 1.11129 + } 1.11130 +#endif 1.11131 + SCTP_TCB_LOCK_ASSERT(stcb); 1.11132 + /*- 1.11133 + * Add an AUTH chunk, if chunk requires it and save the offset into 1.11134 + * the chain for AUTH 1.11135 + */ 1.11136 + if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION, 1.11137 + stcb->asoc.peer_auth_chunks)) { 1.11138 + m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset, 1.11139 + stcb, SCTP_ABORT_ASSOCIATION); 1.11140 + SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 1.11141 + } else { 1.11142 + m_out = NULL; 1.11143 + } 1.11144 + m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER); 1.11145 + if (m_abort == NULL) { 1.11146 + if (m_out) { 1.11147 + sctp_m_freem(m_out); 1.11148 + } 1.11149 + if (operr) { 1.11150 + sctp_m_freem(operr); 1.11151 + } 1.11152 + return; 1.11153 + } 1.11154 + /* link in any error */ 1.11155 + SCTP_BUF_NEXT(m_abort) = operr; 1.11156 + cause_len = 0; 1.11157 + m_last = NULL; 1.11158 + for (m = operr; m; m = SCTP_BUF_NEXT(m)) { 1.11159 + cause_len += (uint16_t)SCTP_BUF_LEN(m); 1.11160 + if (SCTP_BUF_NEXT(m) == NULL) { 1.11161 + m_last = m; 1.11162 + } 1.11163 + } 1.11164 + SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk); 1.11165 + chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len; 1.11166 + padding_len = SCTP_SIZE32(chunk_len) - chunk_len; 1.11167 + if (m_out == NULL) { 1.11168 + /* NO Auth chunk prepended, so reserve space in front */ 1.11169 + SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD); 1.11170 + m_out = m_abort; 1.11171 + } else { 1.11172 + /* Put AUTH chunk at the front of the chain */ 1.11173 + SCTP_BUF_NEXT(m_end) = m_abort; 1.11174 + } 1.11175 + if (stcb->asoc.alternate) { 1.11176 + net = stcb->asoc.alternate; 1.11177 + } else { 1.11178 + net = stcb->asoc.primary_destination; 1.11179 + } 1.11180 + /* Fill in the ABORT chunk header. */ 1.11181 + abort = mtod(m_abort, struct sctp_abort_chunk *); 1.11182 + abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION; 1.11183 + if (stcb->asoc.peer_vtag == 0) { 1.11184 + /* This happens iff the assoc is in COOKIE-WAIT state. */ 1.11185 + vtag = stcb->asoc.my_vtag; 1.11186 + abort->ch.chunk_flags = SCTP_HAD_NO_TCB; 1.11187 + } else { 1.11188 + vtag = stcb->asoc.peer_vtag; 1.11189 + abort->ch.chunk_flags = 0; 1.11190 + } 1.11191 + abort->ch.chunk_length = htons(chunk_len); 1.11192 + /* Add padding, if necessary. 
*/ 1.11193 + if (padding_len > 0) { 1.11194 + if ((m_last == NULL) || sctp_add_pad_tombuf(m_last, padding_len)) { 1.11195 + sctp_m_freem(m_out); 1.11196 + return; 1.11197 + } 1.11198 + } 1.11199 + (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net, 1.11200 + (struct sockaddr *)&net->ro._l_addr, 1.11201 + m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0, 1.11202 + stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag), 1.11203 + stcb->asoc.primary_destination->port, NULL, 1.11204 +#if defined(__FreeBSD__) 1.11205 + 0, 0, 1.11206 +#endif 1.11207 + so_locked); 1.11208 + SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 1.11209 +} 1.11210 + 1.11211 +void 1.11212 +sctp_send_shutdown_complete(struct sctp_tcb *stcb, 1.11213 + struct sctp_nets *net, 1.11214 + int reflect_vtag) 1.11215 +{ 1.11216 + /* formulate and SEND a SHUTDOWN-COMPLETE */ 1.11217 + struct mbuf *m_shutdown_comp; 1.11218 + struct sctp_shutdown_complete_chunk *shutdown_complete; 1.11219 + uint32_t vtag; 1.11220 + uint8_t flags; 1.11221 + 1.11222 + m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER); 1.11223 + if (m_shutdown_comp == NULL) { 1.11224 + /* no mbuf's */ 1.11225 + return; 1.11226 + } 1.11227 + if (reflect_vtag) { 1.11228 + flags = SCTP_HAD_NO_TCB; 1.11229 + vtag = stcb->asoc.my_vtag; 1.11230 + } else { 1.11231 + flags = 0; 1.11232 + vtag = stcb->asoc.peer_vtag; 1.11233 + } 1.11234 + shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *); 1.11235 + shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE; 1.11236 + shutdown_complete->ch.chunk_flags = flags; 1.11237 + shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk)); 1.11238 + SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk); 1.11239 + (void)sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net, 1.11240 + (struct sockaddr *)&net->ro._l_addr, 1.11241 + m_shutdown_comp, 0, NULL, 0, 1, 0, 0, 1.11242 + stcb->sctp_ep->sctp_lport, stcb->rport, 1.11243 + htonl(vtag), 1.11244 + net->port, NULL, 1.11245 +#if defined(__FreeBSD__) 1.11246 + 0, 0, 1.11247 +#endif 1.11248 + SCTP_SO_NOT_LOCKED); 1.11249 + SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 1.11250 + return; 1.11251 +} 1.11252 + 1.11253 +#if defined(__FreeBSD__) 1.11254 +static void 1.11255 +sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, 1.11256 + struct sctphdr *sh, uint32_t vtag, 1.11257 + uint8_t type, struct mbuf *cause, 1.11258 + uint8_t use_mflowid, uint32_t mflowid, 1.11259 + uint32_t vrf_id, uint16_t port) 1.11260 +#else 1.11261 +static void 1.11262 +sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst, 1.11263 + struct sctphdr *sh, uint32_t vtag, 1.11264 + uint8_t type, struct mbuf *cause, 1.11265 + uint32_t vrf_id SCTP_UNUSED, uint16_t port) 1.11266 +#endif 1.11267 +{ 1.11268 +#ifdef __Panda__ 1.11269 + pakhandle_type o_pak; 1.11270 +#else 1.11271 + struct mbuf *o_pak; 1.11272 +#endif 1.11273 + struct mbuf *mout; 1.11274 + struct sctphdr *shout; 1.11275 + struct sctp_chunkhdr *ch; 1.11276 + struct udphdr *udp; 1.11277 + int len, cause_len, padding_len; 1.11278 +#if defined(INET) || defined(INET6) 1.11279 + int ret; 1.11280 +#endif 1.11281 +#ifdef INET 1.11282 +#if defined(__APPLE__) || defined(__Panda__) 1.11283 + sctp_route_t ro; 1.11284 +#endif 1.11285 + struct sockaddr_in *src_sin, *dst_sin; 1.11286 + struct ip *ip; 1.11287 +#endif 1.11288 +#ifdef INET6 1.11289 + struct sockaddr_in6 *src_sin6, *dst_sin6; 1.11290 + struct ip6_hdr 
*ip6; 1.11291 +#endif 1.11292 + 1.11293 + /* Compute the length of the cause and add final padding. */ 1.11294 + cause_len = 0; 1.11295 + if (cause != NULL) { 1.11296 + struct mbuf *m_at, *m_last = NULL; 1.11297 + 1.11298 + for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 1.11299 + if (SCTP_BUF_NEXT(m_at) == NULL) 1.11300 + m_last = m_at; 1.11301 + cause_len += SCTP_BUF_LEN(m_at); 1.11302 + } 1.11303 + padding_len = cause_len % 4; 1.11304 + if (padding_len != 0) { 1.11305 + padding_len = 4 - padding_len; 1.11306 + } 1.11307 + if (padding_len != 0) { 1.11308 + if (sctp_add_pad_tombuf(m_last, padding_len)) { 1.11309 + sctp_m_freem(cause); 1.11310 + return; 1.11311 + } 1.11312 + } 1.11313 + } else { 1.11314 + padding_len = 0; 1.11315 + } 1.11316 + /* Get an mbuf for the header. */ 1.11317 + len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 1.11318 + switch (dst->sa_family) { 1.11319 +#ifdef INET 1.11320 + case AF_INET: 1.11321 + len += sizeof(struct ip); 1.11322 + break; 1.11323 +#endif 1.11324 +#ifdef INET6 1.11325 + case AF_INET6: 1.11326 + len += sizeof(struct ip6_hdr); 1.11327 + break; 1.11328 +#endif 1.11329 + default: 1.11330 + break; 1.11331 + } 1.11332 + if (port) { 1.11333 + len += sizeof(struct udphdr); 1.11334 + } 1.11335 +#if defined(__APPLE__) 1.11336 +#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) 1.11337 + mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA); 1.11338 +#else 1.11339 + mout = sctp_get_mbuf_for_msg(len + SCTP_MAX_LINKHDR, 1, M_NOWAIT, 1, MT_DATA); 1.11340 +#endif 1.11341 +#else 1.11342 + mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA); 1.11343 +#endif 1.11344 + if (mout == NULL) { 1.11345 + if (cause) { 1.11346 + sctp_m_freem(cause); 1.11347 + } 1.11348 + return; 1.11349 + } 1.11350 +#if defined(__APPLE__) 1.11351 +#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) 1.11352 + SCTP_BUF_RESV_UF(mout, max_linkhdr); 1.11353 +#else 1.11354 + SCTP_BUF_RESV_UF(mout, SCTP_MAX_LINKHDR); 1.11355 +#endif 1.11356 +#else 1.11357 + SCTP_BUF_RESV_UF(mout, max_linkhdr); 1.11358 +#endif 1.11359 + SCTP_BUF_LEN(mout) = len; 1.11360 + SCTP_BUF_NEXT(mout) = cause; 1.11361 +#if defined(__FreeBSD__) 1.11362 + if (use_mflowid != 0) { 1.11363 + mout->m_pkthdr.flowid = mflowid; 1.11364 + mout->m_flags |= M_FLOWID; 1.11365 + } 1.11366 +#endif 1.11367 +#ifdef INET 1.11368 + ip = NULL; 1.11369 +#endif 1.11370 +#ifdef INET6 1.11371 + ip6 = NULL; 1.11372 +#endif 1.11373 + switch (dst->sa_family) { 1.11374 +#ifdef INET 1.11375 + case AF_INET: 1.11376 + src_sin = (struct sockaddr_in *)src; 1.11377 + dst_sin = (struct sockaddr_in *)dst; 1.11378 + ip = mtod(mout, struct ip *); 1.11379 + ip->ip_v = IPVERSION; 1.11380 + ip->ip_hl = (sizeof(struct ip) >> 2); 1.11381 + ip->ip_tos = 0; 1.11382 +#if defined(__FreeBSD__) 1.11383 + ip->ip_id = ip_newid(); 1.11384 +#elif defined(__APPLE__) 1.11385 +#if RANDOM_IP_ID 1.11386 + ip->ip_id = ip_randomid(); 1.11387 +#else 1.11388 + ip->ip_id = htons(ip_id++); 1.11389 +#endif 1.11390 +#else 1.11391 + ip->ip_id = htons(ip_id++); 1.11392 +#endif 1.11393 + ip->ip_off = 0; 1.11394 + ip->ip_ttl = MODULE_GLOBAL(ip_defttl); 1.11395 + if (port) { 1.11396 + ip->ip_p = IPPROTO_UDP; 1.11397 + } else { 1.11398 + ip->ip_p = IPPROTO_SCTP; 1.11399 + } 1.11400 + ip->ip_src.s_addr = dst_sin->sin_addr.s_addr; 1.11401 + ip->ip_dst.s_addr = src_sin->sin_addr.s_addr; 1.11402 + ip->ip_sum = 0; 1.11403 + len = sizeof(struct ip); 1.11404 + shout = (struct sctphdr *)((caddr_t)ip + len); 1.11405 + break; 1.11406 
+#endif 1.11407 +#ifdef INET6 1.11408 + case AF_INET6: 1.11409 + src_sin6 = (struct sockaddr_in6 *)src; 1.11410 + dst_sin6 = (struct sockaddr_in6 *)dst; 1.11411 + ip6 = mtod(mout, struct ip6_hdr *); 1.11412 + ip6->ip6_flow = htonl(0x60000000); 1.11413 +#if defined(__FreeBSD__) 1.11414 + if (V_ip6_auto_flowlabel) { 1.11415 + ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK); 1.11416 + } 1.11417 +#endif 1.11418 +#if defined(__Userspace__) 1.11419 + ip6->ip6_hlim = IPv6_HOP_LIMIT; 1.11420 +#else 1.11421 + ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim); 1.11422 +#endif 1.11423 + if (port) { 1.11424 + ip6->ip6_nxt = IPPROTO_UDP; 1.11425 + } else { 1.11426 + ip6->ip6_nxt = IPPROTO_SCTP; 1.11427 + } 1.11428 + ip6->ip6_src = dst_sin6->sin6_addr; 1.11429 + ip6->ip6_dst = src_sin6->sin6_addr; 1.11430 + len = sizeof(struct ip6_hdr); 1.11431 + shout = (struct sctphdr *)((caddr_t)ip6 + len); 1.11432 + break; 1.11433 +#endif 1.11434 + default: 1.11435 + len = 0; 1.11436 + shout = mtod(mout, struct sctphdr *); 1.11437 + break; 1.11438 + } 1.11439 + if (port) { 1.11440 + if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) { 1.11441 + sctp_m_freem(mout); 1.11442 + return; 1.11443 + } 1.11444 + udp = (struct udphdr *)shout; 1.11445 + udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)); 1.11446 + udp->uh_dport = port; 1.11447 + udp->uh_sum = 0; 1.11448 + udp->uh_ulen = htons(sizeof(struct udphdr) + 1.11449 + sizeof(struct sctphdr) + 1.11450 + sizeof(struct sctp_chunkhdr) + 1.11451 + cause_len + padding_len); 1.11452 + len += sizeof(struct udphdr); 1.11453 + shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr)); 1.11454 + } else { 1.11455 + udp = NULL; 1.11456 + } 1.11457 + shout->src_port = sh->dest_port; 1.11458 + shout->dest_port = sh->src_port; 1.11459 + shout->checksum = 0; 1.11460 + if (vtag) { 1.11461 + shout->v_tag = htonl(vtag); 1.11462 + } else { 1.11463 + shout->v_tag = sh->v_tag; 1.11464 + } 1.11465 + len += sizeof(struct sctphdr); 1.11466 + ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr)); 1.11467 + ch->chunk_type = type; 1.11468 + if (vtag) { 1.11469 + ch->chunk_flags = 0; 1.11470 + } else { 1.11471 + ch->chunk_flags = SCTP_HAD_NO_TCB; 1.11472 + } 1.11473 + ch->chunk_length = htons(sizeof(struct sctp_chunkhdr) + cause_len); 1.11474 + len += sizeof(struct sctp_chunkhdr); 1.11475 + len += cause_len + padding_len; 1.11476 + 1.11477 + if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) { 1.11478 + sctp_m_freem(mout); 1.11479 + return; 1.11480 + } 1.11481 + SCTP_ATTACH_CHAIN(o_pak, mout, len); 1.11482 + switch (dst->sa_family) { 1.11483 +#ifdef INET 1.11484 + case AF_INET: 1.11485 +#if defined(__APPLE__) || defined(__Panda__) 1.11486 + /* zap the stack pointer to the route */ 1.11487 + bzero(&ro, sizeof(sctp_route_t)); 1.11488 +#if defined(__Panda__) 1.11489 + ro._l_addr.sa.sa_family = AF_INET; 1.11490 +#endif 1.11491 +#endif 1.11492 + if (port) { 1.11493 +#if !defined(__Windows__) && !defined(__Userspace__) 1.11494 +#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000) 1.11495 + if (V_udp_cksum) { 1.11496 + udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP)); 1.11497 + } else { 1.11498 + udp->uh_sum = 0; 1.11499 + } 1.11500 +#else 1.11501 + udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP)); 1.11502 +#endif 1.11503 +#else 1.11504 + udp->uh_sum = 0; 1.11505 +#endif 1.11506 + } 1.11507 +#if 
defined(__FreeBSD__) 1.11508 +#if __FreeBSD_version >= 1000000 1.11509 + ip->ip_len = htons(len); 1.11510 +#else 1.11511 + ip->ip_len = len; 1.11512 +#endif 1.11513 +#elif defined(__APPLE__) || defined(__Userspace__) 1.11514 + ip->ip_len = len; 1.11515 +#else 1.11516 + ip->ip_len = htons(len); 1.11517 +#endif 1.11518 + if (port) { 1.11519 +#if defined(SCTP_WITH_NO_CSUM) 1.11520 + SCTP_STAT_INCR(sctps_sendnocrc); 1.11521 +#else 1.11522 + shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr)); 1.11523 + SCTP_STAT_INCR(sctps_sendswcrc); 1.11524 +#endif 1.11525 +#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000) 1.11526 + if (V_udp_cksum) { 1.11527 + SCTP_ENABLE_UDP_CSUM(o_pak); 1.11528 + } 1.11529 +#else 1.11530 + SCTP_ENABLE_UDP_CSUM(o_pak); 1.11531 +#endif 1.11532 + } else { 1.11533 +#if defined(SCTP_WITH_NO_CSUM) 1.11534 + SCTP_STAT_INCR(sctps_sendnocrc); 1.11535 +#else 1.11536 +#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 1.11537 + mout->m_pkthdr.csum_flags = CSUM_SCTP; 1.11538 + mout->m_pkthdr.csum_data = 0; 1.11539 + SCTP_STAT_INCR(sctps_sendhwcrc); 1.11540 +#else 1.11541 + shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip)); 1.11542 + SCTP_STAT_INCR(sctps_sendswcrc); 1.11543 +#endif 1.11544 +#endif 1.11545 + } 1.11546 +#ifdef SCTP_PACKET_LOGGING 1.11547 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) { 1.11548 + sctp_packet_log(o_pak); 1.11549 + } 1.11550 +#endif 1.11551 +#if defined(__APPLE__) || defined(__Panda__) 1.11552 + SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id); 1.11553 + /* Free the route if we got one back */ 1.11554 + if (ro.ro_rt) { 1.11555 + RTFREE(ro.ro_rt); 1.11556 + ro.ro_rt = NULL; 1.11557 + } 1.11558 +#else 1.11559 + SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id); 1.11560 +#endif 1.11561 + break; 1.11562 +#endif 1.11563 +#ifdef INET6 1.11564 + case AF_INET6: 1.11565 + ip6->ip6_plen = len - sizeof(struct ip6_hdr); 1.11566 + if (port) { 1.11567 +#if defined(SCTP_WITH_NO_CSUM) 1.11568 + SCTP_STAT_INCR(sctps_sendnocrc); 1.11569 +#else 1.11570 + shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr)); 1.11571 + SCTP_STAT_INCR(sctps_sendswcrc); 1.11572 +#endif 1.11573 +#if defined(__Windows__) 1.11574 + udp->uh_sum = 0; 1.11575 +#elif !defined(__Userspace__) 1.11576 + if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) { 1.11577 + udp->uh_sum = 0xffff; 1.11578 + } 1.11579 +#endif 1.11580 + } else { 1.11581 +#if defined(SCTP_WITH_NO_CSUM) 1.11582 + SCTP_STAT_INCR(sctps_sendnocrc); 1.11583 +#else 1.11584 +#if defined(__FreeBSD__) && __FreeBSD_version >= 900000 1.11585 +#if __FreeBSD_version > 901000 1.11586 + mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6; 1.11587 +#else 1.11588 + mout->m_pkthdr.csum_flags = CSUM_SCTP; 1.11589 +#endif 1.11590 + mout->m_pkthdr.csum_data = 0; 1.11591 + SCTP_STAT_INCR(sctps_sendhwcrc); 1.11592 +#else 1.11593 + shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr)); 1.11594 + SCTP_STAT_INCR(sctps_sendswcrc); 1.11595 +#endif 1.11596 +#endif 1.11597 + } 1.11598 +#ifdef SCTP_PACKET_LOGGING 1.11599 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) { 1.11600 + sctp_packet_log(o_pak); 1.11601 + } 1.11602 +#endif 1.11603 + SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id); 1.11604 + break; 1.11605 +#endif 1.11606 +#if defined(__Userspace__) 1.11607 + case AF_CONN: 1.11608 + { 
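		/*
		 * Userspace AF_CONN transport: the CRC32c is computed in
		 * software (unless SCTP_WITH_NO_CSUM is defined) and a linear
		 * copy of the packet is handed to the registered conn_output
		 * callback rather than to an IP stack.
		 */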
1.11609 + char *buffer; 1.11610 + struct sockaddr_conn *sconn; 1.11611 + 1.11612 + sconn = (struct sockaddr_conn *)src; 1.11613 +#if defined(SCTP_WITH_NO_CSUM) 1.11614 + SCTP_STAT_INCR(sctps_sendnocrc); 1.11615 +#else 1.11616 + shout->checksum = sctp_calculate_cksum(mout, 0); 1.11617 + SCTP_STAT_INCR(sctps_sendswcrc); 1.11618 +#endif 1.11619 +#ifdef SCTP_PACKET_LOGGING 1.11620 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) { 1.11621 + sctp_packet_log(mout); 1.11622 + } 1.11623 +#endif 1.11624 + /* Don't alloc/free for each packet */ 1.11625 + if ((buffer = malloc(len)) != NULL) { 1.11626 + m_copydata(mout, 0, len, buffer); 1.11627 + SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, len, 0, 0); 1.11628 + free(buffer); 1.11629 + } 1.11630 + sctp_m_freem(mout); 1.11631 + break; 1.11632 + } 1.11633 +#endif 1.11634 + default: 1.11635 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n", 1.11636 + dst->sa_family); 1.11637 + sctp_m_freem(mout); 1.11638 + SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT); 1.11639 + return; 1.11640 + } 1.11641 + SCTP_STAT_INCR(sctps_sendpackets); 1.11642 + SCTP_STAT_INCR_COUNTER64(sctps_outpackets); 1.11643 + SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks); 1.11644 + return; 1.11645 +} 1.11646 + 1.11647 +void 1.11648 +sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst, 1.11649 + struct sctphdr *sh, 1.11650 +#if defined(__FreeBSD__) 1.11651 + uint8_t use_mflowid, uint32_t mflowid, 1.11652 +#endif 1.11653 + uint32_t vrf_id, uint16_t port) 1.11654 +{ 1.11655 + sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL, 1.11656 +#if defined(__FreeBSD__) 1.11657 + use_mflowid, mflowid, 1.11658 +#endif 1.11659 + vrf_id, port); 1.11660 +} 1.11661 + 1.11662 +void 1.11663 +sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net,int so_locked 1.11664 +#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 1.11665 + SCTP_UNUSED 1.11666 +#endif 1.11667 +) 1.11668 +{ 1.11669 + struct sctp_tmit_chunk *chk; 1.11670 + struct sctp_heartbeat_chunk *hb; 1.11671 + struct timeval now; 1.11672 + 1.11673 + SCTP_TCB_LOCK_ASSERT(stcb); 1.11674 + if (net == NULL) { 1.11675 + return; 1.11676 + } 1.11677 + (void)SCTP_GETTIME_TIMEVAL(&now); 1.11678 + switch (net->ro._l_addr.sa.sa_family) { 1.11679 +#ifdef INET 1.11680 + case AF_INET: 1.11681 + break; 1.11682 +#endif 1.11683 +#ifdef INET6 1.11684 + case AF_INET6: 1.11685 + break; 1.11686 +#endif 1.11687 +#if defined(__Userspace__) 1.11688 + case AF_CONN: 1.11689 + break; 1.11690 +#endif 1.11691 + default: 1.11692 + return; 1.11693 + } 1.11694 + sctp_alloc_a_chunk(stcb, chk); 1.11695 + if (chk == NULL) { 1.11696 + SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n"); 1.11697 + return; 1.11698 + } 1.11699 + 1.11700 + chk->copy_by_ref = 0; 1.11701 + chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST; 1.11702 + chk->rec.chunk_id.can_take_data = 1; 1.11703 + chk->asoc = &stcb->asoc; 1.11704 + chk->send_size = sizeof(struct sctp_heartbeat_chunk); 1.11705 + 1.11706 + chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER); 1.11707 + if (chk->data == NULL) { 1.11708 + sctp_free_a_chunk(stcb, chk, so_locked); 1.11709 + return; 1.11710 + } 1.11711 + SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 1.11712 + SCTP_BUF_LEN(chk->data) = chk->send_size; 1.11713 + chk->sent = SCTP_DATAGRAM_UNSENT; 1.11714 + chk->snd_count = 0; 1.11715 + chk->whoTo = net; 1.11716 + atomic_add_int(&chk->whoTo->ref_count, 1); 1.11717 + /* Now we have a 
mbuf that we can fill in with the details */ 1.11718 + hb = mtod(chk->data, struct sctp_heartbeat_chunk *); 1.11719 + memset(hb, 0, sizeof(struct sctp_heartbeat_chunk)); 1.11720 + /* fill out chunk header */ 1.11721 + hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST; 1.11722 + hb->ch.chunk_flags = 0; 1.11723 + hb->ch.chunk_length = htons(chk->send_size); 1.11724 + /* Fill out hb parameter */ 1.11725 + hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO); 1.11726 + hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param)); 1.11727 + hb->heartbeat.hb_info.time_value_1 = now.tv_sec; 1.11728 + hb->heartbeat.hb_info.time_value_2 = now.tv_usec; 1.11729 + /* Did our user request this one, put it in */ 1.11730 + hb->heartbeat.hb_info.addr_family = net->ro._l_addr.sa.sa_family; 1.11731 +#ifdef HAVE_SA_LEN 1.11732 + hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len; 1.11733 +#else 1.11734 + switch (net->ro._l_addr.sa.sa_family) { 1.11735 +#ifdef INET 1.11736 + case AF_INET: 1.11737 + hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in); 1.11738 + break; 1.11739 +#endif 1.11740 +#ifdef INET6 1.11741 + case AF_INET6: 1.11742 + hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in6); 1.11743 + break; 1.11744 +#endif 1.11745 +#if defined(__Userspace__) 1.11746 + case AF_CONN: 1.11747 + hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_conn); 1.11748 + break; 1.11749 +#endif 1.11750 + default: 1.11751 + hb->heartbeat.hb_info.addr_len = 0; 1.11752 + break; 1.11753 + } 1.11754 +#endif 1.11755 + if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { 1.11756 + /* 1.11757 + * we only take from the entropy pool if the address is not 1.11758 + * confirmed. 1.11759 + */ 1.11760 + net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 1.11761 + net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 1.11762 + } else { 1.11763 + net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0; 1.11764 + net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0; 1.11765 + } 1.11766 + switch (net->ro._l_addr.sa.sa_family) { 1.11767 +#ifdef INET 1.11768 + case AF_INET: 1.11769 + memcpy(hb->heartbeat.hb_info.address, 1.11770 + &net->ro._l_addr.sin.sin_addr, 1.11771 + sizeof(net->ro._l_addr.sin.sin_addr)); 1.11772 + break; 1.11773 +#endif 1.11774 +#ifdef INET6 1.11775 + case AF_INET6: 1.11776 + memcpy(hb->heartbeat.hb_info.address, 1.11777 + &net->ro._l_addr.sin6.sin6_addr, 1.11778 + sizeof(net->ro._l_addr.sin6.sin6_addr)); 1.11779 + break; 1.11780 +#endif 1.11781 +#if defined(__Userspace__) 1.11782 + case AF_CONN: 1.11783 + memcpy(hb->heartbeat.hb_info.address, 1.11784 + &net->ro._l_addr.sconn.sconn_addr, 1.11785 + sizeof(net->ro._l_addr.sconn.sconn_addr)); 1.11786 + break; 1.11787 +#endif 1.11788 + default: 1.11789 + return; 1.11790 + break; 1.11791 + } 1.11792 + net->hb_responded = 0; 1.11793 + TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); 1.11794 + stcb->asoc.ctrl_queue_cnt++; 1.11795 + SCTP_STAT_INCR(sctps_sendheartbeat); 1.11796 + return; 1.11797 +} 1.11798 + 1.11799 +void 1.11800 +sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net, 1.11801 + uint32_t high_tsn) 1.11802 +{ 1.11803 + struct sctp_association *asoc; 1.11804 + struct sctp_ecne_chunk *ecne; 1.11805 + struct sctp_tmit_chunk *chk; 1.11806 + 1.11807 + if (net == NULL) { 1.11808 + return; 1.11809 + } 1.11810 + asoc = &stcb->asoc; 1.11811 + 
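	/*
	 * If an ECN-ECHO is already queued to this destination, the loop
	 * below just raises its TSN (when high_tsn is newer) and bumps the
	 * packets-since-CWR counter instead of queueing a second chunk.
	 */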
SCTP_TCB_LOCK_ASSERT(stcb); 1.11812 + TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 1.11813 + if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) { 1.11814 + /* found a previous ECN_ECHO update it if needed */ 1.11815 + uint32_t cnt, ctsn; 1.11816 + ecne = mtod(chk->data, struct sctp_ecne_chunk *); 1.11817 + ctsn = ntohl(ecne->tsn); 1.11818 + if (SCTP_TSN_GT(high_tsn, ctsn)) { 1.11819 + ecne->tsn = htonl(high_tsn); 1.11820 + SCTP_STAT_INCR(sctps_queue_upd_ecne); 1.11821 + } 1.11822 + cnt = ntohl(ecne->num_pkts_since_cwr); 1.11823 + cnt++; 1.11824 + ecne->num_pkts_since_cwr = htonl(cnt); 1.11825 + return; 1.11826 + } 1.11827 + } 1.11828 + /* nope could not find one to update so we must build one */ 1.11829 + sctp_alloc_a_chunk(stcb, chk); 1.11830 + if (chk == NULL) { 1.11831 + return; 1.11832 + } 1.11833 + chk->copy_by_ref = 0; 1.11834 + SCTP_STAT_INCR(sctps_queue_upd_ecne); 1.11835 + chk->rec.chunk_id.id = SCTP_ECN_ECHO; 1.11836 + chk->rec.chunk_id.can_take_data = 0; 1.11837 + chk->asoc = &stcb->asoc; 1.11838 + chk->send_size = sizeof(struct sctp_ecne_chunk); 1.11839 + chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER); 1.11840 + if (chk->data == NULL) { 1.11841 + sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1.11842 + return; 1.11843 + } 1.11844 + SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 1.11845 + SCTP_BUF_LEN(chk->data) = chk->send_size; 1.11846 + chk->sent = SCTP_DATAGRAM_UNSENT; 1.11847 + chk->snd_count = 0; 1.11848 + chk->whoTo = net; 1.11849 + atomic_add_int(&chk->whoTo->ref_count, 1); 1.11850 + 1.11851 + stcb->asoc.ecn_echo_cnt_onq++; 1.11852 + ecne = mtod(chk->data, struct sctp_ecne_chunk *); 1.11853 + ecne->ch.chunk_type = SCTP_ECN_ECHO; 1.11854 + ecne->ch.chunk_flags = 0; 1.11855 + ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk)); 1.11856 + ecne->tsn = htonl(high_tsn); 1.11857 + ecne->num_pkts_since_cwr = htonl(1); 1.11858 + TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next); 1.11859 + asoc->ctrl_queue_cnt++; 1.11860 +} 1.11861 + 1.11862 +void 1.11863 +sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net, 1.11864 + struct mbuf *m, int len, int iphlen, int bad_crc) 1.11865 +{ 1.11866 + struct sctp_association *asoc; 1.11867 + struct sctp_pktdrop_chunk *drp; 1.11868 + struct sctp_tmit_chunk *chk; 1.11869 + uint8_t *datap; 1.11870 + int was_trunc = 0; 1.11871 + int fullsz = 0; 1.11872 + long spc; 1.11873 + int offset; 1.11874 + struct sctp_chunkhdr *ch, chunk_buf; 1.11875 + unsigned int chk_length; 1.11876 + 1.11877 + if (!stcb) { 1.11878 + return; 1.11879 + } 1.11880 + asoc = &stcb->asoc; 1.11881 + SCTP_TCB_LOCK_ASSERT(stcb); 1.11882 + if (asoc->peer_supports_pktdrop == 0) { 1.11883 + /*- 1.11884 + * peer must declare support before I send one. 1.11885 + */ 1.11886 + return; 1.11887 + } 1.11888 + if (stcb->sctp_socket == NULL) { 1.11889 + return; 1.11890 + } 1.11891 + sctp_alloc_a_chunk(stcb, chk); 1.11892 + if (chk == NULL) { 1.11893 + return; 1.11894 + } 1.11895 + chk->copy_by_ref = 0; 1.11896 + len -= iphlen; 1.11897 + chk->send_size = len; 1.11898 + /* Validate that we do not have an ABORT in here. 
*/ 1.11899 + offset = iphlen + sizeof(struct sctphdr); 1.11900 + ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 1.11901 + sizeof(*ch), (uint8_t *) & chunk_buf); 1.11902 + while (ch != NULL) { 1.11903 + chk_length = ntohs(ch->chunk_length); 1.11904 + if (chk_length < sizeof(*ch)) { 1.11905 + /* break to abort land */ 1.11906 + break; 1.11907 + } 1.11908 + switch (ch->chunk_type) { 1.11909 + case SCTP_PACKET_DROPPED: 1.11910 + case SCTP_ABORT_ASSOCIATION: 1.11911 + case SCTP_INITIATION_ACK: 1.11912 + /** 1.11913 + * We don't respond with an PKT-DROP to an ABORT 1.11914 + * or PKT-DROP. We also do not respond to an 1.11915 + * INIT-ACK, because we can't know if the initiation 1.11916 + * tag is correct or not. 1.11917 + */ 1.11918 + sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1.11919 + return; 1.11920 + default: 1.11921 + break; 1.11922 + } 1.11923 + offset += SCTP_SIZE32(chk_length); 1.11924 + ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 1.11925 + sizeof(*ch), (uint8_t *) & chunk_buf); 1.11926 + } 1.11927 + 1.11928 + if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) > 1.11929 + min(stcb->asoc.smallest_mtu, MCLBYTES)) { 1.11930 + /* only send 1 mtu worth, trim off the 1.11931 + * excess on the end. 1.11932 + */ 1.11933 + fullsz = len; 1.11934 + len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD; 1.11935 + was_trunc = 1; 1.11936 + } 1.11937 + chk->asoc = &stcb->asoc; 1.11938 + chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 1.11939 + if (chk->data == NULL) { 1.11940 +jump_out: 1.11941 + sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1.11942 + return; 1.11943 + } 1.11944 + SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 1.11945 + drp = mtod(chk->data, struct sctp_pktdrop_chunk *); 1.11946 + if (drp == NULL) { 1.11947 + sctp_m_freem(chk->data); 1.11948 + chk->data = NULL; 1.11949 + goto jump_out; 1.11950 + } 1.11951 + chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) + 1.11952 + sizeof(struct sctphdr) + SCTP_MED_OVERHEAD)); 1.11953 + chk->book_size_scale = 0; 1.11954 + if (was_trunc) { 1.11955 + drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED; 1.11956 + drp->trunc_len = htons(fullsz); 1.11957 + /* Len is already adjusted to size minus overhead above 1.11958 + * take out the pkt_drop chunk itself from it. 
1.11959 + */ 1.11960 + chk->send_size = len - sizeof(struct sctp_pktdrop_chunk); 1.11961 + len = chk->send_size; 1.11962 + } else { 1.11963 + /* no truncation needed */ 1.11964 + drp->ch.chunk_flags = 0; 1.11965 + drp->trunc_len = htons(0); 1.11966 + } 1.11967 + if (bad_crc) { 1.11968 + drp->ch.chunk_flags |= SCTP_BADCRC; 1.11969 + } 1.11970 + chk->send_size += sizeof(struct sctp_pktdrop_chunk); 1.11971 + SCTP_BUF_LEN(chk->data) = chk->send_size; 1.11972 + chk->sent = SCTP_DATAGRAM_UNSENT; 1.11973 + chk->snd_count = 0; 1.11974 + if (net) { 1.11975 + /* we should hit here */ 1.11976 + chk->whoTo = net; 1.11977 + atomic_add_int(&chk->whoTo->ref_count, 1); 1.11978 + } else { 1.11979 + chk->whoTo = NULL; 1.11980 + } 1.11981 + chk->rec.chunk_id.id = SCTP_PACKET_DROPPED; 1.11982 + chk->rec.chunk_id.can_take_data = 1; 1.11983 + drp->ch.chunk_type = SCTP_PACKET_DROPPED; 1.11984 + drp->ch.chunk_length = htons(chk->send_size); 1.11985 + spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket); 1.11986 + if (spc < 0) { 1.11987 + spc = 0; 1.11988 + } 1.11989 + drp->bottle_bw = htonl(spc); 1.11990 + if (asoc->my_rwnd) { 1.11991 + drp->current_onq = htonl(asoc->size_on_reasm_queue + 1.11992 + asoc->size_on_all_streams + 1.11993 + asoc->my_rwnd_control_len + 1.11994 + stcb->sctp_socket->so_rcv.sb_cc); 1.11995 + } else { 1.11996 + /*- 1.11997 + * If my rwnd is 0, possibly from mbuf depletion as well as 1.11998 + * space used, tell the peer there is NO space aka onq == bw 1.11999 + */ 1.12000 + drp->current_onq = htonl(spc); 1.12001 + } 1.12002 + drp->reserved = 0; 1.12003 + datap = drp->data; 1.12004 + m_copydata(m, iphlen, len, (caddr_t)datap); 1.12005 + TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); 1.12006 + asoc->ctrl_queue_cnt++; 1.12007 +} 1.12008 + 1.12009 +void 1.12010 +sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override) 1.12011 +{ 1.12012 + struct sctp_association *asoc; 1.12013 + struct sctp_cwr_chunk *cwr; 1.12014 + struct sctp_tmit_chunk *chk; 1.12015 + 1.12016 + SCTP_TCB_LOCK_ASSERT(stcb); 1.12017 + if (net == NULL) { 1.12018 + return; 1.12019 + } 1.12020 + asoc = &stcb->asoc; 1.12021 + TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 1.12022 + if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) { 1.12023 + /* found a previous CWR queued to same destination update it if needed */ 1.12024 + uint32_t ctsn; 1.12025 + cwr = mtod(chk->data, struct sctp_cwr_chunk *); 1.12026 + ctsn = ntohl(cwr->tsn); 1.12027 + if (SCTP_TSN_GT(high_tsn, ctsn)) { 1.12028 + cwr->tsn = htonl(high_tsn); 1.12029 + } 1.12030 + if (override & SCTP_CWR_REDUCE_OVERRIDE) { 1.12031 + /* Make sure override is carried */ 1.12032 + cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE; 1.12033 + } 1.12034 + return; 1.12035 + } 1.12036 + } 1.12037 + sctp_alloc_a_chunk(stcb, chk); 1.12038 + if (chk == NULL) { 1.12039 + return; 1.12040 + } 1.12041 + chk->copy_by_ref = 0; 1.12042 + chk->rec.chunk_id.id = SCTP_ECN_CWR; 1.12043 + chk->rec.chunk_id.can_take_data = 1; 1.12044 + chk->asoc = &stcb->asoc; 1.12045 + chk->send_size = sizeof(struct sctp_cwr_chunk); 1.12046 + chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER); 1.12047 + if (chk->data == NULL) { 1.12048 + sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1.12049 + return; 1.12050 + } 1.12051 + SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 1.12052 + SCTP_BUF_LEN(chk->data) = chk->send_size; 1.12053 + chk->sent = SCTP_DATAGRAM_UNSENT; 1.12054 + chk->snd_count = 0; 1.12055 + chk->whoTo 
= net; 1.12056 + atomic_add_int(&chk->whoTo->ref_count, 1); 1.12057 + cwr = mtod(chk->data, struct sctp_cwr_chunk *); 1.12058 + cwr->ch.chunk_type = SCTP_ECN_CWR; 1.12059 + cwr->ch.chunk_flags = override; 1.12060 + cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk)); 1.12061 + cwr->tsn = htonl(high_tsn); 1.12062 + TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next); 1.12063 + asoc->ctrl_queue_cnt++; 1.12064 +} 1.12065 + 1.12066 +void 1.12067 +sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk, 1.12068 + int number_entries, uint16_t * list, 1.12069 + uint32_t seq, uint32_t resp_seq, uint32_t last_sent) 1.12070 +{ 1.12071 + uint16_t len, old_len, i; 1.12072 + struct sctp_stream_reset_out_request *req_out; 1.12073 + struct sctp_chunkhdr *ch; 1.12074 + 1.12075 + ch = mtod(chk->data, struct sctp_chunkhdr *); 1.12076 + old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 1.12077 + 1.12078 + /* get to new offset for the param. */ 1.12079 + req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len); 1.12080 + /* now how long will this param be? */ 1.12081 + len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries)); 1.12082 + req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST); 1.12083 + req_out->ph.param_length = htons(len); 1.12084 + req_out->request_seq = htonl(seq); 1.12085 + req_out->response_seq = htonl(resp_seq); 1.12086 + req_out->send_reset_at_tsn = htonl(last_sent); 1.12087 + if (number_entries) { 1.12088 + for (i = 0; i < number_entries; i++) { 1.12089 + req_out->list_of_streams[i] = htons(list[i]); 1.12090 + } 1.12091 + } 1.12092 + if (SCTP_SIZE32(len) > len) { 1.12093 + /*- 1.12094 + * Need to worry about the pad we may end up adding to the 1.12095 + * end. This is easy since the struct is either aligned to 4 1.12096 + * bytes or 2 bytes off. 1.12097 + */ 1.12098 + req_out->list_of_streams[number_entries] = 0; 1.12099 + } 1.12100 + /* now fix the chunk length */ 1.12101 + ch->chunk_length = htons(len + old_len); 1.12102 + chk->book_size = len + old_len; 1.12103 + chk->book_size_scale = 0; 1.12104 + chk->send_size = SCTP_SIZE32(chk->book_size); 1.12105 + SCTP_BUF_LEN(chk->data) = chk->send_size; 1.12106 + return; 1.12107 +} 1.12108 + 1.12109 +static void 1.12110 +sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk, 1.12111 + int number_entries, uint16_t *list, 1.12112 + uint32_t seq) 1.12113 +{ 1.12114 + uint16_t len, old_len, i; 1.12115 + struct sctp_stream_reset_in_request *req_in; 1.12116 + struct sctp_chunkhdr *ch; 1.12117 + 1.12118 + ch = mtod(chk->data, struct sctp_chunkhdr *); 1.12119 + old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 1.12120 + 1.12121 + /* get to new offset for the param. */ 1.12122 + req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len); 1.12123 + /* now how long will this param be? */ 1.12124 + len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries)); 1.12125 + req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST); 1.12126 + req_in->ph.param_length = htons(len); 1.12127 + req_in->request_seq = htonl(seq); 1.12128 + if (number_entries) { 1.12129 + for (i = 0; i < number_entries; i++) { 1.12130 + req_in->list_of_streams[i] = htons(list[i]); 1.12131 + } 1.12132 + } 1.12133 + if (SCTP_SIZE32(len) > len) { 1.12134 + /*- 1.12135 + * Need to worry about the pad we may end up adding to the 1.12136 + * end. This is easy since the struct is either aligned to 4 1.12137 + * bytes or 2 bytes off. 
1.12138 + */ 1.12139 + req_in->list_of_streams[number_entries] = 0; 1.12140 + } 1.12141 + /* now fix the chunk length */ 1.12142 + ch->chunk_length = htons(len + old_len); 1.12143 + chk->book_size = len + old_len; 1.12144 + chk->book_size_scale = 0; 1.12145 + chk->send_size = SCTP_SIZE32(chk->book_size); 1.12146 + SCTP_BUF_LEN(chk->data) = chk->send_size; 1.12147 + return; 1.12148 +} 1.12149 + 1.12150 +static void 1.12151 +sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk, 1.12152 + uint32_t seq) 1.12153 +{ 1.12154 + uint16_t len, old_len; 1.12155 + struct sctp_stream_reset_tsn_request *req_tsn; 1.12156 + struct sctp_chunkhdr *ch; 1.12157 + 1.12158 + ch = mtod(chk->data, struct sctp_chunkhdr *); 1.12159 + old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 1.12160 + 1.12161 + /* get to new offset for the param. */ 1.12162 + req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len); 1.12163 + /* now how long will this param be? */ 1.12164 + len = sizeof(struct sctp_stream_reset_tsn_request); 1.12165 + req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST); 1.12166 + req_tsn->ph.param_length = htons(len); 1.12167 + req_tsn->request_seq = htonl(seq); 1.12168 + 1.12169 + /* now fix the chunk length */ 1.12170 + ch->chunk_length = htons(len + old_len); 1.12171 + chk->send_size = len + old_len; 1.12172 + chk->book_size = SCTP_SIZE32(chk->send_size); 1.12173 + chk->book_size_scale = 0; 1.12174 + SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 1.12175 + return; 1.12176 +} 1.12177 + 1.12178 +void 1.12179 +sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk, 1.12180 + uint32_t resp_seq, uint32_t result) 1.12181 +{ 1.12182 + uint16_t len, old_len; 1.12183 + struct sctp_stream_reset_response *resp; 1.12184 + struct sctp_chunkhdr *ch; 1.12185 + 1.12186 + ch = mtod(chk->data, struct sctp_chunkhdr *); 1.12187 + old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 1.12188 + 1.12189 + /* get to new offset for the param. */ 1.12190 + resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len); 1.12191 + /* now how long will this param be? */ 1.12192 + len = sizeof(struct sctp_stream_reset_response); 1.12193 + resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE); 1.12194 + resp->ph.param_length = htons(len); 1.12195 + resp->response_seq = htonl(resp_seq); 1.12196 + resp->result = ntohl(result); 1.12197 + 1.12198 + /* now fix the chunk length */ 1.12199 + ch->chunk_length = htons(len + old_len); 1.12200 + chk->book_size = len + old_len; 1.12201 + chk->book_size_scale = 0; 1.12202 + chk->send_size = SCTP_SIZE32(chk->book_size); 1.12203 + SCTP_BUF_LEN(chk->data) = chk->send_size; 1.12204 + return; 1.12205 +} 1.12206 + 1.12207 +void 1.12208 +sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk, 1.12209 + uint32_t resp_seq, uint32_t result, 1.12210 + uint32_t send_una, uint32_t recv_next) 1.12211 +{ 1.12212 + uint16_t len, old_len; 1.12213 + struct sctp_stream_reset_response_tsn *resp; 1.12214 + struct sctp_chunkhdr *ch; 1.12215 + 1.12216 + ch = mtod(chk->data, struct sctp_chunkhdr *); 1.12217 + old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 1.12218 + 1.12219 + /* get to new offset for the param. */ 1.12220 + resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len); 1.12221 + /* now how long will this param be? 
*/ 1.12222 + len = sizeof(struct sctp_stream_reset_response_tsn); 1.12223 + resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE); 1.12224 + resp->ph.param_length = htons(len); 1.12225 + resp->response_seq = htonl(resp_seq); 1.12226 + resp->result = htonl(result); 1.12227 + resp->senders_next_tsn = htonl(send_una); 1.12228 + resp->receivers_next_tsn = htonl(recv_next); 1.12229 + 1.12230 + /* now fix the chunk length */ 1.12231 + ch->chunk_length = htons(len + old_len); 1.12232 + chk->book_size = len + old_len; 1.12233 + chk->send_size = SCTP_SIZE32(chk->book_size); 1.12234 + chk->book_size_scale = 0; 1.12235 + SCTP_BUF_LEN(chk->data) = chk->send_size; 1.12236 + return; 1.12237 +} 1.12238 + 1.12239 +static void 1.12240 +sctp_add_an_out_stream(struct sctp_tmit_chunk *chk, 1.12241 + uint32_t seq, 1.12242 + uint16_t adding) 1.12243 +{ 1.12244 + uint16_t len, old_len; 1.12245 + struct sctp_chunkhdr *ch; 1.12246 + struct sctp_stream_reset_add_strm *addstr; 1.12247 + 1.12248 + ch = mtod(chk->data, struct sctp_chunkhdr *); 1.12249 + old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 1.12250 + 1.12251 + /* get to new offset for the param. */ 1.12252 + addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len); 1.12253 + /* now how long will this param be? */ 1.12254 + len = sizeof(struct sctp_stream_reset_add_strm); 1.12255 + 1.12256 + /* Fill it out. */ 1.12257 + addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS); 1.12258 + addstr->ph.param_length = htons(len); 1.12259 + addstr->request_seq = htonl(seq); 1.12260 + addstr->number_of_streams = htons(adding); 1.12261 + addstr->reserved = 0; 1.12262 + 1.12263 + /* now fix the chunk length */ 1.12264 + ch->chunk_length = htons(len + old_len); 1.12265 + chk->send_size = len + old_len; 1.12266 + chk->book_size = SCTP_SIZE32(chk->send_size); 1.12267 + chk->book_size_scale = 0; 1.12268 + SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 1.12269 + return; 1.12270 +} 1.12271 + 1.12272 +static void 1.12273 +sctp_add_an_in_stream(struct sctp_tmit_chunk *chk, 1.12274 + uint32_t seq, 1.12275 + uint16_t adding) 1.12276 +{ 1.12277 + uint16_t len, old_len; 1.12278 + struct sctp_chunkhdr *ch; 1.12279 + struct sctp_stream_reset_add_strm *addstr; 1.12280 + 1.12281 + ch = mtod(chk->data, struct sctp_chunkhdr *); 1.12282 + old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length)); 1.12283 + 1.12284 + /* get to new offset for the param. */ 1.12285 + addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len); 1.12286 + /* now how long will this param be? */ 1.12287 + len = sizeof(struct sctp_stream_reset_add_strm); 1.12288 + /* Fill it out. 
*/ 1.12289 + addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS); 1.12290 + addstr->ph.param_length = htons(len); 1.12291 + addstr->request_seq = htonl(seq); 1.12292 + addstr->number_of_streams = htons(adding); 1.12293 + addstr->reserved = 0; 1.12294 + 1.12295 + /* now fix the chunk length */ 1.12296 + ch->chunk_length = htons(len + old_len); 1.12297 + chk->send_size = len + old_len; 1.12298 + chk->book_size = SCTP_SIZE32(chk->send_size); 1.12299 + chk->book_size_scale = 0; 1.12300 + SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 1.12301 + return; 1.12302 +} 1.12303 + 1.12304 +int 1.12305 +sctp_send_str_reset_req(struct sctp_tcb *stcb, 1.12306 + int number_entries, uint16_t *list, 1.12307 + uint8_t send_out_req, 1.12308 + uint8_t send_in_req, 1.12309 + uint8_t send_tsn_req, 1.12310 + uint8_t add_stream, 1.12311 + uint16_t adding_o, 1.12312 + uint16_t adding_i, uint8_t peer_asked) 1.12313 +{ 1.12314 + 1.12315 + struct sctp_association *asoc; 1.12316 + struct sctp_tmit_chunk *chk; 1.12317 + struct sctp_chunkhdr *ch; 1.12318 + uint32_t seq; 1.12319 + 1.12320 + asoc = &stcb->asoc; 1.12321 + if (asoc->stream_reset_outstanding) { 1.12322 + /*- 1.12323 + * Already one pending, must get ACK back to clear the flag. 1.12324 + */ 1.12325 + SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY); 1.12326 + return (EBUSY); 1.12327 + } 1.12328 + if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0) && 1.12329 + (add_stream == 0)) { 1.12330 + /* nothing to do */ 1.12331 + SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.12332 + return (EINVAL); 1.12333 + } 1.12334 + if (send_tsn_req && (send_out_req || send_in_req)) { 1.12335 + /* error, can't do that */ 1.12336 + SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.12337 + return (EINVAL); 1.12338 + } 1.12339 + sctp_alloc_a_chunk(stcb, chk); 1.12340 + if (chk == NULL) { 1.12341 + SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.12342 + return (ENOMEM); 1.12343 + } 1.12344 + chk->copy_by_ref = 0; 1.12345 + chk->rec.chunk_id.id = SCTP_STREAM_RESET; 1.12346 + chk->rec.chunk_id.can_take_data = 0; 1.12347 + chk->asoc = &stcb->asoc; 1.12348 + chk->book_size = sizeof(struct sctp_chunkhdr); 1.12349 + chk->send_size = SCTP_SIZE32(chk->book_size); 1.12350 + chk->book_size_scale = 0; 1.12351 + 1.12352 + chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 1.12353 + if (chk->data == NULL) { 1.12354 + sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED); 1.12355 + SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.12356 + return (ENOMEM); 1.12357 + } 1.12358 + SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 1.12359 + 1.12360 + /* setup chunk parameters */ 1.12361 + chk->sent = SCTP_DATAGRAM_UNSENT; 1.12362 + chk->snd_count = 0; 1.12363 + if (stcb->asoc.alternate) { 1.12364 + chk->whoTo = stcb->asoc.alternate; 1.12365 + } else { 1.12366 + chk->whoTo = stcb->asoc.primary_destination; 1.12367 + } 1.12368 + atomic_add_int(&chk->whoTo->ref_count, 1); 1.12369 + ch = mtod(chk->data, struct sctp_chunkhdr *); 1.12370 + ch->chunk_type = SCTP_STREAM_RESET; 1.12371 + ch->chunk_flags = 0; 1.12372 + ch->chunk_length = htons(chk->book_size); 1.12373 + SCTP_BUF_LEN(chk->data) = chk->send_size; 1.12374 + 1.12375 + seq = stcb->asoc.str_reset_seq_out; 1.12376 + if (send_out_req) { 1.12377 + sctp_add_stream_reset_out(chk, number_entries, list, 1.12378 + seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1)); 1.12379 + 
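		/*
		 * The outgoing-reset parameter is now part of the chunk; flag
		 * it as outstanding and consume one request sequence number.
		 */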
asoc->stream_reset_out_is_outstanding = 1; 1.12380 + seq++; 1.12381 + asoc->stream_reset_outstanding++; 1.12382 + } 1.12383 + if ((add_stream & 1) && 1.12384 + ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) { 1.12385 + /* Need to allocate more */ 1.12386 + struct sctp_stream_out *oldstream; 1.12387 + struct sctp_stream_queue_pending *sp, *nsp; 1.12388 + int i; 1.12389 + 1.12390 + oldstream = stcb->asoc.strmout; 1.12391 + /* get some more */ 1.12392 + SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *, 1.12393 + ((stcb->asoc.streamoutcnt+adding_o) * sizeof(struct sctp_stream_out)), 1.12394 + SCTP_M_STRMO); 1.12395 + if (stcb->asoc.strmout == NULL) { 1.12396 + uint8_t x; 1.12397 + stcb->asoc.strmout = oldstream; 1.12398 + /* Turn off the bit */ 1.12399 + x = add_stream & 0xfe; 1.12400 + add_stream = x; 1.12401 + goto skip_stuff; 1.12402 + } 1.12403 + /* Ok now we proceed with copying the old out stuff and 1.12404 + * initializing the new stuff. 1.12405 + */ 1.12406 + SCTP_TCB_SEND_LOCK(stcb); 1.12407 + stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1); 1.12408 + for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1.12409 + TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); 1.12410 + stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues; 1.12411 + stcb->asoc.strmout[i].next_sequence_send = oldstream[i].next_sequence_send; 1.12412 + stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete; 1.12413 + stcb->asoc.strmout[i].stream_no = i; 1.12414 + stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], &oldstream[i]); 1.12415 + /* now anything on those queues? */ 1.12416 + TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) { 1.12417 + TAILQ_REMOVE(&oldstream[i].outqueue, sp, next); 1.12418 + TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next); 1.12419 + } 1.12420 + /* Now move assoc pointers too */ 1.12421 + if (stcb->asoc.last_out_stream == &oldstream[i]) { 1.12422 + stcb->asoc.last_out_stream = &stcb->asoc.strmout[i]; 1.12423 + } 1.12424 + if (stcb->asoc.locked_on_sending == &oldstream[i]) { 1.12425 + stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i]; 1.12426 + } 1.12427 + } 1.12428 + /* now the new streams */ 1.12429 + stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1); 1.12430 + for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) { 1.12431 + TAILQ_INIT(&stcb->asoc.strmout[i].outqueue); 1.12432 + stcb->asoc.strmout[i].chunks_on_queues = 0; 1.12433 + stcb->asoc.strmout[i].next_sequence_send = 0x0; 1.12434 + stcb->asoc.strmout[i].stream_no = i; 1.12435 + stcb->asoc.strmout[i].last_msg_incomplete = 0; 1.12436 + stcb->asoc.ss_functions.sctp_ss_init_stream(&stcb->asoc.strmout[i], NULL); 1.12437 + } 1.12438 + stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o; 1.12439 + SCTP_FREE(oldstream, SCTP_M_STRMO); 1.12440 + SCTP_TCB_SEND_UNLOCK(stcb); 1.12441 + } 1.12442 +skip_stuff: 1.12443 + if ((add_stream & 1) && (adding_o > 0)) { 1.12444 + asoc->strm_pending_add_size = adding_o; 1.12445 + asoc->peer_req_out = peer_asked; 1.12446 + sctp_add_an_out_stream(chk, seq, adding_o); 1.12447 + seq++; 1.12448 + asoc->stream_reset_outstanding++; 1.12449 + } 1.12450 + if ((add_stream & 2) && (adding_i > 0)) { 1.12451 + sctp_add_an_in_stream(chk, seq, adding_i); 1.12452 + seq++; 1.12453 + asoc->stream_reset_outstanding++; 1.12454 + } 1.12455 + if (send_in_req) { 1.12456 + sctp_add_stream_reset_in(chk, number_entries, list, seq); 1.12457 + seq++; 1.12458 + 
asoc->stream_reset_outstanding++; 1.12459 + } 1.12460 + if (send_tsn_req) { 1.12461 + sctp_add_stream_reset_tsn(chk, seq); 1.12462 + asoc->stream_reset_outstanding++; 1.12463 + } 1.12464 + asoc->str_reset = chk; 1.12465 + /* insert the chunk for sending */ 1.12466 + TAILQ_INSERT_TAIL(&asoc->control_send_queue, 1.12467 + chk, 1.12468 + sctp_next); 1.12469 + asoc->ctrl_queue_cnt++; 1.12470 + sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo); 1.12471 + return (0); 1.12472 +} 1.12473 + 1.12474 +void 1.12475 +sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst, 1.12476 + struct sctphdr *sh, uint32_t vtag, struct mbuf *cause, 1.12477 +#if defined(__FreeBSD__) 1.12478 + uint8_t use_mflowid, uint32_t mflowid, 1.12479 +#endif 1.12480 + uint32_t vrf_id, uint16_t port) 1.12481 +{ 1.12482 + /* Don't respond to an ABORT with an ABORT. */ 1.12483 + if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) { 1.12484 + if (cause) 1.12485 + sctp_m_freem(cause); 1.12486 + return; 1.12487 + } 1.12488 + sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause, 1.12489 +#if defined(__FreeBSD__) 1.12490 + use_mflowid, mflowid, 1.12491 +#endif 1.12492 + vrf_id, port); 1.12493 + return; 1.12494 +} 1.12495 + 1.12496 +void 1.12497 +sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst, 1.12498 + struct sctphdr *sh, uint32_t vtag, struct mbuf *cause, 1.12499 +#if defined(__FreeBSD__) 1.12500 + uint8_t use_mflowid, uint32_t mflowid, 1.12501 +#endif 1.12502 + uint32_t vrf_id, uint16_t port) 1.12503 +{ 1.12504 + sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause, 1.12505 +#if defined(__FreeBSD__) 1.12506 + use_mflowid, mflowid, 1.12507 +#endif 1.12508 + vrf_id, port); 1.12509 + return; 1.12510 +} 1.12511 + 1.12512 +static struct mbuf * 1.12513 +sctp_copy_resume(struct uio *uio, 1.12514 + int max_send_len, 1.12515 +#if defined(__FreeBSD__) && __FreeBSD_version > 602000 1.12516 + int user_marks_eor, 1.12517 +#endif 1.12518 + int *error, 1.12519 + uint32_t *sndout, 1.12520 + struct mbuf **new_tail) 1.12521 +{ 1.12522 +#if defined(__Panda__) 1.12523 + struct mbuf *m; 1.12524 + 1.12525 + m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0, 1.12526 + (user_marks_eor ? M_EOR : 0)); 1.12527 + if (m == NULL) { 1.12528 + SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.12529 + *error = ENOMEM; 1.12530 + } else { 1.12531 + *sndout = m_length(m, NULL); 1.12532 + *new_tail = m_last(m); 1.12533 + } 1.12534 + return (m); 1.12535 +#elif defined(__FreeBSD__) && __FreeBSD_version > 602000 1.12536 + struct mbuf *m; 1.12537 + 1.12538 + m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0, 1.12539 + (M_PKTHDR | (user_marks_eor ? 
M_EOR : 0))); 1.12540 + if (m == NULL) { 1.12541 + SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.12542 + *error = ENOMEM; 1.12543 + } else { 1.12544 + *sndout = m_length(m, NULL); 1.12545 + *new_tail = m_last(m); 1.12546 + } 1.12547 + return (m); 1.12548 +#else 1.12549 + int left, cancpy, willcpy; 1.12550 + struct mbuf *m, *head; 1.12551 + 1.12552 +#if defined(__APPLE__) 1.12553 +#if defined(APPLE_LEOPARD) 1.12554 + left = min(uio->uio_resid, max_send_len); 1.12555 +#else 1.12556 + left = min(uio_resid(uio), max_send_len); 1.12557 +#endif 1.12558 +#else 1.12559 + left = min(uio->uio_resid, max_send_len); 1.12560 +#endif 1.12561 + /* Always get a header just in case */ 1.12562 + head = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA); 1.12563 + if (head == NULL) { 1.12564 + SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.12565 + *error = ENOMEM; 1.12566 + return (NULL); 1.12567 + } 1.12568 + cancpy = M_TRAILINGSPACE(head); 1.12569 + willcpy = min(cancpy, left); 1.12570 + *error = uiomove(mtod(head, caddr_t), willcpy, uio); 1.12571 + if (*error) { 1.12572 + sctp_m_freem(head); 1.12573 + return (NULL); 1.12574 + } 1.12575 + *sndout += willcpy; 1.12576 + left -= willcpy; 1.12577 + SCTP_BUF_LEN(head) = willcpy; 1.12578 + m = head; 1.12579 + *new_tail = head; 1.12580 + while (left > 0) { 1.12581 + /* move in user data */ 1.12582 + SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA); 1.12583 + if (SCTP_BUF_NEXT(m) == NULL) { 1.12584 + sctp_m_freem(head); 1.12585 + *new_tail = NULL; 1.12586 + SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.12587 + *error = ENOMEM; 1.12588 + return (NULL); 1.12589 + } 1.12590 + m = SCTP_BUF_NEXT(m); 1.12591 + cancpy = M_TRAILINGSPACE(m); 1.12592 + willcpy = min(cancpy, left); 1.12593 + *error = uiomove(mtod(m, caddr_t), willcpy, uio); 1.12594 + if (*error) { 1.12595 + sctp_m_freem(head); 1.12596 + *new_tail = NULL; 1.12597 + SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT); 1.12598 + *error = EFAULT; 1.12599 + return (NULL); 1.12600 + } 1.12601 + SCTP_BUF_LEN(m) = willcpy; 1.12602 + left -= willcpy; 1.12603 + *sndout += willcpy; 1.12604 + *new_tail = m; 1.12605 + if (left == 0) { 1.12606 + SCTP_BUF_NEXT(m) = NULL; 1.12607 + } 1.12608 + } 1.12609 + return (head); 1.12610 +#endif 1.12611 +} 1.12612 + 1.12613 +static int 1.12614 +sctp_copy_one(struct sctp_stream_queue_pending *sp, 1.12615 + struct uio *uio, 1.12616 + int resv_upfront) 1.12617 +{ 1.12618 + int left; 1.12619 +#if defined(__Panda__) 1.12620 + left = sp->length; 1.12621 + sp->data = m_uiotombuf(uio, M_WAITOK, sp->length, 1.12622 + resv_upfront, 0); 1.12623 + if (sp->data == NULL) { 1.12624 + SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.12625 + return (ENOMEM); 1.12626 + } 1.12627 + 1.12628 + sp->tail_mbuf = m_last(sp->data); 1.12629 + return (0); 1.12630 + 1.12631 +#elif defined(__FreeBSD__) && __FreeBSD_version > 602000 1.12632 + left = sp->length; 1.12633 + sp->data = m_uiotombuf(uio, M_WAITOK, sp->length, 1.12634 + resv_upfront, 0); 1.12635 + if (sp->data == NULL) { 1.12636 + SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.12637 + return (ENOMEM); 1.12638 + } 1.12639 + 1.12640 + sp->tail_mbuf = m_last(sp->data); 1.12641 + return (0); 1.12642 +#else 1.12643 + int cancpy, willcpy, error; 1.12644 + struct mbuf *m, *head; 1.12645 + int cpsz = 0; 1.12646 + 1.12647 + /* First one gets a header */ 1.12648 + left = sp->length; 1.12649 + head = 
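Note: when m_uiotombuf() is not available, the fallback above copies user data one buffer at a time, each fill bounded by the buffer's trailing space, and links the buffers into a chain. The sketch below uses a plain memcpy() from a flat buffer as a stand-in for uiomove(); the node type and NODE_CAP are illustrative.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define NODE_CAP 2048                    /* stand-in for M_TRAILINGSPACE() */

struct node {
	struct node *next;
	size_t len;
	uint8_t buf[NODE_CAP];
};

static struct node *
copy_into_chain(const uint8_t *src, size_t left, size_t *copied)
{
	struct node *head = NULL, *tail = NULL;

	*copied = 0;
	while (left > 0) {
		struct node *n = calloc(1, sizeof(*n));
		size_t willcpy = left < NODE_CAP ? left : NODE_CAP;

		if (n == NULL) {                 /* free what was built so far */
			while (head != NULL) {
				struct node *t = head->next;
				free(head);
				head = t;
			}
			return (NULL);
		}
		memcpy(n->buf, src + *copied, willcpy);   /* "move in user data" */
		n->len = willcpy;
		if (tail == NULL)
			head = n;
		else
			tail->next = n;
		tail = n;
		*copied += willcpy;
		left -= willcpy;
	}
	return (head);
}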
m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAITOK, 0, MT_DATA); 1.12650 + if (m == NULL) { 1.12651 + SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.12652 + return (ENOMEM); 1.12653 + } 1.12654 + /*- 1.12655 + * Add this one for m in now, that way if the alloc fails we won't 1.12656 + * have a bad cnt. 1.12657 + */ 1.12658 + SCTP_BUF_RESV_UF(m, resv_upfront); 1.12659 + cancpy = M_TRAILINGSPACE(m); 1.12660 + willcpy = min(cancpy, left); 1.12661 + while (left > 0) { 1.12662 + /* move in user data */ 1.12663 + error = uiomove(mtod(m, caddr_t), willcpy, uio); 1.12664 + if (error) { 1.12665 + sctp_m_freem(head); 1.12666 + return (error); 1.12667 + } 1.12668 + SCTP_BUF_LEN(m) = willcpy; 1.12669 + left -= willcpy; 1.12670 + cpsz += willcpy; 1.12671 + if (left > 0) { 1.12672 + SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA); 1.12673 + if (SCTP_BUF_NEXT(m) == NULL) { 1.12674 + /* 1.12675 + * the head goes back to caller, he can free 1.12676 + * the rest 1.12677 + */ 1.12678 + sctp_m_freem(head); 1.12679 + SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.12680 + return (ENOMEM); 1.12681 + } 1.12682 + m = SCTP_BUF_NEXT(m); 1.12683 + cancpy = M_TRAILINGSPACE(m); 1.12684 + willcpy = min(cancpy, left); 1.12685 + } else { 1.12686 + sp->tail_mbuf = m; 1.12687 + SCTP_BUF_NEXT(m) = NULL; 1.12688 + } 1.12689 + } 1.12690 + sp->data = head; 1.12691 + sp->length = cpsz; 1.12692 + return (0); 1.12693 +#endif 1.12694 +} 1.12695 + 1.12696 + 1.12697 + 1.12698 +static struct sctp_stream_queue_pending * 1.12699 +sctp_copy_it_in(struct sctp_tcb *stcb, 1.12700 + struct sctp_association *asoc, 1.12701 + struct sctp_sndrcvinfo *srcv, 1.12702 + struct uio *uio, 1.12703 + struct sctp_nets *net, 1.12704 + int max_send_len, 1.12705 + int user_marks_eor, 1.12706 + int *error) 1.12707 + 1.12708 +{ 1.12709 + /*- 1.12710 + * This routine must be very careful in its work. Protocol 1.12711 + * processing is up and running so care must be taken to spl...() 1.12712 + * when you need to do something that may effect the stcb/asoc. The 1.12713 + * sb is locked however. When data is copied the protocol processing 1.12714 + * should be enabled since this is a slower operation... 1.12715 + */ 1.12716 + struct sctp_stream_queue_pending *sp = NULL; 1.12717 + int resv_in_first; 1.12718 + 1.12719 + *error = 0; 1.12720 + /* Now can we send this? 
*/ 1.12721 + if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) || 1.12722 + (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 1.12723 + (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) || 1.12724 + (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) { 1.12725 + /* got data while shutting down */ 1.12726 + SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); 1.12727 + *error = ECONNRESET; 1.12728 + goto out_now; 1.12729 + } 1.12730 + sctp_alloc_a_strmoq(stcb, sp); 1.12731 + if (sp == NULL) { 1.12732 + SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.12733 + *error = ENOMEM; 1.12734 + goto out_now; 1.12735 + } 1.12736 + sp->act_flags = 0; 1.12737 + sp->sender_all_done = 0; 1.12738 + sp->sinfo_flags = srcv->sinfo_flags; 1.12739 + sp->timetolive = srcv->sinfo_timetolive; 1.12740 + sp->ppid = srcv->sinfo_ppid; 1.12741 + sp->context = srcv->sinfo_context; 1.12742 + (void)SCTP_GETTIME_TIMEVAL(&sp->ts); 1.12743 + 1.12744 + sp->stream = srcv->sinfo_stream; 1.12745 +#if defined(__APPLE__) 1.12746 +#if defined(APPLE_LEOPARD) 1.12747 + sp->length = min(uio->uio_resid, max_send_len); 1.12748 +#else 1.12749 + sp->length = min(uio_resid(uio), max_send_len); 1.12750 +#endif 1.12751 +#else 1.12752 + sp->length = min(uio->uio_resid, max_send_len); 1.12753 +#endif 1.12754 +#if defined(__APPLE__) 1.12755 +#if defined(APPLE_LEOPARD) 1.12756 + if ((sp->length == (uint32_t)uio->uio_resid) && 1.12757 +#else 1.12758 + if ((sp->length == (uint32_t)uio_resid(uio)) && 1.12759 +#endif 1.12760 +#else 1.12761 + if ((sp->length == (uint32_t)uio->uio_resid) && 1.12762 +#endif 1.12763 + ((user_marks_eor == 0) || 1.12764 + (srcv->sinfo_flags & SCTP_EOF) || 1.12765 + (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) { 1.12766 + sp->msg_is_complete = 1; 1.12767 + } else { 1.12768 + sp->msg_is_complete = 0; 1.12769 + } 1.12770 + sp->sender_all_done = 0; 1.12771 + sp->some_taken = 0; 1.12772 + sp->put_last_out = 0; 1.12773 + resv_in_first = sizeof(struct sctp_data_chunk); 1.12774 + sp->data = sp->tail_mbuf = NULL; 1.12775 + if (sp->length == 0) { 1.12776 + *error = 0; 1.12777 + goto skip_copy; 1.12778 + } 1.12779 + if (srcv->sinfo_keynumber_valid) { 1.12780 + sp->auth_keyid = srcv->sinfo_keynumber; 1.12781 + } else { 1.12782 + sp->auth_keyid = stcb->asoc.authinfo.active_keyid; 1.12783 + } 1.12784 + if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) { 1.12785 + sctp_auth_key_acquire(stcb, sp->auth_keyid); 1.12786 + sp->holds_key_ref = 1; 1.12787 + } 1.12788 +#if defined(__APPLE__) 1.12789 + SCTP_SOCKET_UNLOCK(SCTP_INP_SO(stcb->sctp_ep), 0); 1.12790 +#endif 1.12791 + *error = sctp_copy_one(sp, uio, resv_in_first); 1.12792 +#if defined(__APPLE__) 1.12793 + SCTP_SOCKET_LOCK(SCTP_INP_SO(stcb->sctp_ep), 0); 1.12794 +#endif 1.12795 + skip_copy: 1.12796 + if (*error) { 1.12797 + sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED); 1.12798 + sp = NULL; 1.12799 + } else { 1.12800 + if (sp->sinfo_flags & SCTP_ADDR_OVER) { 1.12801 + sp->net = net; 1.12802 + atomic_add_int(&sp->net->ref_count, 1); 1.12803 + } else { 1.12804 + sp->net = NULL; 1.12805 + } 1.12806 + sctp_set_prsctp_policy(sp); 1.12807 + } 1.12808 +out_now: 1.12809 + return (sp); 1.12810 +} 1.12811 + 1.12812 + 1.12813 +int 1.12814 +sctp_sosend(struct socket *so, 1.12815 + struct sockaddr *addr, 1.12816 + struct uio *uio, 1.12817 +#ifdef __Panda__ 1.12818 + pakhandle_type top, 1.12819 + pakhandle_type icontrol, 1.12820 +#else 1.12821 + struct mbuf *top, 1.12822 + struct mbuf *control, 1.12823 +#endif 
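Note: the msg_is_complete decision above says a queued piece closes the message only when the whole remaining user data was taken and either the socket is not in explicit-EOR mode or the caller flagged EOF/EOR. A tiny sketch of that predicate; the flag values are illustrative, not the real sinfo_flags bits.

#include <stdint.h>

#define FLAG_EOF 0x0100                  /* illustrative stand-ins */
#define FLAG_EOR 0x0200

static int
msg_is_complete(uint32_t copied, uint32_t resid_before,
                int explicit_eor_mode, uint16_t sinfo_flags)
{
	if (copied != resid_before)
		return (0);                      /* more user data still to come */
	if (explicit_eor_mode == 0)
		return (1);                      /* whole send == whole message */
	return ((sinfo_flags & (FLAG_EOF | FLAG_EOR)) != 0);
}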
1.12824 +#if defined(__APPLE__) || defined(__Panda__) 1.12825 + int flags 1.12826 +#else 1.12827 + int flags, 1.12828 +#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 1.12829 + struct thread *p 1.12830 +#elif defined(__Windows__) 1.12831 + PKTHREAD p 1.12832 +#else 1.12833 +#if defined(__Userspace__) 1.12834 + /* 1.12835 + * proc is a dummy in __Userspace__ and will not be passed 1.12836 + * to sctp_lower_sosend 1.12837 + */ 1.12838 +#endif 1.12839 + struct proc *p 1.12840 +#endif 1.12841 +#endif 1.12842 +) 1.12843 +{ 1.12844 +#ifdef __Panda__ 1.12845 + struct mbuf *control = NULL; 1.12846 +#endif 1.12847 +#if defined(__APPLE__) 1.12848 + struct proc *p = current_proc(); 1.12849 +#endif 1.12850 + int error, use_sndinfo = 0; 1.12851 + struct sctp_sndrcvinfo sndrcvninfo; 1.12852 + struct sockaddr *addr_to_use; 1.12853 +#if defined(INET) && defined(INET6) 1.12854 + struct sockaddr_in sin; 1.12855 +#endif 1.12856 + 1.12857 +#if defined(__APPLE__) 1.12858 + SCTP_SOCKET_LOCK(so, 1); 1.12859 +#endif 1.12860 +#ifdef __Panda__ 1.12861 + control = SCTP_HEADER_TO_CHAIN(icontrol); 1.12862 +#endif 1.12863 + if (control) { 1.12864 + /* process cmsg snd/rcv info (maybe a assoc-id) */ 1.12865 + if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control, 1.12866 + sizeof(sndrcvninfo))) { 1.12867 + /* got one */ 1.12868 + use_sndinfo = 1; 1.12869 + } 1.12870 + } 1.12871 + addr_to_use = addr; 1.12872 +#if defined(INET) && defined(INET6) 1.12873 + if ((addr) && (addr->sa_family == AF_INET6)) { 1.12874 + struct sockaddr_in6 *sin6; 1.12875 + 1.12876 + sin6 = (struct sockaddr_in6 *)addr; 1.12877 + if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 1.12878 + in6_sin6_2_sin(&sin, sin6); 1.12879 + addr_to_use = (struct sockaddr *)&sin; 1.12880 + } 1.12881 + } 1.12882 +#endif 1.12883 + error = sctp_lower_sosend(so, addr_to_use, uio, top, 1.12884 +#ifdef __Panda__ 1.12885 + icontrol, 1.12886 +#else 1.12887 + control, 1.12888 +#endif 1.12889 + flags, 1.12890 + use_sndinfo ? 
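Note: before handing the address down, an AF_INET6 sockaddr carrying an IPv4-mapped address (::ffff:a.b.c.d) is rewritten as a plain AF_INET sockaddr, which is roughly what in6_sin6_2_sin() does. A userland sketch of that normalisation (sin_len handling, which only exists on BSD, is omitted):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

static const struct sockaddr *
normalize_v4mapped(const struct sockaddr *sa, struct sockaddr_in *scratch)
{
	const struct sockaddr_in6 *sin6;

	if (sa == NULL || sa->sa_family != AF_INET6)
		return (sa);
	sin6 = (const struct sockaddr_in6 *)(const void *)sa;
	if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr))
		return (sa);
	memset(scratch, 0, sizeof(*scratch));
	scratch->sin_family = AF_INET;
	scratch->sin_port = sin6->sin6_port;
	/* the IPv4 address lives in the low 4 bytes of the mapped address */
	memcpy(&scratch->sin_addr, &sin6->sin6_addr.s6_addr[12], 4);
	return ((const struct sockaddr *)scratch);
}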
&sndrcvninfo: NULL 1.12891 +#if !(defined(__Panda__) || defined(__Userspace__)) 1.12892 + , p 1.12893 +#endif 1.12894 + ); 1.12895 +#if defined(__APPLE__) 1.12896 + SCTP_SOCKET_UNLOCK(so, 1); 1.12897 +#endif 1.12898 + return (error); 1.12899 +} 1.12900 + 1.12901 + 1.12902 +int 1.12903 +sctp_lower_sosend(struct socket *so, 1.12904 + struct sockaddr *addr, 1.12905 + struct uio *uio, 1.12906 +#ifdef __Panda__ 1.12907 + pakhandle_type i_pak, 1.12908 + pakhandle_type i_control, 1.12909 +#else 1.12910 + struct mbuf *i_pak, 1.12911 + struct mbuf *control, 1.12912 +#endif 1.12913 + int flags, 1.12914 + struct sctp_sndrcvinfo *srcv 1.12915 +#if !(defined( __Panda__) || defined(__Userspace__)) 1.12916 + , 1.12917 +#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 1.12918 + struct thread *p 1.12919 +#elif defined(__Windows__) 1.12920 + PKTHREAD p 1.12921 +#else 1.12922 + struct proc *p 1.12923 +#endif 1.12924 +#endif 1.12925 + ) 1.12926 +{ 1.12927 + unsigned int sndlen = 0, max_len; 1.12928 + int error, len; 1.12929 + struct mbuf *top = NULL; 1.12930 +#ifdef __Panda__ 1.12931 + struct mbuf *control = NULL; 1.12932 +#endif 1.12933 + int queue_only = 0, queue_only_for_init = 0; 1.12934 + int free_cnt_applied = 0; 1.12935 + int un_sent; 1.12936 + int now_filled = 0; 1.12937 + unsigned int inqueue_bytes = 0; 1.12938 + struct sctp_block_entry be; 1.12939 + struct sctp_inpcb *inp; 1.12940 + struct sctp_tcb *stcb = NULL; 1.12941 + struct timeval now; 1.12942 + struct sctp_nets *net; 1.12943 + struct sctp_association *asoc; 1.12944 + struct sctp_inpcb *t_inp; 1.12945 + int user_marks_eor; 1.12946 + int create_lock_applied = 0; 1.12947 + int nagle_applies = 0; 1.12948 + int some_on_control = 0; 1.12949 + int got_all_of_the_send = 0; 1.12950 + int hold_tcblock = 0; 1.12951 + int non_blocking = 0; 1.12952 + uint32_t local_add_more, local_soresv = 0; 1.12953 + uint16_t port; 1.12954 + uint16_t sinfo_flags; 1.12955 + sctp_assoc_t sinfo_assoc_id; 1.12956 + 1.12957 + error = 0; 1.12958 + net = NULL; 1.12959 + stcb = NULL; 1.12960 + asoc = NULL; 1.12961 + 1.12962 +#if defined(__APPLE__) 1.12963 + sctp_lock_assert(so); 1.12964 +#endif 1.12965 + t_inp = inp = (struct sctp_inpcb *)so->so_pcb; 1.12966 + if (inp == NULL) { 1.12967 + SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.12968 + error = EINVAL; 1.12969 + if (i_pak) { 1.12970 + SCTP_RELEASE_PKT(i_pak); 1.12971 + } 1.12972 + return (error); 1.12973 + } 1.12974 + if ((uio == NULL) && (i_pak == NULL)) { 1.12975 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.12976 + return (EINVAL); 1.12977 + } 1.12978 + user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 1.12979 + atomic_add_int(&inp->total_sends, 1); 1.12980 + if (uio) { 1.12981 +#if defined(__APPLE__) 1.12982 +#if defined(APPLE_LEOPARD) 1.12983 + if (uio->uio_resid < 0) { 1.12984 +#else 1.12985 + if (uio_resid(uio) < 0) { 1.12986 +#endif 1.12987 +#else 1.12988 + if (uio->uio_resid < 0) { 1.12989 +#endif 1.12990 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.12991 + return (EINVAL); 1.12992 + } 1.12993 +#if defined(__APPLE__) 1.12994 +#if defined(APPLE_LEOPARD) 1.12995 + sndlen = uio->uio_resid; 1.12996 +#else 1.12997 + sndlen = uio_resid(uio); 1.12998 +#endif 1.12999 +#else 1.13000 + sndlen = uio->uio_resid; 1.13001 +#endif 1.13002 + } else { 1.13003 + top = SCTP_HEADER_TO_CHAIN(i_pak); 1.13004 +#ifdef __Panda__ 1.13005 + /*- 1.13006 + * app len indicates the datalen, dgsize for cases 1.13007 + * of SCTP_EOF/ABORT 
will not have the right len 1.13008 + */ 1.13009 + sndlen = SCTP_APP_DATA_LEN(i_pak); 1.13010 + /*- 1.13011 + * Set the particle len also to zero to match 1.13012 + * up with app len. We only have one particle 1.13013 + * if app len is zero for Panda. This is ensured 1.13014 + * in the socket lib 1.13015 + */ 1.13016 + if (sndlen == 0) { 1.13017 + SCTP_BUF_LEN(top) = 0; 1.13018 + } 1.13019 + /*- 1.13020 + * We delink the chain from header, but keep 1.13021 + * the header around as we will need it in 1.13022 + * EAGAIN case 1.13023 + */ 1.13024 + SCTP_DETACH_HEADER_FROM_CHAIN(i_pak); 1.13025 +#else 1.13026 + sndlen = SCTP_HEADER_LEN(i_pak); 1.13027 +#endif 1.13028 + } 1.13029 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n", 1.13030 + (void *)addr, 1.13031 + sndlen); 1.13032 +#ifdef __Panda__ 1.13033 + if (i_control) { 1.13034 + control = SCTP_HEADER_TO_CHAIN(i_control); 1.13035 + } 1.13036 +#endif 1.13037 + if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 1.13038 + (inp->sctp_socket->so_qlimit)) { 1.13039 + /* The listener can NOT send */ 1.13040 + SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN); 1.13041 + error = ENOTCONN; 1.13042 + goto out_unlocked; 1.13043 + } 1.13044 + /** 1.13045 + * Pre-screen address, if one is given the sin-len 1.13046 + * must be set correctly! 1.13047 + */ 1.13048 + if (addr) { 1.13049 + union sctp_sockstore *raddr = (union sctp_sockstore *)addr; 1.13050 + switch (raddr->sa.sa_family) { 1.13051 +#ifdef INET 1.13052 + case AF_INET: 1.13053 +#ifdef HAVE_SIN_LEN 1.13054 + if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) { 1.13055 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.13056 + error = EINVAL; 1.13057 + goto out_unlocked; 1.13058 + } 1.13059 +#endif 1.13060 + port = raddr->sin.sin_port; 1.13061 + break; 1.13062 +#endif 1.13063 +#ifdef INET6 1.13064 + case AF_INET6: 1.13065 +#ifdef HAVE_SIN6_LEN 1.13066 + if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) { 1.13067 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.13068 + error = EINVAL; 1.13069 + goto out_unlocked; 1.13070 + } 1.13071 +#endif 1.13072 + port = raddr->sin6.sin6_port; 1.13073 + break; 1.13074 +#endif 1.13075 +#if defined(__Userspace__) 1.13076 + case AF_CONN: 1.13077 +#ifdef HAVE_SCONN_LEN 1.13078 + if (raddr->sconn.sconn_len != sizeof(struct sockaddr_conn)) { 1.13079 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.13080 + error = EINVAL; 1.13081 + goto out_unlocked; 1.13082 + } 1.13083 +#endif 1.13084 + port = raddr->sconn.sconn_port; 1.13085 + break; 1.13086 +#endif 1.13087 + default: 1.13088 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT); 1.13089 + error = EAFNOSUPPORT; 1.13090 + goto out_unlocked; 1.13091 + } 1.13092 + } else 1.13093 + port = 0; 1.13094 + 1.13095 + if (srcv) { 1.13096 + sinfo_flags = srcv->sinfo_flags; 1.13097 + sinfo_assoc_id = srcv->sinfo_assoc_id; 1.13098 + if (INVALID_SINFO_FLAG(sinfo_flags) || 1.13099 + PR_SCTP_INVALID_POLICY(sinfo_flags)) { 1.13100 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.13101 + error = EINVAL; 1.13102 + goto out_unlocked; 1.13103 + } 1.13104 + if (srcv->sinfo_flags) 1.13105 + SCTP_STAT_INCR(sctps_sends_with_flags); 1.13106 + } else { 1.13107 + sinfo_flags = inp->def_send.sinfo_flags; 1.13108 + sinfo_assoc_id = inp->def_send.sinfo_assoc_id; 1.13109 + } 1.13110 + if (sinfo_flags & SCTP_SENDALL) { 1.13111 + /* its a sendall */ 1.13112 + error = sctp_sendall(inp, uio, top, 
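Note: the pre-screen above insists that a caller-supplied address belong to a supported family (and, on platforms with sa_len, that its length match the family exactly) before the port is pulled out. A reduced sketch of that check; the sa_len comparison is BSD-only and omitted here, and the return values mirror the EINVAL / EAFNOSUPPORT handling above.

#include <stddef.h>
#include <errno.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int
prescreen_addr(const struct sockaddr *sa, in_port_t *port)
{
	if (sa == NULL) {
		*port = 0;                       /* no address given */
		return (0);
	}
	switch (sa->sa_family) {
	case AF_INET:
		*port = ((const struct sockaddr_in *)(const void *)sa)->sin_port;
		return (0);
	case AF_INET6:
		*port = ((const struct sockaddr_in6 *)(const void *)sa)->sin6_port;
		return (0);
	default:
		return (EAFNOSUPPORT);
	}
}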
srcv); 1.13113 + top = NULL; 1.13114 + goto out_unlocked; 1.13115 + } 1.13116 + if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) { 1.13117 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.13118 + error = EINVAL; 1.13119 + goto out_unlocked; 1.13120 + } 1.13121 + /* now we must find the assoc */ 1.13122 + if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) || 1.13123 + (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 1.13124 + SCTP_INP_RLOCK(inp); 1.13125 + stcb = LIST_FIRST(&inp->sctp_asoc_list); 1.13126 + if (stcb) { 1.13127 + SCTP_TCB_LOCK(stcb); 1.13128 + hold_tcblock = 1; 1.13129 + } 1.13130 + SCTP_INP_RUNLOCK(inp); 1.13131 + } else if (sinfo_assoc_id) { 1.13132 + stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 0); 1.13133 + } else if (addr) { 1.13134 + /*- 1.13135 + * Since we did not use findep we must 1.13136 + * increment it, and if we don't find a tcb 1.13137 + * decrement it. 1.13138 + */ 1.13139 + SCTP_INP_WLOCK(inp); 1.13140 + SCTP_INP_INCR_REF(inp); 1.13141 + SCTP_INP_WUNLOCK(inp); 1.13142 + stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL); 1.13143 + if (stcb == NULL) { 1.13144 + SCTP_INP_WLOCK(inp); 1.13145 + SCTP_INP_DECR_REF(inp); 1.13146 + SCTP_INP_WUNLOCK(inp); 1.13147 + } else { 1.13148 + hold_tcblock = 1; 1.13149 + } 1.13150 + } 1.13151 + if ((stcb == NULL) && (addr)) { 1.13152 + /* Possible implicit send? */ 1.13153 + SCTP_ASOC_CREATE_LOCK(inp); 1.13154 + create_lock_applied = 1; 1.13155 + if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 1.13156 + (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 1.13157 + /* Should I really unlock ? */ 1.13158 + SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.13159 + error = EINVAL; 1.13160 + goto out_unlocked; 1.13161 + 1.13162 + } 1.13163 + if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) && 1.13164 + (addr->sa_family == AF_INET6)) { 1.13165 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.13166 + error = EINVAL; 1.13167 + goto out_unlocked; 1.13168 + } 1.13169 + SCTP_INP_WLOCK(inp); 1.13170 + SCTP_INP_INCR_REF(inp); 1.13171 + SCTP_INP_WUNLOCK(inp); 1.13172 + /* With the lock applied look again */ 1.13173 + stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL); 1.13174 + if ((stcb == NULL) && (control != NULL) && (port > 0)) { 1.13175 + stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error); 1.13176 + } 1.13177 + if (stcb == NULL) { 1.13178 + SCTP_INP_WLOCK(inp); 1.13179 + SCTP_INP_DECR_REF(inp); 1.13180 + SCTP_INP_WUNLOCK(inp); 1.13181 + } else { 1.13182 + hold_tcblock = 1; 1.13183 + } 1.13184 + if (error) { 1.13185 + goto out_unlocked; 1.13186 + } 1.13187 + if (t_inp != inp) { 1.13188 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN); 1.13189 + error = ENOTCONN; 1.13190 + goto out_unlocked; 1.13191 + } 1.13192 + } 1.13193 + if (stcb == NULL) { 1.13194 + if (addr == NULL) { 1.13195 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT); 1.13196 + error = ENOENT; 1.13197 + goto out_unlocked; 1.13198 + } else { 1.13199 + /* We must go ahead and start the INIT process */ 1.13200 + uint32_t vrf_id; 1.13201 + 1.13202 + if ((sinfo_flags & SCTP_ABORT) || 1.13203 + ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) { 1.13204 + /*- 1.13205 + * User asks to abort a non-existant assoc, 1.13206 + * or EOF a non-existant assoc with no data 1.13207 + */ 1.13208 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT); 1.13209 + error = ENOENT; 1.13210 + goto 
out_unlocked; 1.13211 + } 1.13212 + /* get an asoc/stcb struct */ 1.13213 + vrf_id = inp->def_vrf_id; 1.13214 +#ifdef INVARIANTS 1.13215 + if (create_lock_applied == 0) { 1.13216 + panic("Error, should hold create lock and I don't?"); 1.13217 + } 1.13218 +#endif 1.13219 + stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, 1.13220 +#if !(defined( __Panda__) || defined(__Userspace__)) 1.13221 + p 1.13222 +#else 1.13223 + (struct proc *)NULL 1.13224 +#endif 1.13225 + ); 1.13226 + if (stcb == NULL) { 1.13227 + /* Error is setup for us in the call */ 1.13228 + goto out_unlocked; 1.13229 + } 1.13230 + if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 1.13231 + stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 1.13232 + /* Set the connected flag so we can queue data */ 1.13233 + soisconnecting(so); 1.13234 + } 1.13235 + hold_tcblock = 1; 1.13236 + if (create_lock_applied) { 1.13237 + SCTP_ASOC_CREATE_UNLOCK(inp); 1.13238 + create_lock_applied = 0; 1.13239 + } else { 1.13240 + SCTP_PRINTF("Huh-3? create lock should have been on??\n"); 1.13241 + } 1.13242 + /* Turn on queue only flag to prevent data from being sent */ 1.13243 + queue_only = 1; 1.13244 + asoc = &stcb->asoc; 1.13245 + SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT); 1.13246 + (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 1.13247 + 1.13248 + /* initialize authentication params for the assoc */ 1.13249 + sctp_initialize_auth_params(inp, stcb); 1.13250 + 1.13251 + if (control) { 1.13252 + if (sctp_process_cmsgs_for_init(stcb, control, &error)) { 1.13253 + sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_7); 1.13254 + hold_tcblock = 0; 1.13255 + stcb = NULL; 1.13256 + goto out_unlocked; 1.13257 + } 1.13258 + } 1.13259 + /* out with the INIT */ 1.13260 + queue_only_for_init = 1; 1.13261 + /*- 1.13262 + * we may want to dig in after this call and adjust the MTU 1.13263 + * value. It defaulted to 1500 (constant) but the ro 1.13264 + * structure may now have an update and thus we may need to 1.13265 + * change it BEFORE we append the message. 
1.13266 + */ 1.13267 + } 1.13268 + } else 1.13269 + asoc = &stcb->asoc; 1.13270 + if (srcv == NULL) 1.13271 + srcv = (struct sctp_sndrcvinfo *)&asoc->def_send; 1.13272 + if (srcv->sinfo_flags & SCTP_ADDR_OVER) { 1.13273 + if (addr) 1.13274 + net = sctp_findnet(stcb, addr); 1.13275 + else 1.13276 + net = NULL; 1.13277 + if ((net == NULL) || 1.13278 + ((port != 0) && (port != stcb->rport))) { 1.13279 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.13280 + error = EINVAL; 1.13281 + goto out_unlocked; 1.13282 + } 1.13283 + } else { 1.13284 + if (stcb->asoc.alternate) { 1.13285 + net = stcb->asoc.alternate; 1.13286 + } else { 1.13287 + net = stcb->asoc.primary_destination; 1.13288 + } 1.13289 + } 1.13290 + atomic_add_int(&stcb->total_sends, 1); 1.13291 + /* Keep the stcb from being freed under our feet */ 1.13292 + atomic_add_int(&asoc->refcnt, 1); 1.13293 + free_cnt_applied = 1; 1.13294 + 1.13295 + if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) { 1.13296 + if (sndlen > asoc->smallest_mtu) { 1.13297 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE); 1.13298 + error = EMSGSIZE; 1.13299 + goto out_unlocked; 1.13300 + } 1.13301 + } 1.13302 +#if defined(__Userspace__) 1.13303 + if (inp->recv_callback) { 1.13304 + non_blocking = 1; 1.13305 + } 1.13306 +#else 1.13307 + if (SCTP_SO_IS_NBIO(so) 1.13308 +#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 1.13309 + || (flags & MSG_NBIO) 1.13310 +#endif 1.13311 + ) { 1.13312 + non_blocking = 1; 1.13313 + } 1.13314 +#endif 1.13315 + /* would we block? */ 1.13316 + if (non_blocking) { 1.13317 + if (hold_tcblock == 0) { 1.13318 + SCTP_TCB_LOCK(stcb); 1.13319 + hold_tcblock = 1; 1.13320 + } 1.13321 + inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 1.13322 + if ((SCTP_SB_LIMIT_SND(so) < (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) || 1.13323 + (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { 1.13324 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK); 1.13325 + if (sndlen > SCTP_SB_LIMIT_SND(so)) 1.13326 + error = EMSGSIZE; 1.13327 + else 1.13328 + error = EWOULDBLOCK; 1.13329 + goto out_unlocked; 1.13330 + } 1.13331 + stcb->asoc.sb_send_resv += sndlen; 1.13332 + SCTP_TCB_UNLOCK(stcb); 1.13333 + hold_tcblock = 0; 1.13334 + } else { 1.13335 + atomic_add_int(&stcb->asoc.sb_send_resv, sndlen); 1.13336 + } 1.13337 + local_soresv = sndlen; 1.13338 + if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 1.13339 + SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); 1.13340 + error = ECONNRESET; 1.13341 + goto out_unlocked; 1.13342 + } 1.13343 + if (create_lock_applied) { 1.13344 + SCTP_ASOC_CREATE_UNLOCK(inp); 1.13345 + create_lock_applied = 0; 1.13346 + } 1.13347 + if (asoc->stream_reset_outstanding) { 1.13348 + /* 1.13349 + * Can't queue any data while stream reset is underway. 
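Note: the non-blocking path above refuses a send up front when it would not fit next to what is already queued or reserved, preferring EMSGSIZE when the message alone exceeds the whole send-buffer limit (waiting could never help) and EWOULDBLOCK otherwise. A sketch of that admission check; all parameters are illustrative stand-ins for the association and socket fields used above.

#include <errno.h>
#include <stdint.h>

static int
nonblocking_send_check(uint32_t sndlen, uint32_t inqueue_bytes,
                       uint32_t send_resv, uint32_t sb_limit,
                       uint32_t chunks_on_queue, uint32_t max_chunks)
{
	if (sb_limit < sndlen + inqueue_bytes + send_resv ||
	    chunks_on_queue >= max_chunks) {
		if (sndlen > sb_limit)
			return (EMSGSIZE);       /* can never fit */
		return (EWOULDBLOCK);            /* might fit later */
	}
	return (0);
}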
1.13350 + */ 1.13351 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAGAIN); 1.13352 + error = EAGAIN; 1.13353 + goto out_unlocked; 1.13354 + } 1.13355 + if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 1.13356 + (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 1.13357 + queue_only = 1; 1.13358 + } 1.13359 + /* we are now done with all control */ 1.13360 + if (control) { 1.13361 + sctp_m_freem(control); 1.13362 + control = NULL; 1.13363 + } 1.13364 + if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) || 1.13365 + (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) || 1.13366 + (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) || 1.13367 + (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) { 1.13368 + if (srcv->sinfo_flags & SCTP_ABORT) { 1.13369 + ; 1.13370 + } else { 1.13371 + SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); 1.13372 + error = ECONNRESET; 1.13373 + goto out_unlocked; 1.13374 + } 1.13375 + } 1.13376 + /* Ok, we will attempt a msgsnd :> */ 1.13377 +#if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)) 1.13378 + if (p) { 1.13379 +#if defined(__FreeBSD__) && __FreeBSD_version >= 603000 1.13380 + p->td_ru.ru_msgsnd++; 1.13381 +#elif defined(__FreeBSD__) && __FreeBSD_version >= 500000 1.13382 + p->td_proc->p_stats->p_ru.ru_msgsnd++; 1.13383 +#else 1.13384 + p->p_stats->p_ru.ru_msgsnd++; 1.13385 +#endif 1.13386 + } 1.13387 +#endif 1.13388 + /* Are we aborting? */ 1.13389 + if (srcv->sinfo_flags & SCTP_ABORT) { 1.13390 + struct mbuf *mm; 1.13391 + int tot_demand, tot_out = 0, max_out; 1.13392 + 1.13393 + SCTP_STAT_INCR(sctps_sends_with_abort); 1.13394 + if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 1.13395 + (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 1.13396 + /* It has to be up before we abort */ 1.13397 + /* how big is the user initiated abort? 
*/ 1.13398 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.13399 + error = EINVAL; 1.13400 + goto out; 1.13401 + } 1.13402 + if (hold_tcblock) { 1.13403 + SCTP_TCB_UNLOCK(stcb); 1.13404 + hold_tcblock = 0; 1.13405 + } 1.13406 + if (top) { 1.13407 + struct mbuf *cntm = NULL; 1.13408 + 1.13409 + mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA); 1.13410 + if (sndlen != 0) { 1.13411 + for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) { 1.13412 + tot_out += SCTP_BUF_LEN(cntm); 1.13413 + } 1.13414 + } 1.13415 + } else { 1.13416 + /* Must fit in a MTU */ 1.13417 + tot_out = sndlen; 1.13418 + tot_demand = (tot_out + sizeof(struct sctp_paramhdr)); 1.13419 + if (tot_demand > SCTP_DEFAULT_ADD_MORE) { 1.13420 + /* To big */ 1.13421 + SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE); 1.13422 + error = EMSGSIZE; 1.13423 + goto out; 1.13424 + } 1.13425 + mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAITOK, 1, MT_DATA); 1.13426 + } 1.13427 + if (mm == NULL) { 1.13428 + SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM); 1.13429 + error = ENOMEM; 1.13430 + goto out; 1.13431 + } 1.13432 + max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr); 1.13433 + max_out -= sizeof(struct sctp_abort_msg); 1.13434 + if (tot_out > max_out) { 1.13435 + tot_out = max_out; 1.13436 + } 1.13437 + if (mm) { 1.13438 + struct sctp_paramhdr *ph; 1.13439 + 1.13440 + /* now move forward the data pointer */ 1.13441 + ph = mtod(mm, struct sctp_paramhdr *); 1.13442 + ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 1.13443 + ph->param_length = htons(sizeof(struct sctp_paramhdr) + tot_out); 1.13444 + ph++; 1.13445 + SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr); 1.13446 + if (top == NULL) { 1.13447 +#if defined(__APPLE__) 1.13448 + SCTP_SOCKET_UNLOCK(so, 0); 1.13449 +#endif 1.13450 + error = uiomove((caddr_t)ph, (int)tot_out, uio); 1.13451 +#if defined(__APPLE__) 1.13452 + SCTP_SOCKET_LOCK(so, 0); 1.13453 +#endif 1.13454 + if (error) { 1.13455 + /*- 1.13456 + * Here if we can't get his data we 1.13457 + * still abort we just don't get to 1.13458 + * send the users note :-0 1.13459 + */ 1.13460 + sctp_m_freem(mm); 1.13461 + mm = NULL; 1.13462 + } 1.13463 + } else { 1.13464 + if (sndlen != 0) { 1.13465 + SCTP_BUF_NEXT(mm) = top; 1.13466 + } 1.13467 + } 1.13468 + } 1.13469 + if (hold_tcblock == 0) { 1.13470 + SCTP_TCB_LOCK(stcb); 1.13471 + } 1.13472 + atomic_add_int(&stcb->asoc.refcnt, -1); 1.13473 + free_cnt_applied = 0; 1.13474 + /* release this lock, otherwise we hang on ourselves */ 1.13475 + sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED); 1.13476 + /* now relock the stcb so everything is sane */ 1.13477 + hold_tcblock = 0; 1.13478 + stcb = NULL; 1.13479 + /* In this case top is already chained to mm 1.13480 + * avoid double free, since we free it below if 1.13481 + * top != NULL and driver would free it after sending 1.13482 + * the packet out 1.13483 + */ 1.13484 + if (sndlen != 0) { 1.13485 + top = NULL; 1.13486 + } 1.13487 + goto out_unlocked; 1.13488 + } 1.13489 + /* Calculate the maximum we can send */ 1.13490 + inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 1.13491 + if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) { 1.13492 + if (non_blocking) { 1.13493 + /* we already checked for non-blocking above. 
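Note: the abort path above lays a parameter header in front of the user's abort payload, with the length covering header plus payload and the payload truncated so the cause still fits in one MTU. A userland sketch of that TLV; cause code 12 is "User-Initiated Abort" from RFC 4960, and the buffer handling stands in for the mbuf work above.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct cause_hdr {                       /* mirrors struct sctp_paramhdr */
	uint16_t code;
	uint16_t length;
};

#define CAUSE_USER_INITIATED_ABT 12

static size_t
build_user_abort_cause(uint8_t *buf, size_t room,
                       const uint8_t *user_data, size_t user_len)
{
	struct cause_hdr ch;
	size_t max_payload;

	if (room < sizeof(ch))
		return (0);
	max_payload = room - sizeof(ch);
	if (user_len > max_payload)
		user_len = max_payload;          /* truncate, as the code above does */
	ch.code = htons(CAUSE_USER_INITIATED_ABT);
	ch.length = htons((uint16_t)(sizeof(ch) + user_len));
	memcpy(buf, &ch, sizeof(ch));
	memcpy(buf + sizeof(ch), user_data, user_len);
	return (sizeof(ch) + user_len);
}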
*/ 1.13494 + max_len = sndlen; 1.13495 + } else { 1.13496 + max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; 1.13497 + } 1.13498 + } else { 1.13499 + max_len = 0; 1.13500 + } 1.13501 + if (hold_tcblock) { 1.13502 + SCTP_TCB_UNLOCK(stcb); 1.13503 + hold_tcblock = 0; 1.13504 + } 1.13505 + /* Is the stream no. valid? */ 1.13506 + if (srcv->sinfo_stream >= asoc->streamoutcnt) { 1.13507 + /* Invalid stream number */ 1.13508 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.13509 + error = EINVAL; 1.13510 + goto out_unlocked; 1.13511 + } 1.13512 + if (asoc->strmout == NULL) { 1.13513 + /* huh? software error */ 1.13514 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT); 1.13515 + error = EFAULT; 1.13516 + goto out_unlocked; 1.13517 + } 1.13518 + 1.13519 + /* Unless E_EOR mode is on, we must make a send FIT in one call. */ 1.13520 + if ((user_marks_eor == 0) && 1.13521 + (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) { 1.13522 + /* It will NEVER fit */ 1.13523 + SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE); 1.13524 + error = EMSGSIZE; 1.13525 + goto out_unlocked; 1.13526 + } 1.13527 + if ((uio == NULL) && user_marks_eor) { 1.13528 + /*- 1.13529 + * We do not support eeor mode for 1.13530 + * sending with mbuf chains (like sendfile). 1.13531 + */ 1.13532 + SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.13533 + error = EINVAL; 1.13534 + goto out_unlocked; 1.13535 + } 1.13536 + 1.13537 + if (user_marks_eor) { 1.13538 + local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold)); 1.13539 + } else { 1.13540 + /*- 1.13541 + * For non-eeor the whole message must fit in 1.13542 + * the socket send buffer. 1.13543 + */ 1.13544 + local_add_more = sndlen; 1.13545 + } 1.13546 + len = 0; 1.13547 + if (non_blocking) { 1.13548 + goto skip_preblock; 1.13549 + } 1.13550 + if (((max_len <= local_add_more) && 1.13551 + (SCTP_SB_LIMIT_SND(so) >= local_add_more)) || 1.13552 + (max_len == 0) || 1.13553 + ((stcb->asoc.chunks_on_out_queue+stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { 1.13554 + /* No room right now ! 
*/ 1.13555 + SOCKBUF_LOCK(&so->so_snd); 1.13556 + inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 1.13557 + while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) || 1.13558 + ((stcb->asoc.stream_queue_cnt+stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) { 1.13559 + SCTPDBG(SCTP_DEBUG_OUTPUT1,"pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n", 1.13560 + (unsigned int)SCTP_SB_LIMIT_SND(so), 1.13561 + inqueue_bytes, 1.13562 + local_add_more, 1.13563 + stcb->asoc.stream_queue_cnt, 1.13564 + stcb->asoc.chunks_on_out_queue, 1.13565 + SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)); 1.13566 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 1.13567 + sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen); 1.13568 + } 1.13569 + be.error = 0; 1.13570 +#if !defined(__Panda__) && !defined(__Windows__) 1.13571 + stcb->block_entry = &be; 1.13572 +#endif 1.13573 + error = sbwait(&so->so_snd); 1.13574 + stcb->block_entry = NULL; 1.13575 + if (error || so->so_error || be.error) { 1.13576 + if (error == 0) { 1.13577 + if (so->so_error) 1.13578 + error = so->so_error; 1.13579 + if (be.error) { 1.13580 + error = be.error; 1.13581 + } 1.13582 + } 1.13583 + SOCKBUF_UNLOCK(&so->so_snd); 1.13584 + goto out_unlocked; 1.13585 + } 1.13586 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 1.13587 + sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, 1.13588 + asoc, stcb->asoc.total_output_queue_size); 1.13589 + } 1.13590 + if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 1.13591 + goto out_unlocked; 1.13592 + } 1.13593 + inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 1.13594 + } 1.13595 + if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) { 1.13596 + max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; 1.13597 + } else { 1.13598 + max_len = 0; 1.13599 + } 1.13600 + SOCKBUF_UNLOCK(&so->so_snd); 1.13601 + } 1.13602 + 1.13603 +skip_preblock: 1.13604 + if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 1.13605 + goto out_unlocked; 1.13606 + } 1.13607 +#if defined(__APPLE__) 1.13608 + error = sblock(&so->so_snd, SBLOCKWAIT(flags)); 1.13609 +#endif 1.13610 + /* sndlen covers for mbuf case 1.13611 + * uio_resid covers for the non-mbuf case 1.13612 + * NOTE: uio will be null when top/mbuf is passed 1.13613 + */ 1.13614 + if (sndlen == 0) { 1.13615 + if (srcv->sinfo_flags & SCTP_EOF) { 1.13616 + got_all_of_the_send = 1; 1.13617 + goto dataless_eof; 1.13618 + } else { 1.13619 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.13620 + error = EINVAL; 1.13621 + goto out; 1.13622 + } 1.13623 + } 1.13624 + if (top == NULL) { 1.13625 + struct sctp_stream_queue_pending *sp; 1.13626 + struct sctp_stream_out *strm; 1.13627 + uint32_t sndout; 1.13628 + 1.13629 + SCTP_TCB_SEND_LOCK(stcb); 1.13630 + if ((asoc->stream_locked) && 1.13631 + (asoc->stream_locked_on != srcv->sinfo_stream)) { 1.13632 + SCTP_TCB_SEND_UNLOCK(stcb); 1.13633 + SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL); 1.13634 + error = EINVAL; 1.13635 + goto out; 1.13636 + } 1.13637 + SCTP_TCB_SEND_UNLOCK(stcb); 1.13638 + 1.13639 + strm = &stcb->asoc.strmout[srcv->sinfo_stream]; 1.13640 + if (strm->last_msg_incomplete == 0) { 1.13641 + do_a_copy_in: 1.13642 + sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error); 1.13643 + if ((sp == NULL) || (error)) { 1.13644 + goto out; 1.13645 + } 1.13646 
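Note: the pre-block loop above re-checks "is there room?" under the socket-buffer lock and sleeps in sbwait() until space is freed, looping because both the wakeup and the freed space can be stolen before the sender runs again, and re-checking so_error / be.error after every sleep. A sketch of that shape using pthreads in place of SOCKBUF_LOCK/sbwait(); the fields are illustrative.

#include <pthread.h>
#include <stdint.h>

struct sendbuf {
	pthread_mutex_t lock;
	pthread_cond_t  room;      /* signalled when queued bytes shrink */
	uint32_t        limit;
	uint32_t        queued;
	int             error;     /* sticky error, checked after sleeping */
};

static int
wait_for_room(struct sendbuf *sb, uint32_t need)
{
	int error = 0;

	pthread_mutex_lock(&sb->lock);
	while (sb->limit < sb->queued + need) {
		pthread_cond_wait(&sb->room, &sb->lock);   /* like sbwait() */
		if (sb->error != 0) {                      /* like so_error / be.error */
			error = sb->error;
			break;
		}
	}
	pthread_mutex_unlock(&sb->lock);
	return (error);
}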
+ SCTP_TCB_SEND_LOCK(stcb); 1.13647 + if (sp->msg_is_complete) { 1.13648 + strm->last_msg_incomplete = 0; 1.13649 + asoc->stream_locked = 0; 1.13650 + } else { 1.13651 + /* Just got locked to this guy in 1.13652 + * case of an interrupt. 1.13653 + */ 1.13654 + strm->last_msg_incomplete = 1; 1.13655 + asoc->stream_locked = 1; 1.13656 + asoc->stream_locked_on = srcv->sinfo_stream; 1.13657 + sp->sender_all_done = 0; 1.13658 + } 1.13659 + sctp_snd_sb_alloc(stcb, sp->length); 1.13660 + atomic_add_int(&asoc->stream_queue_cnt, 1); 1.13661 + if (srcv->sinfo_flags & SCTP_UNORDERED) { 1.13662 + SCTP_STAT_INCR(sctps_sends_with_unord); 1.13663 + } 1.13664 + TAILQ_INSERT_TAIL(&strm->outqueue, sp, next); 1.13665 + stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1); 1.13666 + SCTP_TCB_SEND_UNLOCK(stcb); 1.13667 + } else { 1.13668 + SCTP_TCB_SEND_LOCK(stcb); 1.13669 + sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead); 1.13670 + SCTP_TCB_SEND_UNLOCK(stcb); 1.13671 + if (sp == NULL) { 1.13672 + /* ???? Huh ??? last msg is gone */ 1.13673 +#ifdef INVARIANTS 1.13674 + panic("Warning: Last msg marked incomplete, yet nothing left?"); 1.13675 +#else 1.13676 + SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n"); 1.13677 + strm->last_msg_incomplete = 0; 1.13678 +#endif 1.13679 + goto do_a_copy_in; 1.13680 + 1.13681 + } 1.13682 + } 1.13683 +#if defined(__APPLE__) 1.13684 +#if defined(APPLE_LEOPARD) 1.13685 + while (uio->uio_resid > 0) { 1.13686 +#else 1.13687 + while (uio_resid(uio) > 0) { 1.13688 +#endif 1.13689 +#else 1.13690 + while (uio->uio_resid > 0) { 1.13691 +#endif 1.13692 + /* How much room do we have? */ 1.13693 + struct mbuf *new_tail, *mm; 1.13694 + 1.13695 + if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size) 1.13696 + max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size; 1.13697 + else 1.13698 + max_len = 0; 1.13699 + 1.13700 + if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) || 1.13701 + (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) || 1.13702 +#if defined(__APPLE__) 1.13703 +#if defined(APPLE_LEOPARD) 1.13704 + (uio->uio_resid && (uio->uio_resid <= (int)max_len))) { 1.13705 +#else 1.13706 + (uio_resid(uio) && (uio_resid(uio) <= (int)max_len))) { 1.13707 +#endif 1.13708 +#else 1.13709 + (uio->uio_resid && (uio->uio_resid <= (int)max_len))) { 1.13710 +#endif 1.13711 + sndout = 0; 1.13712 + new_tail = NULL; 1.13713 + if (hold_tcblock) { 1.13714 + SCTP_TCB_UNLOCK(stcb); 1.13715 + hold_tcblock = 0; 1.13716 + } 1.13717 +#if defined(__APPLE__) 1.13718 + SCTP_SOCKET_UNLOCK(so, 0); 1.13719 +#endif 1.13720 +#if defined(__FreeBSD__) && __FreeBSD_version > 602000 1.13721 + mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail); 1.13722 +#else 1.13723 + mm = sctp_copy_resume(uio, max_len, &error, &sndout, &new_tail); 1.13724 +#endif 1.13725 +#if defined(__APPLE__) 1.13726 + SCTP_SOCKET_LOCK(so, 0); 1.13727 +#endif 1.13728 + if ((mm == NULL) || error) { 1.13729 + if (mm) { 1.13730 + sctp_m_freem(mm); 1.13731 + } 1.13732 + goto out; 1.13733 + } 1.13734 + /* Update the mbuf and count */ 1.13735 + SCTP_TCB_SEND_LOCK(stcb); 1.13736 + if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 1.13737 + /* we need to get out. 1.13738 + * Peer probably aborted. 
1.13739 + */ 1.13740 + sctp_m_freem(mm); 1.13741 + if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) { 1.13742 + SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET); 1.13743 + error = ECONNRESET; 1.13744 + } 1.13745 + SCTP_TCB_SEND_UNLOCK(stcb); 1.13746 + goto out; 1.13747 + } 1.13748 + if (sp->tail_mbuf) { 1.13749 + /* tack it to the end */ 1.13750 + SCTP_BUF_NEXT(sp->tail_mbuf) = mm; 1.13751 + sp->tail_mbuf = new_tail; 1.13752 + } else { 1.13753 + /* A stolen mbuf */ 1.13754 + sp->data = mm; 1.13755 + sp->tail_mbuf = new_tail; 1.13756 + } 1.13757 + sctp_snd_sb_alloc(stcb, sndout); 1.13758 + atomic_add_int(&sp->length,sndout); 1.13759 + len += sndout; 1.13760 + 1.13761 + /* Did we reach EOR? */ 1.13762 +#if defined(__APPLE__) 1.13763 +#if defined(APPLE_LEOPARD) 1.13764 + if ((uio->uio_resid == 0) && 1.13765 +#else 1.13766 + if ((uio_resid(uio) == 0) && 1.13767 +#endif 1.13768 +#else 1.13769 + if ((uio->uio_resid == 0) && 1.13770 +#endif 1.13771 + ((user_marks_eor == 0) || 1.13772 + (srcv->sinfo_flags & SCTP_EOF) || 1.13773 + (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) { 1.13774 + sp->msg_is_complete = 1; 1.13775 + } else { 1.13776 + sp->msg_is_complete = 0; 1.13777 + } 1.13778 + SCTP_TCB_SEND_UNLOCK(stcb); 1.13779 + } 1.13780 +#if defined(__APPLE__) 1.13781 +#if defined(APPLE_LEOPARD) 1.13782 + if (uio->uio_resid == 0) { 1.13783 +#else 1.13784 + if (uio_resid(uio) == 0) { 1.13785 +#endif 1.13786 +#else 1.13787 + if (uio->uio_resid == 0) { 1.13788 +#endif 1.13789 + /* got it all? */ 1.13790 + continue; 1.13791 + } 1.13792 + /* PR-SCTP? */ 1.13793 + if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) { 1.13794 + /* This is ugly but we must assure locking order */ 1.13795 + if (hold_tcblock == 0) { 1.13796 + SCTP_TCB_LOCK(stcb); 1.13797 + hold_tcblock = 1; 1.13798 + } 1.13799 + sctp_prune_prsctp(stcb, asoc, srcv, sndlen); 1.13800 + inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk)); 1.13801 + if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size) 1.13802 + max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes; 1.13803 + else 1.13804 + max_len = 0; 1.13805 + if (max_len > 0) { 1.13806 + continue; 1.13807 + } 1.13808 + SCTP_TCB_UNLOCK(stcb); 1.13809 + hold_tcblock = 0; 1.13810 + } 1.13811 + /* wait for space now */ 1.13812 + if (non_blocking) { 1.13813 + /* Non-blocking io in place out */ 1.13814 + goto skip_out_eof; 1.13815 + } 1.13816 + /* What about the INIT, send it maybe */ 1.13817 + if (queue_only_for_init) { 1.13818 + if (hold_tcblock == 0) { 1.13819 + SCTP_TCB_LOCK(stcb); 1.13820 + hold_tcblock = 1; 1.13821 + } 1.13822 + if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) { 1.13823 + /* a collision took us forward? 
*/ 1.13824 + queue_only = 0; 1.13825 + } else { 1.13826 + sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 1.13827 + SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT); 1.13828 + queue_only = 1; 1.13829 + } 1.13830 + } 1.13831 + if ((net->flight_size > net->cwnd) && 1.13832 + (asoc->sctp_cmt_on_off == 0)) { 1.13833 + SCTP_STAT_INCR(sctps_send_cwnd_avoid); 1.13834 + queue_only = 1; 1.13835 + } else if (asoc->ifp_had_enobuf) { 1.13836 + SCTP_STAT_INCR(sctps_ifnomemqueued); 1.13837 + if (net->flight_size > (2 * net->mtu)) { 1.13838 + queue_only = 1; 1.13839 + } 1.13840 + asoc->ifp_had_enobuf = 0; 1.13841 + } 1.13842 + un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 1.13843 + (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk))); 1.13844 + if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 1.13845 + (stcb->asoc.total_flight > 0) && 1.13846 + (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) && 1.13847 + (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) { 1.13848 + 1.13849 + /*- 1.13850 + * Ok, Nagle is set on and we have data outstanding. 1.13851 + * Don't send anything and let SACKs drive out the 1.13852 + * data unless wen have a "full" segment to send. 1.13853 + */ 1.13854 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { 1.13855 + sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); 1.13856 + } 1.13857 + SCTP_STAT_INCR(sctps_naglequeued); 1.13858 + nagle_applies = 1; 1.13859 + } else { 1.13860 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { 1.13861 + if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) 1.13862 + sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); 1.13863 + } 1.13864 + SCTP_STAT_INCR(sctps_naglesent); 1.13865 + nagle_applies = 0; 1.13866 + } 1.13867 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 1.13868 + 1.13869 + sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, 1.13870 + nagle_applies, un_sent); 1.13871 + sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size, 1.13872 + stcb->asoc.total_flight, 1.13873 + stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count); 1.13874 + } 1.13875 + if (queue_only_for_init) 1.13876 + queue_only_for_init = 0; 1.13877 + if ((queue_only == 0) && (nagle_applies == 0)) { 1.13878 + /*- 1.13879 + * need to start chunk output 1.13880 + * before blocking.. note that if 1.13881 + * a lock is already applied, then 1.13882 + * the input via the net is happening 1.13883 + * and I don't need to start output :-D 1.13884 + */ 1.13885 + if (hold_tcblock == 0) { 1.13886 + if (SCTP_TCB_TRYLOCK(stcb)) { 1.13887 + hold_tcblock = 1; 1.13888 + sctp_chunk_output(inp, 1.13889 + stcb, 1.13890 + SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 1.13891 + } 1.13892 + } else { 1.13893 + sctp_chunk_output(inp, 1.13894 + stcb, 1.13895 + SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 1.13896 + } 1.13897 + if (hold_tcblock == 1) { 1.13898 + SCTP_TCB_UNLOCK(stcb); 1.13899 + hold_tcblock = 0; 1.13900 + } 1.13901 + } 1.13902 + SOCKBUF_LOCK(&so->so_snd); 1.13903 + /*- 1.13904 + * This is a bit strange, but I think it will 1.13905 + * work. The total_output_queue_size is locked and 1.13906 + * protected by the TCB_LOCK, which we just released. 1.13907 + * There is a race that can occur between releasing it 1.13908 + * above, and me getting the socket lock, where sacks 1.13909 + * come in but we have not put the SB_WAIT on the 1.13910 + * so_snd buffer to get the wakeup. 
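Note: the Nagle test above holds a send back when NODELAY is off, data is still in flight, bundling has not hit its cap, and less than a full payload's worth (smallest MTU minus overhead) is waiting, so SACKs clock the data out instead. A sketch of that decision; the field names are illustrative parameters, not the association's fields.

#include <stdint.h>

static int
nagle_holds_data(int nodelay, uint32_t total_flight, uint32_t stream_queue_cnt,
                 uint32_t max_bundling, uint32_t un_sent,
                 uint32_t smallest_mtu, uint32_t overhead)
{
	if (nodelay)
		return (0);
	if (total_flight == 0)
		return (0);                      /* nothing outstanding, send now */
	if (stream_queue_cnt >= max_bundling)
		return (0);                      /* plenty queued, send now */
	if (un_sent < smallest_mtu - overhead)
		return (1);                      /* not a full segment yet, wait */
	return (0);
}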
After the LOCK 1.13911 + * is applied the sack_processing will also need to 1.13912 + * LOCK the so->so_snd to do the actual sowwakeup(). So 1.13913 + * once we have the socket buffer lock if we recheck the 1.13914 + * size we KNOW we will get to sleep safely with the 1.13915 + * wakeup flag in place. 1.13916 + */ 1.13917 + if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size + 1.13918 + min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) { 1.13919 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 1.13920 +#if defined(__APPLE__) 1.13921 +#if defined(APPLE_LEOPARD) 1.13922 + sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, 1.13923 + asoc, uio->uio_resid); 1.13924 +#else 1.13925 + sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, 1.13926 + asoc, uio_resid(uio)); 1.13927 +#endif 1.13928 +#else 1.13929 + sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK, 1.13930 + asoc, uio->uio_resid); 1.13931 +#endif 1.13932 + } 1.13933 + be.error = 0; 1.13934 +#if !defined(__Panda__) && !defined(__Windows__) 1.13935 + stcb->block_entry = &be; 1.13936 +#endif 1.13937 +#if defined(__APPLE__) 1.13938 + sbunlock(&so->so_snd, 1); 1.13939 +#endif 1.13940 + error = sbwait(&so->so_snd); 1.13941 + stcb->block_entry = NULL; 1.13942 + 1.13943 + if (error || so->so_error || be.error) { 1.13944 + if (error == 0) { 1.13945 + if (so->so_error) 1.13946 + error = so->so_error; 1.13947 + if (be.error) { 1.13948 + error = be.error; 1.13949 + } 1.13950 + } 1.13951 + SOCKBUF_UNLOCK(&so->so_snd); 1.13952 + goto out_unlocked; 1.13953 + } 1.13954 + 1.13955 +#if defined(__APPLE__) 1.13956 + error = sblock(&so->so_snd, SBLOCKWAIT(flags)); 1.13957 +#endif 1.13958 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 1.13959 + sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK, 1.13960 + asoc, stcb->asoc.total_output_queue_size); 1.13961 + } 1.13962 + } 1.13963 + SOCKBUF_UNLOCK(&so->so_snd); 1.13964 + if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 1.13965 + goto out_unlocked; 1.13966 + } 1.13967 + } 1.13968 + SCTP_TCB_SEND_LOCK(stcb); 1.13969 + if (sp) { 1.13970 + if (sp->msg_is_complete == 0) { 1.13971 + strm->last_msg_incomplete = 1; 1.13972 + asoc->stream_locked = 1; 1.13973 + asoc->stream_locked_on = srcv->sinfo_stream; 1.13974 + } else { 1.13975 + sp->sender_all_done = 1; 1.13976 + strm->last_msg_incomplete = 0; 1.13977 + asoc->stream_locked = 0; 1.13978 + } 1.13979 + } else { 1.13980 + SCTP_PRINTF("Huh no sp TSNH?\n"); 1.13981 + strm->last_msg_incomplete = 0; 1.13982 + asoc->stream_locked = 0; 1.13983 + } 1.13984 + SCTP_TCB_SEND_UNLOCK(stcb); 1.13985 +#if defined(__APPLE__) 1.13986 +#if defined(APPLE_LEOPARD) 1.13987 + if (uio->uio_resid == 0) { 1.13988 +#else 1.13989 + if (uio_resid(uio) == 0) { 1.13990 +#endif 1.13991 +#else 1.13992 + if (uio->uio_resid == 0) { 1.13993 +#endif 1.13994 + got_all_of_the_send = 1; 1.13995 + } 1.13996 + } else { 1.13997 + /* We send in a 0, since we do NOT have any locks */ 1.13998 + error = sctp_msg_append(stcb, net, top, srcv, 0); 1.13999 + top = NULL; 1.14000 + if (srcv->sinfo_flags & SCTP_EOF) { 1.14001 + /* 1.14002 + * This should only happen for Panda for the mbuf 1.14003 + * send case, which does NOT yet support EEOR mode. 1.14004 + * Thus, we can just set this flag to do the proper 1.14005 + * EOF handling. 1.14006 + */ 1.14007 + got_all_of_the_send = 1; 1.14008 + } 1.14009 + } 1.14010 + if (error) { 1.14011 + goto out; 1.14012 + } 1.14013 +dataless_eof: 1.14014 + /* EOF thing ? 
*/ 1.14015 + if ((srcv->sinfo_flags & SCTP_EOF) && 1.14016 + (got_all_of_the_send == 1)) { 1.14017 + int cnt; 1.14018 + SCTP_STAT_INCR(sctps_sends_with_eof); 1.14019 + error = 0; 1.14020 + if (hold_tcblock == 0) { 1.14021 + SCTP_TCB_LOCK(stcb); 1.14022 + hold_tcblock = 1; 1.14023 + } 1.14024 + cnt = sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED); 1.14025 + if (TAILQ_EMPTY(&asoc->send_queue) && 1.14026 + TAILQ_EMPTY(&asoc->sent_queue) && 1.14027 + (cnt == 0)) { 1.14028 + if (asoc->locked_on_sending) { 1.14029 + goto abort_anyway; 1.14030 + } 1.14031 + /* there is nothing queued to send, so I'm done... */ 1.14032 + if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 1.14033 + (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 1.14034 + (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 1.14035 + struct sctp_nets *netp; 1.14036 + 1.14037 + /* only send SHUTDOWN the first time through */ 1.14038 + if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1.14039 + SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1.14040 + } 1.14041 + SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 1.14042 + SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 1.14043 + sctp_stop_timers_for_shutdown(stcb); 1.14044 + if (stcb->asoc.alternate) { 1.14045 + netp = stcb->asoc.alternate; 1.14046 + } else { 1.14047 + netp = stcb->asoc.primary_destination; 1.14048 + } 1.14049 + sctp_send_shutdown(stcb, netp); 1.14050 + sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, 1.14051 + netp); 1.14052 + sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 1.14053 + asoc->primary_destination); 1.14054 + } 1.14055 + } else { 1.14056 + /*- 1.14057 + * we still got (or just got) data to send, so set 1.14058 + * SHUTDOWN_PENDING 1.14059 + */ 1.14060 + /*- 1.14061 + * XXX sockets draft says that SCTP_EOF should be 1.14062 + * sent with no data. 
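Note: once everything the user asked to send has been queued, the SCTP_EOF handling above either moves straight to SHUTDOWN-SENT and emits a SHUTDOWN chunk (nothing left to send) or only marks the association SHUTDOWN-PENDING so the data drains first, with a partially delivered locked message forcing an abort instead. A reduced sketch of that three-way decision; the enum and flags are illustrative.

#include <stdbool.h>

enum eof_action { EOF_SEND_SHUTDOWN, EOF_SET_PENDING, EOF_ABORT };

static enum eof_action
eof_decision(bool send_queue_empty, bool sent_queue_empty,
             bool unsent_data, bool locked_msg_incomplete)
{
	if (send_queue_empty && sent_queue_empty && !unsent_data) {
		if (locked_msg_incomplete)
			return (EOF_ABORT);          /* partial message left behind */
		return (EOF_SEND_SHUTDOWN);
	}
	return (EOF_SET_PENDING);                /* drain first, shut down later */
}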
currently, we will allow user 1.14063 + * data to be sent first and move to 1.14064 + * SHUTDOWN-PENDING 1.14065 + */ 1.14066 + if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 1.14067 + (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 1.14068 + (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 1.14069 + if (hold_tcblock == 0) { 1.14070 + SCTP_TCB_LOCK(stcb); 1.14071 + hold_tcblock = 1; 1.14072 + } 1.14073 + if (asoc->locked_on_sending) { 1.14074 + /* Locked to send out the data */ 1.14075 + struct sctp_stream_queue_pending *sp; 1.14076 + sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead); 1.14077 + if (sp) { 1.14078 + if ((sp->length == 0) && (sp->msg_is_complete == 0)) 1.14079 + asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 1.14080 + } 1.14081 + } 1.14082 + asoc->state |= SCTP_STATE_SHUTDOWN_PENDING; 1.14083 + if (TAILQ_EMPTY(&asoc->send_queue) && 1.14084 + TAILQ_EMPTY(&asoc->sent_queue) && 1.14085 + (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 1.14086 + abort_anyway: 1.14087 + if (free_cnt_applied) { 1.14088 + atomic_add_int(&stcb->asoc.refcnt, -1); 1.14089 + free_cnt_applied = 0; 1.14090 + } 1.14091 + sctp_abort_an_association(stcb->sctp_ep, stcb, 1.14092 + NULL, SCTP_SO_LOCKED); 1.14093 + /* now relock the stcb so everything is sane */ 1.14094 + hold_tcblock = 0; 1.14095 + stcb = NULL; 1.14096 + goto out; 1.14097 + } 1.14098 + sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, 1.14099 + asoc->primary_destination); 1.14100 + sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY); 1.14101 + } 1.14102 + } 1.14103 + } 1.14104 +skip_out_eof: 1.14105 + if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 1.14106 + some_on_control = 1; 1.14107 + } 1.14108 + if (queue_only_for_init) { 1.14109 + if (hold_tcblock == 0) { 1.14110 + SCTP_TCB_LOCK(stcb); 1.14111 + hold_tcblock = 1; 1.14112 + } 1.14113 + if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) { 1.14114 + /* a collision took us forward? */ 1.14115 + queue_only = 0; 1.14116 + } else { 1.14117 + sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED); 1.14118 + SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT); 1.14119 + queue_only = 1; 1.14120 + } 1.14121 + } 1.14122 + if ((net->flight_size > net->cwnd) && 1.14123 + (stcb->asoc.sctp_cmt_on_off == 0)) { 1.14124 + SCTP_STAT_INCR(sctps_send_cwnd_avoid); 1.14125 + queue_only = 1; 1.14126 + } else if (asoc->ifp_had_enobuf) { 1.14127 + SCTP_STAT_INCR(sctps_ifnomemqueued); 1.14128 + if (net->flight_size > (2 * net->mtu)) { 1.14129 + queue_only = 1; 1.14130 + } 1.14131 + asoc->ifp_had_enobuf = 0; 1.14132 + } 1.14133 + un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) + 1.14134 + (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk))); 1.14135 + if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) && 1.14136 + (stcb->asoc.total_flight > 0) && 1.14137 + (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) && 1.14138 + (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) { 1.14139 + /*- 1.14140 + * Ok, Nagle is set on and we have data outstanding. 1.14141 + * Don't send anything and let SACKs drive out the 1.14142 + * data unless wen have a "full" segment to send. 
1.14143 + */ 1.14144 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { 1.14145 + sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED); 1.14146 + } 1.14147 + SCTP_STAT_INCR(sctps_naglequeued); 1.14148 + nagle_applies = 1; 1.14149 + } else { 1.14150 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) { 1.14151 + if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) 1.14152 + sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED); 1.14153 + } 1.14154 + SCTP_STAT_INCR(sctps_naglesent); 1.14155 + nagle_applies = 0; 1.14156 + } 1.14157 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) { 1.14158 + sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, 1.14159 + nagle_applies, un_sent); 1.14160 + sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size, 1.14161 + stcb->asoc.total_flight, 1.14162 + stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count); 1.14163 + } 1.14164 + if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) { 1.14165 + /* we can attempt to send too. */ 1.14166 + if (hold_tcblock == 0) { 1.14167 + /* If there is activity recv'ing sacks no need to send */ 1.14168 + if (SCTP_TCB_TRYLOCK(stcb)) { 1.14169 + sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 1.14170 + hold_tcblock = 1; 1.14171 + } 1.14172 + } else { 1.14173 + sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 1.14174 + } 1.14175 + } else if ((queue_only == 0) && 1.14176 + (stcb->asoc.peers_rwnd == 0) && 1.14177 + (stcb->asoc.total_flight == 0)) { 1.14178 + /* We get to have a probe outstanding */ 1.14179 + if (hold_tcblock == 0) { 1.14180 + hold_tcblock = 1; 1.14181 + SCTP_TCB_LOCK(stcb); 1.14182 + } 1.14183 + sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED); 1.14184 + } else if (some_on_control) { 1.14185 + int num_out, reason, frag_point; 1.14186 + 1.14187 + /* Here we do control only */ 1.14188 + if (hold_tcblock == 0) { 1.14189 + hold_tcblock = 1; 1.14190 + SCTP_TCB_LOCK(stcb); 1.14191 + } 1.14192 + frag_point = sctp_get_frag_point(stcb, &stcb->asoc); 1.14193 + (void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out, 1.14194 + &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED); 1.14195 + } 1.14196 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n", 1.14197 + queue_only, stcb->asoc.peers_rwnd, un_sent, 1.14198 + stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue, 1.14199 + stcb->asoc.total_output_queue_size, error); 1.14200 + 1.14201 +out: 1.14202 +#if defined(__APPLE__) 1.14203 + sbunlock(&so->so_snd, 1); 1.14204 +#endif 1.14205 +out_unlocked: 1.14206 + 1.14207 + if (local_soresv && stcb) { 1.14208 + atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen); 1.14209 + } 1.14210 + if (create_lock_applied) { 1.14211 + SCTP_ASOC_CREATE_UNLOCK(inp); 1.14212 + } 1.14213 + if ((stcb) && hold_tcblock) { 1.14214 + SCTP_TCB_UNLOCK(stcb); 1.14215 + } 1.14216 + if (stcb && free_cnt_applied) { 1.14217 + atomic_add_int(&stcb->asoc.refcnt, -1); 1.14218 + } 1.14219 +#ifdef INVARIANTS 1.14220 +#if !defined(__APPLE__) 1.14221 + if (stcb) { 1.14222 + if (mtx_owned(&stcb->tcb_mtx)) { 1.14223 + panic("Leaving with tcb mtx owned?"); 1.14224 + } 1.14225 + if (mtx_owned(&stcb->tcb_send_mtx)) { 1.14226 + panic("Leaving with tcb send mtx owned?"); 1.14227 + } 1.14228 + } 1.14229 +#endif 1.14230 +#endif 1.14231 +#ifdef __Panda__ 1.14232 + /* 1.14233 + * Handle the EAGAIN/ENOMEM cases to 
reattach the pak header 1.14234 + * to particle when pak is passed in, so that caller 1.14235 + * can try again with this pak 1.14236 + * 1.14237 + * NOTE: For other cases, including success case, 1.14238 + * we simply want to return the header back to free 1.14239 + * pool 1.14240 + */ 1.14241 + if (top) { 1.14242 + if ((error == EAGAIN) || (error == ENOMEM)) { 1.14243 + SCTP_ATTACH_CHAIN(i_pak, top, sndlen); 1.14244 + top = NULL; 1.14245 + } else { 1.14246 + (void)SCTP_RELEASE_HEADER(i_pak); 1.14247 + } 1.14248 + } else { 1.14249 + /* This is to handle cases when top has 1.14250 + * been reset to NULL but pak might not 1.14251 + * be freed 1.14252 + */ 1.14253 + if (i_pak) { 1.14254 + (void)SCTP_RELEASE_HEADER(i_pak); 1.14255 + } 1.14256 + } 1.14257 +#endif 1.14258 +#ifdef INVARIANTS 1.14259 + if (inp) { 1.14260 + sctp_validate_no_locks(inp); 1.14261 + } else { 1.14262 + SCTP_PRINTF("Warning - inp is NULL so can't validate locks\n"); 1.14263 + } 1.14264 +#endif 1.14265 + if (top) { 1.14266 + sctp_m_freem(top); 1.14267 + } 1.14268 + if (control) { 1.14269 + sctp_m_freem(control); 1.14270 + } 1.14271 + return (error); 1.14272 +} 1.14273 + 1.14274 + 1.14275 +/* 1.14276 + * generate an AUTHentication chunk, if required 1.14277 + */ 1.14278 +struct mbuf * 1.14279 +sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end, 1.14280 + struct sctp_auth_chunk **auth_ret, uint32_t * offset, 1.14281 + struct sctp_tcb *stcb, uint8_t chunk) 1.14282 +{ 1.14283 + struct mbuf *m_auth; 1.14284 + struct sctp_auth_chunk *auth; 1.14285 + int chunk_len; 1.14286 + struct mbuf *cn; 1.14287 + 1.14288 + if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) || 1.14289 + (stcb == NULL)) 1.14290 + return (m); 1.14291 + 1.14292 + /* sysctl disabled auth? */ 1.14293 + if (SCTP_BASE_SYSCTL(sctp_auth_disable)) 1.14294 + return (m); 1.14295 + 1.14296 + /* peer doesn't do auth... */ 1.14297 + if (!stcb->asoc.peer_supports_auth) { 1.14298 + return (m); 1.14299 + } 1.14300 + /* does the requested chunk require auth?
*/ 1.14301 + if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) { 1.14302 + return (m); 1.14303 + } 1.14304 + m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER); 1.14305 + if (m_auth == NULL) { 1.14306 + /* no mbuf's */ 1.14307 + return (m); 1.14308 + } 1.14309 + /* reserve some space if this will be the first mbuf */ 1.14310 + if (m == NULL) 1.14311 + SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD); 1.14312 + /* fill in the AUTH chunk details */ 1.14313 + auth = mtod(m_auth, struct sctp_auth_chunk *); 1.14314 + bzero(auth, sizeof(*auth)); 1.14315 + auth->ch.chunk_type = SCTP_AUTHENTICATION; 1.14316 + auth->ch.chunk_flags = 0; 1.14317 + chunk_len = sizeof(*auth) + 1.14318 + sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id); 1.14319 + auth->ch.chunk_length = htons(chunk_len); 1.14320 + auth->hmac_id = htons(stcb->asoc.peer_hmac_id); 1.14321 + /* key id and hmac digest will be computed and filled in upon send */ 1.14322 + 1.14323 + /* save the offset where the auth was inserted into the chain */ 1.14324 + *offset = 0; 1.14325 + for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) { 1.14326 + *offset += SCTP_BUF_LEN(cn); 1.14327 + } 1.14328 + 1.14329 + /* update length and return pointer to the auth chunk */ 1.14330 + SCTP_BUF_LEN(m_auth) = chunk_len; 1.14331 + m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0); 1.14332 + if (auth_ret != NULL) 1.14333 + *auth_ret = auth; 1.14334 + 1.14335 + return (m); 1.14336 +} 1.14337 + 1.14338 +#if defined(__FreeBSD__) || defined(__APPLE__) 1.14339 +#ifdef INET6 1.14340 +int 1.14341 +sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro) 1.14342 +{ 1.14343 + struct nd_prefix *pfx = NULL; 1.14344 + struct nd_pfxrouter *pfxrtr = NULL; 1.14345 + struct sockaddr_in6 gw6; 1.14346 + 1.14347 + if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6) 1.14348 + return (0); 1.14349 + 1.14350 + /* get prefix entry of address */ 1.14351 + LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) { 1.14352 + if (pfx->ndpr_stateflags & NDPRF_DETACHED) 1.14353 + continue; 1.14354 + if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr, 1.14355 + &src6->sin6_addr, &pfx->ndpr_mask)) 1.14356 + break; 1.14357 + } 1.14358 + /* no prefix entry in the prefix list */ 1.14359 + if (pfx == NULL) { 1.14360 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for "); 1.14361 + SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6); 1.14362 + return (0); 1.14363 + } 1.14364 + 1.14365 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is "); 1.14366 + SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6); 1.14367 + 1.14368 + /* search installed gateway from prefix entry */ 1.14369 + LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) { 1.14370 + memset(&gw6, 0, sizeof(struct sockaddr_in6)); 1.14371 + gw6.sin6_family = AF_INET6; 1.14372 +#ifdef HAVE_SIN6_LEN 1.14373 + gw6.sin6_len = sizeof(struct sockaddr_in6); 1.14374 +#endif 1.14375 + memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr, 1.14376 + sizeof(struct in6_addr)); 1.14377 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is "); 1.14378 + SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6); 1.14379 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is "); 1.14380 + SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway); 1.14381 + if (sctp_cmpaddr((struct sockaddr *)&gw6, 1.14382 + ro->ro_rt->rt_gateway)) { 1.14383 + SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n"); 1.14384 + return (1); 1.14385 + } 1.14386 + } 1.14387 + 
SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n"); 1.14388 + return (0); 1.14389 +} 1.14390 +#endif 1.14391 + 1.14392 +int 1.14393 +sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro) 1.14394 +{ 1.14395 +#ifdef INET 1.14396 + struct sockaddr_in *sin, *mask; 1.14397 + struct ifaddr *ifa; 1.14398 + struct in_addr srcnetaddr, gwnetaddr; 1.14399 + 1.14400 + if (ro == NULL || ro->ro_rt == NULL || 1.14401 + sifa->address.sa.sa_family != AF_INET) { 1.14402 + return (0); 1.14403 + } 1.14404 + ifa = (struct ifaddr *)sifa->ifa; 1.14405 + mask = (struct sockaddr_in *)(ifa->ifa_netmask); 1.14406 + sin = (struct sockaddr_in *)&sifa->address.sin; 1.14407 + srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr); 1.14408 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is "); 1.14409 + SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa); 1.14410 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr); 1.14411 + 1.14412 + sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway; 1.14413 + gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr); 1.14414 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is "); 1.14415 + SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway); 1.14416 + SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr); 1.14417 + if (srcnetaddr.s_addr == gwnetaddr.s_addr) { 1.14418 + return (1); 1.14419 + } 1.14420 +#endif 1.14421 + return (0); 1.14422 +} 1.14423 +#elif defined(__Userspace__) 1.14424 +/* TODO __Userspace__ versions of sctp_vXsrc_match_nexthop(). */ 1.14425 +int 1.14426 +sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro) 1.14427 +{ 1.14428 + return (0); 1.14429 +} 1.14430 +int 1.14431 +sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro) 1.14432 +{ 1.14433 + return (0); 1.14434 +} 1.14435 + 1.14436 +#endif
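Editor's note on the SCTP_EOF block earlier in this hunk: at the end of a user send it decides whether the association can shut down immediately, must abort, or should only be marked SHUTDOWN-PENDING until queued data drains. The sketch below restates that decision in isolation; the enum, the helper name, and the boolean parameters are hypothetical stand-ins for the real checks on asoc->send_queue, asoc->sent_queue, sctp_is_there_unsent_data() and SCTP_STATE_PARTIAL_MSG_LEFT.

#include <stdbool.h>

enum eof_action {
	EOF_SEND_SHUTDOWN,        /* nothing left to send: emit SHUTDOWN now */
	EOF_SET_SHUTDOWN_PENDING, /* data still queued: defer the SHUTDOWN */
	EOF_ABORT                 /* queues drained but a partial message is stuck */
};

static enum eof_action
eof_decision(bool send_queue_empty, bool sent_queue_empty,
             bool unsent_data, bool partial_msg_left)
{
	if (send_queue_empty && sent_queue_empty && !unsent_data) {
		if (partial_msg_left) {
			/* A partly delivered message can never complete: abort. */
			return (EOF_ABORT);
		}
		/* Everything has been delivered: send SHUTDOWN immediately. */
		return (EOF_SEND_SHUTDOWN);
	}
	/* Data remains queued: mark SHUTDOWN-PENDING and let it drain first. */
	return (EOF_SET_SHUTDOWN_PENDING);
}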
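The un_sent / nagle_applies test in the same hunk implements a Nagle-style hold: with SCTP_PCB_FLAGS_NODELAY off, data already in flight, and less than a full segment's worth queued, the send is deferred and returning SACKs drive the output instead. A minimal sketch of that predicate, using hypothetical plain-scalar parameters in place of the stcb->asoc fields:

#include <stdbool.h>
#include <stdint.h>

static bool
nagle_holds_data(bool nodelay_off,        /* SCTP_PCB_FLAGS_NODELAY is not set */
                 uint32_t total_flight,   /* bytes currently in flight */
                 uint32_t stream_queue_cnt,
                 uint32_t max_bundling,   /* SCTP_MAX_DATA_BUNDLING */
                 int un_sent,             /* queued-but-unsent bytes incl. chunk headers */
                 uint32_t smallest_mtu,
                 uint32_t min_overhead)   /* SCTP_MIN_OVERHEAD */
{
	/* Hold the data and let incoming SACKs trigger the send only while
	 * something is outstanding and less than a full segment is queued. */
	return (nodelay_off &&
	        total_flight > 0 &&
	        stream_queue_cnt < max_bundling &&
	        un_sent < (int)(smallest_mtu - min_overhead));
}

For example, with a 1500-byte smallest MTU the data is held back until roughly a full payload's worth is queued or nothing remains in flight.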
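sctp_v4src_match_nexthop() above accepts a source/route pair when the source address and the route's gateway share the same masked network prefix. Reduced to the arithmetic, with a hypothetical helper operating on host-order addresses instead of struct sockaddr_in:

#include <stdint.h>

static int
same_ipv4_prefix(uint32_t src, uint32_t gateway, uint32_t netmask)
{
	/* Both addresses reduce to the same network prefix under the mask. */
	return ((src & netmask) == (gateway & netmask));
}

For instance, 192.168.1.10 and 192.168.1.1 match under a /24 mask, so the route's first hop is considered reachable from that source address.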