--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/netwerk/sctp/src/netinet/sctp_pcb.c	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,7991 @@
/*-
 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef __FreeBSD__
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_pcb.c 258765 2013-11-30 12:51:19Z tuexen $");
#endif

#include <netinet/sctp_os.h>
#ifdef __FreeBSD__
#include <sys/proc.h>
#endif
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_bsd_addr.h>
#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
#include <netinet/sctp_dtrace_define.h>
#endif
#if !defined(__Userspace_os_Windows)
#include <netinet/udp.h>
#endif
#ifdef INET6
#if defined(__Userspace__)
#include "user_ip6_var.h"
#else
#include <netinet6/ip6_var.h>
#endif
#endif
#if defined(__FreeBSD__)
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/unistd.h>
#endif
#if defined(__Userspace__)
#include <user_socketvar.h>
#endif

#if defined(__APPLE__)
#define APPLE_FILE_NO 4
#endif

#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
VNET_DEFINE(struct sctp_base_info, system_base_info);
#else
struct sctp_base_info system_base_info;
#endif

#if defined(__Userspace__)
#if defined(INET) || defined(INET6)
struct ifaddrs *g_interfaces;
#endif
#endif
/* FIX: we don't handle multiple link local scopes */
/* "scopeless" replacement for IN6_ARE_ADDR_EQUAL */
#ifdef INET6
int
SCTP6_ARE_ADDR_EQUAL(struct sockaddr_in6 *a, struct sockaddr_in6 *b)
{
#ifdef SCTP_EMBEDDED_V6_SCOPE
#if defined(__APPLE__)
    struct in6_addr tmp_a, tmp_b;

    tmp_a = a->sin6_addr;
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
    if (in6_embedscope(&tmp_a, a, NULL, NULL) != 0) {
#else
    if (in6_embedscope(&tmp_a, a, NULL, NULL, NULL) != 0) {
#endif
        return (0);
    }
    tmp_b = b->sin6_addr;
#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
    if (in6_embedscope(&tmp_b, b, NULL, NULL) != 0) {
#else
    if (in6_embedscope(&tmp_b, b, NULL, NULL, NULL) != 0) {
#endif
        return (0);
    }
    return (IN6_ARE_ADDR_EQUAL(&tmp_a, &tmp_b));
#elif defined(SCTP_KAME)
    struct sockaddr_in6 tmp_a, tmp_b;

    memcpy(&tmp_a, a, sizeof(struct sockaddr_in6));
    if (sa6_embedscope(&tmp_a, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
        return (0);
    }
    memcpy(&tmp_b, b, sizeof(struct sockaddr_in6));
    if (sa6_embedscope(&tmp_b, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
        return (0);
    }
    return (IN6_ARE_ADDR_EQUAL(&tmp_a.sin6_addr, &tmp_b.sin6_addr));
#else
    struct in6_addr tmp_a, tmp_b;

    tmp_a = a->sin6_addr;
    if (in6_embedscope(&tmp_a, a) != 0) {
        return (0);
    }
    tmp_b = b->sin6_addr;
    if (in6_embedscope(&tmp_b, b) != 0) {
        return (0);
    }
    return (IN6_ARE_ADDR_EQUAL(&tmp_a, &tmp_b));
#endif
#else
    return (IN6_ARE_ADDR_EQUAL(&(a->sin6_addr), &(b->sin6_addr)));
#endif /* SCTP_EMBEDDED_V6_SCOPE */
}
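
/*
 * Illustrative sketch, not part of the original change: how a caller might
 * use SCTP6_ARE_ADDR_EQUAL() to compare two IPv6 peer addresses while
 * leaving the per-platform scope embedding to the helper above. The
 * example_same_v6_peer() wrapper is hypothetical and only exists for this
 * example; it is kept under #if 0 so it is never compiled.
 */
#if 0	/* example only */
static int
example_same_v6_peer(struct sockaddr_in6 *peer_a, struct sockaddr_in6 *peer_b)
{
    /* Ports are compared separately; SCTP6_ARE_ADDR_EQUAL() only looks at
     * the addresses (with any embedded scope handled per platform).
     */
    if (peer_a->sin6_port != peer_b->sin6_port) {
        return (0);
    }
    return (SCTP6_ARE_ADDR_EQUAL(peer_a, peer_b));
}
#endif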
#endif

void
sctp_fill_pcbinfo(struct sctp_pcbinfo *spcb)
{
    /*
     * We really don't need to lock this, but I will just because it
     * does not hurt.
     */
    SCTP_INP_INFO_RLOCK();
    spcb->ep_count = SCTP_BASE_INFO(ipi_count_ep);
    spcb->asoc_count = SCTP_BASE_INFO(ipi_count_asoc);
    spcb->laddr_count = SCTP_BASE_INFO(ipi_count_laddr);
    spcb->raddr_count = SCTP_BASE_INFO(ipi_count_raddr);
    spcb->chk_count = SCTP_BASE_INFO(ipi_count_chunk);
    spcb->readq_count = SCTP_BASE_INFO(ipi_count_readq);
    spcb->stream_oque = SCTP_BASE_INFO(ipi_count_strmoq);
    spcb->free_chunks = SCTP_BASE_INFO(ipi_free_chunks);
    SCTP_INP_INFO_RUNLOCK();
}

/*-
 * Addresses are added to VRFs (Virtual Routers). For BSD we
 * have only the default VRF 0. We maintain a hash list of
 * VRFs. Each VRF has its own list of sctp_ifn's, and each of
 * these has a list of addresses. When we add a new address
 * to a VRF we look up the ifn/ifn_index; if the ifn does
 * not exist we create it and add it to the list of IFNs
 * within the VRF. Once we have the sctp_ifn, we add the
 * address to its list. So the layout looks like:
 *
 * hash-vrf-table
 *   vrf-> ifn-> ifn -> ifn
 *   vrf    |
 *    ...   +--ifa-> ifa -> ifa
 *   vrf
 *
 * We keep these separate lists since the SCTP subsystem will
 * point to these from its source address selection nets structure.
 * When an address is deleted it does not happen right away on
 * the SCTP side; it gets scheduled. What we do when a
 * delete happens is immediately remove the address from
 * the master list and decrement the refcount. As our
 * addip iterator works through and frees the source address
 * selection entries pointing to the sctp_ifa, eventually the
 * refcount will reach 0 and we will delete it. Note that it is
 * assumed that any locking on system-level ifn/ifa is done by
 * the caller of these functions; these routines only lock the
 * SCTP structures as they add or delete things.
 *
 * Other notes on VRF concepts:
 *  - An endpoint can be in multiple VRFs.
 *  - An association lives within one and only one VRF.
 *  - For any incoming packet we can deduce the VRF by
 *    looking at the inbound mbuf/pak (for BSD it is always VRF 0).
 *  - Any downward send call or connect call must supply the
 *    VRF via ancillary data or via some sort of set-default-VRF
 *    socket option call (again trivial for BSD since the VRF is
 *    always 0).
 *  - An endpoint may add multiple VRFs to itself.
 *  - Listening sockets can accept associations in any of the
 *    VRFs they are in, but the association ends up in only one
 *    VRF (taken from the packet or the connect/send).
 */

struct sctp_vrf *
sctp_allocate_vrf(int vrf_id)
{
    struct sctp_vrf *vrf = NULL;
    struct sctp_vrflist *bucket;

    /* First allocate the VRF structure */
    vrf = sctp_find_vrf(vrf_id);
    if (vrf) {
        /* Already allocated */
        return (vrf);
    }
    SCTP_MALLOC(vrf, struct sctp_vrf *, sizeof(struct sctp_vrf),
                SCTP_M_VRF);
    if (vrf == NULL) {
        /* No memory */
#ifdef INVARIANTS
        panic("No memory for VRF:%d", vrf_id);
#endif
        return (NULL);
    }
    /* setup the VRF */
    memset(vrf, 0, sizeof(struct sctp_vrf));
    vrf->vrf_id = vrf_id;
    LIST_INIT(&vrf->ifnlist);
    vrf->total_ifa_count = 0;
    vrf->refcount = 0;
    /* now also setup table ids */
    SCTP_INIT_VRF_TABLEID(vrf);
    /* Init the HASH of addresses */
    vrf->vrf_addr_hash = SCTP_HASH_INIT(SCTP_VRF_ADDR_HASH_SIZE,
                                        &vrf->vrf_addr_hashmark);
    if (vrf->vrf_addr_hash == NULL) {
        /* No memory */
#ifdef INVARIANTS
        panic("No memory for VRF:%d", vrf_id);
#endif
        SCTP_FREE(vrf, SCTP_M_VRF);
        return (NULL);
    }

    /* Add it to the hash table */
    bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(vrf_id & SCTP_BASE_INFO(hashvrfmark))];
    LIST_INSERT_HEAD(bucket, vrf, next_vrf);
    atomic_add_int(&SCTP_BASE_INFO(ipi_count_vrfs), 1);
    return (vrf);
}


struct sctp_ifn *
sctp_find_ifn(void *ifn, uint32_t ifn_index)
{
    struct sctp_ifn *sctp_ifnp;
    struct sctp_ifnlist *hash_ifn_head;

    /* We assume the address lock is held;
     * if that's wrong, problems could occur :-)
     */
    hash_ifn_head = &SCTP_BASE_INFO(vrf_ifn_hash)[(ifn_index & SCTP_BASE_INFO(vrf_ifn_hashmark))];
    LIST_FOREACH(sctp_ifnp, hash_ifn_head, next_bucket) {
        if (sctp_ifnp->ifn_index == ifn_index) {
            return (sctp_ifnp);
        }
        if (sctp_ifnp->ifn_p && ifn && (sctp_ifnp->ifn_p == ifn)) {
            return (sctp_ifnp);
        }
    }
    return (NULL);
}


struct sctp_vrf *
sctp_find_vrf(uint32_t vrf_id)
{
    struct sctp_vrflist *bucket;
    struct sctp_vrf *liste;

    bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(vrf_id & SCTP_BASE_INFO(hashvrfmark))];
    LIST_FOREACH(liste, bucket, next_vrf) {
        if (vrf_id == liste->vrf_id) {
            return (liste);
        }
    }
    return (NULL);
}


void
sctp_free_vrf(struct sctp_vrf *vrf)
{
    if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&vrf->refcount)) {
        if (vrf->vrf_addr_hash) {
            SCTP_HASH_FREE(vrf->vrf_addr_hash, vrf->vrf_addr_hashmark);
            vrf->vrf_addr_hash = NULL;
        }
        /* We zero'd the count */
        LIST_REMOVE(vrf, next_vrf);
        SCTP_FREE(vrf, SCTP_M_VRF);
        atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_vrfs), 1);
    }
}


void
sctp_free_ifn(struct sctp_ifn *sctp_ifnp)
{
    if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&sctp_ifnp->refcount)) {
        /* We zero'd the count */
        if (sctp_ifnp->vrf) {
            sctp_free_vrf(sctp_ifnp->vrf);
        }
        SCTP_FREE(sctp_ifnp, SCTP_M_IFN);
        atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ifns), 1);
    }
}

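
/*
 * Illustrative sketch, not part of the original change: walking the
 * vrf -> ifn -> ifa hierarchy described in the comment above, using the
 * list heads, link names, and locking/debug macros that this file itself
 * uses. The example_dump_vrf() helper is hypothetical and kept under
 * #if 0 so it is never compiled.
 */
#if 0	/* example only */
static void
example_dump_vrf(uint32_t vrf_id)
{
    struct sctp_vrf *vrf;
    struct sctp_ifn *sctp_ifnp;
    struct sctp_ifa *sctp_ifap;

    SCTP_IPI_ADDR_RLOCK();
    vrf = sctp_find_vrf(vrf_id);
    if (vrf == NULL) {
        SCTP_IPI_ADDR_RUNLOCK();
        return;
    }
    /* Every interface (sctp_ifn) known to this VRF... */
    LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
        SCTPDBG(SCTP_DEBUG_PCB4, "ifn %s holds %u addresses\n",
                sctp_ifnp->ifn_name, (unsigned int)sctp_ifnp->ifa_count);
        /* ...and every address (sctp_ifa) hanging off that interface. */
        LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
            SCTPDBG_ADDR(SCTP_DEBUG_PCB4, &sctp_ifap->address.sa);
        }
    }
    SCTP_IPI_ADDR_RUNLOCK();
}
#endif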
+void 1.332 +sctp_update_ifn_mtu(uint32_t ifn_index, uint32_t mtu) 1.333 +{ 1.334 + struct sctp_ifn *sctp_ifnp; 1.335 + 1.336 + sctp_ifnp = sctp_find_ifn((void *)NULL, ifn_index); 1.337 + if (sctp_ifnp != NULL) { 1.338 + sctp_ifnp->ifn_mtu = mtu; 1.339 + } 1.340 +} 1.341 + 1.342 + 1.343 +void 1.344 +sctp_free_ifa(struct sctp_ifa *sctp_ifap) 1.345 +{ 1.346 + if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&sctp_ifap->refcount)) { 1.347 + /* We zero'd the count */ 1.348 + if (sctp_ifap->ifn_p) { 1.349 + sctp_free_ifn(sctp_ifap->ifn_p); 1.350 + } 1.351 + SCTP_FREE(sctp_ifap, SCTP_M_IFA); 1.352 + atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ifas), 1); 1.353 + } 1.354 +} 1.355 + 1.356 + 1.357 +static void 1.358 +sctp_delete_ifn(struct sctp_ifn *sctp_ifnp, int hold_addr_lock) 1.359 +{ 1.360 + struct sctp_ifn *found; 1.361 + 1.362 + found = sctp_find_ifn(sctp_ifnp->ifn_p, sctp_ifnp->ifn_index); 1.363 + if (found == NULL) { 1.364 + /* Not in the list.. sorry */ 1.365 + return; 1.366 + } 1.367 + if (hold_addr_lock == 0) 1.368 + SCTP_IPI_ADDR_WLOCK(); 1.369 + LIST_REMOVE(sctp_ifnp, next_bucket); 1.370 + LIST_REMOVE(sctp_ifnp, next_ifn); 1.371 + SCTP_DEREGISTER_INTERFACE(sctp_ifnp->ifn_index, 1.372 + sctp_ifnp->registered_af); 1.373 + if (hold_addr_lock == 0) 1.374 + SCTP_IPI_ADDR_WUNLOCK(); 1.375 + /* Take away the reference, and possibly free it */ 1.376 + sctp_free_ifn(sctp_ifnp); 1.377 +} 1.378 + 1.379 + 1.380 +void 1.381 +sctp_mark_ifa_addr_down(uint32_t vrf_id, struct sockaddr *addr, 1.382 + const char *if_name, uint32_t ifn_index) 1.383 +{ 1.384 + struct sctp_vrf *vrf; 1.385 + struct sctp_ifa *sctp_ifap; 1.386 + 1.387 + SCTP_IPI_ADDR_RLOCK(); 1.388 + vrf = sctp_find_vrf(vrf_id); 1.389 + if (vrf == NULL) { 1.390 + SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id); 1.391 + goto out; 1.392 + 1.393 + } 1.394 + sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED); 1.395 + if (sctp_ifap == NULL) { 1.396 + SCTPDBG(SCTP_DEBUG_PCB4, "Can't find sctp_ifap for address\n"); 1.397 + goto out; 1.398 + } 1.399 + if (sctp_ifap->ifn_p == NULL) { 1.400 + SCTPDBG(SCTP_DEBUG_PCB4, "IFA has no IFN - can't mark unuseable\n"); 1.401 + goto out; 1.402 + } 1.403 + if (if_name) { 1.404 + if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, SCTP_IFNAMSIZ) != 0) { 1.405 + SCTPDBG(SCTP_DEBUG_PCB4, "IFN %s of IFA not the same as %s\n", 1.406 + sctp_ifap->ifn_p->ifn_name, if_name); 1.407 + goto out; 1.408 + } 1.409 + } else { 1.410 + if (sctp_ifap->ifn_p->ifn_index != ifn_index) { 1.411 + SCTPDBG(SCTP_DEBUG_PCB4, "IFA owned by ifn_index:%d down command for ifn_index:%d - ignored\n", 1.412 + sctp_ifap->ifn_p->ifn_index, ifn_index); 1.413 + goto out; 1.414 + } 1.415 + } 1.416 + 1.417 + sctp_ifap->localifa_flags &= (~SCTP_ADDR_VALID); 1.418 + sctp_ifap->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE; 1.419 + out: 1.420 + SCTP_IPI_ADDR_RUNLOCK(); 1.421 +} 1.422 + 1.423 + 1.424 +void 1.425 +sctp_mark_ifa_addr_up(uint32_t vrf_id, struct sockaddr *addr, 1.426 + const char *if_name, uint32_t ifn_index) 1.427 +{ 1.428 + struct sctp_vrf *vrf; 1.429 + struct sctp_ifa *sctp_ifap; 1.430 + 1.431 + SCTP_IPI_ADDR_RLOCK(); 1.432 + vrf = sctp_find_vrf(vrf_id); 1.433 + if (vrf == NULL) { 1.434 + SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id); 1.435 + goto out; 1.436 + 1.437 + } 1.438 + sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED); 1.439 + if (sctp_ifap == NULL) { 1.440 + SCTPDBG(SCTP_DEBUG_PCB4, "Can't find sctp_ifap for address\n"); 1.441 + goto out; 1.442 + } 1.443 + if (sctp_ifap->ifn_p == 
NULL) { 1.444 + SCTPDBG(SCTP_DEBUG_PCB4, "IFA has no IFN - can't mark unuseable\n"); 1.445 + goto out; 1.446 + } 1.447 + if (if_name) { 1.448 + if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, SCTP_IFNAMSIZ) != 0) { 1.449 + SCTPDBG(SCTP_DEBUG_PCB4, "IFN %s of IFA not the same as %s\n", 1.450 + sctp_ifap->ifn_p->ifn_name, if_name); 1.451 + goto out; 1.452 + } 1.453 + } else { 1.454 + if (sctp_ifap->ifn_p->ifn_index != ifn_index) { 1.455 + SCTPDBG(SCTP_DEBUG_PCB4, "IFA owned by ifn_index:%d down command for ifn_index:%d - ignored\n", 1.456 + sctp_ifap->ifn_p->ifn_index, ifn_index); 1.457 + goto out; 1.458 + } 1.459 + } 1.460 + 1.461 + sctp_ifap->localifa_flags &= (~SCTP_ADDR_IFA_UNUSEABLE); 1.462 + sctp_ifap->localifa_flags |= SCTP_ADDR_VALID; 1.463 + out: 1.464 + SCTP_IPI_ADDR_RUNLOCK(); 1.465 +} 1.466 + 1.467 + 1.468 +/*- 1.469 + * Add an ifa to an ifn. 1.470 + * Register the interface as necessary. 1.471 + * NOTE: ADDR write lock MUST be held. 1.472 + */ 1.473 +static void 1.474 +sctp_add_ifa_to_ifn(struct sctp_ifn *sctp_ifnp, struct sctp_ifa *sctp_ifap) 1.475 +{ 1.476 + int ifa_af; 1.477 + 1.478 + LIST_INSERT_HEAD(&sctp_ifnp->ifalist, sctp_ifap, next_ifa); 1.479 + sctp_ifap->ifn_p = sctp_ifnp; 1.480 + atomic_add_int(&sctp_ifap->ifn_p->refcount, 1); 1.481 + /* update address counts */ 1.482 + sctp_ifnp->ifa_count++; 1.483 + ifa_af = sctp_ifap->address.sa.sa_family; 1.484 + switch (ifa_af) { 1.485 +#ifdef INET 1.486 + case AF_INET: 1.487 + sctp_ifnp->num_v4++; 1.488 + break; 1.489 +#endif 1.490 +#ifdef INET6 1.491 + case AF_INET6: 1.492 + sctp_ifnp->num_v6++; 1.493 + break; 1.494 +#endif 1.495 + default: 1.496 + break; 1.497 + } 1.498 + if (sctp_ifnp->ifa_count == 1) { 1.499 + /* register the new interface */ 1.500 + SCTP_REGISTER_INTERFACE(sctp_ifnp->ifn_index, ifa_af); 1.501 + sctp_ifnp->registered_af = ifa_af; 1.502 + } 1.503 +} 1.504 + 1.505 + 1.506 +/*- 1.507 + * Remove an ifa from its ifn. 1.508 + * If no more addresses exist, remove the ifn too. Otherwise, re-register 1.509 + * the interface based on the remaining address families left. 1.510 + * NOTE: ADDR write lock MUST be held. 
1.511 + */ 1.512 +static void 1.513 +sctp_remove_ifa_from_ifn(struct sctp_ifa *sctp_ifap) 1.514 +{ 1.515 + LIST_REMOVE(sctp_ifap, next_ifa); 1.516 + if (sctp_ifap->ifn_p) { 1.517 + /* update address counts */ 1.518 + sctp_ifap->ifn_p->ifa_count--; 1.519 + switch (sctp_ifap->address.sa.sa_family) { 1.520 +#ifdef INET 1.521 + case AF_INET: 1.522 + sctp_ifap->ifn_p->num_v4--; 1.523 + break; 1.524 +#endif 1.525 +#ifdef INET6 1.526 + case AF_INET6: 1.527 + sctp_ifap->ifn_p->num_v6--; 1.528 + break; 1.529 +#endif 1.530 + default: 1.531 + break; 1.532 + } 1.533 + 1.534 + if (LIST_EMPTY(&sctp_ifap->ifn_p->ifalist)) { 1.535 + /* remove the ifn, possibly freeing it */ 1.536 + sctp_delete_ifn(sctp_ifap->ifn_p, SCTP_ADDR_LOCKED); 1.537 + } else { 1.538 + /* re-register address family type, if needed */ 1.539 + if ((sctp_ifap->ifn_p->num_v6 == 0) && 1.540 + (sctp_ifap->ifn_p->registered_af == AF_INET6)) { 1.541 + SCTP_DEREGISTER_INTERFACE(sctp_ifap->ifn_p->ifn_index, AF_INET6); 1.542 + SCTP_REGISTER_INTERFACE(sctp_ifap->ifn_p->ifn_index, AF_INET); 1.543 + sctp_ifap->ifn_p->registered_af = AF_INET; 1.544 + } else if ((sctp_ifap->ifn_p->num_v4 == 0) && 1.545 + (sctp_ifap->ifn_p->registered_af == AF_INET)) { 1.546 + SCTP_DEREGISTER_INTERFACE(sctp_ifap->ifn_p->ifn_index, AF_INET); 1.547 + SCTP_REGISTER_INTERFACE(sctp_ifap->ifn_p->ifn_index, AF_INET6); 1.548 + sctp_ifap->ifn_p->registered_af = AF_INET6; 1.549 + } 1.550 + /* free the ifn refcount */ 1.551 + sctp_free_ifn(sctp_ifap->ifn_p); 1.552 + } 1.553 + sctp_ifap->ifn_p = NULL; 1.554 + } 1.555 +} 1.556 + 1.557 + 1.558 +struct sctp_ifa * 1.559 +sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index, 1.560 + uint32_t ifn_type, const char *if_name, void *ifa, 1.561 + struct sockaddr *addr, uint32_t ifa_flags, 1.562 + int dynamic_add) 1.563 +{ 1.564 + struct sctp_vrf *vrf; 1.565 + struct sctp_ifn *sctp_ifnp = NULL; 1.566 + struct sctp_ifa *sctp_ifap = NULL; 1.567 + struct sctp_ifalist *hash_addr_head; 1.568 + struct sctp_ifnlist *hash_ifn_head; 1.569 + uint32_t hash_of_addr; 1.570 + int new_ifn_af = 0; 1.571 + 1.572 +#ifdef SCTP_DEBUG 1.573 + SCTPDBG(SCTP_DEBUG_PCB4, "vrf_id 0x%x: adding address: ", vrf_id); 1.574 + SCTPDBG_ADDR(SCTP_DEBUG_PCB4, addr); 1.575 +#endif 1.576 + SCTP_IPI_ADDR_WLOCK(); 1.577 + sctp_ifnp = sctp_find_ifn(ifn, ifn_index); 1.578 + if (sctp_ifnp) { 1.579 + vrf = sctp_ifnp->vrf; 1.580 + } else { 1.581 + vrf = sctp_find_vrf(vrf_id); 1.582 + if (vrf == NULL) { 1.583 + vrf = sctp_allocate_vrf(vrf_id); 1.584 + if (vrf == NULL) { 1.585 + SCTP_IPI_ADDR_WUNLOCK(); 1.586 + return (NULL); 1.587 + } 1.588 + } 1.589 + } 1.590 + if (sctp_ifnp == NULL) { 1.591 + /* build one and add it, can't hold lock 1.592 + * until after malloc done though. 
1.593 + */ 1.594 + SCTP_IPI_ADDR_WUNLOCK(); 1.595 + SCTP_MALLOC(sctp_ifnp, struct sctp_ifn *, 1.596 + sizeof(struct sctp_ifn), SCTP_M_IFN); 1.597 + if (sctp_ifnp == NULL) { 1.598 +#ifdef INVARIANTS 1.599 + panic("No memory for IFN"); 1.600 +#endif 1.601 + return (NULL); 1.602 + } 1.603 + memset(sctp_ifnp, 0, sizeof(struct sctp_ifn)); 1.604 + sctp_ifnp->ifn_index = ifn_index; 1.605 + sctp_ifnp->ifn_p = ifn; 1.606 + sctp_ifnp->ifn_type = ifn_type; 1.607 + sctp_ifnp->refcount = 0; 1.608 + sctp_ifnp->vrf = vrf; 1.609 + atomic_add_int(&vrf->refcount, 1); 1.610 + sctp_ifnp->ifn_mtu = SCTP_GATHER_MTU_FROM_IFN_INFO(ifn, ifn_index, addr->sa_family); 1.611 + if (if_name != NULL) { 1.612 + snprintf(sctp_ifnp->ifn_name, SCTP_IFNAMSIZ, "%s", if_name); 1.613 + } else { 1.614 + snprintf(sctp_ifnp->ifn_name, SCTP_IFNAMSIZ, "%s", "unknown"); 1.615 + } 1.616 + hash_ifn_head = &SCTP_BASE_INFO(vrf_ifn_hash)[(ifn_index & SCTP_BASE_INFO(vrf_ifn_hashmark))]; 1.617 + LIST_INIT(&sctp_ifnp->ifalist); 1.618 + SCTP_IPI_ADDR_WLOCK(); 1.619 + LIST_INSERT_HEAD(hash_ifn_head, sctp_ifnp, next_bucket); 1.620 + LIST_INSERT_HEAD(&vrf->ifnlist, sctp_ifnp, next_ifn); 1.621 + atomic_add_int(&SCTP_BASE_INFO(ipi_count_ifns), 1); 1.622 + new_ifn_af = 1; 1.623 + } 1.624 + sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED); 1.625 + if (sctp_ifap) { 1.626 + /* Hmm, it already exists? */ 1.627 + if ((sctp_ifap->ifn_p) && 1.628 + (sctp_ifap->ifn_p->ifn_index == ifn_index)) { 1.629 + SCTPDBG(SCTP_DEBUG_PCB4, "Using existing ifn %s (0x%x) for ifa %p\n", 1.630 + sctp_ifap->ifn_p->ifn_name, ifn_index, 1.631 + (void *)sctp_ifap); 1.632 + if (new_ifn_af) { 1.633 + /* Remove the created one that we don't want */ 1.634 + sctp_delete_ifn(sctp_ifnp, SCTP_ADDR_LOCKED); 1.635 + } 1.636 + if (sctp_ifap->localifa_flags & SCTP_BEING_DELETED) { 1.637 + /* easy to solve, just switch back to active */ 1.638 + SCTPDBG(SCTP_DEBUG_PCB4, "Clearing deleted ifa flag\n"); 1.639 + sctp_ifap->localifa_flags = SCTP_ADDR_VALID; 1.640 + sctp_ifap->ifn_p = sctp_ifnp; 1.641 + atomic_add_int(&sctp_ifap->ifn_p->refcount, 1); 1.642 + } 1.643 + exit_stage_left: 1.644 + SCTP_IPI_ADDR_WUNLOCK(); 1.645 + return (sctp_ifap); 1.646 + } else { 1.647 + if (sctp_ifap->ifn_p) { 1.648 + /* 1.649 + * The last IFN gets the address, remove the 1.650 + * old one 1.651 + */ 1.652 + SCTPDBG(SCTP_DEBUG_PCB4, "Moving ifa %p from %s (0x%x) to %s (0x%x)\n", 1.653 + (void *)sctp_ifap, sctp_ifap->ifn_p->ifn_name, 1.654 + sctp_ifap->ifn_p->ifn_index, if_name, 1.655 + ifn_index); 1.656 + /* remove the address from the old ifn */ 1.657 + sctp_remove_ifa_from_ifn(sctp_ifap); 1.658 + /* move the address over to the new ifn */ 1.659 + sctp_add_ifa_to_ifn(sctp_ifnp, sctp_ifap); 1.660 + goto exit_stage_left; 1.661 + } else { 1.662 + /* repair ifnp which was NULL ? 
*/ 1.663 + sctp_ifap->localifa_flags = SCTP_ADDR_VALID; 1.664 + SCTPDBG(SCTP_DEBUG_PCB4, "Repairing ifn %p for ifa %p\n", 1.665 + (void *)sctp_ifnp, (void *)sctp_ifap); 1.666 + sctp_add_ifa_to_ifn(sctp_ifnp, sctp_ifap); 1.667 + } 1.668 + goto exit_stage_left; 1.669 + } 1.670 + } 1.671 + SCTP_IPI_ADDR_WUNLOCK(); 1.672 + SCTP_MALLOC(sctp_ifap, struct sctp_ifa *, sizeof(struct sctp_ifa), SCTP_M_IFA); 1.673 + if (sctp_ifap == NULL) { 1.674 +#ifdef INVARIANTS 1.675 + panic("No memory for IFA"); 1.676 +#endif 1.677 + return (NULL); 1.678 + } 1.679 + memset(sctp_ifap, 0, sizeof(struct sctp_ifa)); 1.680 + sctp_ifap->ifn_p = sctp_ifnp; 1.681 + atomic_add_int(&sctp_ifnp->refcount, 1); 1.682 + sctp_ifap->vrf_id = vrf_id; 1.683 + sctp_ifap->ifa = ifa; 1.684 +#ifdef HAVE_SA_LEN 1.685 + memcpy(&sctp_ifap->address, addr, addr->sa_len); 1.686 +#else 1.687 + switch (addr->sa_family) { 1.688 +#ifdef INET 1.689 + case AF_INET: 1.690 + memcpy(&sctp_ifap->address, addr, sizeof(struct sockaddr_in)); 1.691 + break; 1.692 +#endif 1.693 +#ifdef INET6 1.694 + case AF_INET6: 1.695 + memcpy(&sctp_ifap->address, addr, sizeof(struct sockaddr_in6)); 1.696 + break; 1.697 +#endif 1.698 +#if defined(__Userspace__) 1.699 + case AF_CONN: 1.700 + memcpy(&sctp_ifap->address, addr, sizeof(struct sockaddr_conn)); 1.701 + break; 1.702 +#endif 1.703 + default: 1.704 + /* TSNH */ 1.705 + break; 1.706 + } 1.707 +#endif 1.708 + sctp_ifap->localifa_flags = SCTP_ADDR_VALID | SCTP_ADDR_DEFER_USE; 1.709 + sctp_ifap->flags = ifa_flags; 1.710 + /* Set scope */ 1.711 + switch (sctp_ifap->address.sa.sa_family) { 1.712 +#ifdef INET 1.713 + case AF_INET: 1.714 + { 1.715 + struct sockaddr_in *sin; 1.716 + 1.717 + sin = (struct sockaddr_in *)&sctp_ifap->address.sin; 1.718 + if (SCTP_IFN_IS_IFT_LOOP(sctp_ifap->ifn_p) || 1.719 + (IN4_ISLOOPBACK_ADDRESS(&sin->sin_addr))) { 1.720 + sctp_ifap->src_is_loop = 1; 1.721 + } 1.722 + if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) { 1.723 + sctp_ifap->src_is_priv = 1; 1.724 + } 1.725 + sctp_ifnp->num_v4++; 1.726 + if (new_ifn_af) 1.727 + new_ifn_af = AF_INET; 1.728 + break; 1.729 + } 1.730 +#endif 1.731 +#ifdef INET6 1.732 + case AF_INET6: 1.733 + { 1.734 + /* ok to use deprecated addresses? 
*/ 1.735 + struct sockaddr_in6 *sin6; 1.736 + 1.737 + sin6 = (struct sockaddr_in6 *)&sctp_ifap->address.sin6; 1.738 + if (SCTP_IFN_IS_IFT_LOOP(sctp_ifap->ifn_p) || 1.739 + (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr))) { 1.740 + sctp_ifap->src_is_loop = 1; 1.741 + } 1.742 + if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 1.743 + sctp_ifap->src_is_priv = 1; 1.744 + } 1.745 + sctp_ifnp->num_v6++; 1.746 + if (new_ifn_af) 1.747 + new_ifn_af = AF_INET6; 1.748 + break; 1.749 + } 1.750 +#endif 1.751 +#if defined(__Userspace__) 1.752 + case AF_CONN: 1.753 + if (new_ifn_af) 1.754 + new_ifn_af = AF_CONN; 1.755 + break; 1.756 +#endif 1.757 + default: 1.758 + new_ifn_af = 0; 1.759 + break; 1.760 + } 1.761 + hash_of_addr = sctp_get_ifa_hash_val(&sctp_ifap->address.sa); 1.762 + 1.763 + if ((sctp_ifap->src_is_priv == 0) && 1.764 + (sctp_ifap->src_is_loop == 0)) { 1.765 + sctp_ifap->src_is_glob = 1; 1.766 + } 1.767 + SCTP_IPI_ADDR_WLOCK(); 1.768 + hash_addr_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)]; 1.769 + LIST_INSERT_HEAD(hash_addr_head, sctp_ifap, next_bucket); 1.770 + sctp_ifap->refcount = 1; 1.771 + LIST_INSERT_HEAD(&sctp_ifnp->ifalist, sctp_ifap, next_ifa); 1.772 + sctp_ifnp->ifa_count++; 1.773 + vrf->total_ifa_count++; 1.774 + atomic_add_int(&SCTP_BASE_INFO(ipi_count_ifas), 1); 1.775 + if (new_ifn_af) { 1.776 + SCTP_REGISTER_INTERFACE(ifn_index, new_ifn_af); 1.777 + sctp_ifnp->registered_af = new_ifn_af; 1.778 + } 1.779 + SCTP_IPI_ADDR_WUNLOCK(); 1.780 + if (dynamic_add) { 1.781 + /* Bump up the refcount so that when the timer 1.782 + * completes it will drop back down. 1.783 + */ 1.784 + struct sctp_laddr *wi; 1.785 + 1.786 + atomic_add_int(&sctp_ifap->refcount, 1); 1.787 + wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 1.788 + if (wi == NULL) { 1.789 + /* 1.790 + * Gak, what can we do? We have lost an address 1.791 + * change can you say HOSED? 
1.792 + */ 1.793 + SCTPDBG(SCTP_DEBUG_PCB4, "Lost an address change?\n"); 1.794 + /* Opps, must decrement the count */ 1.795 + sctp_del_addr_from_vrf(vrf_id, addr, ifn_index, 1.796 + if_name); 1.797 + return (NULL); 1.798 + } 1.799 + SCTP_INCR_LADDR_COUNT(); 1.800 + bzero(wi, sizeof(*wi)); 1.801 + (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 1.802 + wi->ifa = sctp_ifap; 1.803 + wi->action = SCTP_ADD_IP_ADDRESS; 1.804 + 1.805 + SCTP_WQ_ADDR_LOCK(); 1.806 + LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 1.807 + SCTP_WQ_ADDR_UNLOCK(); 1.808 + 1.809 + sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 1.810 + (struct sctp_inpcb *)NULL, 1.811 + (struct sctp_tcb *)NULL, 1.812 + (struct sctp_nets *)NULL); 1.813 + } else { 1.814 + /* it's ready for use */ 1.815 + sctp_ifap->localifa_flags &= ~SCTP_ADDR_DEFER_USE; 1.816 + } 1.817 + return (sctp_ifap); 1.818 +} 1.819 + 1.820 +void 1.821 +sctp_del_addr_from_vrf(uint32_t vrf_id, struct sockaddr *addr, 1.822 + uint32_t ifn_index, const char *if_name) 1.823 +{ 1.824 + struct sctp_vrf *vrf; 1.825 + struct sctp_ifa *sctp_ifap = NULL; 1.826 + 1.827 + SCTP_IPI_ADDR_WLOCK(); 1.828 + vrf = sctp_find_vrf(vrf_id); 1.829 + if (vrf == NULL) { 1.830 + SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id); 1.831 + goto out_now; 1.832 + } 1.833 + 1.834 +#ifdef SCTP_DEBUG 1.835 + SCTPDBG(SCTP_DEBUG_PCB4, "vrf_id 0x%x: deleting address:", vrf_id); 1.836 + SCTPDBG_ADDR(SCTP_DEBUG_PCB4, addr); 1.837 +#endif 1.838 + sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED); 1.839 + if (sctp_ifap) { 1.840 + /* Validate the delete */ 1.841 + if (sctp_ifap->ifn_p) { 1.842 + int valid = 0; 1.843 + /*- 1.844 + * The name has priority over the ifn_index 1.845 + * if its given. We do this especially for 1.846 + * panda who might recycle indexes fast. 1.847 + */ 1.848 + if (if_name) { 1.849 + if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, SCTP_IFNAMSIZ) == 0) { 1.850 + /* They match its a correct delete */ 1.851 + valid = 1; 1.852 + } 1.853 + } 1.854 + if (!valid) { 1.855 + /* last ditch check ifn_index */ 1.856 + if (ifn_index == sctp_ifap->ifn_p->ifn_index) { 1.857 + valid = 1; 1.858 + } 1.859 + } 1.860 + if (!valid) { 1.861 + SCTPDBG(SCTP_DEBUG_PCB4, "ifn:%d ifname:%s does not match addresses\n", 1.862 + ifn_index, ((if_name == NULL) ? "NULL" : if_name)); 1.863 + SCTPDBG(SCTP_DEBUG_PCB4, "ifn:%d ifname:%s - ignoring delete\n", 1.864 + sctp_ifap->ifn_p->ifn_index, sctp_ifap->ifn_p->ifn_name); 1.865 + SCTP_IPI_ADDR_WUNLOCK(); 1.866 + return; 1.867 + } 1.868 + } 1.869 + SCTPDBG(SCTP_DEBUG_PCB4, "Deleting ifa %p\n", (void *)sctp_ifap); 1.870 + sctp_ifap->localifa_flags &= SCTP_ADDR_VALID; 1.871 + sctp_ifap->localifa_flags |= SCTP_BEING_DELETED; 1.872 + vrf->total_ifa_count--; 1.873 + LIST_REMOVE(sctp_ifap, next_bucket); 1.874 + sctp_remove_ifa_from_ifn(sctp_ifap); 1.875 + } 1.876 +#ifdef SCTP_DEBUG 1.877 + else { 1.878 + SCTPDBG(SCTP_DEBUG_PCB4, "Del Addr-ifn:%d Could not find address:", 1.879 + ifn_index); 1.880 + SCTPDBG_ADDR(SCTP_DEBUG_PCB1, addr); 1.881 + } 1.882 +#endif 1.883 + 1.884 + out_now: 1.885 + SCTP_IPI_ADDR_WUNLOCK(); 1.886 + if (sctp_ifap) { 1.887 + struct sctp_laddr *wi; 1.888 + 1.889 + wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 1.890 + if (wi == NULL) { 1.891 + /* 1.892 + * Gak, what can we do? We have lost an address 1.893 + * change can you say HOSED? 
1.894 + */ 1.895 + SCTPDBG(SCTP_DEBUG_PCB4, "Lost an address change?\n"); 1.896 + 1.897 + /* Oops, must decrement the count */ 1.898 + sctp_free_ifa(sctp_ifap); 1.899 + return; 1.900 + } 1.901 + SCTP_INCR_LADDR_COUNT(); 1.902 + bzero(wi, sizeof(*wi)); 1.903 + (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 1.904 + wi->ifa = sctp_ifap; 1.905 + wi->action = SCTP_DEL_IP_ADDRESS; 1.906 + SCTP_WQ_ADDR_LOCK(); 1.907 + /* 1.908 + * Should this really be a tailq? As it is we will process the 1.909 + * newest first :-0 1.910 + */ 1.911 + LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 1.912 + SCTP_WQ_ADDR_UNLOCK(); 1.913 + 1.914 + sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 1.915 + (struct sctp_inpcb *)NULL, 1.916 + (struct sctp_tcb *)NULL, 1.917 + (struct sctp_nets *)NULL); 1.918 + } 1.919 + return; 1.920 +} 1.921 + 1.922 + 1.923 +static int 1.924 +sctp_does_stcb_own_this_addr(struct sctp_tcb *stcb, struct sockaddr *to) 1.925 +{ 1.926 + int loopback_scope; 1.927 +#if defined(INET) 1.928 + int ipv4_local_scope, ipv4_addr_legal; 1.929 +#endif 1.930 +#if defined(INET6) 1.931 + int local_scope, site_scope, ipv6_addr_legal; 1.932 +#endif 1.933 +#if defined(__Userspace__) 1.934 + int conn_addr_legal; 1.935 +#endif 1.936 + struct sctp_vrf *vrf; 1.937 + struct sctp_ifn *sctp_ifn; 1.938 + struct sctp_ifa *sctp_ifa; 1.939 + 1.940 + loopback_scope = stcb->asoc.scope.loopback_scope; 1.941 +#if defined(INET) 1.942 + ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope; 1.943 + ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal; 1.944 +#endif 1.945 +#if defined(INET6) 1.946 + local_scope = stcb->asoc.scope.local_scope; 1.947 + site_scope = stcb->asoc.scope.site_scope; 1.948 + ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal; 1.949 +#endif 1.950 +#if defined(__Userspace__) 1.951 + conn_addr_legal = stcb->asoc.scope.conn_addr_legal; 1.952 +#endif 1.953 + 1.954 + SCTP_IPI_ADDR_RLOCK(); 1.955 + vrf = sctp_find_vrf(stcb->asoc.vrf_id); 1.956 + if (vrf == NULL) { 1.957 + /* no vrf, no addresses */ 1.958 + SCTP_IPI_ADDR_RUNLOCK(); 1.959 + return (0); 1.960 + } 1.961 + 1.962 + if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1.963 + LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) { 1.964 + if ((loopback_scope == 0) && 1.965 + SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) { 1.966 + continue; 1.967 + } 1.968 + LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) { 1.969 + if (sctp_is_addr_restricted(stcb, sctp_ifa) && 1.970 + (!sctp_is_addr_pending(stcb, sctp_ifa))) { 1.971 + /* We allow pending addresses, where we 1.972 + * have sent an asconf-add to be considered 1.973 + * valid. 
1.974 + */ 1.975 + continue; 1.976 + } 1.977 + if (sctp_ifa->address.sa.sa_family != to->sa_family) { 1.978 + continue; 1.979 + } 1.980 + switch (sctp_ifa->address.sa.sa_family) { 1.981 +#ifdef INET 1.982 + case AF_INET: 1.983 + if (ipv4_addr_legal) { 1.984 + struct sockaddr_in *sin, *rsin; 1.985 + 1.986 + sin = &sctp_ifa->address.sin; 1.987 + rsin = (struct sockaddr_in *)to; 1.988 + if ((ipv4_local_scope == 0) && 1.989 + IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) { 1.990 + continue; 1.991 + } 1.992 + if (sin->sin_addr.s_addr == rsin->sin_addr.s_addr) { 1.993 + SCTP_IPI_ADDR_RUNLOCK(); 1.994 + return (1); 1.995 + } 1.996 + } 1.997 + break; 1.998 +#endif 1.999 +#ifdef INET6 1.1000 + case AF_INET6: 1.1001 + if (ipv6_addr_legal) { 1.1002 + struct sockaddr_in6 *sin6, *rsin6; 1.1003 +#if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME) 1.1004 + struct sockaddr_in6 lsa6; 1.1005 +#endif 1.1006 + sin6 = &sctp_ifa->address.sin6; 1.1007 + rsin6 = (struct sockaddr_in6 *)to; 1.1008 + if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 1.1009 + if (local_scope == 0) 1.1010 + continue; 1.1011 +#if defined(SCTP_EMBEDDED_V6_SCOPE) 1.1012 + if (sin6->sin6_scope_id == 0) { 1.1013 +#ifdef SCTP_KAME 1.1014 + if (sa6_recoverscope(sin6) != 0) 1.1015 + continue; 1.1016 +#else 1.1017 + lsa6 = *sin6; 1.1018 + if (in6_recoverscope(&lsa6, 1.1019 + &lsa6.sin6_addr, 1.1020 + NULL)) 1.1021 + continue; 1.1022 + sin6 = &lsa6; 1.1023 +#endif /* SCTP_KAME */ 1.1024 + } 1.1025 +#endif /* SCTP_EMBEDDED_V6_SCOPE */ 1.1026 + } 1.1027 + if ((site_scope == 0) && 1.1028 + (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) { 1.1029 + continue; 1.1030 + } 1.1031 + if (SCTP6_ARE_ADDR_EQUAL(sin6, rsin6)) { 1.1032 + SCTP_IPI_ADDR_RUNLOCK(); 1.1033 + return (1); 1.1034 + } 1.1035 + } 1.1036 + break; 1.1037 +#endif 1.1038 +#if defined(__Userspace__) 1.1039 + case AF_CONN: 1.1040 + if (conn_addr_legal) { 1.1041 + struct sockaddr_conn *sconn, *rsconn; 1.1042 + 1.1043 + sconn = &sctp_ifa->address.sconn; 1.1044 + rsconn = (struct sockaddr_conn *)to; 1.1045 + if (sconn->sconn_addr == rsconn->sconn_addr) { 1.1046 + SCTP_IPI_ADDR_RUNLOCK(); 1.1047 + return (1); 1.1048 + } 1.1049 + } 1.1050 + break; 1.1051 +#endif 1.1052 + default: 1.1053 + /* TSNH */ 1.1054 + break; 1.1055 + } 1.1056 + } 1.1057 + } 1.1058 + } else { 1.1059 + struct sctp_laddr *laddr; 1.1060 + 1.1061 + LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) { 1.1062 + if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) { 1.1063 + SCTPDBG(SCTP_DEBUG_PCB1, "ifa being deleted\n"); 1.1064 + continue; 1.1065 + } 1.1066 + if (sctp_is_addr_restricted(stcb, laddr->ifa) && 1.1067 + (!sctp_is_addr_pending(stcb, laddr->ifa))) { 1.1068 + /* We allow pending addresses, where we 1.1069 + * have sent an asconf-add to be considered 1.1070 + * valid. 
1.1071 + */ 1.1072 + continue; 1.1073 + } 1.1074 + if (laddr->ifa->address.sa.sa_family != to->sa_family) { 1.1075 + continue; 1.1076 + } 1.1077 + switch (to->sa_family) { 1.1078 +#ifdef INET 1.1079 + case AF_INET: 1.1080 + { 1.1081 + struct sockaddr_in *sin, *rsin; 1.1082 + 1.1083 + sin = (struct sockaddr_in *)&laddr->ifa->address.sin; 1.1084 + rsin = (struct sockaddr_in *)to; 1.1085 + if (sin->sin_addr.s_addr == rsin->sin_addr.s_addr) { 1.1086 + SCTP_IPI_ADDR_RUNLOCK(); 1.1087 + return (1); 1.1088 + } 1.1089 + break; 1.1090 + } 1.1091 +#endif 1.1092 +#ifdef INET6 1.1093 + case AF_INET6: 1.1094 + { 1.1095 + struct sockaddr_in6 *sin6, *rsin6; 1.1096 + 1.1097 + sin6 = (struct sockaddr_in6 *)&laddr->ifa->address.sin6; 1.1098 + rsin6 = (struct sockaddr_in6 *)to; 1.1099 + if (SCTP6_ARE_ADDR_EQUAL(sin6, rsin6)) { 1.1100 + SCTP_IPI_ADDR_RUNLOCK(); 1.1101 + return (1); 1.1102 + } 1.1103 + break; 1.1104 + } 1.1105 + 1.1106 +#endif 1.1107 +#if defined(__Userspace__) 1.1108 + case AF_CONN: 1.1109 + { 1.1110 + struct sockaddr_conn *sconn, *rsconn; 1.1111 + 1.1112 + sconn = (struct sockaddr_conn *)&laddr->ifa->address.sconn; 1.1113 + rsconn = (struct sockaddr_conn *)to; 1.1114 + if (sconn->sconn_addr == rsconn->sconn_addr) { 1.1115 + SCTP_IPI_ADDR_RUNLOCK(); 1.1116 + return (1); 1.1117 + } 1.1118 + break; 1.1119 + } 1.1120 +#endif 1.1121 + default: 1.1122 + /* TSNH */ 1.1123 + break; 1.1124 + } 1.1125 + 1.1126 + } 1.1127 + } 1.1128 + SCTP_IPI_ADDR_RUNLOCK(); 1.1129 + return (0); 1.1130 +} 1.1131 + 1.1132 + 1.1133 +static struct sctp_tcb * 1.1134 +sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from, 1.1135 + struct sockaddr *to, struct sctp_nets **netp, uint32_t vrf_id) 1.1136 +{ 1.1137 + /**** ASSUMES THE CALLER holds the INP_INFO_RLOCK */ 1.1138 + /* 1.1139 + * If we support the TCP model, then we must now dig through to see 1.1140 + * if we can find our endpoint in the list of tcp ep's. 1.1141 + */ 1.1142 + uint16_t lport, rport; 1.1143 + struct sctppcbhead *ephead; 1.1144 + struct sctp_inpcb *inp; 1.1145 + struct sctp_laddr *laddr; 1.1146 + struct sctp_tcb *stcb; 1.1147 + struct sctp_nets *net; 1.1148 +#ifdef SCTP_MVRF 1.1149 + int fnd, i; 1.1150 +#endif 1.1151 + 1.1152 + if ((to == NULL) || (from == NULL)) { 1.1153 + return (NULL); 1.1154 + } 1.1155 + 1.1156 + switch (to->sa_family) { 1.1157 +#ifdef INET 1.1158 + case AF_INET: 1.1159 + if (from->sa_family == AF_INET) { 1.1160 + lport = ((struct sockaddr_in *)to)->sin_port; 1.1161 + rport = ((struct sockaddr_in *)from)->sin_port; 1.1162 + } else { 1.1163 + return (NULL); 1.1164 + } 1.1165 + break; 1.1166 +#endif 1.1167 +#ifdef INET6 1.1168 + case AF_INET6: 1.1169 + if (from->sa_family == AF_INET6) { 1.1170 + lport = ((struct sockaddr_in6 *)to)->sin6_port; 1.1171 + rport = ((struct sockaddr_in6 *)from)->sin6_port; 1.1172 + } else { 1.1173 + return (NULL); 1.1174 + } 1.1175 + break; 1.1176 +#endif 1.1177 +#if defined(__Userspace__) 1.1178 + case AF_CONN: 1.1179 + if (from->sa_family == AF_CONN) { 1.1180 + lport = ((struct sockaddr_conn *)to)->sconn_port; 1.1181 + rport = ((struct sockaddr_conn *)from)->sconn_port; 1.1182 + } else { 1.1183 + return (NULL); 1.1184 + } 1.1185 + break; 1.1186 +#endif 1.1187 + default: 1.1188 + return (NULL); 1.1189 + } 1.1190 + ephead = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR((lport | rport), SCTP_BASE_INFO(hashtcpmark))]; 1.1191 + /* 1.1192 + * Ok now for each of the guys in this bucket we must look and see: 1.1193 + * - Does the remote port match. 
- Does there single association's 1.1194 + * addresses match this address (to). If so we update p_ep to point 1.1195 + * to this ep and return the tcb from it. 1.1196 + */ 1.1197 + LIST_FOREACH(inp, ephead, sctp_hash) { 1.1198 + SCTP_INP_RLOCK(inp); 1.1199 + if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 1.1200 + SCTP_INP_RUNLOCK(inp); 1.1201 + continue; 1.1202 + } 1.1203 + if (lport != inp->sctp_lport) { 1.1204 + SCTP_INP_RUNLOCK(inp); 1.1205 + continue; 1.1206 + } 1.1207 +#ifdef SCTP_MVRF 1.1208 + fnd = 0; 1.1209 + for (i = 0; i < inp->num_vrfs; i++) { 1.1210 + if (inp->m_vrf_ids[i] == vrf_id) { 1.1211 + fnd = 1; 1.1212 + break; 1.1213 + } 1.1214 + } 1.1215 + if (fnd == 0) { 1.1216 + SCTP_INP_RUNLOCK(inp); 1.1217 + continue; 1.1218 + } 1.1219 +#else 1.1220 + if (inp->def_vrf_id != vrf_id) { 1.1221 + SCTP_INP_RUNLOCK(inp); 1.1222 + continue; 1.1223 + } 1.1224 +#endif 1.1225 + /* check to see if the ep has one of the addresses */ 1.1226 + if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 1.1227 + /* We are NOT bound all, so look further */ 1.1228 + int match = 0; 1.1229 + 1.1230 + LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1.1231 + 1.1232 + if (laddr->ifa == NULL) { 1.1233 + SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n", __FUNCTION__); 1.1234 + continue; 1.1235 + } 1.1236 + if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) { 1.1237 + SCTPDBG(SCTP_DEBUG_PCB1, "ifa being deleted\n"); 1.1238 + continue; 1.1239 + } 1.1240 + if (laddr->ifa->address.sa.sa_family == 1.1241 + to->sa_family) { 1.1242 + /* see if it matches */ 1.1243 +#ifdef INET 1.1244 + if (from->sa_family == AF_INET) { 1.1245 + struct sockaddr_in *intf_addr, *sin; 1.1246 + 1.1247 + intf_addr = &laddr->ifa->address.sin; 1.1248 + sin = (struct sockaddr_in *)to; 1.1249 + if (sin->sin_addr.s_addr == 1.1250 + intf_addr->sin_addr.s_addr) { 1.1251 + match = 1; 1.1252 + break; 1.1253 + } 1.1254 + } 1.1255 +#endif 1.1256 +#ifdef INET6 1.1257 + if (from->sa_family == AF_INET6) { 1.1258 + struct sockaddr_in6 *intf_addr6; 1.1259 + struct sockaddr_in6 *sin6; 1.1260 + 1.1261 + sin6 = (struct sockaddr_in6 *) 1.1262 + to; 1.1263 + intf_addr6 = &laddr->ifa->address.sin6; 1.1264 + 1.1265 + if (SCTP6_ARE_ADDR_EQUAL(sin6, 1.1266 + intf_addr6)) { 1.1267 + match = 1; 1.1268 + break; 1.1269 + } 1.1270 + } 1.1271 +#endif 1.1272 +#if defined(__Userspace__) 1.1273 + if (from->sa_family == AF_CONN) { 1.1274 + struct sockaddr_conn *intf_addr, *sconn; 1.1275 + 1.1276 + intf_addr = &laddr->ifa->address.sconn; 1.1277 + sconn = (struct sockaddr_conn *)to; 1.1278 + if (sconn->sconn_addr == 1.1279 + intf_addr->sconn_addr) { 1.1280 + match = 1; 1.1281 + break; 1.1282 + } 1.1283 + } 1.1284 +#endif 1.1285 + } 1.1286 + } 1.1287 + if (match == 0) { 1.1288 + /* This endpoint does not have this address */ 1.1289 + SCTP_INP_RUNLOCK(inp); 1.1290 + continue; 1.1291 + } 1.1292 + } 1.1293 + /* 1.1294 + * Ok if we hit here the ep has the address, does it hold 1.1295 + * the tcb? 1.1296 + */ 1.1297 + /* XXX: Why don't we TAILQ_FOREACH through sctp_asoc_list? */ 1.1298 + stcb = LIST_FIRST(&inp->sctp_asoc_list); 1.1299 + if (stcb == NULL) { 1.1300 + SCTP_INP_RUNLOCK(inp); 1.1301 + continue; 1.1302 + } 1.1303 + SCTP_TCB_LOCK(stcb); 1.1304 + if (!sctp_does_stcb_own_this_addr(stcb, to)) { 1.1305 + SCTP_TCB_UNLOCK(stcb); 1.1306 + SCTP_INP_RUNLOCK(inp); 1.1307 + continue; 1.1308 + } 1.1309 + if (stcb->rport != rport) { 1.1310 + /* remote port does not match. 
*/ 1.1311 + SCTP_TCB_UNLOCK(stcb); 1.1312 + SCTP_INP_RUNLOCK(inp); 1.1313 + continue; 1.1314 + } 1.1315 + if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 1.1316 + SCTP_TCB_UNLOCK(stcb); 1.1317 + SCTP_INP_RUNLOCK(inp); 1.1318 + continue; 1.1319 + } 1.1320 + if (!sctp_does_stcb_own_this_addr(stcb, to)) { 1.1321 + SCTP_TCB_UNLOCK(stcb); 1.1322 + SCTP_INP_RUNLOCK(inp); 1.1323 + continue; 1.1324 + } 1.1325 + /* Does this TCB have a matching address? */ 1.1326 + TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1.1327 + 1.1328 + if (net->ro._l_addr.sa.sa_family != from->sa_family) { 1.1329 + /* not the same family, can't be a match */ 1.1330 + continue; 1.1331 + } 1.1332 + switch (from->sa_family) { 1.1333 +#ifdef INET 1.1334 + case AF_INET: 1.1335 + { 1.1336 + struct sockaddr_in *sin, *rsin; 1.1337 + 1.1338 + sin = (struct sockaddr_in *)&net->ro._l_addr; 1.1339 + rsin = (struct sockaddr_in *)from; 1.1340 + if (sin->sin_addr.s_addr == 1.1341 + rsin->sin_addr.s_addr) { 1.1342 + /* found it */ 1.1343 + if (netp != NULL) { 1.1344 + *netp = net; 1.1345 + } 1.1346 + /* Update the endpoint pointer */ 1.1347 + *inp_p = inp; 1.1348 + SCTP_INP_RUNLOCK(inp); 1.1349 + return (stcb); 1.1350 + } 1.1351 + break; 1.1352 + } 1.1353 +#endif 1.1354 +#ifdef INET6 1.1355 + case AF_INET6: 1.1356 + { 1.1357 + struct sockaddr_in6 *sin6, *rsin6; 1.1358 + 1.1359 + sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 1.1360 + rsin6 = (struct sockaddr_in6 *)from; 1.1361 + if (SCTP6_ARE_ADDR_EQUAL(sin6, 1.1362 + rsin6)) { 1.1363 + /* found it */ 1.1364 + if (netp != NULL) { 1.1365 + *netp = net; 1.1366 + } 1.1367 + /* Update the endpoint pointer */ 1.1368 + *inp_p = inp; 1.1369 + SCTP_INP_RUNLOCK(inp); 1.1370 + return (stcb); 1.1371 + } 1.1372 + break; 1.1373 + } 1.1374 +#endif 1.1375 +#if defined(__Userspace__) 1.1376 + case AF_CONN: 1.1377 + { 1.1378 + struct sockaddr_conn *sconn, *rsconn; 1.1379 + 1.1380 + sconn = (struct sockaddr_conn *)&net->ro._l_addr; 1.1381 + rsconn = (struct sockaddr_conn *)from; 1.1382 + if (sconn->sconn_addr == rsconn->sconn_addr) { 1.1383 + /* found it */ 1.1384 + if (netp != NULL) { 1.1385 + *netp = net; 1.1386 + } 1.1387 + /* Update the endpoint pointer */ 1.1388 + *inp_p = inp; 1.1389 + SCTP_INP_RUNLOCK(inp); 1.1390 + return (stcb); 1.1391 + } 1.1392 + break; 1.1393 + } 1.1394 +#endif 1.1395 + default: 1.1396 + /* TSNH */ 1.1397 + break; 1.1398 + } 1.1399 + } 1.1400 + SCTP_TCB_UNLOCK(stcb); 1.1401 + SCTP_INP_RUNLOCK(inp); 1.1402 + } 1.1403 + return (NULL); 1.1404 +} 1.1405 + 1.1406 + 1.1407 +/* 1.1408 + * rules for use 1.1409 + * 1.1410 + * 1) If I return a NULL you must decrement any INP ref cnt. 2) If I find an 1.1411 + * stcb, both will be locked (locked_tcb and stcb) but decrement will be done 1.1412 + * (if locked == NULL). 3) Decrement happens on return ONLY if locked == 1.1413 + * NULL. 
1.1414 + */ 1.1415 + 1.1416 +struct sctp_tcb * 1.1417 +sctp_findassociation_ep_addr(struct sctp_inpcb **inp_p, struct sockaddr *remote, 1.1418 + struct sctp_nets **netp, struct sockaddr *local, struct sctp_tcb *locked_tcb) 1.1419 +{ 1.1420 + struct sctpasochead *head; 1.1421 + struct sctp_inpcb *inp; 1.1422 + struct sctp_tcb *stcb = NULL; 1.1423 + struct sctp_nets *net; 1.1424 + uint16_t rport; 1.1425 + 1.1426 + inp = *inp_p; 1.1427 + switch (remote->sa_family) { 1.1428 +#ifdef INET 1.1429 + case AF_INET: 1.1430 + rport = (((struct sockaddr_in *)remote)->sin_port); 1.1431 + break; 1.1432 +#endif 1.1433 +#ifdef INET6 1.1434 + case AF_INET6: 1.1435 + rport = (((struct sockaddr_in6 *)remote)->sin6_port); 1.1436 + break; 1.1437 +#endif 1.1438 +#if defined(__Userspace__) 1.1439 + case AF_CONN: 1.1440 + rport = (((struct sockaddr_in6 *)remote)->sin6_port); 1.1441 + break; 1.1442 +#endif 1.1443 + default: 1.1444 + return (NULL); 1.1445 + } 1.1446 + if (locked_tcb) { 1.1447 + /* 1.1448 + * UN-lock so we can do proper locking here this occurs when 1.1449 + * called from load_addresses_from_init. 1.1450 + */ 1.1451 + atomic_add_int(&locked_tcb->asoc.refcnt, 1); 1.1452 + SCTP_TCB_UNLOCK(locked_tcb); 1.1453 + } 1.1454 + SCTP_INP_INFO_RLOCK(); 1.1455 + if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1.1456 + (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 1.1457 + /*- 1.1458 + * Now either this guy is our listener or it's the 1.1459 + * connector. If it is the one that issued the connect, then 1.1460 + * it's only chance is to be the first TCB in the list. If 1.1461 + * it is the acceptor, then do the special_lookup to hash 1.1462 + * and find the real inp. 1.1463 + */ 1.1464 + if ((inp->sctp_socket) && (inp->sctp_socket->so_qlimit)) { 1.1465 + /* to is peer addr, from is my addr */ 1.1466 +#ifndef SCTP_MVRF 1.1467 + stcb = sctp_tcb_special_locate(inp_p, remote, local, 1.1468 + netp, inp->def_vrf_id); 1.1469 + if ((stcb != NULL) && (locked_tcb == NULL)) { 1.1470 + /* we have a locked tcb, lower refcount */ 1.1471 + SCTP_INP_DECR_REF(inp); 1.1472 + } 1.1473 + if ((locked_tcb != NULL) && (locked_tcb != stcb)) { 1.1474 + SCTP_INP_RLOCK(locked_tcb->sctp_ep); 1.1475 + SCTP_TCB_LOCK(locked_tcb); 1.1476 + atomic_subtract_int(&locked_tcb->asoc.refcnt, 1); 1.1477 + SCTP_INP_RUNLOCK(locked_tcb->sctp_ep); 1.1478 + } 1.1479 +#else 1.1480 + /*- 1.1481 + * MVRF is tricky, we must look in every VRF 1.1482 + * the endpoint has. 1.1483 + */ 1.1484 + int i; 1.1485 + 1.1486 + for (i = 0; i < inp->num_vrfs; i++) { 1.1487 + stcb = sctp_tcb_special_locate(inp_p, remote, local, 1.1488 + netp, inp->m_vrf_ids[i]); 1.1489 + if ((stcb != NULL) && (locked_tcb == NULL)) { 1.1490 + /* we have a locked tcb, lower refcount */ 1.1491 + SCTP_INP_DECR_REF(inp); 1.1492 + break; 1.1493 + } 1.1494 + if ((locked_tcb != NULL) && (locked_tcb != stcb)) { 1.1495 + SCTP_INP_RLOCK(locked_tcb->sctp_ep); 1.1496 + SCTP_TCB_LOCK(locked_tcb); 1.1497 + atomic_subtract_int(&locked_tcb->asoc.refcnt, 1); 1.1498 + SCTP_INP_RUNLOCK(locked_tcb->sctp_ep); 1.1499 + break; 1.1500 + } 1.1501 + } 1.1502 +#endif 1.1503 + SCTP_INP_INFO_RUNLOCK(); 1.1504 + return (stcb); 1.1505 + } else { 1.1506 + SCTP_INP_WLOCK(inp); 1.1507 + if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 1.1508 + goto null_return; 1.1509 + } 1.1510 + stcb = LIST_FIRST(&inp->sctp_asoc_list); 1.1511 + if (stcb == NULL) { 1.1512 + goto null_return; 1.1513 + } 1.1514 + SCTP_TCB_LOCK(stcb); 1.1515 + 1.1516 + if (stcb->rport != rport) { 1.1517 + /* remote port does not match. 
*/ 1.1518 + SCTP_TCB_UNLOCK(stcb); 1.1519 + goto null_return; 1.1520 + } 1.1521 + if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 1.1522 + SCTP_TCB_UNLOCK(stcb); 1.1523 + goto null_return; 1.1524 + } 1.1525 + if (local && !sctp_does_stcb_own_this_addr(stcb, local)) { 1.1526 + SCTP_TCB_UNLOCK(stcb); 1.1527 + goto null_return; 1.1528 + } 1.1529 + /* now look at the list of remote addresses */ 1.1530 + TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1.1531 +#ifdef INVARIANTS 1.1532 + if (net == (TAILQ_NEXT(net, sctp_next))) { 1.1533 + panic("Corrupt net list"); 1.1534 + } 1.1535 +#endif 1.1536 + if (net->ro._l_addr.sa.sa_family != 1.1537 + remote->sa_family) { 1.1538 + /* not the same family */ 1.1539 + continue; 1.1540 + } 1.1541 + switch (remote->sa_family) { 1.1542 +#ifdef INET 1.1543 + case AF_INET: 1.1544 + { 1.1545 + struct sockaddr_in *sin, *rsin; 1.1546 + 1.1547 + sin = (struct sockaddr_in *) 1.1548 + &net->ro._l_addr; 1.1549 + rsin = (struct sockaddr_in *)remote; 1.1550 + if (sin->sin_addr.s_addr == 1.1551 + rsin->sin_addr.s_addr) { 1.1552 + /* found it */ 1.1553 + if (netp != NULL) { 1.1554 + *netp = net; 1.1555 + } 1.1556 + if (locked_tcb == NULL) { 1.1557 + SCTP_INP_DECR_REF(inp); 1.1558 + } else if (locked_tcb != stcb) { 1.1559 + SCTP_TCB_LOCK(locked_tcb); 1.1560 + } 1.1561 + if (locked_tcb) { 1.1562 + atomic_subtract_int(&locked_tcb->asoc.refcnt, 1); 1.1563 + } 1.1564 + 1.1565 + SCTP_INP_WUNLOCK(inp); 1.1566 + SCTP_INP_INFO_RUNLOCK(); 1.1567 + return (stcb); 1.1568 + } 1.1569 + break; 1.1570 + } 1.1571 +#endif 1.1572 +#ifdef INET6 1.1573 + case AF_INET6: 1.1574 + { 1.1575 + struct sockaddr_in6 *sin6, *rsin6; 1.1576 + 1.1577 + sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 1.1578 + rsin6 = (struct sockaddr_in6 *)remote; 1.1579 + if (SCTP6_ARE_ADDR_EQUAL(sin6, 1.1580 + rsin6)) { 1.1581 + /* found it */ 1.1582 + if (netp != NULL) { 1.1583 + *netp = net; 1.1584 + } 1.1585 + if (locked_tcb == NULL) { 1.1586 + SCTP_INP_DECR_REF(inp); 1.1587 + } else if (locked_tcb != stcb) { 1.1588 + SCTP_TCB_LOCK(locked_tcb); 1.1589 + } 1.1590 + if (locked_tcb) { 1.1591 + atomic_subtract_int(&locked_tcb->asoc.refcnt, 1); 1.1592 + } 1.1593 + SCTP_INP_WUNLOCK(inp); 1.1594 + SCTP_INP_INFO_RUNLOCK(); 1.1595 + return (stcb); 1.1596 + } 1.1597 + break; 1.1598 + } 1.1599 +#endif 1.1600 +#if defined(__Userspace__) 1.1601 + case AF_CONN: 1.1602 + { 1.1603 + struct sockaddr_conn *sconn, *rsconn; 1.1604 + 1.1605 + sconn = (struct sockaddr_conn *)&net->ro._l_addr; 1.1606 + rsconn = (struct sockaddr_conn *)remote; 1.1607 + if (sconn->sconn_addr == rsconn->sconn_addr) { 1.1608 + /* found it */ 1.1609 + if (netp != NULL) { 1.1610 + *netp = net; 1.1611 + } 1.1612 + if (locked_tcb == NULL) { 1.1613 + SCTP_INP_DECR_REF(inp); 1.1614 + } else if (locked_tcb != stcb) { 1.1615 + SCTP_TCB_LOCK(locked_tcb); 1.1616 + } 1.1617 + if (locked_tcb) { 1.1618 + atomic_subtract_int(&locked_tcb->asoc.refcnt, 1); 1.1619 + } 1.1620 + SCTP_INP_WUNLOCK(inp); 1.1621 + SCTP_INP_INFO_RUNLOCK(); 1.1622 + return (stcb); 1.1623 + } 1.1624 + break; 1.1625 + } 1.1626 +#endif 1.1627 + default: 1.1628 + /* TSNH */ 1.1629 + break; 1.1630 + } 1.1631 + } 1.1632 + SCTP_TCB_UNLOCK(stcb); 1.1633 + } 1.1634 + } else { 1.1635 + SCTP_INP_WLOCK(inp); 1.1636 + if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 1.1637 + goto null_return; 1.1638 + } 1.1639 + head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(rport, 1.1640 + inp->sctp_hashmark)]; 1.1641 + if (head == NULL) { 1.1642 + goto null_return; 1.1643 + } 1.1644 + LIST_FOREACH(stcb, head, 
sctp_tcbhash) { 1.1645 + if (stcb->rport != rport) { 1.1646 + /* remote port does not match */ 1.1647 + continue; 1.1648 + } 1.1649 + SCTP_TCB_LOCK(stcb); 1.1650 + if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 1.1651 + SCTP_TCB_UNLOCK(stcb); 1.1652 + continue; 1.1653 + } 1.1654 + if (local && !sctp_does_stcb_own_this_addr(stcb, local)) { 1.1655 + SCTP_TCB_UNLOCK(stcb); 1.1656 + continue; 1.1657 + } 1.1658 + /* now look at the list of remote addresses */ 1.1659 + TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1.1660 +#ifdef INVARIANTS 1.1661 + if (net == (TAILQ_NEXT(net, sctp_next))) { 1.1662 + panic("Corrupt net list"); 1.1663 + } 1.1664 +#endif 1.1665 + if (net->ro._l_addr.sa.sa_family != 1.1666 + remote->sa_family) { 1.1667 + /* not the same family */ 1.1668 + continue; 1.1669 + } 1.1670 + switch (remote->sa_family) { 1.1671 +#ifdef INET 1.1672 + case AF_INET: 1.1673 + { 1.1674 + struct sockaddr_in *sin, *rsin; 1.1675 + 1.1676 + sin = (struct sockaddr_in *) 1.1677 + &net->ro._l_addr; 1.1678 + rsin = (struct sockaddr_in *)remote; 1.1679 + if (sin->sin_addr.s_addr == 1.1680 + rsin->sin_addr.s_addr) { 1.1681 + /* found it */ 1.1682 + if (netp != NULL) { 1.1683 + *netp = net; 1.1684 + } 1.1685 + if (locked_tcb == NULL) { 1.1686 + SCTP_INP_DECR_REF(inp); 1.1687 + } else if (locked_tcb != stcb) { 1.1688 + SCTP_TCB_LOCK(locked_tcb); 1.1689 + } 1.1690 + if (locked_tcb) { 1.1691 + atomic_subtract_int(&locked_tcb->asoc.refcnt, 1); 1.1692 + } 1.1693 + SCTP_INP_WUNLOCK(inp); 1.1694 + SCTP_INP_INFO_RUNLOCK(); 1.1695 + return (stcb); 1.1696 + } 1.1697 + break; 1.1698 + } 1.1699 +#endif 1.1700 +#ifdef INET6 1.1701 + case AF_INET6: 1.1702 + { 1.1703 + struct sockaddr_in6 *sin6, *rsin6; 1.1704 + 1.1705 + sin6 = (struct sockaddr_in6 *) 1.1706 + &net->ro._l_addr; 1.1707 + rsin6 = (struct sockaddr_in6 *)remote; 1.1708 + if (SCTP6_ARE_ADDR_EQUAL(sin6, 1.1709 + rsin6)) { 1.1710 + /* found it */ 1.1711 + if (netp != NULL) { 1.1712 + *netp = net; 1.1713 + } 1.1714 + if (locked_tcb == NULL) { 1.1715 + SCTP_INP_DECR_REF(inp); 1.1716 + } else if (locked_tcb != stcb) { 1.1717 + SCTP_TCB_LOCK(locked_tcb); 1.1718 + } 1.1719 + if (locked_tcb) { 1.1720 + atomic_subtract_int(&locked_tcb->asoc.refcnt, 1); 1.1721 + } 1.1722 + SCTP_INP_WUNLOCK(inp); 1.1723 + SCTP_INP_INFO_RUNLOCK(); 1.1724 + return (stcb); 1.1725 + } 1.1726 + break; 1.1727 + } 1.1728 +#endif 1.1729 +#if defined(__Userspace__) 1.1730 + case AF_CONN: 1.1731 + { 1.1732 + struct sockaddr_conn *sconn, *rsconn; 1.1733 + 1.1734 + sconn = (struct sockaddr_conn *)&net->ro._l_addr; 1.1735 + rsconn = (struct sockaddr_conn *)remote; 1.1736 + if (sconn->sconn_addr == rsconn->sconn_addr) { 1.1737 + /* found it */ 1.1738 + if (netp != NULL) { 1.1739 + *netp = net; 1.1740 + } 1.1741 + if (locked_tcb == NULL) { 1.1742 + SCTP_INP_DECR_REF(inp); 1.1743 + } else if (locked_tcb != stcb) { 1.1744 + SCTP_TCB_LOCK(locked_tcb); 1.1745 + } 1.1746 + if (locked_tcb) { 1.1747 + atomic_subtract_int(&locked_tcb->asoc.refcnt, 1); 1.1748 + } 1.1749 + SCTP_INP_WUNLOCK(inp); 1.1750 + SCTP_INP_INFO_RUNLOCK(); 1.1751 + return (stcb); 1.1752 + } 1.1753 + break; 1.1754 + } 1.1755 +#endif 1.1756 + default: 1.1757 + /* TSNH */ 1.1758 + break; 1.1759 + } 1.1760 + } 1.1761 + SCTP_TCB_UNLOCK(stcb); 1.1762 + } 1.1763 + } 1.1764 +null_return: 1.1765 + /* clean up for returning null */ 1.1766 + if (locked_tcb) { 1.1767 + SCTP_TCB_LOCK(locked_tcb); 1.1768 + atomic_subtract_int(&locked_tcb->asoc.refcnt, 1); 1.1769 + } 1.1770 + SCTP_INP_WUNLOCK(inp); 1.1771 + SCTP_INP_INFO_RUNLOCK(); 1.1772 + 
/* not found */ 1.1773 + return (NULL); 1.1774 +} 1.1775 + 1.1776 + 1.1777 +/* 1.1778 + * Find an association for a specific endpoint using the association id given 1.1779 + * out in the COMM_UP notification 1.1780 + */ 1.1781 +struct sctp_tcb * 1.1782 +sctp_findasoc_ep_asocid_locked(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock) 1.1783 +{ 1.1784 + /* 1.1785 + * Use my the assoc_id to find a endpoint 1.1786 + */ 1.1787 + struct sctpasochead *head; 1.1788 + struct sctp_tcb *stcb; 1.1789 + uint32_t id; 1.1790 + 1.1791 + if (inp == NULL) { 1.1792 + SCTP_PRINTF("TSNH ep_associd\n"); 1.1793 + return (NULL); 1.1794 + } 1.1795 + if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 1.1796 + SCTP_PRINTF("TSNH ep_associd0\n"); 1.1797 + return (NULL); 1.1798 + } 1.1799 + id = (uint32_t)asoc_id; 1.1800 + head = &inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(id, inp->hashasocidmark)]; 1.1801 + if (head == NULL) { 1.1802 + /* invalid id TSNH */ 1.1803 + SCTP_PRINTF("TSNH ep_associd1\n"); 1.1804 + return (NULL); 1.1805 + } 1.1806 + LIST_FOREACH(stcb, head, sctp_tcbasocidhash) { 1.1807 + if (stcb->asoc.assoc_id == id) { 1.1808 + if (inp != stcb->sctp_ep) { 1.1809 + /* 1.1810 + * some other guy has the same id active (id 1.1811 + * collision ??). 1.1812 + */ 1.1813 + SCTP_PRINTF("TSNH ep_associd2\n"); 1.1814 + continue; 1.1815 + } 1.1816 + if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 1.1817 + continue; 1.1818 + } 1.1819 + if (want_lock) { 1.1820 + SCTP_TCB_LOCK(stcb); 1.1821 + } 1.1822 + return (stcb); 1.1823 + } 1.1824 + } 1.1825 + return (NULL); 1.1826 +} 1.1827 + 1.1828 + 1.1829 +struct sctp_tcb * 1.1830 +sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock) 1.1831 +{ 1.1832 + struct sctp_tcb *stcb; 1.1833 + 1.1834 + SCTP_INP_RLOCK(inp); 1.1835 + stcb = sctp_findasoc_ep_asocid_locked(inp, asoc_id, want_lock); 1.1836 + SCTP_INP_RUNLOCK(inp); 1.1837 + return (stcb); 1.1838 +} 1.1839 + 1.1840 + 1.1841 +/* 1.1842 + * Endpoint probe expects that the INP_INFO is locked. 
1.1843 + */ 1.1844 +static struct sctp_inpcb * 1.1845 +sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head, 1.1846 + uint16_t lport, uint32_t vrf_id) 1.1847 +{ 1.1848 + struct sctp_inpcb *inp; 1.1849 + struct sctp_laddr *laddr; 1.1850 +#ifdef INET 1.1851 + struct sockaddr_in *sin; 1.1852 +#endif 1.1853 +#ifdef INET6 1.1854 + struct sockaddr_in6 *sin6; 1.1855 + struct sockaddr_in6 *intf_addr6; 1.1856 +#endif 1.1857 +#if defined(__Userspace__) 1.1858 + struct sockaddr_conn *sconn; 1.1859 +#endif 1.1860 +#ifdef SCTP_MVRF 1.1861 + int i; 1.1862 +#endif 1.1863 + int fnd; 1.1864 + 1.1865 +#ifdef INET 1.1866 + sin = NULL; 1.1867 +#endif 1.1868 +#ifdef INET6 1.1869 + sin6 = NULL; 1.1870 +#endif 1.1871 +#if defined(__Userspace__) 1.1872 + sconn = NULL; 1.1873 +#endif 1.1874 + switch (nam->sa_family) { 1.1875 +#ifdef INET 1.1876 + case AF_INET: 1.1877 + sin = (struct sockaddr_in *)nam; 1.1878 + break; 1.1879 +#endif 1.1880 +#ifdef INET6 1.1881 + case AF_INET6: 1.1882 + sin6 = (struct sockaddr_in6 *)nam; 1.1883 + break; 1.1884 +#endif 1.1885 +#if defined(__Userspace__) 1.1886 + case AF_CONN: 1.1887 + sconn = (struct sockaddr_conn *)nam; 1.1888 + break; 1.1889 +#endif 1.1890 + default: 1.1891 + /* unsupported family */ 1.1892 + return (NULL); 1.1893 + } 1.1894 + 1.1895 + if (head == NULL) 1.1896 + return (NULL); 1.1897 + 1.1898 + LIST_FOREACH(inp, head, sctp_hash) { 1.1899 + SCTP_INP_RLOCK(inp); 1.1900 + if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 1.1901 + SCTP_INP_RUNLOCK(inp); 1.1902 + continue; 1.1903 + } 1.1904 + if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) && 1.1905 + (inp->sctp_lport == lport)) { 1.1906 + /* got it */ 1.1907 +#ifdef INET 1.1908 + if ((nam->sa_family == AF_INET) && 1.1909 + (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 1.1910 + SCTP_IPV6_V6ONLY(inp)) { 1.1911 + /* IPv4 on a IPv6 socket with ONLY IPv6 set */ 1.1912 + SCTP_INP_RUNLOCK(inp); 1.1913 + continue; 1.1914 + } 1.1915 +#endif 1.1916 +#ifdef INET6 1.1917 + /* A V6 address and the endpoint is NOT bound V6 */ 1.1918 + if (nam->sa_family == AF_INET6 && 1.1919 + (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) { 1.1920 + SCTP_INP_RUNLOCK(inp); 1.1921 + continue; 1.1922 + } 1.1923 +#endif 1.1924 + /* does a VRF id match? */ 1.1925 + fnd = 0; 1.1926 +#ifdef SCTP_MVRF 1.1927 + for (i = 0; i < inp->num_vrfs; i++) { 1.1928 + if (inp->m_vrf_ids[i] == vrf_id) { 1.1929 + fnd = 1; 1.1930 + break; 1.1931 + } 1.1932 + } 1.1933 +#else 1.1934 + if (inp->def_vrf_id == vrf_id) 1.1935 + fnd = 1; 1.1936 +#endif 1.1937 + 1.1938 + SCTP_INP_RUNLOCK(inp); 1.1939 + if (!fnd) 1.1940 + continue; 1.1941 + return (inp); 1.1942 + } 1.1943 + SCTP_INP_RUNLOCK(inp); 1.1944 + } 1.1945 + switch (nam->sa_family) { 1.1946 +#ifdef INET 1.1947 + case AF_INET: 1.1948 + if (sin->sin_addr.s_addr == INADDR_ANY) { 1.1949 + /* Can't hunt for one that has no address specified */ 1.1950 + return (NULL); 1.1951 + } 1.1952 + break; 1.1953 +#endif 1.1954 +#ifdef INET6 1.1955 + case AF_INET6: 1.1956 + if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 1.1957 + /* Can't hunt for one that has no address specified */ 1.1958 + return (NULL); 1.1959 + } 1.1960 + break; 1.1961 +#endif 1.1962 +#if defined(__Userspace__) 1.1963 + case AF_CONN: 1.1964 + if (sconn->sconn_addr == NULL) { 1.1965 + return (NULL); 1.1966 + } 1.1967 + break; 1.1968 +#endif 1.1969 + default: 1.1970 + break; 1.1971 + } 1.1972 + /* 1.1973 + * ok, not bound to all so see if we can find a EP bound to this 1.1974 + * address. 
1.1975 + */ 1.1976 + LIST_FOREACH(inp, head, sctp_hash) { 1.1977 + SCTP_INP_RLOCK(inp); 1.1978 + if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 1.1979 + SCTP_INP_RUNLOCK(inp); 1.1980 + continue; 1.1981 + } 1.1982 + if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)) { 1.1983 + SCTP_INP_RUNLOCK(inp); 1.1984 + continue; 1.1985 + } 1.1986 + /* 1.1987 + * Ok this could be a likely candidate, look at all of its 1.1988 + * addresses 1.1989 + */ 1.1990 + if (inp->sctp_lport != lport) { 1.1991 + SCTP_INP_RUNLOCK(inp); 1.1992 + continue; 1.1993 + } 1.1994 + /* does a VRF id match? */ 1.1995 + fnd = 0; 1.1996 +#ifdef SCTP_MVRF 1.1997 + for (i = 0; i < inp->num_vrfs; i++) { 1.1998 + if (inp->m_vrf_ids[i] == vrf_id) { 1.1999 + fnd = 1; 1.2000 + break; 1.2001 + } 1.2002 + } 1.2003 +#else 1.2004 + if (inp->def_vrf_id == vrf_id) 1.2005 + fnd = 1; 1.2006 + 1.2007 +#endif 1.2008 + if (!fnd) { 1.2009 + SCTP_INP_RUNLOCK(inp); 1.2010 + continue; 1.2011 + } 1.2012 + LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1.2013 + if (laddr->ifa == NULL) { 1.2014 + SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n", 1.2015 + __FUNCTION__); 1.2016 + continue; 1.2017 + } 1.2018 + SCTPDBG(SCTP_DEBUG_PCB1, "Ok laddr->ifa:%p is possible, ", 1.2019 + (void *)laddr->ifa); 1.2020 + if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) { 1.2021 + SCTPDBG(SCTP_DEBUG_PCB1, "Huh IFA being deleted\n"); 1.2022 + continue; 1.2023 + } 1.2024 + if (laddr->ifa->address.sa.sa_family == nam->sa_family) { 1.2025 + /* possible, see if it matches */ 1.2026 + switch (nam->sa_family) { 1.2027 +#ifdef INET 1.2028 + case AF_INET: 1.2029 +#if defined(__APPLE__) 1.2030 + if (sin == NULL) { 1.2031 + /* TSNH */ 1.2032 + break; 1.2033 + } 1.2034 +#endif 1.2035 + if (sin->sin_addr.s_addr == 1.2036 + laddr->ifa->address.sin.sin_addr.s_addr) { 1.2037 + SCTP_INP_RUNLOCK(inp); 1.2038 + return (inp); 1.2039 + } 1.2040 + break; 1.2041 +#endif 1.2042 +#ifdef INET6 1.2043 + case AF_INET6: 1.2044 + intf_addr6 = &laddr->ifa->address.sin6; 1.2045 + if (SCTP6_ARE_ADDR_EQUAL(sin6, 1.2046 + intf_addr6)) { 1.2047 + SCTP_INP_RUNLOCK(inp); 1.2048 + return (inp); 1.2049 + } 1.2050 + break; 1.2051 +#endif 1.2052 +#if defined(__Userspace__) 1.2053 + case AF_CONN: 1.2054 + if (sconn->sconn_addr == laddr->ifa->address.sconn.sconn_addr) { 1.2055 + SCTP_INP_RUNLOCK(inp); 1.2056 + return (inp); 1.2057 + } 1.2058 + break; 1.2059 +#endif 1.2060 + } 1.2061 + } 1.2062 + } 1.2063 + SCTP_INP_RUNLOCK(inp); 1.2064 + } 1.2065 + return (NULL); 1.2066 +} 1.2067 + 1.2068 + 1.2069 +static struct sctp_inpcb * 1.2070 +sctp_isport_inuse(struct sctp_inpcb *inp, uint16_t lport, uint32_t vrf_id) 1.2071 +{ 1.2072 + struct sctppcbhead *head; 1.2073 + struct sctp_inpcb *t_inp; 1.2074 +#ifdef SCTP_MVRF 1.2075 + int i; 1.2076 +#endif 1.2077 + int fnd; 1.2078 + 1.2079 + head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport, 1.2080 + SCTP_BASE_INFO(hashmark))]; 1.2081 + LIST_FOREACH(t_inp, head, sctp_hash) { 1.2082 + if (t_inp->sctp_lport != lport) { 1.2083 + continue; 1.2084 + } 1.2085 + /* is it in the VRF in question */ 1.2086 + fnd = 0; 1.2087 +#ifdef SCTP_MVRF 1.2088 + for (i = 0; i < inp->num_vrfs; i++) { 1.2089 + if (t_inp->m_vrf_ids[i] == vrf_id) { 1.2090 + fnd = 1; 1.2091 + break; 1.2092 + } 1.2093 + } 1.2094 +#else 1.2095 + if (t_inp->def_vrf_id == vrf_id) 1.2096 + fnd = 1; 1.2097 +#endif 1.2098 + if (!fnd) 1.2099 + continue; 1.2100 + 1.2101 + /* This one is in use. 
*/ 1.2102 + /* check the v6/v4 binding issue */ 1.2103 + if ((t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 1.2104 + SCTP_IPV6_V6ONLY(t_inp)) { 1.2105 + if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1.2106 + /* collision in V6 space */ 1.2107 + return (t_inp); 1.2108 + } else { 1.2109 + /* inp is BOUND_V4 no conflict */ 1.2110 + continue; 1.2111 + } 1.2112 + } else if (t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 1.2113 + /* t_inp is bound v4 and v6, conflict always */ 1.2114 + return (t_inp); 1.2115 + } else { 1.2116 + /* t_inp is bound only V4 */ 1.2117 + if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) && 1.2118 + SCTP_IPV6_V6ONLY(inp)) { 1.2119 + /* no conflict */ 1.2120 + continue; 1.2121 + } 1.2122 + /* else fall through to conflict */ 1.2123 + } 1.2124 + return (t_inp); 1.2125 + } 1.2126 + return (NULL); 1.2127 +} 1.2128 + 1.2129 + 1.2130 +int 1.2131 +sctp_swap_inpcb_for_listen(struct sctp_inpcb *inp) 1.2132 +{ 1.2133 + /* For 1-2-1 with port reuse */ 1.2134 + struct sctppcbhead *head; 1.2135 + struct sctp_inpcb *tinp; 1.2136 + 1.2137 + if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE)) { 1.2138 + /* only works with port reuse on */ 1.2139 + return (-1); 1.2140 + } 1.2141 + if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) { 1.2142 + return (0); 1.2143 + } 1.2144 + SCTP_INP_RUNLOCK(inp); 1.2145 + head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(inp->sctp_lport, 1.2146 + SCTP_BASE_INFO(hashmark))]; 1.2147 + /* Kick out all non-listeners to the TCP hash */ 1.2148 + LIST_FOREACH(tinp, head, sctp_hash) { 1.2149 + if (tinp->sctp_lport != inp->sctp_lport) { 1.2150 + continue; 1.2151 + } 1.2152 + if (tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 1.2153 + continue; 1.2154 + } 1.2155 + if (tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 1.2156 + continue; 1.2157 + } 1.2158 + if (tinp->sctp_socket->so_qlimit) { 1.2159 + continue; 1.2160 + } 1.2161 + SCTP_INP_WLOCK(tinp); 1.2162 + LIST_REMOVE(tinp, sctp_hash); 1.2163 + head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR(tinp->sctp_lport, SCTP_BASE_INFO(hashtcpmark))]; 1.2164 + tinp->sctp_flags |= SCTP_PCB_FLAGS_IN_TCPPOOL; 1.2165 + LIST_INSERT_HEAD(head, tinp, sctp_hash); 1.2166 + SCTP_INP_WUNLOCK(tinp); 1.2167 + } 1.2168 + SCTP_INP_WLOCK(inp); 1.2169 + /* Pull from where he was */ 1.2170 + LIST_REMOVE(inp, sctp_hash); 1.2171 + inp->sctp_flags &= ~SCTP_PCB_FLAGS_IN_TCPPOOL; 1.2172 + head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(inp->sctp_lport, SCTP_BASE_INFO(hashmark))]; 1.2173 + LIST_INSERT_HEAD(head, inp, sctp_hash); 1.2174 + SCTP_INP_WUNLOCK(inp); 1.2175 + SCTP_INP_RLOCK(inp); 1.2176 + return (0); 1.2177 +} 1.2178 + 1.2179 + 1.2180 +struct sctp_inpcb * 1.2181 +sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock, 1.2182 + uint32_t vrf_id) 1.2183 +{ 1.2184 + /* 1.2185 + * First we check the hash table to see if someone has this port 1.2186 + * bound with just the port. 
1.2187 + */ 1.2188 + struct sctp_inpcb *inp; 1.2189 + struct sctppcbhead *head; 1.2190 + int lport; 1.2191 + unsigned int i; 1.2192 +#ifdef INET 1.2193 + struct sockaddr_in *sin; 1.2194 +#endif 1.2195 +#ifdef INET6 1.2196 + struct sockaddr_in6 *sin6; 1.2197 +#endif 1.2198 +#if defined(__Userspace__) 1.2199 + struct sockaddr_conn *sconn; 1.2200 +#endif 1.2201 + 1.2202 + switch (nam->sa_family) { 1.2203 +#ifdef INET 1.2204 + case AF_INET: 1.2205 + sin = (struct sockaddr_in *)nam; 1.2206 + lport = sin->sin_port; 1.2207 + break; 1.2208 +#endif 1.2209 +#ifdef INET6 1.2210 + case AF_INET6: 1.2211 + sin6 = (struct sockaddr_in6 *)nam; 1.2212 + lport = sin6->sin6_port; 1.2213 + break; 1.2214 +#endif 1.2215 +#if defined(__Userspace__) 1.2216 + case AF_CONN: 1.2217 + sconn = (struct sockaddr_conn *)nam; 1.2218 + lport = sconn->sconn_port; 1.2219 + break; 1.2220 +#endif 1.2221 + default: 1.2222 + return (NULL); 1.2223 + } 1.2224 + /* 1.2225 + * I could cheat here and just cast to one of the types but we will 1.2226 + * do it right. It also provides the check against an Unsupported 1.2227 + * type too. 1.2228 + */ 1.2229 + /* Find the head of the ALLADDR chain */ 1.2230 + if (have_lock == 0) { 1.2231 + SCTP_INP_INFO_RLOCK(); 1.2232 + } 1.2233 + head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport, 1.2234 + SCTP_BASE_INFO(hashmark))]; 1.2235 + inp = sctp_endpoint_probe(nam, head, lport, vrf_id); 1.2236 + 1.2237 + /* 1.2238 + * If the TCP model exists it could be that the main listening 1.2239 + * endpoint is gone but there still exists a connected socket for this 1.2240 + * guy. If so we can return the first one that we find. This may NOT 1.2241 + * be the correct one so the caller should be wary on the returned INP. 1.2242 + * Currently the only caller that sets find_tcp_pool is in bindx where 1.2243 + * we are verifying that a user CAN bind the address. He either 1.2244 + * has bound it already, or someone else has, or its open to bind, 1.2245 + * so this is good enough. 1.2246 + */ 1.2247 + if (inp == NULL && find_tcp_pool) { 1.2248 + for (i = 0; i < SCTP_BASE_INFO(hashtcpmark) + 1; i++) { 1.2249 + head = &SCTP_BASE_INFO(sctp_tcpephash)[i]; 1.2250 + inp = sctp_endpoint_probe(nam, head, lport, vrf_id); 1.2251 + if (inp) { 1.2252 + break; 1.2253 + } 1.2254 + } 1.2255 + } 1.2256 + if (inp) { 1.2257 + SCTP_INP_INCR_REF(inp); 1.2258 + } 1.2259 + if (have_lock == 0) { 1.2260 + SCTP_INP_INFO_RUNLOCK(); 1.2261 + } 1.2262 + return (inp); 1.2263 +} 1.2264 + 1.2265 + 1.2266 +/* 1.2267 + * Find an association for an endpoint with the pointer to whom you want to 1.2268 + * send to and the endpoint pointer. The address can be IPv4 or IPv6. We may 1.2269 + * need to change the *to to some other struct like a mbuf... 
1.2270 + */ 1.2271 +struct sctp_tcb * 1.2272 +sctp_findassociation_addr_sa(struct sockaddr *from, struct sockaddr *to, 1.2273 + struct sctp_inpcb **inp_p, struct sctp_nets **netp, int find_tcp_pool, 1.2274 + uint32_t vrf_id) 1.2275 +{ 1.2276 + struct sctp_inpcb *inp = NULL; 1.2277 + struct sctp_tcb *stcb; 1.2278 + 1.2279 + SCTP_INP_INFO_RLOCK(); 1.2280 + if (find_tcp_pool) { 1.2281 + if (inp_p != NULL) { 1.2282 + stcb = sctp_tcb_special_locate(inp_p, from, to, netp, 1.2283 + vrf_id); 1.2284 + } else { 1.2285 + stcb = sctp_tcb_special_locate(&inp, from, to, netp, 1.2286 + vrf_id); 1.2287 + } 1.2288 + if (stcb != NULL) { 1.2289 + SCTP_INP_INFO_RUNLOCK(); 1.2290 + return (stcb); 1.2291 + } 1.2292 + } 1.2293 + inp = sctp_pcb_findep(to, 0, 1, vrf_id); 1.2294 + if (inp_p != NULL) { 1.2295 + *inp_p = inp; 1.2296 + } 1.2297 + SCTP_INP_INFO_RUNLOCK(); 1.2298 + if (inp == NULL) { 1.2299 + return (NULL); 1.2300 + } 1.2301 + /* 1.2302 + * ok, we have an endpoint, now lets find the assoc for it (if any) 1.2303 + * we now place the source address or from in the to of the find 1.2304 + * endpoint call. Since in reality this chain is used from the 1.2305 + * inbound packet side. 1.2306 + */ 1.2307 + if (inp_p != NULL) { 1.2308 + stcb = sctp_findassociation_ep_addr(inp_p, from, netp, to, 1.2309 + NULL); 1.2310 + } else { 1.2311 + stcb = sctp_findassociation_ep_addr(&inp, from, netp, to, 1.2312 + NULL); 1.2313 + } 1.2314 + return (stcb); 1.2315 +} 1.2316 + 1.2317 + 1.2318 +/* 1.2319 + * This routine will grub through the mbuf that is a INIT or INIT-ACK and 1.2320 + * find all addresses that the sender has specified in any address list. Each 1.2321 + * address will be used to lookup the TCB and see if one exits. 1.2322 + */ 1.2323 +static struct sctp_tcb * 1.2324 +sctp_findassociation_special_addr(struct mbuf *m, int offset, 1.2325 + struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp, 1.2326 + struct sockaddr *dst) 1.2327 +{ 1.2328 + struct sctp_paramhdr *phdr, parm_buf; 1.2329 +#if defined(INET) || defined(INET6) 1.2330 + struct sctp_tcb *stcb; 1.2331 + uint16_t ptype; 1.2332 +#endif 1.2333 + uint16_t plen; 1.2334 +#ifdef INET 1.2335 + struct sockaddr_in sin4; 1.2336 +#endif 1.2337 +#ifdef INET6 1.2338 + struct sockaddr_in6 sin6; 1.2339 +#endif 1.2340 + 1.2341 +#ifdef INET 1.2342 + memset(&sin4, 0, sizeof(sin4)); 1.2343 +#ifdef HAVE_SIN_LEN 1.2344 + sin4.sin_len = sizeof(sin4); 1.2345 +#endif 1.2346 + sin4.sin_family = AF_INET; 1.2347 + sin4.sin_port = sh->src_port; 1.2348 +#endif 1.2349 +#ifdef INET6 1.2350 + memset(&sin6, 0, sizeof(sin6)); 1.2351 +#ifdef HAVE_SIN6_LEN 1.2352 + sin6.sin6_len = sizeof(sin6); 1.2353 +#endif 1.2354 + sin6.sin6_family = AF_INET6; 1.2355 + sin6.sin6_port = sh->src_port; 1.2356 +#endif 1.2357 + 1.2358 + offset += sizeof(struct sctp_init_chunk); 1.2359 + 1.2360 + phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf)); 1.2361 + while (phdr != NULL) { 1.2362 + /* now we must see if we want the parameter */ 1.2363 +#if defined(INET) || defined(INET6) 1.2364 + ptype = ntohs(phdr->param_type); 1.2365 +#endif 1.2366 + plen = ntohs(phdr->param_length); 1.2367 + if (plen == 0) { 1.2368 + break; 1.2369 + } 1.2370 +#ifdef INET 1.2371 + if (ptype == SCTP_IPV4_ADDRESS && 1.2372 + plen == sizeof(struct sctp_ipv4addr_param)) { 1.2373 + /* Get the rest of the address */ 1.2374 + struct sctp_ipv4addr_param ip4_parm, *p4; 1.2375 + 1.2376 + phdr = sctp_get_next_param(m, offset, 1.2377 + (struct sctp_paramhdr *)&ip4_parm, min(plen, sizeof(ip4_parm))); 1.2378 + if 
(phdr == NULL) { 1.2379 + return (NULL); 1.2380 + } 1.2381 + p4 = (struct sctp_ipv4addr_param *)phdr; 1.2382 + memcpy(&sin4.sin_addr, &p4->addr, sizeof(p4->addr)); 1.2383 + /* look it up */ 1.2384 + stcb = sctp_findassociation_ep_addr(inp_p, 1.2385 + (struct sockaddr *)&sin4, netp, dst, NULL); 1.2386 + if (stcb != NULL) { 1.2387 + return (stcb); 1.2388 + } 1.2389 + } 1.2390 +#endif 1.2391 +#ifdef INET6 1.2392 + if (ptype == SCTP_IPV6_ADDRESS && 1.2393 + plen == sizeof(struct sctp_ipv6addr_param)) { 1.2394 + /* Get the rest of the address */ 1.2395 + struct sctp_ipv6addr_param ip6_parm, *p6; 1.2396 + 1.2397 + phdr = sctp_get_next_param(m, offset, 1.2398 + (struct sctp_paramhdr *)&ip6_parm, min(plen,sizeof(ip6_parm))); 1.2399 + if (phdr == NULL) { 1.2400 + return (NULL); 1.2401 + } 1.2402 + p6 = (struct sctp_ipv6addr_param *)phdr; 1.2403 + memcpy(&sin6.sin6_addr, &p6->addr, sizeof(p6->addr)); 1.2404 + /* look it up */ 1.2405 + stcb = sctp_findassociation_ep_addr(inp_p, 1.2406 + (struct sockaddr *)&sin6, netp, dst, NULL); 1.2407 + if (stcb != NULL) { 1.2408 + return (stcb); 1.2409 + } 1.2410 + } 1.2411 +#endif 1.2412 + offset += SCTP_SIZE32(plen); 1.2413 + phdr = sctp_get_next_param(m, offset, &parm_buf, 1.2414 + sizeof(parm_buf)); 1.2415 + } 1.2416 + return (NULL); 1.2417 +} 1.2418 + 1.2419 +static struct sctp_tcb * 1.2420 +sctp_findassoc_by_vtag(struct sockaddr *from, struct sockaddr *to, uint32_t vtag, 1.2421 + struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint16_t rport, 1.2422 + uint16_t lport, int skip_src_check, uint32_t vrf_id, uint32_t remote_tag) 1.2423 +{ 1.2424 + /* 1.2425 + * Use my vtag to hash. If we find it we then verify the source addr 1.2426 + * is in the assoc. If all goes well we save a bit on rec of a 1.2427 + * packet. 1.2428 + */ 1.2429 + struct sctpasochead *head; 1.2430 + struct sctp_nets *net; 1.2431 + struct sctp_tcb *stcb; 1.2432 +#ifdef SCTP_MVRF 1.2433 + unsigned int i; 1.2434 +#endif 1.2435 + 1.2436 + SCTP_INP_INFO_RLOCK(); 1.2437 + head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(vtag, 1.2438 + SCTP_BASE_INFO(hashasocmark))]; 1.2439 + if (head == NULL) { 1.2440 + /* invalid vtag */ 1.2441 + SCTP_INP_INFO_RUNLOCK(); 1.2442 + return (NULL); 1.2443 + } 1.2444 + LIST_FOREACH(stcb, head, sctp_asocs) { 1.2445 + SCTP_INP_RLOCK(stcb->sctp_ep); 1.2446 + if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 1.2447 + SCTP_INP_RUNLOCK(stcb->sctp_ep); 1.2448 + continue; 1.2449 + } 1.2450 +#ifdef SCTP_MVRF 1.2451 + for (i = 0; i < stcb->sctp_ep->num_vrfs; i++) { 1.2452 + if (stcb->sctp_ep->m_vrf_ids[i] == vrf_id) { 1.2453 + break; 1.2454 + } 1.2455 + } 1.2456 + if (i == stcb->sctp_ep->num_vrfs) { 1.2457 + SCTP_INP_RUNLOCK(inp); 1.2458 + continue; 1.2459 + } 1.2460 +#else 1.2461 + if (stcb->sctp_ep->def_vrf_id != vrf_id) { 1.2462 + SCTP_INP_RUNLOCK(stcb->sctp_ep); 1.2463 + continue; 1.2464 + } 1.2465 +#endif 1.2466 + SCTP_TCB_LOCK(stcb); 1.2467 + SCTP_INP_RUNLOCK(stcb->sctp_ep); 1.2468 + if (stcb->asoc.my_vtag == vtag) { 1.2469 + /* candidate */ 1.2470 + if (stcb->rport != rport) { 1.2471 + SCTP_TCB_UNLOCK(stcb); 1.2472 + continue; 1.2473 + } 1.2474 + if (stcb->sctp_ep->sctp_lport != lport) { 1.2475 + SCTP_TCB_UNLOCK(stcb); 1.2476 + continue; 1.2477 + } 1.2478 + if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 1.2479 + SCTP_TCB_UNLOCK(stcb); 1.2480 + continue; 1.2481 + } 1.2482 + /* RRS:Need toaddr check here */ 1.2483 + if (sctp_does_stcb_own_this_addr(stcb, to) == 0) { 1.2484 + /* Endpoint does not own this address */ 1.2485 + 
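sctp_findassoc_by_vtag() here hashes on the verification tag and then insists that the port pair (and, unless skip_src_check is set, the addresses) also match before accepting a candidate. The stand-alone sketch below shows that hash-then-verify shape in isolation; the demo_* names, table size, and modulo hash are assumptions made for illustration only.

    #include <stddef.h>
    #include <stdint.h>

    struct demo_assoc {
        uint32_t my_vtag;                   /* local verification tag */
        uint16_t lport;                     /* local port, network byte order */
        uint16_t rport;                     /* remote port, network byte order */
        struct demo_assoc *next;
    };

    #define DEMO_VTAG_HASH_SIZE 64
    static struct demo_assoc *demo_vtag_hash[DEMO_VTAG_HASH_SIZE];

    /*
     * Hash on the verification tag, then confirm the port pair before
     * accepting a candidate; the tag alone is not proof of a match.
     */
    static struct demo_assoc *
    demo_find_by_vtag(uint32_t vtag, uint16_t lport, uint16_t rport)
    {
        struct demo_assoc *a;

        for (a = demo_vtag_hash[vtag % DEMO_VTAG_HASH_SIZE]; a != NULL; a = a->next) {
            if (a->my_vtag != vtag || a->lport != lport || a->rport != rport) {
                continue;
            }
            return (a);
        }
        return (NULL);
    }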
SCTP_TCB_UNLOCK(stcb); 1.2486 + continue; 1.2487 + } 1.2488 + if (remote_tag) { 1.2489 + /* If we have both vtags that's all we match on */ 1.2490 + if (stcb->asoc.peer_vtag == remote_tag) { 1.2491 + /* If both tags match we consider it conclusive 1.2492 + * and check NO source/destination addresses 1.2493 + */ 1.2494 + goto conclusive; 1.2495 + } 1.2496 + } 1.2497 + if (skip_src_check) { 1.2498 + conclusive: 1.2499 + if (from) { 1.2500 + *netp = sctp_findnet(stcb, from); 1.2501 + } else { 1.2502 + *netp = NULL; /* unknown */ 1.2503 + } 1.2504 + if (inp_p) 1.2505 + *inp_p = stcb->sctp_ep; 1.2506 + SCTP_INP_INFO_RUNLOCK(); 1.2507 + return (stcb); 1.2508 + } 1.2509 + net = sctp_findnet(stcb, from); 1.2510 + if (net) { 1.2511 + /* yep its him. */ 1.2512 + *netp = net; 1.2513 + SCTP_STAT_INCR(sctps_vtagexpress); 1.2514 + *inp_p = stcb->sctp_ep; 1.2515 + SCTP_INP_INFO_RUNLOCK(); 1.2516 + return (stcb); 1.2517 + } else { 1.2518 + /* 1.2519 + * not him, this should only happen in rare 1.2520 + * cases so I peg it. 1.2521 + */ 1.2522 + SCTP_STAT_INCR(sctps_vtagbogus); 1.2523 + } 1.2524 + } 1.2525 + SCTP_TCB_UNLOCK(stcb); 1.2526 + } 1.2527 + SCTP_INP_INFO_RUNLOCK(); 1.2528 + return (NULL); 1.2529 +} 1.2530 + 1.2531 + 1.2532 +/* 1.2533 + * Find an association with the pointer to the inbound IP packet. This can be 1.2534 + * a IPv4 or IPv6 packet. 1.2535 + */ 1.2536 +struct sctp_tcb * 1.2537 +sctp_findassociation_addr(struct mbuf *m, int offset, 1.2538 + struct sockaddr *src, struct sockaddr *dst, 1.2539 + struct sctphdr *sh, struct sctp_chunkhdr *ch, 1.2540 + struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint32_t vrf_id) 1.2541 +{ 1.2542 + int find_tcp_pool; 1.2543 + struct sctp_tcb *stcb; 1.2544 + struct sctp_inpcb *inp; 1.2545 + 1.2546 + if (sh->v_tag) { 1.2547 + /* we only go down this path if vtag is non-zero */ 1.2548 + stcb = sctp_findassoc_by_vtag(src, dst, ntohl(sh->v_tag), 1.2549 + inp_p, netp, sh->src_port, sh->dest_port, 0, vrf_id, 0); 1.2550 + if (stcb) { 1.2551 + return (stcb); 1.2552 + } 1.2553 + } 1.2554 + 1.2555 + find_tcp_pool = 0; 1.2556 + if ((ch->chunk_type != SCTP_INITIATION) && 1.2557 + (ch->chunk_type != SCTP_INITIATION_ACK) && 1.2558 + (ch->chunk_type != SCTP_COOKIE_ACK) && 1.2559 + (ch->chunk_type != SCTP_COOKIE_ECHO)) { 1.2560 + /* Other chunk types go to the tcp pool. */ 1.2561 + find_tcp_pool = 1; 1.2562 + } 1.2563 + if (inp_p) { 1.2564 + stcb = sctp_findassociation_addr_sa(src, dst, inp_p, netp, 1.2565 + find_tcp_pool, vrf_id); 1.2566 + inp = *inp_p; 1.2567 + } else { 1.2568 + stcb = sctp_findassociation_addr_sa(src, dst, &inp, netp, 1.2569 + find_tcp_pool, vrf_id); 1.2570 + } 1.2571 + SCTPDBG(SCTP_DEBUG_PCB1, "stcb:%p inp:%p\n", (void *)stcb, (void *)inp); 1.2572 + if (stcb == NULL && inp) { 1.2573 + /* Found a EP but not this address */ 1.2574 + if ((ch->chunk_type == SCTP_INITIATION) || 1.2575 + (ch->chunk_type == SCTP_INITIATION_ACK)) { 1.2576 + /*- 1.2577 + * special hook, we do NOT return linp or an 1.2578 + * association that is linked to an existing 1.2579 + * association that is under the TCP pool (i.e. no 1.2580 + * listener exists). The endpoint finding routine 1.2581 + * will always find a listener before examining the 1.2582 + * TCP pool. 
1.2583 + */ 1.2584 + if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) { 1.2585 + if (inp_p) { 1.2586 + *inp_p = NULL; 1.2587 + } 1.2588 + return (NULL); 1.2589 + } 1.2590 + stcb = sctp_findassociation_special_addr(m, 1.2591 + offset, sh, &inp, netp, dst); 1.2592 + if (inp_p != NULL) { 1.2593 + *inp_p = inp; 1.2594 + } 1.2595 + } 1.2596 + } 1.2597 + SCTPDBG(SCTP_DEBUG_PCB1, "stcb is %p\n", (void *)stcb); 1.2598 + return (stcb); 1.2599 +} 1.2600 + 1.2601 +/* 1.2602 + * lookup an association by an ASCONF lookup address. 1.2603 + * if the lookup address is 0.0.0.0 or ::0, use the vtag to do the lookup 1.2604 + */ 1.2605 +struct sctp_tcb * 1.2606 +sctp_findassociation_ep_asconf(struct mbuf *m, int offset, 1.2607 + struct sockaddr *dst, struct sctphdr *sh, 1.2608 + struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint32_t vrf_id) 1.2609 +{ 1.2610 + struct sctp_tcb *stcb; 1.2611 + struct sockaddr_storage remote_store; 1.2612 + struct sctp_paramhdr parm_buf, *phdr; 1.2613 + int ptype; 1.2614 + int zero_address = 0; 1.2615 +#ifdef INET 1.2616 + struct sockaddr_in *sin; 1.2617 +#endif 1.2618 +#ifdef INET6 1.2619 + struct sockaddr_in6 *sin6; 1.2620 +#endif 1.2621 + 1.2622 + memset(&remote_store, 0, sizeof(remote_store)); 1.2623 + phdr = sctp_get_next_param(m, offset + sizeof(struct sctp_asconf_chunk), 1.2624 + &parm_buf, sizeof(struct sctp_paramhdr)); 1.2625 + if (phdr == NULL) { 1.2626 + SCTPDBG(SCTP_DEBUG_INPUT3, "%s: failed to get asconf lookup addr\n", 1.2627 + __FUNCTION__); 1.2628 + return NULL; 1.2629 + } 1.2630 + ptype = (int)((uint32_t) ntohs(phdr->param_type)); 1.2631 + /* get the correlation address */ 1.2632 + switch (ptype) { 1.2633 +#ifdef INET6 1.2634 + case SCTP_IPV6_ADDRESS: 1.2635 + { 1.2636 + /* ipv6 address param */ 1.2637 + struct sctp_ipv6addr_param *p6, p6_buf; 1.2638 + 1.2639 + if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv6addr_param)) { 1.2640 + return NULL; 1.2641 + } 1.2642 + p6 = (struct sctp_ipv6addr_param *)sctp_get_next_param(m, 1.2643 + offset + sizeof(struct sctp_asconf_chunk), 1.2644 + &p6_buf.ph, sizeof(*p6)); 1.2645 + if (p6 == NULL) { 1.2646 + SCTPDBG(SCTP_DEBUG_INPUT3, "%s: failed to get asconf v6 lookup addr\n", 1.2647 + __FUNCTION__); 1.2648 + return (NULL); 1.2649 + } 1.2650 + sin6 = (struct sockaddr_in6 *)&remote_store; 1.2651 + sin6->sin6_family = AF_INET6; 1.2652 +#ifdef HAVE_SIN6_LEN 1.2653 + sin6->sin6_len = sizeof(*sin6); 1.2654 +#endif 1.2655 + sin6->sin6_port = sh->src_port; 1.2656 + memcpy(&sin6->sin6_addr, &p6->addr, sizeof(struct in6_addr)); 1.2657 + if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) 1.2658 + zero_address = 1; 1.2659 + break; 1.2660 + } 1.2661 +#endif 1.2662 +#ifdef INET 1.2663 + case SCTP_IPV4_ADDRESS: 1.2664 + { 1.2665 + /* ipv4 address param */ 1.2666 + struct sctp_ipv4addr_param *p4, p4_buf; 1.2667 + 1.2668 + if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv4addr_param)) { 1.2669 + return NULL; 1.2670 + } 1.2671 + p4 = (struct sctp_ipv4addr_param *)sctp_get_next_param(m, 1.2672 + offset + sizeof(struct sctp_asconf_chunk), 1.2673 + &p4_buf.ph, sizeof(*p4)); 1.2674 + if (p4 == NULL) { 1.2675 + SCTPDBG(SCTP_DEBUG_INPUT3, "%s: failed to get asconf v4 lookup addr\n", 1.2676 + __FUNCTION__); 1.2677 + return (NULL); 1.2678 + } 1.2679 + sin = (struct sockaddr_in *)&remote_store; 1.2680 + sin->sin_family = AF_INET; 1.2681 +#ifdef HAVE_SIN_LEN 1.2682 + sin->sin_len = sizeof(*sin); 1.2683 +#endif 1.2684 + sin->sin_port = sh->src_port; 1.2685 + memcpy(&sin->sin_addr, &p4->addr, sizeof(struct in_addr)); 1.2686 + if 
(sin->sin_addr.s_addr == INADDR_ANY) 1.2687 + zero_address = 1; 1.2688 + break; 1.2689 + } 1.2690 +#endif 1.2691 + default: 1.2692 + /* invalid address param type */ 1.2693 + return NULL; 1.2694 + } 1.2695 + 1.2696 + if (zero_address) { 1.2697 + stcb = sctp_findassoc_by_vtag(NULL, dst, ntohl(sh->v_tag), inp_p, 1.2698 + netp, sh->src_port, sh->dest_port, 1, vrf_id, 0); 1.2699 + if (stcb != NULL) { 1.2700 + SCTP_INP_DECR_REF(*inp_p); 1.2701 + } 1.2702 + } else { 1.2703 + stcb = sctp_findassociation_ep_addr(inp_p, 1.2704 + (struct sockaddr *)&remote_store, netp, 1.2705 + dst, NULL); 1.2706 + } 1.2707 + return (stcb); 1.2708 +} 1.2709 + 1.2710 + 1.2711 +/* 1.2712 + * allocate a sctp_inpcb and setup a temporary binding to a port/all 1.2713 + * addresses. This way if we don't get a bind we by default pick a ephemeral 1.2714 + * port with all addresses bound. 1.2715 + */ 1.2716 +int 1.2717 +sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id) 1.2718 +{ 1.2719 + /* 1.2720 + * we get called when a new endpoint starts up. We need to allocate 1.2721 + * the sctp_inpcb structure from the zone and init it. Mark it as 1.2722 + * unbound and find a port that we can use as an ephemeral with 1.2723 + * INADDR_ANY. If the user binds later no problem we can then add in 1.2724 + * the specific addresses. And setup the default parameters for the 1.2725 + * EP. 1.2726 + */ 1.2727 + int i, error; 1.2728 + struct sctp_inpcb *inp; 1.2729 + struct sctp_pcb *m; 1.2730 + struct timeval time; 1.2731 + sctp_sharedkey_t *null_key; 1.2732 + 1.2733 + error = 0; 1.2734 + 1.2735 + SCTP_INP_INFO_WLOCK(); 1.2736 + inp = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_ep), struct sctp_inpcb); 1.2737 + if (inp == NULL) { 1.2738 + SCTP_PRINTF("Out of SCTP-INPCB structures - no resources\n"); 1.2739 + SCTP_INP_INFO_WUNLOCK(); 1.2740 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS); 1.2741 + return (ENOBUFS); 1.2742 + } 1.2743 + /* zap it */ 1.2744 + bzero(inp, sizeof(*inp)); 1.2745 + 1.2746 + /* bump generations */ 1.2747 +#if defined(__APPLE__) 1.2748 + inp->ip_inp.inp.inp_state = INPCB_STATE_INUSE; 1.2749 +#endif 1.2750 + /* setup socket pointers */ 1.2751 + inp->sctp_socket = so; 1.2752 + inp->ip_inp.inp.inp_socket = so; 1.2753 +#ifdef INET6 1.2754 +#if !defined(__Userspace__) && !defined(__Windows__) 1.2755 + if (INP_SOCKAF(so) == AF_INET6) { 1.2756 + if (MODULE_GLOBAL(ip6_auto_flowlabel)) { 1.2757 + inp->ip_inp.inp.inp_flags |= IN6P_AUTOFLOWLABEL; 1.2758 + } 1.2759 + if (MODULE_GLOBAL(ip6_v6only)) { 1.2760 + inp->ip_inp.inp.inp_flags |= IN6P_IPV6_V6ONLY; 1.2761 + } 1.2762 + } 1.2763 +#endif 1.2764 +#endif 1.2765 + inp->sctp_associd_counter = 1; 1.2766 + inp->partial_delivery_point = SCTP_SB_LIMIT_RCV(so) >> SCTP_PARTIAL_DELIVERY_SHIFT; 1.2767 + inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT; 1.2768 + inp->sctp_cmt_on_off = SCTP_BASE_SYSCTL(sctp_cmt_on_off); 1.2769 + inp->sctp_ecn_enable = SCTP_BASE_SYSCTL(sctp_ecn_enable); 1.2770 +#if defined(__Userspace__) 1.2771 + inp->ulp_info = NULL; 1.2772 + inp->recv_callback = NULL; 1.2773 + inp->send_callback = NULL; 1.2774 + inp->send_sb_threshold = 0; 1.2775 +#endif 1.2776 + /* init the small hash table we use to track asocid <-> tcb */ 1.2777 + inp->sctp_asocidhash = SCTP_HASH_INIT(SCTP_STACK_VTAG_HASH_SIZE, &inp->hashasocidmark); 1.2778 + if (inp->sctp_asocidhash == NULL) { 1.2779 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); 1.2780 + SCTP_INP_INFO_WUNLOCK(); 1.2781 + return (ENOBUFS); 1.2782 + } 1.2783 +#ifdef IPSEC 1.2784 +#if !(defined(__APPLE__)) 1.2785 + { 
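In sctp_findassociation_ep_asconf() above, a wildcard lookup address (0.0.0.0 or ::) makes an address-based search pointless, so the code falls back to the vtag lookup instead. A small stand-alone version of that wildcard test is sketched here, assuming only the POSIX socket headers; demo_lookup_addr_is_wildcard is a hypothetical helper, not part of the stack.

    #include <sys/socket.h>
    #include <netinet/in.h>

    /*
     * Return non-zero when the ASCONF lookup address is the IPv4 or IPv6
     * wildcard, i.e. when an address-based search cannot succeed and the
     * caller should fall back to a vtag-based lookup.
     */
    static int
    demo_lookup_addr_is_wildcard(const struct sockaddr *sa)
    {
        switch (sa->sa_family) {
        case AF_INET: {
            const struct sockaddr_in *sin = (const struct sockaddr_in *)sa;

            return (sin->sin_addr.s_addr == INADDR_ANY);
        }
        case AF_INET6: {
            const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sa;

            return (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr));
        }
        default:
            return (0);
        }
    }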
1.2786 + struct inpcbpolicy *pcb_sp = NULL; 1.2787 + 1.2788 + error = ipsec_init_policy(so, &pcb_sp); 1.2789 + /* Arrange to share the policy */ 1.2790 + inp->ip_inp.inp.inp_sp = pcb_sp; 1.2791 + ((struct in6pcb *)(&inp->ip_inp.inp))->in6p_sp = pcb_sp; 1.2792 + } 1.2793 +#else 1.2794 + /* not sure what to do for openbsd here */ 1.2795 + error = 0; 1.2796 +#endif 1.2797 + if (error != 0) { 1.2798 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); 1.2799 + SCTP_INP_INFO_WUNLOCK(); 1.2800 + return error; 1.2801 + } 1.2802 +#endif /* IPSEC */ 1.2803 + SCTP_INCR_EP_COUNT(); 1.2804 + inp->ip_inp.inp.inp_ip_ttl = MODULE_GLOBAL(ip_defttl); 1.2805 + SCTP_INP_INFO_WUNLOCK(); 1.2806 + 1.2807 + so->so_pcb = (caddr_t)inp; 1.2808 + 1.2809 +#if defined(__FreeBSD__) && __FreeBSD_version < 803000 1.2810 + if ((SCTP_SO_TYPE(so) == SOCK_DGRAM) || 1.2811 + (SCTP_SO_TYPE(so) == SOCK_SEQPACKET)) { 1.2812 +#else 1.2813 + if (SCTP_SO_TYPE(so) == SOCK_SEQPACKET) { 1.2814 +#endif 1.2815 + /* UDP style socket */ 1.2816 + inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE | 1.2817 + SCTP_PCB_FLAGS_UNBOUND); 1.2818 + /* Be sure it is NON-BLOCKING IO for UDP */ 1.2819 + /* SCTP_SET_SO_NBIO(so); */ 1.2820 + } else if (SCTP_SO_TYPE(so) == SOCK_STREAM) { 1.2821 + /* TCP style socket */ 1.2822 + inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 1.2823 + SCTP_PCB_FLAGS_UNBOUND); 1.2824 + /* Be sure we have blocking IO by default */ 1.2825 + SCTP_CLEAR_SO_NBIO(so); 1.2826 +#if defined(__Panda__) 1.2827 + } else if (SCTP_SO_TYPE(so) == SOCK_FASTSEQPACKET) { 1.2828 + inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE | 1.2829 + SCTP_PCB_FLAGS_UNBOUND); 1.2830 + sctp_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE); 1.2831 + } else if (SCTP_SO_TYPE(so) == SOCK_FASTSTREAM) { 1.2832 + inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 1.2833 + SCTP_PCB_FLAGS_UNBOUND); 1.2834 + sctp_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE); 1.2835 +#endif 1.2836 + } else { 1.2837 + /* 1.2838 + * unsupported socket type (RAW, etc)- in case we missed it 1.2839 + * in protosw 1.2840 + */ 1.2841 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EOPNOTSUPP); 1.2842 + so->so_pcb = NULL; 1.2843 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); 1.2844 + return (EOPNOTSUPP); 1.2845 + } 1.2846 + if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_1) { 1.2847 + sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 1.2848 + sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 1.2849 + } else if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_2) { 1.2850 + sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 1.2851 + sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 1.2852 + } else if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_0) { 1.2853 + sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE); 1.2854 + sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS); 1.2855 + } 1.2856 + inp->sctp_tcbhash = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_pcbtblsize), 1.2857 + &inp->sctp_hashmark); 1.2858 + if (inp->sctp_tcbhash == NULL) { 1.2859 + SCTP_PRINTF("Out of SCTP-INPCB->hashinit - no resources\n"); 1.2860 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS); 1.2861 + so->so_pcb = NULL; 1.2862 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); 1.2863 + return (ENOBUFS); 1.2864 + } 1.2865 +#ifdef SCTP_MVRF 1.2866 + inp->vrf_size = SCTP_DEFAULT_VRF_SIZE; 1.2867 + SCTP_MALLOC(inp->m_vrf_ids, uint32_t *, 1.2868 + (sizeof(uint32_t) * inp->vrf_size), SCTP_M_MVRF); 1.2869 + if (inp->m_vrf_ids == NULL) { 
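The fragment-interleave setup above collapses three sysctl levels into two feature bits (FRAG_INTERLEAVE and INTERLEAVE_STRMS). The sketch below restates that mapping as a single function; the numeric levels are assumed to mirror the SCTP_FRAG_LEVEL_* names, and the DEMO_* flag values are made up for illustration.

    /* Illustrative feature bits; the real flag values live in the SCTP headers. */
    #define DEMO_FEATURE_FRAG_INTERLEAVE  0x1u
    #define DEMO_FEATURE_INTERLEAVE_STRMS 0x2u

    static unsigned int
    demo_frag_interleave_flags(int level)
    {
        switch (level) {
        case 0:     /* SCTP_FRAG_LEVEL_0: no interleaving */
            return (0);
        case 1:     /* SCTP_FRAG_LEVEL_1: fragment interleave, streams not interleaved */
            return (DEMO_FEATURE_FRAG_INTERLEAVE);
        case 2:     /* SCTP_FRAG_LEVEL_2: fragment and stream interleave */
            return (DEMO_FEATURE_FRAG_INTERLEAVE | DEMO_FEATURE_INTERLEAVE_STRMS);
        default:    /* any other value: enable nothing */
            return (0);
        }
    }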
1.2870 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS); 1.2871 + so->so_pcb = NULL; 1.2872 + SCTP_HASH_FREE(inp->sctp_tcbhash, inp->sctp_hashmark); 1.2873 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); 1.2874 + return (ENOBUFS); 1.2875 + } 1.2876 + inp->m_vrf_ids[0] = vrf_id; 1.2877 + inp->num_vrfs = 1; 1.2878 +#endif 1.2879 + inp->def_vrf_id = vrf_id; 1.2880 + 1.2881 +#if defined(__APPLE__) 1.2882 +#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) 1.2883 + inp->ip_inp.inp.inpcb_mtx = lck_mtx_alloc_init(SCTP_BASE_INFO(sctbinfo).mtx_grp, SCTP_BASE_INFO(sctbinfo).mtx_attr); 1.2884 + if (inp->ip_inp.inp.inpcb_mtx == NULL) { 1.2885 + SCTP_PRINTF("in_pcballoc: can't alloc mutex! so=%p\n", (void *)so); 1.2886 +#ifdef SCTP_MVRF 1.2887 + SCTP_FREE(inp->m_vrf_ids, SCTP_M_MVRF); 1.2888 +#endif 1.2889 + SCTP_HASH_FREE(inp->sctp_tcbhash, inp->sctp_hashmark); 1.2890 + so->so_pcb = NULL; 1.2891 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); 1.2892 + SCTP_UNLOCK_EXC(SCTP_BASE_INFO(sctbinfo).ipi_lock); 1.2893 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOMEM); 1.2894 + return (ENOMEM); 1.2895 + } 1.2896 +#elif defined(APPLE_LION) || defined(APPLE_MOUNTAINLION) 1.2897 + lck_mtx_init(&inp->ip_inp.inp.inpcb_mtx, SCTP_BASE_INFO(sctbinfo).mtx_grp, SCTP_BASE_INFO(sctbinfo).mtx_attr); 1.2898 +#else 1.2899 + lck_mtx_init(&inp->ip_inp.inp.inpcb_mtx, SCTP_BASE_INFO(sctbinfo).ipi_lock_grp, SCTP_BASE_INFO(sctbinfo).ipi_lock_attr); 1.2900 +#endif 1.2901 +#endif 1.2902 + SCTP_INP_INFO_WLOCK(); 1.2903 + SCTP_INP_LOCK_INIT(inp); 1.2904 +#if defined(__FreeBSD__) 1.2905 + INP_LOCK_INIT(&inp->ip_inp.inp, "inp", "sctpinp"); 1.2906 +#endif 1.2907 + SCTP_INP_READ_INIT(inp); 1.2908 + SCTP_ASOC_CREATE_LOCK_INIT(inp); 1.2909 + /* lock the new ep */ 1.2910 + SCTP_INP_WLOCK(inp); 1.2911 + 1.2912 + /* add it to the info area */ 1.2913 + LIST_INSERT_HEAD(&SCTP_BASE_INFO(listhead), inp, sctp_list); 1.2914 +#if defined(__APPLE__) 1.2915 + inp->ip_inp.inp.inp_pcbinfo = &SCTP_BASE_INFO(sctbinfo); 1.2916 +#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION) || defined(APPLE_MOUNTAINLION) 1.2917 + LIST_INSERT_HEAD(SCTP_BASE_INFO(sctbinfo).listhead, &inp->ip_inp.inp, inp_list); 1.2918 +#else 1.2919 + LIST_INSERT_HEAD(SCTP_BASE_INFO(sctbinfo).ipi_listhead, &inp->ip_inp.inp, inp_list); 1.2920 +#endif 1.2921 +#endif 1.2922 + SCTP_INP_INFO_WUNLOCK(); 1.2923 + 1.2924 + TAILQ_INIT(&inp->read_queue); 1.2925 + LIST_INIT(&inp->sctp_addr_list); 1.2926 + 1.2927 + LIST_INIT(&inp->sctp_asoc_list); 1.2928 + 1.2929 +#ifdef SCTP_TRACK_FREED_ASOCS 1.2930 + /* TEMP CODE */ 1.2931 + LIST_INIT(&inp->sctp_asoc_free_list); 1.2932 +#endif 1.2933 + /* Init the timer structure for signature change */ 1.2934 + SCTP_OS_TIMER_INIT(&inp->sctp_ep.signature_change.timer); 1.2935 + inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NEWCOOKIE; 1.2936 + 1.2937 + /* now init the actual endpoint default data */ 1.2938 + m = &inp->sctp_ep; 1.2939 + 1.2940 + /* setup the base timeout information */ 1.2941 + m->sctp_timeoutticks[SCTP_TIMER_SEND] = SEC_TO_TICKS(SCTP_SEND_SEC); /* needed ? */ 1.2942 + m->sctp_timeoutticks[SCTP_TIMER_INIT] = SEC_TO_TICKS(SCTP_INIT_SEC); /* needed ? 
*/ 1.2943 + m->sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default)); 1.2944 + m->sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default)); 1.2945 + m->sctp_timeoutticks[SCTP_TIMER_PMTU] = SEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default)); 1.2946 + m->sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] = SEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default)); 1.2947 + m->sctp_timeoutticks[SCTP_TIMER_SIGNATURE] = SEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_secret_lifetime_default)); 1.2948 + /* all max/min max are in ms */ 1.2949 + m->sctp_maxrto = SCTP_BASE_SYSCTL(sctp_rto_max_default); 1.2950 + m->sctp_minrto = SCTP_BASE_SYSCTL(sctp_rto_min_default); 1.2951 + m->initial_rto = SCTP_BASE_SYSCTL(sctp_rto_initial_default); 1.2952 + m->initial_init_rto_max = SCTP_BASE_SYSCTL(sctp_init_rto_max_default); 1.2953 + m->sctp_sack_freq = SCTP_BASE_SYSCTL(sctp_sack_freq_default); 1.2954 + m->max_init_times = SCTP_BASE_SYSCTL(sctp_init_rtx_max_default); 1.2955 + m->max_send_times = SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default); 1.2956 + m->def_net_failure = SCTP_BASE_SYSCTL(sctp_path_rtx_max_default); 1.2957 + m->def_net_pf_threshold = SCTP_BASE_SYSCTL(sctp_path_pf_threshold); 1.2958 + m->sctp_sws_sender = SCTP_SWS_SENDER_DEF; 1.2959 + m->sctp_sws_receiver = SCTP_SWS_RECEIVER_DEF; 1.2960 + m->max_burst = SCTP_BASE_SYSCTL(sctp_max_burst_default); 1.2961 + m->fr_max_burst = SCTP_BASE_SYSCTL(sctp_fr_max_burst_default); 1.2962 + 1.2963 + m->sctp_default_cc_module = SCTP_BASE_SYSCTL(sctp_default_cc_module); 1.2964 + m->sctp_default_ss_module = SCTP_BASE_SYSCTL(sctp_default_ss_module); 1.2965 + m->max_open_streams_intome = SCTP_BASE_SYSCTL(sctp_nr_incoming_streams_default); 1.2966 + /* number of streams to pre-open on a association */ 1.2967 + m->pre_open_stream_count = SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default); 1.2968 + 1.2969 + /* Add adaptation cookie */ 1.2970 + m->adaptation_layer_indicator = 0; 1.2971 + m->adaptation_layer_indicator_provided = 0; 1.2972 + 1.2973 + /* seed random number generator */ 1.2974 + m->random_counter = 1; 1.2975 + m->store_at = SCTP_SIGNATURE_SIZE; 1.2976 + SCTP_READ_RANDOM(m->random_numbers, sizeof(m->random_numbers)); 1.2977 + sctp_fill_random_store(m); 1.2978 + 1.2979 + /* Minimum cookie size */ 1.2980 + m->size_of_a_cookie = (sizeof(struct sctp_init_msg) * 2) + 1.2981 + sizeof(struct sctp_state_cookie); 1.2982 + m->size_of_a_cookie += SCTP_SIGNATURE_SIZE; 1.2983 + 1.2984 + /* Setup the initial secret */ 1.2985 + (void)SCTP_GETTIME_TIMEVAL(&time); 1.2986 + m->time_of_secret_change = time.tv_sec; 1.2987 + 1.2988 + for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) { 1.2989 + m->secret_key[0][i] = sctp_select_initial_TSN(m); 1.2990 + } 1.2991 + sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL); 1.2992 + 1.2993 + /* How long is a cookie good for ? 
*/ 1.2994 + m->def_cookie_life = MSEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default)); 1.2995 + /* 1.2996 + * Initialize authentication parameters 1.2997 + */ 1.2998 + m->local_hmacs = sctp_default_supported_hmaclist(); 1.2999 + m->local_auth_chunks = sctp_alloc_chunklist(); 1.3000 + m->default_dscp = 0; 1.3001 +#ifdef INET6 1.3002 + m->default_flowlabel = 0; 1.3003 +#endif 1.3004 + m->port = 0; /* encapsulation disabled by default */ 1.3005 + sctp_auth_set_default_chunks(m->local_auth_chunks); 1.3006 + LIST_INIT(&m->shared_keys); 1.3007 + /* add default NULL key as key id 0 */ 1.3008 + null_key = sctp_alloc_sharedkey(); 1.3009 + sctp_insert_sharedkey(&m->shared_keys, null_key); 1.3010 + SCTP_INP_WUNLOCK(inp); 1.3011 +#ifdef SCTP_LOG_CLOSING 1.3012 + sctp_log_closing(inp, NULL, 12); 1.3013 +#endif 1.3014 + return (error); 1.3015 +} 1.3016 + 1.3017 + 1.3018 +void 1.3019 +sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp, 1.3020 + struct sctp_tcb *stcb) 1.3021 +{ 1.3022 + struct sctp_nets *net; 1.3023 + uint16_t lport, rport; 1.3024 + struct sctppcbhead *head; 1.3025 + struct sctp_laddr *laddr, *oladdr; 1.3026 + 1.3027 + atomic_add_int(&stcb->asoc.refcnt, 1); 1.3028 + SCTP_TCB_UNLOCK(stcb); 1.3029 + SCTP_INP_INFO_WLOCK(); 1.3030 + SCTP_INP_WLOCK(old_inp); 1.3031 + SCTP_INP_WLOCK(new_inp); 1.3032 + SCTP_TCB_LOCK(stcb); 1.3033 + atomic_subtract_int(&stcb->asoc.refcnt, 1); 1.3034 + 1.3035 + new_inp->sctp_ep.time_of_secret_change = 1.3036 + old_inp->sctp_ep.time_of_secret_change; 1.3037 + memcpy(new_inp->sctp_ep.secret_key, old_inp->sctp_ep.secret_key, 1.3038 + sizeof(old_inp->sctp_ep.secret_key)); 1.3039 + new_inp->sctp_ep.current_secret_number = 1.3040 + old_inp->sctp_ep.current_secret_number; 1.3041 + new_inp->sctp_ep.last_secret_number = 1.3042 + old_inp->sctp_ep.last_secret_number; 1.3043 + new_inp->sctp_ep.size_of_a_cookie = old_inp->sctp_ep.size_of_a_cookie; 1.3044 + 1.3045 + /* make it so new data pours into the new socket */ 1.3046 + stcb->sctp_socket = new_inp->sctp_socket; 1.3047 + stcb->sctp_ep = new_inp; 1.3048 + 1.3049 + /* Copy the port across */ 1.3050 + lport = new_inp->sctp_lport = old_inp->sctp_lport; 1.3051 + rport = stcb->rport; 1.3052 + /* Pull the tcb from the old association */ 1.3053 + LIST_REMOVE(stcb, sctp_tcbhash); 1.3054 + LIST_REMOVE(stcb, sctp_tcblist); 1.3055 + if (stcb->asoc.in_asocid_hash) { 1.3056 + LIST_REMOVE(stcb, sctp_tcbasocidhash); 1.3057 + } 1.3058 + /* Now insert the new_inp into the TCP connected hash */ 1.3059 + head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR((lport | rport), SCTP_BASE_INFO(hashtcpmark))]; 1.3060 + 1.3061 + LIST_INSERT_HEAD(head, new_inp, sctp_hash); 1.3062 + /* Its safe to access */ 1.3063 + new_inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND; 1.3064 + 1.3065 + /* Now move the tcb into the endpoint list */ 1.3066 + LIST_INSERT_HEAD(&new_inp->sctp_asoc_list, stcb, sctp_tcblist); 1.3067 + /* 1.3068 + * Question, do we even need to worry about the ep-hash since we 1.3069 + * only have one connection? Probably not :> so lets get rid of it 1.3070 + * and not suck up any kernel memory in that. 1.3071 + */ 1.3072 + if (stcb->asoc.in_asocid_hash) { 1.3073 + struct sctpasochead *lhd; 1.3074 + lhd = &new_inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id, 1.3075 + new_inp->hashasocidmark)]; 1.3076 + LIST_INSERT_HEAD(lhd, stcb, sctp_tcbasocidhash); 1.3077 + } 1.3078 + /* Ok. Let's restart timer. 
*/ 1.3079 + TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1.3080 + sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, new_inp, 1.3081 + stcb, net); 1.3082 + } 1.3083 + 1.3084 + SCTP_INP_INFO_WUNLOCK(); 1.3085 + if (new_inp->sctp_tcbhash != NULL) { 1.3086 + SCTP_HASH_FREE(new_inp->sctp_tcbhash, new_inp->sctp_hashmark); 1.3087 + new_inp->sctp_tcbhash = NULL; 1.3088 + } 1.3089 + if ((new_inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) { 1.3090 + /* Subset bound, so copy in the laddr list from the old_inp */ 1.3091 + LIST_FOREACH(oladdr, &old_inp->sctp_addr_list, sctp_nxt_addr) { 1.3092 + laddr = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 1.3093 + if (laddr == NULL) { 1.3094 + /* 1.3095 + * Gak, what can we do? This assoc is really 1.3096 + * HOSED. We probably should send an abort 1.3097 + * here. 1.3098 + */ 1.3099 + SCTPDBG(SCTP_DEBUG_PCB1, "Association hosed in TCP model, out of laddr memory\n"); 1.3100 + continue; 1.3101 + } 1.3102 + SCTP_INCR_LADDR_COUNT(); 1.3103 + bzero(laddr, sizeof(*laddr)); 1.3104 + (void)SCTP_GETTIME_TIMEVAL(&laddr->start_time); 1.3105 + laddr->ifa = oladdr->ifa; 1.3106 + atomic_add_int(&laddr->ifa->refcount, 1); 1.3107 + LIST_INSERT_HEAD(&new_inp->sctp_addr_list, laddr, 1.3108 + sctp_nxt_addr); 1.3109 + new_inp->laddr_count++; 1.3110 + if (oladdr == stcb->asoc.last_used_address) { 1.3111 + stcb->asoc.last_used_address = laddr; 1.3112 + } 1.3113 + } 1.3114 + } 1.3115 + /* Now any running timers need to be adjusted 1.3116 + * since we really don't care if they are running 1.3117 + * or not just blast in the new_inp into all of 1.3118 + * them. 1.3119 + */ 1.3120 + 1.3121 + stcb->asoc.dack_timer.ep = (void *)new_inp; 1.3122 + stcb->asoc.asconf_timer.ep = (void *)new_inp; 1.3123 + stcb->asoc.strreset_timer.ep = (void *)new_inp; 1.3124 + stcb->asoc.shut_guard_timer.ep = (void *)new_inp; 1.3125 + stcb->asoc.autoclose_timer.ep = (void *)new_inp; 1.3126 + stcb->asoc.delayed_event_timer.ep = (void *)new_inp; 1.3127 + stcb->asoc.delete_prim_timer.ep = (void *)new_inp; 1.3128 + /* now what about the nets? */ 1.3129 + TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1.3130 + net->pmtu_timer.ep = (void *)new_inp; 1.3131 + net->hb_timer.ep = (void *)new_inp; 1.3132 + net->rxt_timer.ep = (void *)new_inp; 1.3133 + } 1.3134 + SCTP_INP_WUNLOCK(new_inp); 1.3135 + SCTP_INP_WUNLOCK(old_inp); 1.3136 +} 1.3137 + 1.3138 + 1.3139 +#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)) 1.3140 +/* 1.3141 + * Don't know why, but without this there is an unknown reference when 1.3142 + * compiling NetBSD... 
hmm 1.3143 + */ 1.3144 +extern void in6_sin6_2_sin(struct sockaddr_in *, struct sockaddr_in6 *sin6); 1.3145 +#endif 1.3146 + 1.3147 + 1.3148 +/* sctp_ifap is used to bypass normal local address validation checks */ 1.3149 +int 1.3150 +#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 1.3151 +sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, 1.3152 + struct sctp_ifa *sctp_ifap, struct thread *p) 1.3153 +#elif defined(__Windows__) 1.3154 +sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, 1.3155 + struct sctp_ifa *sctp_ifap, PKTHREAD p) 1.3156 +#else 1.3157 +sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, 1.3158 + struct sctp_ifa *sctp_ifap, struct proc *p) 1.3159 +#endif 1.3160 +{ 1.3161 + /* bind a ep to a socket address */ 1.3162 + struct sctppcbhead *head; 1.3163 + struct sctp_inpcb *inp, *inp_tmp; 1.3164 +#if defined(INET) || (defined(INET6) && defined(__APPLE__)) || defined(__FreeBSD__) || defined(__APPLE__) 1.3165 + struct inpcb *ip_inp; 1.3166 +#endif 1.3167 + int port_reuse_active = 0; 1.3168 + int bindall; 1.3169 +#ifdef SCTP_MVRF 1.3170 + int i; 1.3171 +#endif 1.3172 + uint16_t lport; 1.3173 + int error; 1.3174 + uint32_t vrf_id; 1.3175 + 1.3176 + lport = 0; 1.3177 + error = 0; 1.3178 + bindall = 1; 1.3179 + inp = (struct sctp_inpcb *)so->so_pcb; 1.3180 +#if defined(INET) || (defined(INET6) && defined(__APPLE__)) || defined(__FreeBSD__) || defined(__APPLE__) 1.3181 + ip_inp = (struct inpcb *)so->so_pcb; 1.3182 +#endif 1.3183 +#ifdef SCTP_DEBUG 1.3184 + if (addr) { 1.3185 + SCTPDBG(SCTP_DEBUG_PCB1, "Bind called port: %d\n", 1.3186 + ntohs(((struct sockaddr_in *)addr)->sin_port)); 1.3187 + SCTPDBG(SCTP_DEBUG_PCB1, "Addr: "); 1.3188 + SCTPDBG_ADDR(SCTP_DEBUG_PCB1, addr); 1.3189 + } 1.3190 +#endif 1.3191 + if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) { 1.3192 + /* already did a bind, subsequent binds NOT allowed ! */ 1.3193 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.3194 + return (EINVAL); 1.3195 + } 1.3196 +#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 1.3197 +#ifdef INVARIANTS 1.3198 + if (p == NULL) 1.3199 + panic("null proc/thread"); 1.3200 +#endif 1.3201 +#endif 1.3202 + if (addr != NULL) { 1.3203 + switch (addr->sa_family) { 1.3204 +#ifdef INET 1.3205 + case AF_INET: 1.3206 + { 1.3207 + struct sockaddr_in *sin; 1.3208 + 1.3209 + /* IPV6_V6ONLY socket? */ 1.3210 + if (SCTP_IPV6_V6ONLY(ip_inp)) { 1.3211 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.3212 + return (EINVAL); 1.3213 + } 1.3214 +#ifdef HAVE_SA_LEN 1.3215 + if (addr->sa_len != sizeof(*sin)) { 1.3216 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.3217 + return (EINVAL); 1.3218 + } 1.3219 +#endif 1.3220 + 1.3221 + sin = (struct sockaddr_in *)addr; 1.3222 + lport = sin->sin_port; 1.3223 +#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 1.3224 + /* 1.3225 + * For LOOPBACK the prison_local_ip4() call will transmute the ip address 1.3226 + * to the proper value. 1.3227 + */ 1.3228 + if (p && (error = prison_local_ip4(p->td_ucred, &sin->sin_addr)) != 0) { 1.3229 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 1.3230 + return (error); 1.3231 + } 1.3232 +#endif 1.3233 + if (sin->sin_addr.s_addr != INADDR_ANY) { 1.3234 + bindall = 0; 1.3235 + } 1.3236 + break; 1.3237 + } 1.3238 +#endif 1.3239 +#ifdef INET6 1.3240 + case AF_INET6: 1.3241 + { 1.3242 + /* Only for pure IPv6 Address. (No IPv4 Mapped!) 
*/ 1.3243 + struct sockaddr_in6 *sin6; 1.3244 + 1.3245 + sin6 = (struct sockaddr_in6 *)addr; 1.3246 + 1.3247 +#ifdef HAVE_SA_LEN 1.3248 + if (addr->sa_len != sizeof(*sin6)) { 1.3249 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.3250 + return (EINVAL); 1.3251 + } 1.3252 +#endif 1.3253 + lport = sin6->sin6_port; 1.3254 +#if defined(__FreeBSD__) && __FreeBSD_version >= 800000 1.3255 + /* 1.3256 + * For LOOPBACK the prison_local_ip6() call will transmute the ipv6 address 1.3257 + * to the proper value. 1.3258 + */ 1.3259 + if (p && (error = prison_local_ip6(p->td_ucred, &sin6->sin6_addr, 1.3260 + (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) { 1.3261 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 1.3262 + return (error); 1.3263 + } 1.3264 +#endif 1.3265 + if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 1.3266 + bindall = 0; 1.3267 +#ifdef SCTP_EMBEDDED_V6_SCOPE 1.3268 + /* KAME hack: embed scopeid */ 1.3269 +#if defined(SCTP_KAME) 1.3270 + if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) { 1.3271 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.3272 + return (EINVAL); 1.3273 + } 1.3274 +#elif defined(__APPLE__) 1.3275 +#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) 1.3276 + if (in6_embedscope(&sin6->sin6_addr, sin6, ip_inp, NULL) != 0) { 1.3277 +#else 1.3278 + if (in6_embedscope(&sin6->sin6_addr, sin6, ip_inp, NULL, NULL) != 0) { 1.3279 +#endif 1.3280 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.3281 + return (EINVAL); 1.3282 + } 1.3283 +#elif defined(__FreeBSD__) 1.3284 + error = scope6_check_id(sin6, MODULE_GLOBAL(ip6_use_defzone)); 1.3285 + if (error != 0) { 1.3286 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 1.3287 + return (error); 1.3288 + } 1.3289 +#else 1.3290 + if (in6_embedscope(&sin6->sin6_addr, sin6) != 0) { 1.3291 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.3292 + return (EINVAL); 1.3293 + } 1.3294 +#endif 1.3295 +#endif /* SCTP_EMBEDDED_V6_SCOPE */ 1.3296 + } 1.3297 +#ifndef SCOPEDROUTING 1.3298 + /* this must be cleared for ifa_ifwithaddr() */ 1.3299 + sin6->sin6_scope_id = 0; 1.3300 +#endif /* SCOPEDROUTING */ 1.3301 + break; 1.3302 + } 1.3303 +#endif 1.3304 +#if defined(__Userspace__) 1.3305 + case AF_CONN: 1.3306 + { 1.3307 + struct sockaddr_conn *sconn; 1.3308 + 1.3309 +#ifdef HAVE_SA_LEN 1.3310 + if (addr->sa_len != sizeof(struct sockaddr_conn)) { 1.3311 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.3312 + return (EINVAL); 1.3313 + } 1.3314 +#endif 1.3315 + sconn = (struct sockaddr_conn *)addr; 1.3316 + lport = sconn->sconn_port; 1.3317 + if (sconn->sconn_addr != NULL) { 1.3318 + bindall = 0; 1.3319 + } 1.3320 + break; 1.3321 + } 1.3322 +#endif 1.3323 + default: 1.3324 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EAFNOSUPPORT); 1.3325 + return (EAFNOSUPPORT); 1.3326 + } 1.3327 + } 1.3328 + SCTP_INP_INFO_WLOCK(); 1.3329 + SCTP_INP_WLOCK(inp); 1.3330 + /* Setup a vrf_id to be the default for the non-bind-all case. */ 1.3331 + vrf_id = inp->def_vrf_id; 1.3332 + 1.3333 + /* increase our count due to the unlock we do */ 1.3334 + SCTP_INP_INCR_REF(inp); 1.3335 + if (lport) { 1.3336 + /* 1.3337 + * Did the caller specify a port? if so we must see if an ep 1.3338 + * already has this one bound. 
1.3339 + */ 1.3340 + /* got to be root to get at low ports */ 1.3341 +#if !defined(__Windows__) 1.3342 + if (ntohs(lport) < IPPORT_RESERVED) { 1.3343 + if (p && (error = 1.3344 +#ifdef __FreeBSD__ 1.3345 +#if __FreeBSD_version > 602000 1.3346 + priv_check(p, PRIV_NETINET_RESERVEDPORT) 1.3347 +#elif __FreeBSD_version >= 500000 1.3348 + suser_cred(p->td_ucred, 0) 1.3349 +#else 1.3350 + suser(p) 1.3351 +#endif 1.3352 +#elif defined(__APPLE__) 1.3353 + suser(p->p_ucred, &p->p_acflag) 1.3354 +#elif defined(__Userspace__) /* must be true to use raw socket */ 1.3355 + 1 1.3356 +#else 1.3357 + suser(p, 0) 1.3358 +#endif 1.3359 + )) { 1.3360 + SCTP_INP_DECR_REF(inp); 1.3361 + SCTP_INP_WUNLOCK(inp); 1.3362 + SCTP_INP_INFO_WUNLOCK(); 1.3363 + return (error); 1.3364 + } 1.3365 +#if defined(__Panda__) 1.3366 + if (!SCTP_IS_PRIVILEDGED(so)) { 1.3367 + SCTP_INP_DECR_REF(inp); 1.3368 + SCTP_INP_WUNLOCK(inp); 1.3369 + SCTP_INP_INFO_WUNLOCK(); 1.3370 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EACCES); 1.3371 + return (EACCES); 1.3372 + } 1.3373 +#endif 1.3374 + } 1.3375 +#if !defined(__Panda__) && !defined(__Userspace__) 1.3376 + if (p == NULL) { 1.3377 + SCTP_INP_DECR_REF(inp); 1.3378 + SCTP_INP_WUNLOCK(inp); 1.3379 + SCTP_INP_INFO_WUNLOCK(); 1.3380 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 1.3381 + return (error); 1.3382 + } 1.3383 +#endif 1.3384 +#endif /* __Windows__ */ 1.3385 + SCTP_INP_WUNLOCK(inp); 1.3386 + if (bindall) { 1.3387 +#ifdef SCTP_MVRF 1.3388 + for (i = 0; i < inp->num_vrfs; i++) { 1.3389 + vrf_id = inp->m_vrf_ids[i]; 1.3390 +#else 1.3391 + vrf_id = inp->def_vrf_id; 1.3392 +#endif 1.3393 + inp_tmp = sctp_pcb_findep(addr, 0, 1, vrf_id); 1.3394 + if (inp_tmp != NULL) { 1.3395 + /* 1.3396 + * lock guy returned and lower count 1.3397 + * note that we are not bound so 1.3398 + * inp_tmp should NEVER be inp. And 1.3399 + * it is this inp (inp_tmp) that gets 1.3400 + * the reference bump, so we must 1.3401 + * lower it. 1.3402 + */ 1.3403 + SCTP_INP_DECR_REF(inp_tmp); 1.3404 + /* unlock info */ 1.3405 + if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) && 1.3406 + (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) { 1.3407 + /* Ok, must be one-2-one and allowing port re-use */ 1.3408 + port_reuse_active = 1; 1.3409 + goto continue_anyway; 1.3410 + } 1.3411 + SCTP_INP_DECR_REF(inp); 1.3412 + SCTP_INP_INFO_WUNLOCK(); 1.3413 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE); 1.3414 + return (EADDRINUSE); 1.3415 + } 1.3416 +#ifdef SCTP_MVRF 1.3417 + } 1.3418 +#endif 1.3419 + } else { 1.3420 + inp_tmp = sctp_pcb_findep(addr, 0, 1, vrf_id); 1.3421 + if (inp_tmp != NULL) { 1.3422 + /* 1.3423 + * lock guy returned and lower count note 1.3424 + * that we are not bound so inp_tmp should 1.3425 + * NEVER be inp. And it is this inp (inp_tmp) 1.3426 + * that gets the reference bump, so we must 1.3427 + * lower it. 
1.3428 + */ 1.3429 + SCTP_INP_DECR_REF(inp_tmp); 1.3430 + /* unlock info */ 1.3431 + if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) && 1.3432 + (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) { 1.3433 + /* Ok, must be one-2-one and allowing port re-use */ 1.3434 + port_reuse_active = 1; 1.3435 + goto continue_anyway; 1.3436 + } 1.3437 + SCTP_INP_DECR_REF(inp); 1.3438 + SCTP_INP_INFO_WUNLOCK(); 1.3439 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE); 1.3440 + return (EADDRINUSE); 1.3441 + } 1.3442 + } 1.3443 + continue_anyway: 1.3444 + SCTP_INP_WLOCK(inp); 1.3445 + if (bindall) { 1.3446 + /* verify that no lport is not used by a singleton */ 1.3447 + if ((port_reuse_active == 0) && 1.3448 + (inp_tmp = sctp_isport_inuse(inp, lport, vrf_id))) { 1.3449 + /* Sorry someone already has this one bound */ 1.3450 + if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) && 1.3451 + (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) { 1.3452 + port_reuse_active = 1; 1.3453 + } else { 1.3454 + SCTP_INP_DECR_REF(inp); 1.3455 + SCTP_INP_WUNLOCK(inp); 1.3456 + SCTP_INP_INFO_WUNLOCK(); 1.3457 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE); 1.3458 + return (EADDRINUSE); 1.3459 + } 1.3460 + } 1.3461 + } 1.3462 + } else { 1.3463 + uint16_t first, last, candidate; 1.3464 + uint16_t count; 1.3465 + int done; 1.3466 + 1.3467 +#if defined(__Windows__) 1.3468 + first = 1; 1.3469 + last = 0xffff; 1.3470 +#else 1.3471 +#if defined(__Userspace__) 1.3472 + /* TODO ensure uid is 0, etc... */ 1.3473 +#elif defined(__FreeBSD__) || defined(__APPLE__) 1.3474 + if (ip_inp->inp_flags & INP_HIGHPORT) { 1.3475 + first = MODULE_GLOBAL(ipport_hifirstauto); 1.3476 + last = MODULE_GLOBAL(ipport_hilastauto); 1.3477 + } else if (ip_inp->inp_flags & INP_LOWPORT) { 1.3478 + if (p && (error = 1.3479 +#ifdef __FreeBSD__ 1.3480 +#if __FreeBSD_version > 602000 1.3481 + priv_check(p, PRIV_NETINET_RESERVEDPORT) 1.3482 +#elif __FreeBSD_version >= 500000 1.3483 + suser_cred(p->td_ucred, 0) 1.3484 +#else 1.3485 + suser(p) 1.3486 +#endif 1.3487 +#elif defined(__APPLE__) 1.3488 + suser(p->p_ucred, &p->p_acflag) 1.3489 +#else 1.3490 + suser(p, 0) 1.3491 +#endif 1.3492 + )) { 1.3493 + SCTP_INP_DECR_REF(inp); 1.3494 + SCTP_INP_WUNLOCK(inp); 1.3495 + SCTP_INP_INFO_WUNLOCK(); 1.3496 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error); 1.3497 + return (error); 1.3498 + } 1.3499 + first = MODULE_GLOBAL(ipport_lowfirstauto); 1.3500 + last = MODULE_GLOBAL(ipport_lowlastauto); 1.3501 + } else { 1.3502 +#endif 1.3503 + first = MODULE_GLOBAL(ipport_firstauto); 1.3504 + last = MODULE_GLOBAL(ipport_lastauto); 1.3505 +#if defined(__FreeBSD__) || defined(__APPLE__) 1.3506 + } 1.3507 +#endif 1.3508 +#endif /* __Windows__ */ 1.3509 + if (first > last) { 1.3510 + uint16_t temp; 1.3511 + 1.3512 + temp = first; 1.3513 + first = last; 1.3514 + last = temp; 1.3515 + } 1.3516 + count = last - first + 1; /* number of candidates */ 1.3517 + candidate = first + sctp_select_initial_TSN(&inp->sctp_ep) % (count); 1.3518 + 1.3519 + done = 0; 1.3520 + while (!done) { 1.3521 +#ifdef SCTP_MVRF 1.3522 + for (i = 0; i < inp->num_vrfs; i++) { 1.3523 + if (sctp_isport_inuse(inp, htons(candidate), inp->m_vrf_ids[i]) != NULL) { 1.3524 + break; 1.3525 + } 1.3526 + } 1.3527 + if (i == inp->num_vrfs) { 1.3528 + done = 1; 1.3529 + } 1.3530 +#else 1.3531 + if (sctp_isport_inuse(inp, htons(candidate), inp->def_vrf_id) == NULL) { 1.3532 + done = 1; 1.3533 + } 1.3534 +#endif 1.3535 + if (!done) { 1.3536 + if 
(--count == 0) { 1.3537 + SCTP_INP_DECR_REF(inp); 1.3538 + SCTP_INP_WUNLOCK(inp); 1.3539 + SCTP_INP_INFO_WUNLOCK(); 1.3540 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE); 1.3541 + return (EADDRINUSE); 1.3542 + } 1.3543 + if (candidate == last) 1.3544 + candidate = first; 1.3545 + else 1.3546 + candidate = candidate + 1; 1.3547 + } 1.3548 + } 1.3549 + lport = htons(candidate); 1.3550 + } 1.3551 + SCTP_INP_DECR_REF(inp); 1.3552 + if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | 1.3553 + SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 1.3554 + /* 1.3555 + * this really should not happen. The guy did a non-blocking 1.3556 + * bind and then did a close at the same time. 1.3557 + */ 1.3558 + SCTP_INP_WUNLOCK(inp); 1.3559 + SCTP_INP_INFO_WUNLOCK(); 1.3560 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.3561 + return (EINVAL); 1.3562 + } 1.3563 + /* ok we look clear to give out this port, so lets setup the binding */ 1.3564 + if (bindall) { 1.3565 + /* binding to all addresses, so just set in the proper flags */ 1.3566 + inp->sctp_flags |= SCTP_PCB_FLAGS_BOUNDALL; 1.3567 + /* set the automatic addr changes from kernel flag */ 1.3568 + if (SCTP_BASE_SYSCTL(sctp_auto_asconf) == 0) { 1.3569 + sctp_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF); 1.3570 + sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); 1.3571 + } else { 1.3572 + sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF); 1.3573 + sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); 1.3574 + } 1.3575 + if (SCTP_BASE_SYSCTL(sctp_multiple_asconfs) == 0) { 1.3576 + sctp_feature_off(inp, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS); 1.3577 + } else { 1.3578 + sctp_feature_on(inp, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS); 1.3579 + } 1.3580 + /* set the automatic mobility_base from kernel 1.3581 + flag (by micchie) 1.3582 + */ 1.3583 + if (SCTP_BASE_SYSCTL(sctp_mobility_base) == 0) { 1.3584 + sctp_mobility_feature_off(inp, SCTP_MOBILITY_BASE); 1.3585 + sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 1.3586 + } else { 1.3587 + sctp_mobility_feature_on(inp, SCTP_MOBILITY_BASE); 1.3588 + sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 1.3589 + } 1.3590 + /* set the automatic mobility_fasthandoff from kernel 1.3591 + flag (by micchie) 1.3592 + */ 1.3593 + if (SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff) == 0) { 1.3594 + sctp_mobility_feature_off(inp, SCTP_MOBILITY_FASTHANDOFF); 1.3595 + sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 1.3596 + } else { 1.3597 + sctp_mobility_feature_on(inp, SCTP_MOBILITY_FASTHANDOFF); 1.3598 + sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 1.3599 + } 1.3600 + } else { 1.3601 + /* 1.3602 + * bind specific, make sure flags is off and add a new 1.3603 + * address structure to the sctp_addr_list inside the ep 1.3604 + * structure. 1.3605 + * 1.3606 + * We will need to allocate one and insert it at the head. The 1.3607 + * socketopt call can just insert new addresses in there as 1.3608 + * well. It will also have to do the embed scope kame hack 1.3609 + * too (before adding). 
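* Note: the sockaddr is first copied into a local sockaddr_storage with its port zeroed, since the interface lookup below (sctp_find_ifa_by_addr) matches on the address alone.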
1.3610 + */ 1.3611 + struct sctp_ifa *ifa; 1.3612 + struct sockaddr_storage store_sa; 1.3613 + 1.3614 + memset(&store_sa, 0, sizeof(store_sa)); 1.3615 + switch (addr->sa_family) { 1.3616 +#ifdef INET 1.3617 + case AF_INET: 1.3618 + { 1.3619 + struct sockaddr_in *sin; 1.3620 + 1.3621 + sin = (struct sockaddr_in *)&store_sa; 1.3622 + memcpy(sin, addr, sizeof(struct sockaddr_in)); 1.3623 + sin->sin_port = 0; 1.3624 + break; 1.3625 + } 1.3626 +#endif 1.3627 +#ifdef INET6 1.3628 + case AF_INET6: 1.3629 + { 1.3630 + struct sockaddr_in6 *sin6; 1.3631 + 1.3632 + sin6 = (struct sockaddr_in6 *)&store_sa; 1.3633 + memcpy(sin6, addr, sizeof(struct sockaddr_in6)); 1.3634 + sin6->sin6_port = 0; 1.3635 + break; 1.3636 + } 1.3637 +#endif 1.3638 +#if defined(__Userspace__) 1.3639 + case AF_CONN: 1.3640 + { 1.3641 + struct sockaddr_conn *sconn; 1.3642 + 1.3643 + sconn = (struct sockaddr_conn *)&store_sa; 1.3644 + memcpy(sconn, addr, sizeof(struct sockaddr_conn)); 1.3645 + sconn->sconn_port = 0; 1.3646 + break; 1.3647 + } 1.3648 +#endif 1.3649 + default: 1.3650 + break; 1.3651 + } 1.3652 + /* 1.3653 + * first find the interface with the bound address need to 1.3654 + * zero out the port to find the address! yuck! can't do 1.3655 + * this earlier since need port for sctp_pcb_findep() 1.3656 + */ 1.3657 + if (sctp_ifap != NULL) { 1.3658 + ifa = sctp_ifap; 1.3659 + } else { 1.3660 + /* Note for BSD we hit here always other 1.3661 + * O/S's will pass things in via the 1.3662 + * sctp_ifap argument (Panda). 1.3663 + */ 1.3664 + ifa = sctp_find_ifa_by_addr((struct sockaddr *)&store_sa, 1.3665 + vrf_id, SCTP_ADDR_NOT_LOCKED); 1.3666 + } 1.3667 + if (ifa == NULL) { 1.3668 + /* Can't find an interface with that address */ 1.3669 + SCTP_INP_WUNLOCK(inp); 1.3670 + SCTP_INP_INFO_WUNLOCK(); 1.3671 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRNOTAVAIL); 1.3672 + return (EADDRNOTAVAIL); 1.3673 + } 1.3674 +#ifdef INET6 1.3675 + if (addr->sa_family == AF_INET6) { 1.3676 + /* GAK, more FIXME IFA lock? */ 1.3677 + if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { 1.3678 + /* Can't bind a non-existent addr. 
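* SCTP_ADDR_IFA_UNUSEABLE typically marks an IPv6 address that is not yet usable on the interface (e.g. still tentative), so binding to it is refused with EINVAL.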
*/ 1.3679 + SCTP_INP_WUNLOCK(inp); 1.3680 + SCTP_INP_INFO_WUNLOCK(); 1.3681 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.3682 + return (EINVAL); 1.3683 + } 1.3684 + } 1.3685 +#endif 1.3686 + /* we're not bound all */ 1.3687 + inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUNDALL; 1.3688 + /* allow bindx() to send ASCONF's for binding changes */ 1.3689 + sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF); 1.3690 + /* clear automatic addr changes from kernel flag */ 1.3691 + sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF); 1.3692 + 1.3693 + /* add this address to the endpoint list */ 1.3694 + error = sctp_insert_laddr(&inp->sctp_addr_list, ifa, 0); 1.3695 + if (error != 0) { 1.3696 + SCTP_INP_WUNLOCK(inp); 1.3697 + SCTP_INP_INFO_WUNLOCK(); 1.3698 + return (error); 1.3699 + } 1.3700 + inp->laddr_count++; 1.3701 + } 1.3702 + /* find the bucket */ 1.3703 + if (port_reuse_active) { 1.3704 + /* Put it into tcp 1-2-1 hash */ 1.3705 + head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR(lport, SCTP_BASE_INFO(hashtcpmark))]; 1.3706 + inp->sctp_flags |= SCTP_PCB_FLAGS_IN_TCPPOOL; 1.3707 + } else { 1.3708 + head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport, SCTP_BASE_INFO(hashmark))]; 1.3709 + } 1.3710 + /* put it in the bucket */ 1.3711 + LIST_INSERT_HEAD(head, inp, sctp_hash); 1.3712 + SCTPDBG(SCTP_DEBUG_PCB1, "Main hash to bind at head:%p, bound port:%d - in tcp_pool=%d\n", 1.3713 + (void *)head, ntohs(lport), port_reuse_active); 1.3714 + /* set in the port */ 1.3715 + inp->sctp_lport = lport; 1.3716 + 1.3717 + /* turn off just the unbound flag */ 1.3718 + inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND; 1.3719 + SCTP_INP_WUNLOCK(inp); 1.3720 + SCTP_INP_INFO_WUNLOCK(); 1.3721 + return (0); 1.3722 +} 1.3723 + 1.3724 + 1.3725 +static void 1.3726 +sctp_iterator_inp_being_freed(struct sctp_inpcb *inp) 1.3727 +{ 1.3728 + struct sctp_iterator *it, *nit; 1.3729 + 1.3730 + /* 1.3731 + * We enter with the only the ITERATOR_LOCK in place and a write 1.3732 + * lock on the inp_info stuff. 1.3733 + */ 1.3734 + it = sctp_it_ctl.cur_it; 1.3735 +#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 1.3736 + if (it && (it->vn != curvnet)) { 1.3737 + /* Its not looking at our VNET */ 1.3738 + return; 1.3739 + } 1.3740 +#endif 1.3741 + if (it && (it->inp == inp)) { 1.3742 + /* 1.3743 + * This is tricky and we hold the iterator lock, 1.3744 + * but when it returns and gets the lock (when we 1.3745 + * release it) the iterator will try to operate on 1.3746 + * inp. We need to stop that from happening. But 1.3747 + * of course the iterator has a reference on the 1.3748 + * stcb and inp. We can mark it and it will stop. 1.3749 + * 1.3750 + * If its a single iterator situation, we 1.3751 + * set the end iterator flag. Otherwise 1.3752 + * we set the iterator to go to the next inp. 
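* Either way the running iterator notices the flag the next time it takes its locks and stops touching this inp before we free it.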
1.3753 + * 1.3754 + */ 1.3755 + if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1.3756 + sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_IT; 1.3757 + } else { 1.3758 + sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_INP; 1.3759 + } 1.3760 + } 1.3761 + /* Now go through and remove any single reference to 1.3762 + * our inp that may be still pending on the list 1.3763 + */ 1.3764 + SCTP_IPI_ITERATOR_WQ_LOCK(); 1.3765 + TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) { 1.3766 +#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 1.3767 + if (it->vn != curvnet) { 1.3768 + continue; 1.3769 + } 1.3770 +#endif 1.3771 + if (it->inp == inp) { 1.3772 + /* This one points to me is it inp specific? */ 1.3773 + if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1.3774 + /* Remove and free this one */ 1.3775 + TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, 1.3776 + it, sctp_nxt_itr); 1.3777 + if (it->function_atend != NULL) { 1.3778 + (*it->function_atend) (it->pointer, it->val); 1.3779 + } 1.3780 + SCTP_FREE(it, SCTP_M_ITER); 1.3781 + } else { 1.3782 + it->inp = LIST_NEXT(it->inp, sctp_list); 1.3783 + if (it->inp) { 1.3784 + SCTP_INP_INCR_REF(it->inp); 1.3785 + } 1.3786 + } 1.3787 + /* When its put in the refcnt is incremented so decr it */ 1.3788 + SCTP_INP_DECR_REF(inp); 1.3789 + } 1.3790 + } 1.3791 + SCTP_IPI_ITERATOR_WQ_UNLOCK(); 1.3792 +} 1.3793 + 1.3794 +/* release sctp_inpcb unbind the port */ 1.3795 +void 1.3796 +sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from) 1.3797 +{ 1.3798 + /* 1.3799 + * Here we free a endpoint. We must find it (if it is in the Hash 1.3800 + * table) and remove it from there. Then we must also find it in the 1.3801 + * overall list and remove it from there. After all removals are 1.3802 + * complete then any timer has to be stopped. Then start the actual 1.3803 + * freeing. a) Any local lists. b) Any associations. c) The hash of 1.3804 + * all associations. d) finally the ep itself. 1.3805 + */ 1.3806 + struct sctp_tcb *asoc, *nasoc; 1.3807 + struct sctp_laddr *laddr, *nladdr; 1.3808 + struct inpcb *ip_pcb; 1.3809 + struct socket *so; 1.3810 + int being_refed = 0; 1.3811 + struct sctp_queued_to_read *sq, *nsq; 1.3812 +#if !defined(__Panda__) && !defined(__Userspace__) 1.3813 +#if !defined(__FreeBSD__) || __FreeBSD_version < 500000 1.3814 + sctp_rtentry_t *rt; 1.3815 +#endif 1.3816 +#endif 1.3817 + int cnt; 1.3818 + sctp_sharedkey_t *shared_key, *nshared_key; 1.3819 + 1.3820 + 1.3821 +#if defined(__APPLE__) 1.3822 + sctp_lock_assert(SCTP_INP_SO(inp)); 1.3823 +#endif 1.3824 +#ifdef SCTP_LOG_CLOSING 1.3825 + sctp_log_closing(inp, NULL, 0); 1.3826 +#endif 1.3827 + SCTP_ITERATOR_LOCK(); 1.3828 + /* mark any iterators on the list or being processed */ 1.3829 + sctp_iterator_inp_being_freed(inp); 1.3830 + SCTP_ITERATOR_UNLOCK(); 1.3831 + so = inp->sctp_socket; 1.3832 + if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 1.3833 + /* been here before.. eeks.. get out of here */ 1.3834 + SCTP_PRINTF("This conflict in free SHOULD not be happening! 
from %d, imm %d\n", from, immediate); 1.3835 +#ifdef SCTP_LOG_CLOSING 1.3836 + sctp_log_closing(inp, NULL, 1); 1.3837 +#endif 1.3838 + return; 1.3839 + } 1.3840 + SCTP_ASOC_CREATE_LOCK(inp); 1.3841 + SCTP_INP_INFO_WLOCK(); 1.3842 + 1.3843 + SCTP_INP_WLOCK(inp); 1.3844 + if (from == SCTP_CALLED_AFTER_CMPSET_OFCLOSE) { 1.3845 + inp->sctp_flags &= ~SCTP_PCB_FLAGS_CLOSE_IP; 1.3846 + /* socket is gone, so no more wakeups allowed */ 1.3847 + inp->sctp_flags |= SCTP_PCB_FLAGS_DONT_WAKE; 1.3848 + inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT; 1.3849 + inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT; 1.3850 + 1.3851 + } 1.3852 + /* First time through we have the socket lock, after that no more. */ 1.3853 + sctp_timer_stop(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL, 1.3854 + SCTP_FROM_SCTP_PCB+SCTP_LOC_1); 1.3855 + 1.3856 + if (inp->control) { 1.3857 + sctp_m_freem(inp->control); 1.3858 + inp->control = NULL; 1.3859 + } 1.3860 + if (inp->pkt) { 1.3861 + sctp_m_freem(inp->pkt); 1.3862 + inp->pkt = NULL; 1.3863 + } 1.3864 + ip_pcb = &inp->ip_inp.inp; /* we could just cast the main pointer 1.3865 + * here but I will be nice :> (i.e. 1.3866 + * ip_pcb = ep;) */ 1.3867 + if (immediate == SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE) { 1.3868 + int cnt_in_sd; 1.3869 + 1.3870 + cnt_in_sd = 0; 1.3871 + LIST_FOREACH_SAFE(asoc, &inp->sctp_asoc_list, sctp_tcblist, nasoc) { 1.3872 + SCTP_TCB_LOCK(asoc); 1.3873 + if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 1.3874 + /* Skip guys being freed */ 1.3875 + cnt_in_sd++; 1.3876 + if (asoc->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE) { 1.3877 + /* 1.3878 + * Special case - we did not start a kill 1.3879 + * timer on the asoc due to it was not 1.3880 + * closed. So go ahead and start it now. 1.3881 + */ 1.3882 + asoc->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE; 1.3883 + sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, asoc, NULL); 1.3884 + } 1.3885 + SCTP_TCB_UNLOCK(asoc); 1.3886 + continue; 1.3887 + } 1.3888 + if (((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_WAIT) || 1.3889 + (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_ECHOED)) && 1.3890 + (asoc->asoc.total_output_queue_size == 0)) { 1.3891 + /* If we have data in queue, we don't want to just 1.3892 + * free since the app may have done, send()/close 1.3893 + * or connect/send/close. And it wants the data 1.3894 + * to get across first. 
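* Only front-state associations (COOKIE_WAIT/COOKIE_ECHOED) with nothing queued are freed outright here; associations past those states, or still holding data, fall through to the shutdown/abort handling further down.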
1.3895 + */ 1.3896 + /* Just abandon things in the front states */ 1.3897 + if (sctp_free_assoc(inp, asoc, SCTP_PCBFREE_NOFORCE, 1.3898 + SCTP_FROM_SCTP_PCB+SCTP_LOC_2) == 0) { 1.3899 + cnt_in_sd++; 1.3900 + } 1.3901 + continue; 1.3902 + } 1.3903 + /* Disconnect the socket please */ 1.3904 + asoc->sctp_socket = NULL; 1.3905 + asoc->asoc.state |= SCTP_STATE_CLOSED_SOCKET; 1.3906 + if ((asoc->asoc.size_on_reasm_queue > 0) || 1.3907 + (asoc->asoc.control_pdapi) || 1.3908 + (asoc->asoc.size_on_all_streams > 0) || 1.3909 + (so && (so->so_rcv.sb_cc > 0))) { 1.3910 + /* Left with Data unread */ 1.3911 + struct mbuf *op_err; 1.3912 + 1.3913 + op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 1.3914 + 0, M_NOWAIT, 1, MT_DATA); 1.3915 + if (op_err) { 1.3916 + /* Fill in the user initiated abort */ 1.3917 + struct sctp_paramhdr *ph; 1.3918 + 1.3919 + SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr); 1.3920 + ph = mtod(op_err, struct sctp_paramhdr *); 1.3921 + ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 1.3922 + ph->param_length = htons(SCTP_BUF_LEN(op_err)); 1.3923 + } 1.3924 + asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB+SCTP_LOC_3; 1.3925 + sctp_send_abort_tcb(asoc, op_err, SCTP_SO_LOCKED); 1.3926 + SCTP_STAT_INCR_COUNTER32(sctps_aborted); 1.3927 + if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) || 1.3928 + (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1.3929 + SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1.3930 + } 1.3931 + if (sctp_free_assoc(inp, asoc, 1.3932 + SCTP_PCBFREE_NOFORCE, SCTP_FROM_SCTP_PCB+SCTP_LOC_4) == 0) { 1.3933 + cnt_in_sd++; 1.3934 + } 1.3935 + continue; 1.3936 + } else if (TAILQ_EMPTY(&asoc->asoc.send_queue) && 1.3937 + TAILQ_EMPTY(&asoc->asoc.sent_queue) && 1.3938 + (asoc->asoc.stream_queue_cnt == 0)) { 1.3939 + if (asoc->asoc.locked_on_sending) { 1.3940 + goto abort_anyway; 1.3941 + } 1.3942 + if ((SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_SENT) && 1.3943 + (SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 1.3944 + struct sctp_nets *netp; 1.3945 + 1.3946 + /* 1.3947 + * there is nothing queued to send, 1.3948 + * so I send shutdown 1.3949 + */ 1.3950 + if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) || 1.3951 + (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1.3952 + SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1.3953 + } 1.3954 + SCTP_SET_STATE(&asoc->asoc, SCTP_STATE_SHUTDOWN_SENT); 1.3955 + SCTP_CLEAR_SUBSTATE(&asoc->asoc, SCTP_STATE_SHUTDOWN_PENDING); 1.3956 + sctp_stop_timers_for_shutdown(asoc); 1.3957 + if (asoc->asoc.alternate) { 1.3958 + netp = asoc->asoc.alternate; 1.3959 + } else { 1.3960 + netp = asoc->asoc.primary_destination; 1.3961 + } 1.3962 + sctp_send_shutdown(asoc, netp); 1.3963 + sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, asoc->sctp_ep, asoc, 1.3964 + netp); 1.3965 + sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc, 1.3966 + asoc->asoc.primary_destination); 1.3967 + sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_LOCKED); 1.3968 + } 1.3969 + } else { 1.3970 + /* mark into shutdown pending */ 1.3971 + struct sctp_stream_queue_pending *sp; 1.3972 + 1.3973 + asoc->asoc.state |= SCTP_STATE_SHUTDOWN_PENDING; 1.3974 + sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc, 1.3975 + asoc->asoc.primary_destination); 1.3976 + if (asoc->asoc.locked_on_sending) { 1.3977 + sp = TAILQ_LAST(&((asoc->asoc.locked_on_sending)->outqueue), 1.3978 + sctp_streamhead); 1.3979 + if (sp == NULL) { 1.3980 + SCTP_PRINTF("Error, sp is NULL, 
locked on sending is %p strm:%d\n", 1.3981 + (void *)asoc->asoc.locked_on_sending, 1.3982 + asoc->asoc.locked_on_sending->stream_no); 1.3983 + } else { 1.3984 + if ((sp->length == 0) && (sp->msg_is_complete == 0)) 1.3985 + asoc->asoc.state |= SCTP_STATE_PARTIAL_MSG_LEFT; 1.3986 + } 1.3987 + } 1.3988 + if (TAILQ_EMPTY(&asoc->asoc.send_queue) && 1.3989 + TAILQ_EMPTY(&asoc->asoc.sent_queue) && 1.3990 + (asoc->asoc.state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 1.3991 + struct mbuf *op_err; 1.3992 + abort_anyway: 1.3993 + op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 1.3994 + 0, M_NOWAIT, 1, MT_DATA); 1.3995 + if (op_err) { 1.3996 + /* Fill in the user initiated abort */ 1.3997 + struct sctp_paramhdr *ph; 1.3998 + 1.3999 + SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr); 1.4000 + ph = mtod(op_err, struct sctp_paramhdr *); 1.4001 + ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 1.4002 + ph->param_length = htons(SCTP_BUF_LEN(op_err)); 1.4003 + } 1.4004 + asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB+SCTP_LOC_5; 1.4005 + sctp_send_abort_tcb(asoc, op_err, SCTP_SO_LOCKED); 1.4006 + SCTP_STAT_INCR_COUNTER32(sctps_aborted); 1.4007 + if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) || 1.4008 + (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1.4009 + SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1.4010 + } 1.4011 + if (sctp_free_assoc(inp, asoc, 1.4012 + SCTP_PCBFREE_NOFORCE, 1.4013 + SCTP_FROM_SCTP_PCB+SCTP_LOC_6) == 0) { 1.4014 + cnt_in_sd++; 1.4015 + } 1.4016 + continue; 1.4017 + } else { 1.4018 + sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED); 1.4019 + } 1.4020 + } 1.4021 + cnt_in_sd++; 1.4022 + SCTP_TCB_UNLOCK(asoc); 1.4023 + } 1.4024 + /* now is there some left in our SHUTDOWN state? */ 1.4025 + if (cnt_in_sd) { 1.4026 +#ifdef SCTP_LOG_CLOSING 1.4027 + sctp_log_closing(inp, NULL, 2); 1.4028 +#endif 1.4029 + inp->sctp_socket = NULL; 1.4030 + SCTP_INP_WUNLOCK(inp); 1.4031 + SCTP_ASOC_CREATE_UNLOCK(inp); 1.4032 + SCTP_INP_INFO_WUNLOCK(); 1.4033 + return; 1.4034 + } 1.4035 + } 1.4036 + inp->sctp_socket = NULL; 1.4037 + if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) != 1.4038 + SCTP_PCB_FLAGS_UNBOUND) { 1.4039 + /* 1.4040 + * ok, this guy has been bound. It's port is 1.4041 + * somewhere in the SCTP_BASE_INFO(hash table). Remove 1.4042 + * it! 1.4043 + */ 1.4044 + LIST_REMOVE(inp, sctp_hash); 1.4045 + inp->sctp_flags |= SCTP_PCB_FLAGS_UNBOUND; 1.4046 + } 1.4047 + 1.4048 + /* If there is a timer running to kill us, 1.4049 + * forget it, since it may have a contest 1.4050 + * on the INP lock.. which would cause us 1.4051 + * to die ... 
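* This appears to be what the SCTP_OS_TIMER_STOP() calls on the signature_change timer further below take care of, once it is known whether any association still pins the endpoint.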
1.4052 + */ 1.4053 + cnt = 0; 1.4054 + LIST_FOREACH_SAFE(asoc, &inp->sctp_asoc_list, sctp_tcblist, nasoc) { 1.4055 + SCTP_TCB_LOCK(asoc); 1.4056 + if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 1.4057 + if (asoc->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE) { 1.4058 + asoc->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE; 1.4059 + sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, asoc, NULL); 1.4060 + } 1.4061 + cnt++; 1.4062 + SCTP_TCB_UNLOCK(asoc); 1.4063 + continue; 1.4064 + } 1.4065 + /* Free associations that are NOT killing us */ 1.4066 + if ((SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_COOKIE_WAIT) && 1.4067 + ((asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) { 1.4068 + struct mbuf *op_err; 1.4069 + 1.4070 + op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 1.4071 + 0, M_NOWAIT, 1, MT_DATA); 1.4072 + if (op_err) { 1.4073 + /* Fill in the user initiated abort */ 1.4074 + struct sctp_paramhdr *ph; 1.4075 + 1.4076 + SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr); 1.4077 + ph = mtod(op_err, struct sctp_paramhdr *); 1.4078 + ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT); 1.4079 + ph->param_length = htons(SCTP_BUF_LEN(op_err)); 1.4080 + } 1.4081 + asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB+SCTP_LOC_7; 1.4082 + sctp_send_abort_tcb(asoc, op_err, SCTP_SO_LOCKED); 1.4083 + SCTP_STAT_INCR_COUNTER32(sctps_aborted); 1.4084 + } else if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 1.4085 + cnt++; 1.4086 + SCTP_TCB_UNLOCK(asoc); 1.4087 + continue; 1.4088 + } 1.4089 + if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) || 1.4090 + (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1.4091 + SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1.4092 + } 1.4093 + if (sctp_free_assoc(inp, asoc, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_PCB+SCTP_LOC_8) == 0) { 1.4094 + cnt++; 1.4095 + } 1.4096 + } 1.4097 + if (cnt) { 1.4098 + /* Ok we have someone out there that will kill us */ 1.4099 + (void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer); 1.4100 +#ifdef SCTP_LOG_CLOSING 1.4101 + sctp_log_closing(inp, NULL, 3); 1.4102 +#endif 1.4103 + SCTP_INP_WUNLOCK(inp); 1.4104 + SCTP_ASOC_CREATE_UNLOCK(inp); 1.4105 + SCTP_INP_INFO_WUNLOCK(); 1.4106 + return; 1.4107 + } 1.4108 + if (SCTP_INP_LOCK_CONTENDED(inp)) 1.4109 + being_refed++; 1.4110 + if (SCTP_INP_READ_CONTENDED(inp)) 1.4111 + being_refed++; 1.4112 + if (SCTP_ASOC_CREATE_LOCK_CONTENDED(inp)) 1.4113 + being_refed++; 1.4114 + 1.4115 + if ((inp->refcount) || 1.4116 + (being_refed) || 1.4117 + (inp->sctp_flags & SCTP_PCB_FLAGS_CLOSE_IP)) { 1.4118 + (void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer); 1.4119 +#ifdef SCTP_LOG_CLOSING 1.4120 + sctp_log_closing(inp, NULL, 4); 1.4121 +#endif 1.4122 + sctp_timer_start(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL); 1.4123 + SCTP_INP_WUNLOCK(inp); 1.4124 + SCTP_ASOC_CREATE_UNLOCK(inp); 1.4125 + SCTP_INP_INFO_WUNLOCK(); 1.4126 + return; 1.4127 + } 1.4128 + inp->sctp_ep.signature_change.type = 0; 1.4129 + inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_ALLGONE; 1.4130 + /* Remove it from the list .. last thing we need a 1.4131 + * lock for. 1.4132 + */ 1.4133 + LIST_REMOVE(inp, sctp_list); 1.4134 + SCTP_INP_WUNLOCK(inp); 1.4135 + SCTP_ASOC_CREATE_UNLOCK(inp); 1.4136 + SCTP_INP_INFO_WUNLOCK(); 1.4137 + /* Now we release all locks. Since this INP 1.4138 + * cannot be found anymore except possibly by the 1.4139 + * kill timer that might be running. We call 1.4140 + * the drain function here. 
It should hit the case 1.4141 + * were it sees the ACTIVE flag cleared and exit 1.4142 + * out freeing us to proceed and destroy everything. 1.4143 + */ 1.4144 + if (from != SCTP_CALLED_FROM_INPKILL_TIMER) { 1.4145 + (void)SCTP_OS_TIMER_STOP_DRAIN(&inp->sctp_ep.signature_change.timer); 1.4146 + } else { 1.4147 + /* Probably un-needed */ 1.4148 + (void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer); 1.4149 + } 1.4150 + 1.4151 +#ifdef SCTP_LOG_CLOSING 1.4152 + sctp_log_closing(inp, NULL, 5); 1.4153 +#endif 1.4154 + 1.4155 +#if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)) 1.4156 +#if !defined(__FreeBSD__) || __FreeBSD_version < 500000 1.4157 + rt = ip_pcb->inp_route.ro_rt; 1.4158 +#endif 1.4159 +#endif 1.4160 + 1.4161 +#if defined(__Panda__) 1.4162 + if (inp->pak_to_read) { 1.4163 + (void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.zero_copy_timer.timer); 1.4164 + SCTP_RELEASE_PKT(inp->pak_to_read); 1.4165 + inp->pak_to_read = NULL; 1.4166 + } 1.4167 + if (inp->pak_to_read_sendq) { 1.4168 + (void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.zero_copy_sendq_timer.timer); 1.4169 + SCTP_RELEASE_PKT(inp->pak_to_read_sendq); 1.4170 + inp->pak_to_read_sendq = NULL; 1.4171 + } 1.4172 +#endif 1.4173 + if ((inp->sctp_asocidhash) != NULL) { 1.4174 + SCTP_HASH_FREE(inp->sctp_asocidhash, inp->hashasocidmark); 1.4175 + inp->sctp_asocidhash = NULL; 1.4176 + } 1.4177 + /*sa_ignore FREED_MEMORY*/ 1.4178 + TAILQ_FOREACH_SAFE(sq, &inp->read_queue, next, nsq) { 1.4179 + /* Its only abandoned if it had data left */ 1.4180 + if (sq->length) 1.4181 + SCTP_STAT_INCR(sctps_left_abandon); 1.4182 + 1.4183 + TAILQ_REMOVE(&inp->read_queue, sq, next); 1.4184 + sctp_free_remote_addr(sq->whoFrom); 1.4185 + if (so) 1.4186 + so->so_rcv.sb_cc -= sq->length; 1.4187 + if (sq->data) { 1.4188 + sctp_m_freem(sq->data); 1.4189 + sq->data = NULL; 1.4190 + } 1.4191 + /* 1.4192 + * no need to free the net count, since at this point all 1.4193 + * assoc's are gone. 1.4194 + */ 1.4195 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), sq); 1.4196 + SCTP_DECR_READQ_COUNT(); 1.4197 + } 1.4198 + /* Now the sctp_pcb things */ 1.4199 + /* 1.4200 + * free each asoc if it is not already closed/free. we can't use the 1.4201 + * macro here since le_next will get freed as part of the 1.4202 + * sctp_free_assoc() call. 
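* (By this point all associations have already been torn down by the loops above; what remains is per-endpoint cleanup: IPsec policy, IP options, the cached route, authentication state and the local address list.)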
1.4203 + */ 1.4204 + if (so) { 1.4205 +#ifdef IPSEC 1.4206 + ipsec_delete_pcbpolicy(ip_pcb); 1.4207 +#endif /* IPSEC */ 1.4208 + 1.4209 + /* Unlocks not needed since the socket is gone now */ 1.4210 + } 1.4211 +#ifndef __Panda__ 1.4212 + if (ip_pcb->inp_options) { 1.4213 + (void)sctp_m_free(ip_pcb->inp_options); 1.4214 + ip_pcb->inp_options = 0; 1.4215 + } 1.4216 +#endif 1.4217 + 1.4218 +#if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)) 1.4219 +#if !defined(__FreeBSD__) || __FreeBSD_version < 500000 1.4220 + if (rt) { 1.4221 + RTFREE(rt); 1.4222 + ip_pcb->inp_route.ro_rt = 0; 1.4223 + } 1.4224 +#endif 1.4225 +#if defined(__FreeBSD__) && __FreeBSD_version < 803000 1.4226 +#ifdef INET 1.4227 + if (ip_pcb->inp_moptions) { 1.4228 + inp_freemoptions(ip_pcb->inp_moptions); 1.4229 + ip_pcb->inp_moptions = 0; 1.4230 + } 1.4231 +#endif 1.4232 +#endif 1.4233 +#endif 1.4234 + 1.4235 +#ifdef INET6 1.4236 +#if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)) 1.4237 +#if defined(__FreeBSD__) || defined(__APPLE__) 1.4238 + if (ip_pcb->inp_vflag & INP_IPV6) { 1.4239 +#else 1.4240 + if (inp->inp_vflag & INP_IPV6) { 1.4241 +#endif 1.4242 + struct in6pcb *in6p; 1.4243 + 1.4244 + in6p = (struct in6pcb *)inp; 1.4245 + ip6_freepcbopts(in6p->in6p_outputopts); 1.4246 + } 1.4247 +#endif 1.4248 +#endif /* INET6 */ 1.4249 +#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)) 1.4250 + inp->inp_vflag = 0; 1.4251 +#else 1.4252 + ip_pcb->inp_vflag = 0; 1.4253 +#endif 1.4254 + /* free up authentication fields */ 1.4255 + if (inp->sctp_ep.local_auth_chunks != NULL) 1.4256 + sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); 1.4257 + if (inp->sctp_ep.local_hmacs != NULL) 1.4258 + sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 1.4259 + 1.4260 + LIST_FOREACH_SAFE(shared_key, &inp->sctp_ep.shared_keys, next, nshared_key) { 1.4261 + LIST_REMOVE(shared_key, next); 1.4262 + sctp_free_sharedkey(shared_key); 1.4263 + /*sa_ignore FREED_MEMORY*/ 1.4264 + } 1.4265 + 1.4266 +#if defined(__APPLE__) 1.4267 + inp->ip_inp.inp.inp_state = INPCB_STATE_DEAD; 1.4268 + if (in_pcb_checkstate(&inp->ip_inp.inp, WNT_STOPUSING, 1) != WNT_STOPUSING) { 1.4269 +#ifdef INVARIANTS 1.4270 + panic("sctp_inpcb_free inp = %p couldn't set to STOPUSING\n", (void *)inp); 1.4271 +#else 1.4272 + SCTP_PRINTF("sctp_inpcb_free inp = %p couldn't set to STOPUSING\n", (void *)inp); 1.4273 +#endif 1.4274 + } 1.4275 + inp->ip_inp.inp.inp_socket->so_flags |= SOF_PCBCLEARING; 1.4276 +#endif 1.4277 + /* 1.4278 + * if we have an address list the following will free the list of 1.4279 + * ifaddr's that are set into this ep. Again macro limitations here, 1.4280 + * since the LIST_FOREACH could be a bad idea. 1.4281 + */ 1.4282 + LIST_FOREACH_SAFE(laddr, &inp->sctp_addr_list, sctp_nxt_addr, nladdr) { 1.4283 + sctp_remove_laddr(laddr); 1.4284 + } 1.4285 + 1.4286 +#ifdef SCTP_TRACK_FREED_ASOCS 1.4287 + /* TEMP CODE */ 1.4288 + LIST_FOREACH_SAFE(asoc, &inp->sctp_asoc_free_list, sctp_tcblist, nasoc) { 1.4289 + LIST_REMOVE(asoc, sctp_tcblist); 1.4290 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), asoc); 1.4291 + SCTP_DECR_ASOC_COUNT(); 1.4292 + } 1.4293 + /* *** END TEMP CODE ****/ 1.4294 +#endif 1.4295 +#ifdef SCTP_MVRF 1.4296 + SCTP_FREE(inp->m_vrf_ids, SCTP_M_MVRF); 1.4297 +#endif 1.4298 + /* Now lets see about freeing the EP hash table. 
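* Once the TCB hash is released, the endpoint's locks are destroyed and the inp memory is returned to its zone (deferred on __APPLE__).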
*/ 1.4299 + if (inp->sctp_tcbhash != NULL) { 1.4300 + SCTP_HASH_FREE(inp->sctp_tcbhash, inp->sctp_hashmark); 1.4301 + inp->sctp_tcbhash = NULL; 1.4302 + } 1.4303 + /* Now we must put the ep memory back into the zone pool */ 1.4304 +#if defined(__FreeBSD__) 1.4305 + INP_LOCK_DESTROY(&inp->ip_inp.inp); 1.4306 +#endif 1.4307 + SCTP_INP_LOCK_DESTROY(inp); 1.4308 + SCTP_INP_READ_DESTROY(inp); 1.4309 + SCTP_ASOC_CREATE_LOCK_DESTROY(inp); 1.4310 +#if !defined(__APPLE__) 1.4311 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp); 1.4312 + SCTP_DECR_EP_COUNT(); 1.4313 +#else 1.4314 + /* For Tiger, we will do this later... */ 1.4315 +#endif 1.4316 +} 1.4317 + 1.4318 + 1.4319 +struct sctp_nets * 1.4320 +sctp_findnet(struct sctp_tcb *stcb, struct sockaddr *addr) 1.4321 +{ 1.4322 + struct sctp_nets *net; 1.4323 + /* locate the address */ 1.4324 + TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1.4325 + if (sctp_cmpaddr(addr, (struct sockaddr *)&net->ro._l_addr)) 1.4326 + return (net); 1.4327 + } 1.4328 + return (NULL); 1.4329 +} 1.4330 + 1.4331 + 1.4332 +int 1.4333 +sctp_is_address_on_local_host(struct sockaddr *addr, uint32_t vrf_id) 1.4334 +{ 1.4335 +#ifdef __Panda__ 1.4336 + return (0); 1.4337 +#else 1.4338 + struct sctp_ifa *sctp_ifa; 1.4339 + sctp_ifa = sctp_find_ifa_by_addr(addr, vrf_id, SCTP_ADDR_NOT_LOCKED); 1.4340 + if (sctp_ifa) { 1.4341 + return (1); 1.4342 + } else { 1.4343 + return (0); 1.4344 + } 1.4345 +#endif 1.4346 +} 1.4347 + 1.4348 +/* 1.4349 + * add's a remote endpoint address, done with the INIT/INIT-ACK as well as 1.4350 + * when a ASCONF arrives that adds it. It will also initialize all the cwnd 1.4351 + * stats of stuff. 1.4352 + */ 1.4353 +int 1.4354 +sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr, 1.4355 + struct sctp_nets **netp, int set_scope, int from) 1.4356 +{ 1.4357 + /* 1.4358 + * The following is redundant to the same lines in the 1.4359 + * sctp_aloc_assoc() but is needed since others call the add 1.4360 + * address function 1.4361 + */ 1.4362 + struct sctp_nets *net, *netfirst; 1.4363 + int addr_inscope; 1.4364 + 1.4365 + SCTPDBG(SCTP_DEBUG_PCB1, "Adding an address (from:%d) to the peer: ", 1.4366 + from); 1.4367 + SCTPDBG_ADDR(SCTP_DEBUG_PCB1, newaddr); 1.4368 + 1.4369 + netfirst = sctp_findnet(stcb, newaddr); 1.4370 + if (netfirst) { 1.4371 + /* 1.4372 + * Lie and return ok, we don't want to make the association 1.4373 + * go away for this behavior. It will happen in the TCP 1.4374 + * model in a connected socket. It does not reach the hash 1.4375 + * table until after the association is built so it can't be 1.4376 + * found. Mark as reachable, since the initial creation will 1.4377 + * have been cleared and the NOT_IN_ASSOC flag will have 1.4378 + * been added... and we don't want to end up removing it 1.4379 + * back out. 
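* In that case only the REACHABLE state is refreshed (keeping UNCONFIRMED if it was already set) and success is returned.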
1.4380 + */ 1.4381 + if (netfirst->dest_state & SCTP_ADDR_UNCONFIRMED) { 1.4382 + netfirst->dest_state = (SCTP_ADDR_REACHABLE | 1.4383 + SCTP_ADDR_UNCONFIRMED); 1.4384 + } else { 1.4385 + netfirst->dest_state = SCTP_ADDR_REACHABLE; 1.4386 + } 1.4387 + 1.4388 + return (0); 1.4389 + } 1.4390 + addr_inscope = 1; 1.4391 + switch (newaddr->sa_family) { 1.4392 +#ifdef INET 1.4393 + case AF_INET: 1.4394 + { 1.4395 + struct sockaddr_in *sin; 1.4396 + 1.4397 + sin = (struct sockaddr_in *)newaddr; 1.4398 + if (sin->sin_addr.s_addr == 0) { 1.4399 + /* Invalid address */ 1.4400 + return (-1); 1.4401 + } 1.4402 + /* zero out the bzero area */ 1.4403 + memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); 1.4404 + 1.4405 + /* assure len is set */ 1.4406 +#ifdef HAVE_SIN_LEN 1.4407 + sin->sin_len = sizeof(struct sockaddr_in); 1.4408 +#endif 1.4409 + if (set_scope) { 1.4410 +#ifdef SCTP_DONT_DO_PRIVADDR_SCOPE 1.4411 + stcb->asoc.scope.ipv4_local_scope = 1; 1.4412 +#else 1.4413 + if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) { 1.4414 + stcb->asoc.scope.ipv4_local_scope = 1; 1.4415 + } 1.4416 +#endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */ 1.4417 + } else { 1.4418 + /* Validate the address is in scope */ 1.4419 + if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) && 1.4420 + (stcb->asoc.scope.ipv4_local_scope == 0)) { 1.4421 + addr_inscope = 0; 1.4422 + } 1.4423 + } 1.4424 + break; 1.4425 + } 1.4426 +#endif 1.4427 +#ifdef INET6 1.4428 + case AF_INET6: 1.4429 + { 1.4430 + struct sockaddr_in6 *sin6; 1.4431 + 1.4432 + sin6 = (struct sockaddr_in6 *)newaddr; 1.4433 + if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 1.4434 + /* Invalid address */ 1.4435 + return (-1); 1.4436 + } 1.4437 + /* assure len is set */ 1.4438 +#ifdef HAVE_SIN6_LEN 1.4439 + sin6->sin6_len = sizeof(struct sockaddr_in6); 1.4440 +#endif 1.4441 + if (set_scope) { 1.4442 + if (sctp_is_address_on_local_host(newaddr, stcb->asoc.vrf_id)) { 1.4443 + stcb->asoc.scope.loopback_scope = 1; 1.4444 + stcb->asoc.scope.local_scope = 0; 1.4445 + stcb->asoc.scope.ipv4_local_scope = 1; 1.4446 + stcb->asoc.scope.site_scope = 1; 1.4447 + } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) { 1.4448 + /* 1.4449 + * If the new destination is a LINK_LOCAL we 1.4450 + * must have common site scope. Don't set 1.4451 + * the local scope since we may not share 1.4452 + * all links, only loopback can do this. 1.4453 + * Links on the local network would also be 1.4454 + * on our private network for v4 too. 1.4455 + */ 1.4456 + stcb->asoc.scope.ipv4_local_scope = 1; 1.4457 + stcb->asoc.scope.site_scope = 1; 1.4458 + } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) { 1.4459 + /* 1.4460 + * If the new destination is SITE_LOCAL then 1.4461 + * we must have site scope in common. 
1.4462 + */ 1.4463 + stcb->asoc.scope.site_scope = 1; 1.4464 + } 1.4465 + } else { 1.4466 + /* Validate the address is in scope */ 1.4467 + if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr) && 1.4468 + (stcb->asoc.scope.loopback_scope == 0)) { 1.4469 + addr_inscope = 0; 1.4470 + } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) && 1.4471 + (stcb->asoc.scope.local_scope == 0)) { 1.4472 + addr_inscope = 0; 1.4473 + } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr) && 1.4474 + (stcb->asoc.scope.site_scope == 0)) { 1.4475 + addr_inscope = 0; 1.4476 + } 1.4477 + } 1.4478 + break; 1.4479 + } 1.4480 +#endif 1.4481 +#if defined(__Userspace__) 1.4482 + case AF_CONN: 1.4483 + { 1.4484 + struct sockaddr_conn *sconn; 1.4485 + 1.4486 + sconn = (struct sockaddr_conn *)newaddr; 1.4487 + if (sconn->sconn_addr == NULL) { 1.4488 + /* Invalid address */ 1.4489 + return (-1); 1.4490 + } 1.4491 +#ifdef HAVE_SCONN_LEN 1.4492 + sconn->sconn_len = sizeof(struct sockaddr_conn); 1.4493 +#endif 1.4494 + break; 1.4495 + } 1.4496 +#endif 1.4497 + default: 1.4498 + /* not supported family type */ 1.4499 + return (-1); 1.4500 + } 1.4501 + net = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_net), struct sctp_nets); 1.4502 + if (net == NULL) { 1.4503 + return (-1); 1.4504 + } 1.4505 + SCTP_INCR_RADDR_COUNT(); 1.4506 + bzero(net, sizeof(struct sctp_nets)); 1.4507 + (void)SCTP_GETTIME_TIMEVAL(&net->start_time); 1.4508 +#ifdef HAVE_SA_LEN 1.4509 + memcpy(&net->ro._l_addr, newaddr, newaddr->sa_len); 1.4510 +#endif 1.4511 + switch (newaddr->sa_family) { 1.4512 +#ifdef INET 1.4513 + case AF_INET: 1.4514 +#ifndef HAVE_SA_LEN 1.4515 + memcpy(&net->ro._l_addr, newaddr, sizeof(struct sockaddr_in)); 1.4516 +#endif 1.4517 + ((struct sockaddr_in *)&net->ro._l_addr)->sin_port = stcb->rport; 1.4518 + break; 1.4519 +#endif 1.4520 +#ifdef INET6 1.4521 + case AF_INET6: 1.4522 +#ifndef HAVE_SA_LEN 1.4523 + memcpy(&net->ro._l_addr, newaddr, sizeof(struct sockaddr_in6)); 1.4524 +#endif 1.4525 + ((struct sockaddr_in6 *)&net->ro._l_addr)->sin6_port = stcb->rport; 1.4526 + break; 1.4527 +#endif 1.4528 +#if defined(__Userspace__) 1.4529 + case AF_CONN: 1.4530 +#ifndef HAVE_SA_LEN 1.4531 + memcpy(&net->ro._l_addr, newaddr, sizeof(struct sockaddr_conn)); 1.4532 +#endif 1.4533 + ((struct sockaddr_conn *)&net->ro._l_addr)->sconn_port = stcb->rport; 1.4534 + break; 1.4535 +#endif 1.4536 + default: 1.4537 + break; 1.4538 + } 1.4539 + net->addr_is_local = sctp_is_address_on_local_host(newaddr, stcb->asoc.vrf_id); 1.4540 + if (net->addr_is_local && ((set_scope || (from == SCTP_ADDR_IS_CONFIRMED)))) { 1.4541 + stcb->asoc.scope.loopback_scope = 1; 1.4542 + stcb->asoc.scope.ipv4_local_scope = 1; 1.4543 + stcb->asoc.scope.local_scope = 0; 1.4544 + stcb->asoc.scope.site_scope = 1; 1.4545 + addr_inscope = 1; 1.4546 + } 1.4547 + net->failure_threshold = stcb->asoc.def_net_failure; 1.4548 + net->pf_threshold = stcb->asoc.def_net_pf_threshold; 1.4549 + if (addr_inscope == 0) { 1.4550 + net->dest_state = (SCTP_ADDR_REACHABLE | 1.4551 + SCTP_ADDR_OUT_OF_SCOPE); 1.4552 + } else { 1.4553 + if (from == SCTP_ADDR_IS_CONFIRMED) 1.4554 + /* SCTP_ADDR_IS_CONFIRMED is passed by connect_x */ 1.4555 + net->dest_state = SCTP_ADDR_REACHABLE; 1.4556 + else 1.4557 + net->dest_state = SCTP_ADDR_REACHABLE | 1.4558 + SCTP_ADDR_UNCONFIRMED; 1.4559 + } 1.4560 + /* We set this to 0, the timer code knows that 1.4561 + * this means its an initial value 1.4562 + */ 1.4563 + net->rto_needed = 1; 1.4564 + net->RTO = 0; 1.4565 + net->RTO_measured = 0; 1.4566 + stcb->asoc.numnets++; 1.4567 + 
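/* New destination: it starts with a single reference held by the association's
 * net list. Thresholds, timers, congestion control state and the route/MTU are
 * filled in below before the net is linked into the list. */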
net->ref_count = 1; 1.4568 + net->cwr_window_tsn = net->last_cwr_tsn = stcb->asoc.sending_seq - 1; 1.4569 + net->port = stcb->asoc.port; 1.4570 + net->dscp = stcb->asoc.default_dscp; 1.4571 +#ifdef INET6 1.4572 + net->flowlabel = stcb->asoc.default_flowlabel; 1.4573 +#endif 1.4574 + if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) { 1.4575 + net->dest_state |= SCTP_ADDR_NOHB; 1.4576 + } else { 1.4577 + net->dest_state &= ~SCTP_ADDR_NOHB; 1.4578 + } 1.4579 + if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DO_NOT_PMTUD)) { 1.4580 + net->dest_state |= SCTP_ADDR_NO_PMTUD; 1.4581 + } else { 1.4582 + net->dest_state &= ~SCTP_ADDR_NO_PMTUD; 1.4583 + } 1.4584 + net->heart_beat_delay = stcb->asoc.heart_beat_delay; 1.4585 + /* Init the timer structure */ 1.4586 + SCTP_OS_TIMER_INIT(&net->rxt_timer.timer); 1.4587 + SCTP_OS_TIMER_INIT(&net->pmtu_timer.timer); 1.4588 + SCTP_OS_TIMER_INIT(&net->hb_timer.timer); 1.4589 + 1.4590 + /* Now generate a route for this guy */ 1.4591 +#ifdef INET6 1.4592 +#ifdef SCTP_EMBEDDED_V6_SCOPE 1.4593 + /* KAME hack: embed scopeid */ 1.4594 + if (newaddr->sa_family == AF_INET6) { 1.4595 + struct sockaddr_in6 *sin6; 1.4596 + 1.4597 + sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 1.4598 +#if defined(__APPLE__) 1.4599 +#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) 1.4600 + (void)in6_embedscope(&sin6->sin6_addr, sin6, &stcb->sctp_ep->ip_inp.inp, NULL); 1.4601 +#else 1.4602 + (void)in6_embedscope(&sin6->sin6_addr, sin6, &stcb->sctp_ep->ip_inp.inp, NULL, NULL); 1.4603 +#endif 1.4604 +#elif defined(SCTP_KAME) 1.4605 + (void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)); 1.4606 +#else 1.4607 + (void)in6_embedscope(&sin6->sin6_addr, sin6); 1.4608 +#endif 1.4609 +#ifndef SCOPEDROUTING 1.4610 + sin6->sin6_scope_id = 0; 1.4611 +#endif 1.4612 + } 1.4613 +#endif /* SCTP_EMBEDDED_V6_SCOPE */ 1.4614 +#endif 1.4615 + SCTP_RTALLOC((sctp_route_t *)&net->ro, stcb->asoc.vrf_id); 1.4616 + 1.4617 +#if !defined(__Userspace__) 1.4618 + if (SCTP_ROUTE_HAS_VALID_IFN(&net->ro)) { 1.4619 + /* Get source address */ 1.4620 + net->ro._s_addr = sctp_source_address_selection(stcb->sctp_ep, 1.4621 + stcb, 1.4622 + (sctp_route_t *)&net->ro, 1.4623 + net, 1.4624 + 0, 1.4625 + stcb->asoc.vrf_id); 1.4626 + /* Now get the interface MTU */ 1.4627 + if (net->ro._s_addr && net->ro._s_addr->ifn_p) { 1.4628 + net->mtu = SCTP_GATHER_MTU_FROM_INTFC(net->ro._s_addr->ifn_p); 1.4629 + } 1.4630 + if (net->mtu > 0) { 1.4631 + uint32_t rmtu; 1.4632 + 1.4633 + rmtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_rt); 1.4634 + if (rmtu == 0) { 1.4635 + /* Start things off to match mtu of interface please. */ 1.4636 + SCTP_SET_MTU_OF_ROUTE(&net->ro._l_addr.sa, 1.4637 + net->ro.ro_rt, net->mtu); 1.4638 + } else { 1.4639 + /* we take the route mtu over the interface, since 1.4640 + * the route may be leading out the loopback, or 1.4641 + * a different interface. 
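* (When the route had no MTU cached at all, i.e. rmtu == 0 above, the interface MTU is instead written back onto the route.)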
1.4642 + */ 1.4643 + net->mtu = rmtu; 1.4644 + } 1.4645 + } 1.4646 + } 1.4647 +#endif 1.4648 + if (net->mtu == 0) { 1.4649 + switch (newaddr->sa_family) { 1.4650 +#ifdef INET 1.4651 + case AF_INET: 1.4652 + net->mtu = SCTP_DEFAULT_MTU; 1.4653 + break; 1.4654 +#endif 1.4655 +#ifdef INET6 1.4656 + case AF_INET6: 1.4657 + net->mtu = 1280; 1.4658 + break; 1.4659 +#endif 1.4660 +#if defined(__Userspace__) 1.4661 + case AF_CONN: 1.4662 + net->mtu = 1280; 1.4663 + break; 1.4664 +#endif 1.4665 + default: 1.4666 + break; 1.4667 + } 1.4668 + } 1.4669 + if (net->port) { 1.4670 + net->mtu -= (uint32_t)sizeof(struct udphdr); 1.4671 + } 1.4672 + if (from == SCTP_ALLOC_ASOC) { 1.4673 + stcb->asoc.smallest_mtu = net->mtu; 1.4674 + } 1.4675 + if (stcb->asoc.smallest_mtu > net->mtu) { 1.4676 + stcb->asoc.smallest_mtu = net->mtu; 1.4677 + } 1.4678 +#ifdef INET6 1.4679 +#ifdef SCTP_EMBEDDED_V6_SCOPE 1.4680 + if (newaddr->sa_family == AF_INET6) { 1.4681 + struct sockaddr_in6 *sin6; 1.4682 + 1.4683 + sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 1.4684 +#ifdef SCTP_KAME 1.4685 + (void)sa6_recoverscope(sin6); 1.4686 +#else 1.4687 + (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL); 1.4688 +#endif /* SCTP_KAME */ 1.4689 + } 1.4690 +#endif /* SCTP_EMBEDDED_V6_SCOPE */ 1.4691 +#endif 1.4692 + 1.4693 + /* JRS - Use the congestion control given in the CC module */ 1.4694 + if (stcb->asoc.cc_functions.sctp_set_initial_cc_param != NULL) 1.4695 + (*stcb->asoc.cc_functions.sctp_set_initial_cc_param)(stcb, net); 1.4696 + 1.4697 + /* 1.4698 + * CMT: CUC algo - set find_pseudo_cumack to TRUE (1) at beginning 1.4699 + * of assoc (2005/06/27, iyengar@cis.udel.edu) 1.4700 + */ 1.4701 + net->find_pseudo_cumack = 1; 1.4702 + net->find_rtx_pseudo_cumack = 1; 1.4703 + net->src_addr_selected = 0; 1.4704 +#if defined(__FreeBSD__) 1.4705 + /* Choose an initial flowid. */ 1.4706 + net->flowid = stcb->asoc.my_vtag ^ 1.4707 + ntohs(stcb->rport) ^ 1.4708 + ntohs(stcb->sctp_ep->sctp_lport); 1.4709 +#ifdef INVARIANTS 1.4710 + net->flowidset = 1; 1.4711 +#endif 1.4712 +#endif 1.4713 + if (netp) { 1.4714 + *netp = net; 1.4715 + } 1.4716 + netfirst = TAILQ_FIRST(&stcb->asoc.nets); 1.4717 + if (net->ro.ro_rt == NULL) { 1.4718 + /* Since we have no route put it at the back */ 1.4719 + TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next); 1.4720 + } else if (netfirst == NULL) { 1.4721 + /* We are the first one in the pool. */ 1.4722 + TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next); 1.4723 + } else if (netfirst->ro.ro_rt == NULL) { 1.4724 + /* 1.4725 + * First one has NO route. Place this one ahead of the first 1.4726 + * one. 1.4727 + */ 1.4728 + TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next); 1.4729 +#ifndef __Panda__ 1.4730 + } else if (net->ro.ro_rt->rt_ifp != netfirst->ro.ro_rt->rt_ifp) { 1.4731 + /* 1.4732 + * This one has a different interface than the one at the 1.4733 + * top of the list. Place it ahead. 1.4734 + */ 1.4735 + TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next); 1.4736 +#endif 1.4737 + } else { 1.4738 + /* 1.4739 + * Ok we have the same interface as the first one. Move 1.4740 + * forward until we find either a) one with a NULL route... 1.4741 + * insert ahead of that b) one with a different ifp.. insert 1.4742 + * after that. c) end of the list.. insert at the tail. 
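* The net result is a list loosely grouped by outgoing interface, with destinations lacking a route pushed towards the tail.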
1.4743 + */ 1.4744 + struct sctp_nets *netlook; 1.4745 + 1.4746 + do { 1.4747 + netlook = TAILQ_NEXT(netfirst, sctp_next); 1.4748 + if (netlook == NULL) { 1.4749 + /* End of the list */ 1.4750 + TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next); 1.4751 + break; 1.4752 + } else if (netlook->ro.ro_rt == NULL) { 1.4753 + /* next one has NO route */ 1.4754 + TAILQ_INSERT_BEFORE(netfirst, net, sctp_next); 1.4755 + break; 1.4756 + } 1.4757 +#ifndef __Panda__ 1.4758 + else if (netlook->ro.ro_rt->rt_ifp != net->ro.ro_rt->rt_ifp) 1.4759 +#else 1.4760 + else 1.4761 +#endif 1.4762 + { 1.4763 + TAILQ_INSERT_AFTER(&stcb->asoc.nets, netlook, 1.4764 + net, sctp_next); 1.4765 + break; 1.4766 + } 1.4767 +#ifndef __Panda__ 1.4768 + /* Shift forward */ 1.4769 + netfirst = netlook; 1.4770 +#endif 1.4771 + } while (netlook != NULL); 1.4772 + } 1.4773 + 1.4774 + /* got to have a primary set */ 1.4775 + if (stcb->asoc.primary_destination == 0) { 1.4776 + stcb->asoc.primary_destination = net; 1.4777 + } else if ((stcb->asoc.primary_destination->ro.ro_rt == NULL) && 1.4778 + (net->ro.ro_rt) && 1.4779 + ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) { 1.4780 + /* No route to current primary adopt new primary */ 1.4781 + stcb->asoc.primary_destination = net; 1.4782 + } 1.4783 + /* Validate primary is first */ 1.4784 + net = TAILQ_FIRST(&stcb->asoc.nets); 1.4785 + if ((net != stcb->asoc.primary_destination) && 1.4786 + (stcb->asoc.primary_destination)) { 1.4787 + /* first one on the list is NOT the primary 1.4788 + * sctp_cmpaddr() is much more efficient if 1.4789 + * the primary is the first on the list, make it 1.4790 + * so. 1.4791 + */ 1.4792 + TAILQ_REMOVE(&stcb->asoc.nets, 1.4793 + stcb->asoc.primary_destination, sctp_next); 1.4794 + TAILQ_INSERT_HEAD(&stcb->asoc.nets, 1.4795 + stcb->asoc.primary_destination, sctp_next); 1.4796 + } 1.4797 + return (0); 1.4798 +} 1.4799 + 1.4800 + 1.4801 +static uint32_t 1.4802 +sctp_aloc_a_assoc_id(struct sctp_inpcb *inp, struct sctp_tcb *stcb) 1.4803 +{ 1.4804 + uint32_t id; 1.4805 + struct sctpasochead *head; 1.4806 + struct sctp_tcb *lstcb; 1.4807 + 1.4808 + SCTP_INP_WLOCK(inp); 1.4809 + try_again: 1.4810 + if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 1.4811 + /* TSNH */ 1.4812 + SCTP_INP_WUNLOCK(inp); 1.4813 + return (0); 1.4814 + } 1.4815 + /* 1.4816 + * We don't allow assoc id to be one of SCTP_FUTURE_ASSOC, 1.4817 + * SCTP_CURRENT_ASSOC and SCTP_ALL_ASSOC. 1.4818 + */ 1.4819 + if (inp->sctp_associd_counter <= SCTP_ALL_ASSOC) { 1.4820 + inp->sctp_associd_counter = SCTP_ALL_ASSOC + 1; 1.4821 + } 1.4822 + id = inp->sctp_associd_counter; 1.4823 + inp->sctp_associd_counter++; 1.4824 + lstcb = sctp_findasoc_ep_asocid_locked(inp, (sctp_assoc_t)id, 0); 1.4825 + if (lstcb) { 1.4826 + goto try_again; 1.4827 + } 1.4828 + head = &inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(id, inp->hashasocidmark)]; 1.4829 + LIST_INSERT_HEAD(head, stcb, sctp_tcbasocidhash); 1.4830 + stcb->asoc.in_asocid_hash = 1; 1.4831 + SCTP_INP_WUNLOCK(inp); 1.4832 + return id; 1.4833 +} 1.4834 + 1.4835 +/* 1.4836 + * allocate an association and add it to the endpoint. The caller must be 1.4837 + * careful to add all additional addresses once they are know right away or 1.4838 + * else the assoc will be may experience a blackout scenario. 
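* (In other words: add every known peer address via sctp_add_remote_addr() right after allocation, before any traffic is sent.)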
1.4839 + */ 1.4840 +struct sctp_tcb * 1.4841 +sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr, 1.4842 + int *error, uint32_t override_tag, uint32_t vrf_id, 1.4843 +#if defined(__FreeBSD__) && __FreeBSD_version >= 500000 1.4844 + struct thread *p 1.4845 +#elif defined(__Windows__) 1.4846 + PKTHREAD p 1.4847 +#else 1.4848 +#if defined(__Userspace__) 1.4849 + /* __Userspace__ NULL proc is going to be passed here. See sctp_lower_sosend */ 1.4850 +#endif 1.4851 + struct proc *p 1.4852 +#endif 1.4853 +) 1.4854 +{ 1.4855 + /* note the p argument is only valid in unbound sockets */ 1.4856 + 1.4857 + struct sctp_tcb *stcb; 1.4858 + struct sctp_association *asoc; 1.4859 + struct sctpasochead *head; 1.4860 + uint16_t rport; 1.4861 + int err; 1.4862 + 1.4863 + /* 1.4864 + * Assumption made here: Caller has done a 1.4865 + * sctp_findassociation_ep_addr(ep, addr's); to make sure the 1.4866 + * address does not exist already. 1.4867 + */ 1.4868 + if (SCTP_BASE_INFO(ipi_count_asoc) >= SCTP_MAX_NUM_OF_ASOC) { 1.4869 + /* Hit max assoc, sorry no more */ 1.4870 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS); 1.4871 + *error = ENOBUFS; 1.4872 + return (NULL); 1.4873 + } 1.4874 + if (firstaddr == NULL) { 1.4875 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.4876 + *error = EINVAL; 1.4877 + return (NULL); 1.4878 + } 1.4879 + SCTP_INP_RLOCK(inp); 1.4880 + if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) && 1.4881 + ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE)) || 1.4882 + (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 1.4883 + /* 1.4884 + * If its in the TCP pool, its NOT allowed to create an 1.4885 + * association. The parent listener needs to call 1.4886 + * sctp_aloc_assoc.. or the one-2-many socket. If a peeled 1.4887 + * off, or connected one does this.. its an error. 
1.4888 + */ 1.4889 + SCTP_INP_RUNLOCK(inp); 1.4890 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.4891 + *error = EINVAL; 1.4892 + return (NULL); 1.4893 + } 1.4894 + if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 1.4895 + (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) { 1.4896 + if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) || 1.4897 + (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED)) { 1.4898 + SCTP_INP_RUNLOCK(inp); 1.4899 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.4900 + *error = EINVAL; 1.4901 + return (NULL); 1.4902 + } 1.4903 + } 1.4904 + SCTPDBG(SCTP_DEBUG_PCB3, "Allocate an association for peer:"); 1.4905 +#ifdef SCTP_DEBUG 1.4906 + if (firstaddr) { 1.4907 + SCTPDBG_ADDR(SCTP_DEBUG_PCB3, firstaddr); 1.4908 + switch (firstaddr->sa_family) { 1.4909 +#ifdef INET 1.4910 + case AF_INET: 1.4911 + SCTPDBG(SCTP_DEBUG_PCB3, "Port:%d\n", 1.4912 + ntohs(((struct sockaddr_in *)firstaddr)->sin_port)); 1.4913 + break; 1.4914 +#endif 1.4915 +#ifdef INET6 1.4916 + case AF_INET6: 1.4917 + SCTPDBG(SCTP_DEBUG_PCB3, "Port:%d\n", 1.4918 + ntohs(((struct sockaddr_in6 *)firstaddr)->sin6_port)); 1.4919 + break; 1.4920 +#endif 1.4921 +#if defined(__Userspace__) 1.4922 + case AF_CONN: 1.4923 + SCTPDBG(SCTP_DEBUG_PCB3, "Port:%d\n", 1.4924 + ntohs(((struct sockaddr_conn *)firstaddr)->sconn_port)); 1.4925 + break; 1.4926 +#endif 1.4927 + default: 1.4928 + break; 1.4929 + } 1.4930 + } else { 1.4931 + SCTPDBG(SCTP_DEBUG_PCB3,"None\n"); 1.4932 + } 1.4933 +#endif /* SCTP_DEBUG */ 1.4934 + switch (firstaddr->sa_family) { 1.4935 +#ifdef INET 1.4936 + case AF_INET: 1.4937 + { 1.4938 + struct sockaddr_in *sin; 1.4939 + 1.4940 + sin = (struct sockaddr_in *)firstaddr; 1.4941 + if ((ntohs(sin->sin_port) == 0) || 1.4942 + (sin->sin_addr.s_addr == INADDR_ANY) || 1.4943 + (sin->sin_addr.s_addr == INADDR_BROADCAST) || 1.4944 + IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 1.4945 + /* Invalid address */ 1.4946 + SCTP_INP_RUNLOCK(inp); 1.4947 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.4948 + *error = EINVAL; 1.4949 + return (NULL); 1.4950 + } 1.4951 + rport = sin->sin_port; 1.4952 + break; 1.4953 + } 1.4954 +#endif 1.4955 +#ifdef INET6 1.4956 + case AF_INET6: 1.4957 + { 1.4958 + struct sockaddr_in6 *sin6; 1.4959 + 1.4960 + sin6 = (struct sockaddr_in6 *)firstaddr; 1.4961 + if ((ntohs(sin6->sin6_port) == 0) || 1.4962 + IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) || 1.4963 + IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) { 1.4964 + /* Invalid address */ 1.4965 + SCTP_INP_RUNLOCK(inp); 1.4966 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.4967 + *error = EINVAL; 1.4968 + return (NULL); 1.4969 + } 1.4970 + rport = sin6->sin6_port; 1.4971 + break; 1.4972 + } 1.4973 +#endif 1.4974 +#if defined(__Userspace__) 1.4975 + case AF_CONN: 1.4976 + { 1.4977 + struct sockaddr_conn *sconn; 1.4978 + 1.4979 + sconn = (struct sockaddr_conn *)firstaddr; 1.4980 + if ((ntohs(sconn->sconn_port) == 0) || 1.4981 + (sconn->sconn_addr == NULL)) { 1.4982 + /* Invalid address */ 1.4983 + SCTP_INP_RUNLOCK(inp); 1.4984 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.4985 + *error = EINVAL; 1.4986 + return (NULL); 1.4987 + } 1.4988 + rport = sconn->sconn_port; 1.4989 + break; 1.4990 + } 1.4991 +#endif 1.4992 + default: 1.4993 + /* not supported family type */ 1.4994 + SCTP_INP_RUNLOCK(inp); 1.4995 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.4996 + *error = EINVAL; 1.4997 + return (NULL); 1.4998 + } 1.4999 + 
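/* Address family validated and rport extracted; the read lock is dropped before
 * the implicit ephemeral bind that may be needed below. */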
SCTP_INP_RUNLOCK(inp); 1.5000 + if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) { 1.5001 + /* 1.5002 + * If you have not performed a bind, then we need to do the 1.5003 + * ephemeral bind for you. 1.5004 + */ 1.5005 + if ((err = sctp_inpcb_bind(inp->sctp_socket, 1.5006 + (struct sockaddr *)NULL, 1.5007 + (struct sctp_ifa *)NULL, 1.5008 +#ifndef __Panda__ 1.5009 + p 1.5010 +#else 1.5011 + (struct proc *)NULL 1.5012 +#endif 1.5013 + ))) { 1.5014 + /* bind error, probably perm */ 1.5015 + *error = err; 1.5016 + return (NULL); 1.5017 + } 1.5018 + } 1.5019 + stcb = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_asoc), struct sctp_tcb); 1.5020 + if (stcb == NULL) { 1.5021 + /* out of memory? */ 1.5022 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOMEM); 1.5023 + *error = ENOMEM; 1.5024 + return (NULL); 1.5025 + } 1.5026 + SCTP_INCR_ASOC_COUNT(); 1.5027 + 1.5028 + bzero(stcb, sizeof(*stcb)); 1.5029 + asoc = &stcb->asoc; 1.5030 + 1.5031 + asoc->assoc_id = sctp_aloc_a_assoc_id(inp, stcb); 1.5032 + SCTP_TCB_LOCK_INIT(stcb); 1.5033 + SCTP_TCB_SEND_LOCK_INIT(stcb); 1.5034 + stcb->rport = rport; 1.5035 + /* setup back pointer's */ 1.5036 + stcb->sctp_ep = inp; 1.5037 + stcb->sctp_socket = inp->sctp_socket; 1.5038 + if ((err = sctp_init_asoc(inp, stcb, override_tag, vrf_id))) { 1.5039 + /* failed */ 1.5040 + SCTP_TCB_LOCK_DESTROY(stcb); 1.5041 + SCTP_TCB_SEND_LOCK_DESTROY(stcb); 1.5042 + LIST_REMOVE(stcb, sctp_tcbasocidhash); 1.5043 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb); 1.5044 + SCTP_DECR_ASOC_COUNT(); 1.5045 + *error = err; 1.5046 + return (NULL); 1.5047 + } 1.5048 + /* and the port */ 1.5049 + SCTP_INP_INFO_WLOCK(); 1.5050 + SCTP_INP_WLOCK(inp); 1.5051 + if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 1.5052 + /* inpcb freed while alloc going on */ 1.5053 + SCTP_TCB_LOCK_DESTROY(stcb); 1.5054 + SCTP_TCB_SEND_LOCK_DESTROY(stcb); 1.5055 + LIST_REMOVE(stcb, sctp_tcbasocidhash); 1.5056 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb); 1.5057 + SCTP_INP_WUNLOCK(inp); 1.5058 + SCTP_INP_INFO_WUNLOCK(); 1.5059 + SCTP_DECR_ASOC_COUNT(); 1.5060 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.5061 + *error = EINVAL; 1.5062 + return (NULL); 1.5063 + } 1.5064 + SCTP_TCB_LOCK(stcb); 1.5065 + 1.5066 + /* now that my_vtag is set, add it to the hash */ 1.5067 + head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))]; 1.5068 + /* put it in the bucket in the vtag hash of assoc's for the system */ 1.5069 + LIST_INSERT_HEAD(head, stcb, sctp_asocs); 1.5070 + SCTP_INP_INFO_WUNLOCK(); 1.5071 + 1.5072 + if ((err = sctp_add_remote_addr(stcb, firstaddr, NULL, SCTP_DO_SETSCOPE, SCTP_ALLOC_ASOC))) { 1.5073 + /* failure.. memory error? 
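* Adding the very first remote address failed, so unwind everything allocated so far (stream and mapping arrays, locks, the asoc-id hash entry and the tcb itself) and report ENOBUFS.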
*/ 1.5074 + if (asoc->strmout) { 1.5075 + SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1.5076 + asoc->strmout = NULL; 1.5077 + } 1.5078 + if (asoc->mapping_array) { 1.5079 + SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1.5080 + asoc->mapping_array = NULL; 1.5081 + } 1.5082 + if (asoc->nr_mapping_array) { 1.5083 + SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP); 1.5084 + asoc->nr_mapping_array = NULL; 1.5085 + } 1.5086 + SCTP_DECR_ASOC_COUNT(); 1.5087 + SCTP_TCB_UNLOCK(stcb); 1.5088 + SCTP_TCB_LOCK_DESTROY(stcb); 1.5089 + SCTP_TCB_SEND_LOCK_DESTROY(stcb); 1.5090 + LIST_REMOVE(stcb, sctp_tcbasocidhash); 1.5091 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb); 1.5092 + SCTP_INP_WUNLOCK(inp); 1.5093 + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS); 1.5094 + *error = ENOBUFS; 1.5095 + return (NULL); 1.5096 + } 1.5097 + /* Init all the timers */ 1.5098 + SCTP_OS_TIMER_INIT(&asoc->dack_timer.timer); 1.5099 + SCTP_OS_TIMER_INIT(&asoc->strreset_timer.timer); 1.5100 + SCTP_OS_TIMER_INIT(&asoc->asconf_timer.timer); 1.5101 + SCTP_OS_TIMER_INIT(&asoc->shut_guard_timer.timer); 1.5102 + SCTP_OS_TIMER_INIT(&asoc->autoclose_timer.timer); 1.5103 + SCTP_OS_TIMER_INIT(&asoc->delayed_event_timer.timer); 1.5104 + SCTP_OS_TIMER_INIT(&asoc->delete_prim_timer.timer); 1.5105 + 1.5106 + LIST_INSERT_HEAD(&inp->sctp_asoc_list, stcb, sctp_tcblist); 1.5107 + /* now file the port under the hash as well */ 1.5108 + if (inp->sctp_tcbhash != NULL) { 1.5109 + head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(stcb->rport, 1.5110 + inp->sctp_hashmark)]; 1.5111 + LIST_INSERT_HEAD(head, stcb, sctp_tcbhash); 1.5112 + } 1.5113 + SCTP_INP_WUNLOCK(inp); 1.5114 + SCTPDBG(SCTP_DEBUG_PCB1, "Association %p now allocated\n", (void *)stcb); 1.5115 + return (stcb); 1.5116 +} 1.5117 + 1.5118 + 1.5119 +void 1.5120 +sctp_remove_net(struct sctp_tcb *stcb, struct sctp_nets *net) 1.5121 +{ 1.5122 + struct sctp_association *asoc; 1.5123 + 1.5124 + asoc = &stcb->asoc; 1.5125 + asoc->numnets--; 1.5126 + TAILQ_REMOVE(&asoc->nets, net, sctp_next); 1.5127 + if (net == asoc->primary_destination) { 1.5128 + /* Reset primary */ 1.5129 + struct sctp_nets *lnet; 1.5130 + 1.5131 + lnet = TAILQ_FIRST(&asoc->nets); 1.5132 + /* Mobility adaptation 1.5133 + Ideally, if deleted destination is the primary, it becomes 1.5134 + a fast retransmission trigger by the subsequent SET PRIMARY. 
1.5135 + (by micchie) 1.5136 + */ 1.5137 + if (sctp_is_mobility_feature_on(stcb->sctp_ep, 1.5138 + SCTP_MOBILITY_BASE) || 1.5139 + sctp_is_mobility_feature_on(stcb->sctp_ep, 1.5140 + SCTP_MOBILITY_FASTHANDOFF)) { 1.5141 + SCTPDBG(SCTP_DEBUG_ASCONF1, "remove_net: primary dst is deleting\n"); 1.5142 + if (asoc->deleted_primary != NULL) { 1.5143 + SCTPDBG(SCTP_DEBUG_ASCONF1, "remove_net: deleted primary may be already stored\n"); 1.5144 + goto out; 1.5145 + } 1.5146 + asoc->deleted_primary = net; 1.5147 + atomic_add_int(&net->ref_count, 1); 1.5148 + memset(&net->lastsa, 0, sizeof(net->lastsa)); 1.5149 + memset(&net->lastsv, 0, sizeof(net->lastsv)); 1.5150 + sctp_mobility_feature_on(stcb->sctp_ep, 1.5151 + SCTP_MOBILITY_PRIM_DELETED); 1.5152 + sctp_timer_start(SCTP_TIMER_TYPE_PRIM_DELETED, 1.5153 + stcb->sctp_ep, stcb, NULL); 1.5154 + } 1.5155 +out: 1.5156 + /* Try to find a confirmed primary */ 1.5157 + asoc->primary_destination = sctp_find_alternate_net(stcb, lnet, 0); 1.5158 + } 1.5159 + if (net == asoc->last_data_chunk_from) { 1.5160 + /* Reset primary */ 1.5161 + asoc->last_data_chunk_from = TAILQ_FIRST(&asoc->nets); 1.5162 + } 1.5163 + if (net == asoc->last_control_chunk_from) { 1.5164 + /* Clear net */ 1.5165 + asoc->last_control_chunk_from = NULL; 1.5166 + } 1.5167 + if (net == stcb->asoc.alternate) { 1.5168 + sctp_free_remote_addr(stcb->asoc.alternate); 1.5169 + stcb->asoc.alternate = NULL; 1.5170 + } 1.5171 + sctp_free_remote_addr(net); 1.5172 +} 1.5173 + 1.5174 +/* 1.5175 + * remove a remote endpoint address from an association, it will fail if the 1.5176 + * address does not exist. 1.5177 + */ 1.5178 +int 1.5179 +sctp_del_remote_addr(struct sctp_tcb *stcb, struct sockaddr *remaddr) 1.5180 +{ 1.5181 + /* 1.5182 + * Here we need to remove a remote address. This is quite simple, we 1.5183 + * first find it in the list of address for the association 1.5184 + * (tasoc->asoc.nets) and then if it is there, we do a LIST_REMOVE 1.5185 + * on that item. Note we do not allow it to be removed if there are 1.5186 + * no other addresses. 1.5187 + */ 1.5188 + struct sctp_association *asoc; 1.5189 + struct sctp_nets *net, *nnet; 1.5190 + 1.5191 + asoc = &stcb->asoc; 1.5192 + 1.5193 + /* locate the address */ 1.5194 + TAILQ_FOREACH_SAFE(net, &asoc->nets, sctp_next, nnet) { 1.5195 + if (net->ro._l_addr.sa.sa_family != remaddr->sa_family) { 1.5196 + continue; 1.5197 + } 1.5198 + if (sctp_cmpaddr((struct sockaddr *)&net->ro._l_addr, 1.5199 + remaddr)) { 1.5200 + /* we found the guy */ 1.5201 + if (asoc->numnets < 2) { 1.5202 + /* Must have at LEAST two remote addresses */ 1.5203 + return (-1); 1.5204 + } else { 1.5205 + sctp_remove_net(stcb, net); 1.5206 + return (0); 1.5207 + } 1.5208 + } 1.5209 + } 1.5210 + /* not found. 
*/ 1.5211 + return (-2); 1.5212 +} 1.5213 + 1.5214 +void 1.5215 +sctp_delete_from_timewait(uint32_t tag, uint16_t lport, uint16_t rport) 1.5216 +{ 1.5217 + struct sctpvtaghead *chain; 1.5218 + struct sctp_tagblock *twait_block; 1.5219 + int found = 0; 1.5220 + int i; 1.5221 + 1.5222 + chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)]; 1.5223 + LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) { 1.5224 + for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) { 1.5225 + if ((twait_block->vtag_block[i].v_tag == tag) && 1.5226 + (twait_block->vtag_block[i].lport == lport) && 1.5227 + (twait_block->vtag_block[i].rport == rport)) { 1.5228 + twait_block->vtag_block[i].tv_sec_at_expire = 0; 1.5229 + twait_block->vtag_block[i].v_tag = 0; 1.5230 + twait_block->vtag_block[i].lport = 0; 1.5231 + twait_block->vtag_block[i].rport = 0; 1.5232 + found = 1; 1.5233 + break; 1.5234 + } 1.5235 + } 1.5236 + if (found) 1.5237 + break; 1.5238 + } 1.5239 +} 1.5240 + 1.5241 +int 1.5242 +sctp_is_in_timewait(uint32_t tag, uint16_t lport, uint16_t rport) 1.5243 +{ 1.5244 + struct sctpvtaghead *chain; 1.5245 + struct sctp_tagblock *twait_block; 1.5246 + int found = 0; 1.5247 + int i; 1.5248 + 1.5249 + SCTP_INP_INFO_WLOCK(); 1.5250 + chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)]; 1.5251 + LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) { 1.5252 + for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) { 1.5253 + if ((twait_block->vtag_block[i].v_tag == tag) && 1.5254 + (twait_block->vtag_block[i].lport == lport) && 1.5255 + (twait_block->vtag_block[i].rport == rport)) { 1.5256 + found = 1; 1.5257 + break; 1.5258 + } 1.5259 + } 1.5260 + if (found) 1.5261 + break; 1.5262 + } 1.5263 + SCTP_INP_INFO_WUNLOCK(); 1.5264 + return (found); 1.5265 +} 1.5266 + 1.5267 + 1.5268 +void 1.5269 +sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time, uint16_t lport, uint16_t rport) 1.5270 +{ 1.5271 + struct sctpvtaghead *chain; 1.5272 + struct sctp_tagblock *twait_block; 1.5273 + struct timeval now; 1.5274 + int set, i; 1.5275 + 1.5276 + if (time == 0) { 1.5277 + /* Its disabled */ 1.5278 + return; 1.5279 + } 1.5280 + (void)SCTP_GETTIME_TIMEVAL(&now); 1.5281 + chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)]; 1.5282 + set = 0; 1.5283 + LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) { 1.5284 + /* Block(s) present, lets find space, and expire on the fly */ 1.5285 + for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) { 1.5286 + if ((twait_block->vtag_block[i].v_tag == 0) && 1.5287 + !set) { 1.5288 + twait_block->vtag_block[i].tv_sec_at_expire = 1.5289 + now.tv_sec + time; 1.5290 + twait_block->vtag_block[i].v_tag = tag; 1.5291 + twait_block->vtag_block[i].lport = lport; 1.5292 + twait_block->vtag_block[i].rport = rport; 1.5293 + set = 1; 1.5294 + } else if ((twait_block->vtag_block[i].v_tag) && 1.5295 + ((long)twait_block->vtag_block[i].tv_sec_at_expire < now.tv_sec)) { 1.5296 + /* Audit expires this guy */ 1.5297 + twait_block->vtag_block[i].tv_sec_at_expire = 0; 1.5298 + twait_block->vtag_block[i].v_tag = 0; 1.5299 + twait_block->vtag_block[i].lport = 0; 1.5300 + twait_block->vtag_block[i].rport = 0; 1.5301 + if (set == 0) { 1.5302 + /* Reuse it for my new tag */ 1.5303 + twait_block->vtag_block[i].tv_sec_at_expire = now.tv_sec + time; 1.5304 + twait_block->vtag_block[i].v_tag = tag; 1.5305 + twait_block->vtag_block[i].lport = lport; 1.5306 + twait_block->vtag_block[i].rport = rport; 1.5307 + set = 1; 1.5308 + } 1.5309 + } 1.5310 + } 1.5311 + if (set) { 1.5312 + /* 
1.5313 + * We only do up to the block where we can 1.5314 + * place our tag for audits 1.5315 + */ 1.5316 + break; 1.5317 + } 1.5318 + } 1.5319 + /* Need to add a new block to chain */ 1.5320 + if (!set) { 1.5321 + SCTP_MALLOC(twait_block, struct sctp_tagblock *, 1.5322 + sizeof(struct sctp_tagblock), SCTP_M_TIMW); 1.5323 + if (twait_block == NULL) { 1.5324 +#ifdef INVARIANTS 1.5325 + panic("Can not alloc tagblock"); 1.5326 +#endif 1.5327 + return; 1.5328 + } 1.5329 + memset(twait_block, 0, sizeof(struct sctp_tagblock)); 1.5330 + LIST_INSERT_HEAD(chain, twait_block, sctp_nxt_tagblock); 1.5331 + twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec + time; 1.5332 + twait_block->vtag_block[0].v_tag = tag; 1.5333 + twait_block->vtag_block[0].lport = lport; 1.5334 + twait_block->vtag_block[0].rport = rport; 1.5335 + } 1.5336 +} 1.5337 + 1.5338 + 1.5339 +#ifdef __Panda__ 1.5340 +void panda_wakeup_socket(struct socket *so); 1.5341 +#endif 1.5342 + 1.5343 +/*- 1.5344 + * Free the association after un-hashing the remote port. This 1.5345 + * function ALWAYS returns holding NO LOCK on the stcb. It DOES 1.5346 + * expect that the input to this function IS a locked TCB. 1.5347 + * It will return 0, if it did NOT destroy the association (instead 1.5348 + * it unlocks it. It will return NON-zero if it either destroyed the 1.5349 + * association OR the association is already destroyed. 1.5350 + */ 1.5351 +int 1.5352 +sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfree, int from_location) 1.5353 +{ 1.5354 + int i; 1.5355 + struct sctp_association *asoc; 1.5356 + struct sctp_nets *net, *nnet; 1.5357 + struct sctp_laddr *laddr, *naddr; 1.5358 + struct sctp_tmit_chunk *chk, *nchk; 1.5359 + struct sctp_asconf_addr *aparam, *naparam; 1.5360 + struct sctp_asconf_ack *aack, *naack; 1.5361 + struct sctp_stream_reset_list *strrst, *nstrrst; 1.5362 + struct sctp_queued_to_read *sq, *nsq; 1.5363 + struct sctp_stream_queue_pending *sp, *nsp; 1.5364 + sctp_sharedkey_t *shared_key, *nshared_key; 1.5365 + struct socket *so; 1.5366 + 1.5367 + /* first, lets purge the entry from the hash table. */ 1.5368 +#if defined(__APPLE__) 1.5369 + sctp_lock_assert(SCTP_INP_SO(inp)); 1.5370 +#endif 1.5371 + 1.5372 +#ifdef SCTP_LOG_CLOSING 1.5373 + sctp_log_closing(inp, stcb, 6); 1.5374 +#endif 1.5375 + if (stcb->asoc.state == 0) { 1.5376 +#ifdef SCTP_LOG_CLOSING 1.5377 + sctp_log_closing(inp, NULL, 7); 1.5378 +#endif 1.5379 + /* there is no asoc, really TSNH :-0 */ 1.5380 + return (1); 1.5381 + } 1.5382 + if (stcb->asoc.alternate) { 1.5383 + sctp_free_remote_addr(stcb->asoc.alternate); 1.5384 + stcb->asoc.alternate = NULL; 1.5385 + } 1.5386 +#if !defined(__APPLE__) /* TEMP: moved to below */ 1.5387 + /* TEMP CODE */ 1.5388 + if (stcb->freed_from_where == 0) { 1.5389 + /* Only record the first place free happened from */ 1.5390 + stcb->freed_from_where = from_location; 1.5391 + } 1.5392 + /* TEMP CODE */ 1.5393 +#endif 1.5394 + 1.5395 + asoc = &stcb->asoc; 1.5396 + if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1.5397 + (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) 1.5398 + /* nothing around */ 1.5399 + so = NULL; 1.5400 + else 1.5401 + so = inp->sctp_socket; 1.5402 + 1.5403 + /* 1.5404 + * We used timer based freeing if a reader or writer is in the way. 1.5405 + * So we first check if we are actually being called from a timer, 1.5406 + * if so we abort early if a reader or writer is still in the way. 
1.5407 + */ 1.5408 + if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) && 1.5409 + (from_inpcbfree == SCTP_NORMAL_PROC)) { 1.5410 + /* 1.5411 + * is it the timer driving us? if so are the reader/writers 1.5412 + * gone? 1.5413 + */ 1.5414 + if (stcb->asoc.refcnt) { 1.5415 + /* nope, reader or writer in the way */ 1.5416 + sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL); 1.5417 + /* no asoc destroyed */ 1.5418 + SCTP_TCB_UNLOCK(stcb); 1.5419 +#ifdef SCTP_LOG_CLOSING 1.5420 + sctp_log_closing(inp, stcb, 8); 1.5421 +#endif 1.5422 + return (0); 1.5423 + } 1.5424 + } 1.5425 + /* now clean up any other timers */ 1.5426 + (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer); 1.5427 + asoc->dack_timer.self = NULL; 1.5428 + (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); 1.5429 + /*- 1.5430 + * For stream reset we don't blast this unless 1.5431 + * it is a str-reset timer, it might be the 1.5432 + * free-asoc timer which we DON'T want to 1.5433 + * disturb. 1.5434 + */ 1.5435 + if (asoc->strreset_timer.type == SCTP_TIMER_TYPE_STRRESET) 1.5436 + asoc->strreset_timer.self = NULL; 1.5437 + (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer); 1.5438 + asoc->asconf_timer.self = NULL; 1.5439 + (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer); 1.5440 + asoc->autoclose_timer.self = NULL; 1.5441 + (void)SCTP_OS_TIMER_STOP(&asoc->shut_guard_timer.timer); 1.5442 + asoc->shut_guard_timer.self = NULL; 1.5443 + (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer); 1.5444 + asoc->delayed_event_timer.self = NULL; 1.5445 + /* Mobility adaptation */ 1.5446 + (void)SCTP_OS_TIMER_STOP(&asoc->delete_prim_timer.timer); 1.5447 + asoc->delete_prim_timer.self = NULL; 1.5448 + TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 1.5449 + (void)SCTP_OS_TIMER_STOP(&net->rxt_timer.timer); 1.5450 + net->rxt_timer.self = NULL; 1.5451 + (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer); 1.5452 + net->pmtu_timer.self = NULL; 1.5453 + (void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer); 1.5454 + net->hb_timer.self = NULL; 1.5455 + } 1.5456 + /* Now the read queue needs to be cleaned up (only once) */ 1.5457 + if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0) { 1.5458 + stcb->asoc.state |= SCTP_STATE_ABOUT_TO_BE_FREED; 1.5459 + SCTP_INP_READ_LOCK(inp); 1.5460 + TAILQ_FOREACH(sq, &inp->read_queue, next) { 1.5461 + if (sq->stcb == stcb) { 1.5462 + sq->do_not_ref_stcb = 1; 1.5463 + sq->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 1.5464 + /* If there is no end, there never 1.5465 + * will be now. 1.5466 + */ 1.5467 + if (sq->end_added == 0) { 1.5468 + /* Held for PD-API clear that. */ 1.5469 + sq->pdapi_aborted = 1; 1.5470 + sq->held_length = 0; 1.5471 + if (sctp_stcb_is_feature_on(inp, stcb, SCTP_PCB_FLAGS_PDAPIEVNT) && (so != NULL)) { 1.5472 + /* 1.5473 + * Need to add a PD-API aborted indication. 1.5474 + * Setting the control_pdapi assures that it will 1.5475 + * be added right after this msg. 
1.5476 + */ 1.5477 + uint32_t strseq; 1.5478 + stcb->asoc.control_pdapi = sq; 1.5479 + strseq = (sq->sinfo_stream << 16) | sq->sinfo_ssn; 1.5480 + sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION, 1.5481 + stcb, 1.5482 + SCTP_PARTIAL_DELIVERY_ABORTED, 1.5483 + (void *)&strseq, 1.5484 + SCTP_SO_LOCKED); 1.5485 + stcb->asoc.control_pdapi = NULL; 1.5486 + } 1.5487 + } 1.5488 + /* Add an end to wake them */ 1.5489 + sq->end_added = 1; 1.5490 + } 1.5491 + } 1.5492 + SCTP_INP_READ_UNLOCK(inp); 1.5493 + if (stcb->block_entry) { 1.5494 + SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_PCB, ECONNRESET); 1.5495 + stcb->block_entry->error = ECONNRESET; 1.5496 + stcb->block_entry = NULL; 1.5497 + } 1.5498 + } 1.5499 + if ((stcb->asoc.refcnt) || (stcb->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE)) { 1.5500 + /* Someone holds a reference OR the socket is unaccepted yet. 1.5501 + */ 1.5502 + if ((stcb->asoc.refcnt) || 1.5503 + (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1.5504 + (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 1.5505 + stcb->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE; 1.5506 + sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL); 1.5507 + } 1.5508 + SCTP_TCB_UNLOCK(stcb); 1.5509 + if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1.5510 + (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) 1.5511 + /* nothing around */ 1.5512 + so = NULL; 1.5513 + if (so) { 1.5514 + /* Wake any reader/writers */ 1.5515 + sctp_sorwakeup(inp, so); 1.5516 + sctp_sowwakeup(inp, so); 1.5517 + } 1.5518 + 1.5519 +#ifdef SCTP_LOG_CLOSING 1.5520 + sctp_log_closing(inp, stcb, 9); 1.5521 +#endif 1.5522 + /* no asoc destroyed */ 1.5523 + return (0); 1.5524 + } 1.5525 +#ifdef SCTP_LOG_CLOSING 1.5526 + sctp_log_closing(inp, stcb, 10); 1.5527 +#endif 1.5528 + /* When I reach here, no others want 1.5529 + * to kill the assoc yet.. and I own 1.5530 + * the lock. Now its possible an abort 1.5531 + * comes in when I do the lock exchange 1.5532 + * below to grab all the locks to do 1.5533 + * the final take out. to prevent this 1.5534 + * we increment the count, which will 1.5535 + * start a timer and blow out above thus 1.5536 + * assuring us that we hold exclusive 1.5537 + * killing of the asoc. Note that 1.5538 + * after getting back the TCB lock 1.5539 + * we will go ahead and increment the 1.5540 + * counter back up and stop any timer 1.5541 + * a passing stranger may have started :-S 1.5542 + */ 1.5543 + if (from_inpcbfree == SCTP_NORMAL_PROC) { 1.5544 + atomic_add_int(&stcb->asoc.refcnt, 1); 1.5545 + 1.5546 + SCTP_TCB_UNLOCK(stcb); 1.5547 + SCTP_INP_INFO_WLOCK(); 1.5548 + SCTP_INP_WLOCK(inp); 1.5549 + SCTP_TCB_LOCK(stcb); 1.5550 + } 1.5551 + /* Double check the GONE flag */ 1.5552 + if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1.5553 + (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) 1.5554 + /* nothing around */ 1.5555 + so = NULL; 1.5556 + 1.5557 + if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1.5558 + (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 1.5559 + /* 1.5560 + * For TCP type we need special handling when we are 1.5561 + * connected. We also include the peel'ed off ones to. 
1.5562 + */ 1.5563 + if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) { 1.5564 + inp->sctp_flags &= ~SCTP_PCB_FLAGS_CONNECTED; 1.5565 + inp->sctp_flags |= SCTP_PCB_FLAGS_WAS_CONNECTED; 1.5566 + if (so) { 1.5567 + SOCK_LOCK(so); 1.5568 + if (so->so_rcv.sb_cc == 0) { 1.5569 + so->so_state &= ~(SS_ISCONNECTING | 1.5570 + SS_ISDISCONNECTING | 1.5571 + SS_ISCONFIRMING | 1.5572 + SS_ISCONNECTED); 1.5573 + } 1.5574 +#if defined(__APPLE__) 1.5575 + socantrcvmore(so); 1.5576 +#else 1.5577 + socantrcvmore_locked(so); 1.5578 +#endif 1.5579 + sctp_sowwakeup(inp, so); 1.5580 + sctp_sorwakeup(inp, so); 1.5581 + SCTP_SOWAKEUP(so); 1.5582 + } 1.5583 + } 1.5584 + } 1.5585 + 1.5586 + /* Make it invalid too, that way if its 1.5587 + * about to run it will abort and return. 1.5588 + */ 1.5589 + /* re-increment the lock */ 1.5590 + if (from_inpcbfree == SCTP_NORMAL_PROC) { 1.5591 + atomic_add_int(&stcb->asoc.refcnt, -1); 1.5592 + } 1.5593 + if (stcb->asoc.refcnt) { 1.5594 + stcb->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE; 1.5595 + sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL); 1.5596 + if (from_inpcbfree == SCTP_NORMAL_PROC) { 1.5597 + SCTP_INP_INFO_WUNLOCK(); 1.5598 + SCTP_INP_WUNLOCK(inp); 1.5599 + } 1.5600 + SCTP_TCB_UNLOCK(stcb); 1.5601 + return (0); 1.5602 + } 1.5603 + asoc->state = 0; 1.5604 + if (inp->sctp_tcbhash) { 1.5605 + LIST_REMOVE(stcb, sctp_tcbhash); 1.5606 + } 1.5607 + if (stcb->asoc.in_asocid_hash) { 1.5608 + LIST_REMOVE(stcb, sctp_tcbasocidhash); 1.5609 + } 1.5610 + /* Now lets remove it from the list of ALL associations in the EP */ 1.5611 + LIST_REMOVE(stcb, sctp_tcblist); 1.5612 + if (from_inpcbfree == SCTP_NORMAL_PROC) { 1.5613 + SCTP_INP_INCR_REF(inp); 1.5614 + SCTP_INP_WUNLOCK(inp); 1.5615 + } 1.5616 + /* pull from vtag hash */ 1.5617 + LIST_REMOVE(stcb, sctp_asocs); 1.5618 + sctp_add_vtag_to_timewait(asoc->my_vtag, SCTP_BASE_SYSCTL(sctp_vtag_time_wait), 1.5619 + inp->sctp_lport, stcb->rport); 1.5620 + 1.5621 + /* Now restop the timers to be sure 1.5622 + * this is paranoia at is finest! 1.5623 + */ 1.5624 + (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); 1.5625 + (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer); 1.5626 + (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer); 1.5627 + (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer); 1.5628 + (void)SCTP_OS_TIMER_STOP(&asoc->shut_guard_timer.timer); 1.5629 + (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer); 1.5630 + (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer); 1.5631 + TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 1.5632 + (void)SCTP_OS_TIMER_STOP(&net->rxt_timer.timer); 1.5633 + (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer); 1.5634 + (void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer); 1.5635 + } 1.5636 + 1.5637 + asoc->strreset_timer.type = SCTP_TIMER_TYPE_NONE; 1.5638 + /* 1.5639 + * The chunk lists and such SHOULD be empty but we check them just 1.5640 + * in case. 
1.5641 + */ 1.5642 + /* anything on the wheel needs to be removed */ 1.5643 + for (i = 0; i < asoc->streamoutcnt; i++) { 1.5644 + struct sctp_stream_out *outs; 1.5645 + 1.5646 + outs = &asoc->strmout[i]; 1.5647 + /* now clean up any chunks here */ 1.5648 + TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 1.5649 + TAILQ_REMOVE(&outs->outqueue, sp, next); 1.5650 + sctp_free_spbufspace(stcb, asoc, sp); 1.5651 + if (sp->data) { 1.5652 + if (so) { 1.5653 + /* Still an open socket - report */ 1.5654 + sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 1.5655 + 0, (void *)sp, SCTP_SO_LOCKED); 1.5656 + } 1.5657 + if (sp->data) { 1.5658 + sctp_m_freem(sp->data); 1.5659 + sp->data = NULL; 1.5660 + sp->tail_mbuf = NULL; 1.5661 + sp->length = 0; 1.5662 + } 1.5663 + } 1.5664 + if (sp->net) { 1.5665 + sctp_free_remote_addr(sp->net); 1.5666 + sp->net = NULL; 1.5667 + } 1.5668 + sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED); 1.5669 + } 1.5670 + } 1.5671 + /*sa_ignore FREED_MEMORY*/ 1.5672 + TAILQ_FOREACH_SAFE(strrst, &asoc->resetHead, next_resp, nstrrst) { 1.5673 + TAILQ_REMOVE(&asoc->resetHead, strrst, next_resp); 1.5674 + SCTP_FREE(strrst, SCTP_M_STRESET); 1.5675 + } 1.5676 + TAILQ_FOREACH_SAFE(sq, &asoc->pending_reply_queue, next, nsq) { 1.5677 + TAILQ_REMOVE(&asoc->pending_reply_queue, sq, next); 1.5678 + if (sq->data) { 1.5679 + sctp_m_freem(sq->data); 1.5680 + sq->data = NULL; 1.5681 + } 1.5682 + sctp_free_remote_addr(sq->whoFrom); 1.5683 + sq->whoFrom = NULL; 1.5684 + sq->stcb = NULL; 1.5685 + /* Free the ctl entry */ 1.5686 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), sq); 1.5687 + SCTP_DECR_READQ_COUNT(); 1.5688 + /*sa_ignore FREED_MEMORY*/ 1.5689 + } 1.5690 + TAILQ_FOREACH_SAFE(chk, &asoc->free_chunks, sctp_next, nchk) { 1.5691 + TAILQ_REMOVE(&asoc->free_chunks, chk, sctp_next); 1.5692 + if (chk->data) { 1.5693 + sctp_m_freem(chk->data); 1.5694 + chk->data = NULL; 1.5695 + } 1.5696 + if (chk->holds_key_ref) 1.5697 + sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); 1.5698 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); 1.5699 + SCTP_DECR_CHK_COUNT(); 1.5700 + atomic_subtract_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); 1.5701 + asoc->free_chunk_cnt--; 1.5702 + /*sa_ignore FREED_MEMORY*/ 1.5703 + } 1.5704 + /* pending send queue SHOULD be empty */ 1.5705 + TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 1.5706 + if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) { 1.5707 + asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--; 1.5708 +#ifdef INVARIANTS 1.5709 + } else { 1.5710 + panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number); 1.5711 +#endif 1.5712 + } 1.5713 + TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 1.5714 + if (chk->data) { 1.5715 + if (so) { 1.5716 + /* Still a socket? 
*/ 1.5717 + sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 1.5718 + 0, chk, SCTP_SO_LOCKED); 1.5719 + } 1.5720 + if (chk->data) { 1.5721 + sctp_m_freem(chk->data); 1.5722 + chk->data = NULL; 1.5723 + } 1.5724 + } 1.5725 + if (chk->holds_key_ref) 1.5726 + sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); 1.5727 + if (chk->whoTo) { 1.5728 + sctp_free_remote_addr(chk->whoTo); 1.5729 + chk->whoTo = NULL; 1.5730 + } 1.5731 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); 1.5732 + SCTP_DECR_CHK_COUNT(); 1.5733 + /*sa_ignore FREED_MEMORY*/ 1.5734 + } 1.5735 + /* sent queue SHOULD be empty */ 1.5736 + TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 1.5737 + if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 1.5738 + if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) { 1.5739 + asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--; 1.5740 +#ifdef INVARIANTS 1.5741 + } else { 1.5742 + panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number); 1.5743 +#endif 1.5744 + } 1.5745 + } 1.5746 + TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 1.5747 + if (chk->data) { 1.5748 + if (so) { 1.5749 + /* Still a socket? */ 1.5750 + sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 1.5751 + 0, chk, SCTP_SO_LOCKED); 1.5752 + } 1.5753 + if (chk->data) { 1.5754 + sctp_m_freem(chk->data); 1.5755 + chk->data = NULL; 1.5756 + } 1.5757 + } 1.5758 + if (chk->holds_key_ref) 1.5759 + sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); 1.5760 + sctp_free_remote_addr(chk->whoTo); 1.5761 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); 1.5762 + SCTP_DECR_CHK_COUNT(); 1.5763 + /*sa_ignore FREED_MEMORY*/ 1.5764 + } 1.5765 +#ifdef INVARIANTS 1.5766 + for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1.5767 + if (stcb->asoc.strmout[i].chunks_on_queues > 0) { 1.5768 + panic("%u chunks left for stream %u.", stcb->asoc.strmout[i].chunks_on_queues, i); 1.5769 + } 1.5770 + } 1.5771 +#endif 1.5772 + /* control queue MAY not be empty */ 1.5773 + TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) { 1.5774 + TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next); 1.5775 + if (chk->data) { 1.5776 + sctp_m_freem(chk->data); 1.5777 + chk->data = NULL; 1.5778 + } 1.5779 + if (chk->holds_key_ref) 1.5780 + sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); 1.5781 + sctp_free_remote_addr(chk->whoTo); 1.5782 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); 1.5783 + SCTP_DECR_CHK_COUNT(); 1.5784 + /*sa_ignore FREED_MEMORY*/ 1.5785 + } 1.5786 + /* ASCONF queue MAY not be empty */ 1.5787 + TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) { 1.5788 + TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next); 1.5789 + if (chk->data) { 1.5790 + sctp_m_freem(chk->data); 1.5791 + chk->data = NULL; 1.5792 + } 1.5793 + if (chk->holds_key_ref) 1.5794 + sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); 1.5795 + sctp_free_remote_addr(chk->whoTo); 1.5796 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); 1.5797 + SCTP_DECR_CHK_COUNT(); 1.5798 + /*sa_ignore FREED_MEMORY*/ 1.5799 + } 1.5800 + TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) { 1.5801 + TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 1.5802 + if (chk->data) { 1.5803 + sctp_m_freem(chk->data); 1.5804 + chk->data = NULL; 1.5805 + } 1.5806 + if (chk->holds_key_ref) 1.5807 + sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED); 1.5808 + sctp_free_remote_addr(chk->whoTo); 1.5809 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk); 1.5810 
+ SCTP_DECR_CHK_COUNT(); 1.5811 + /*sa_ignore FREED_MEMORY*/ 1.5812 + } 1.5813 + 1.5814 + if (asoc->mapping_array) { 1.5815 + SCTP_FREE(asoc->mapping_array, SCTP_M_MAP); 1.5816 + asoc->mapping_array = NULL; 1.5817 + } 1.5818 + if (asoc->nr_mapping_array) { 1.5819 + SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP); 1.5820 + asoc->nr_mapping_array = NULL; 1.5821 + } 1.5822 + /* the stream outs */ 1.5823 + if (asoc->strmout) { 1.5824 + SCTP_FREE(asoc->strmout, SCTP_M_STRMO); 1.5825 + asoc->strmout = NULL; 1.5826 + } 1.5827 + asoc->strm_realoutsize = asoc->streamoutcnt = 0; 1.5828 + if (asoc->strmin) { 1.5829 + struct sctp_queued_to_read *ctl, *nctl; 1.5830 + 1.5831 + for (i = 0; i < asoc->streamincnt; i++) { 1.5832 + TAILQ_FOREACH_SAFE(ctl, &asoc->strmin[i].inqueue, next, nctl) { 1.5833 + TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next); 1.5834 + sctp_free_remote_addr(ctl->whoFrom); 1.5835 + if (ctl->data) { 1.5836 + sctp_m_freem(ctl->data); 1.5837 + ctl->data = NULL; 1.5838 + } 1.5839 + /* 1.5840 + * We don't free the address here 1.5841 + * since all the net's were freed 1.5842 + * above. 1.5843 + */ 1.5844 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), ctl); 1.5845 + SCTP_DECR_READQ_COUNT(); 1.5846 + } 1.5847 + } 1.5848 + SCTP_FREE(asoc->strmin, SCTP_M_STRMI); 1.5849 + asoc->strmin = NULL; 1.5850 + } 1.5851 + asoc->streamincnt = 0; 1.5852 + TAILQ_FOREACH_SAFE(net, &asoc->nets, sctp_next, nnet) { 1.5853 +#ifdef INVARIANTS 1.5854 + if (SCTP_BASE_INFO(ipi_count_raddr) == 0) { 1.5855 + panic("no net's left alloc'ed, or list points to itself"); 1.5856 + } 1.5857 +#endif 1.5858 + TAILQ_REMOVE(&asoc->nets, net, sctp_next); 1.5859 + sctp_free_remote_addr(net); 1.5860 + } 1.5861 + LIST_FOREACH_SAFE(laddr, &asoc->sctp_restricted_addrs, sctp_nxt_addr, naddr) { 1.5862 + /*sa_ignore FREED_MEMORY*/ 1.5863 + sctp_remove_laddr(laddr); 1.5864 + } 1.5865 + 1.5866 + /* pending asconf (address) parameters */ 1.5867 + TAILQ_FOREACH_SAFE(aparam, &asoc->asconf_queue, next, naparam) { 1.5868 + /*sa_ignore FREED_MEMORY*/ 1.5869 + TAILQ_REMOVE(&asoc->asconf_queue, aparam, next); 1.5870 + SCTP_FREE(aparam,SCTP_M_ASC_ADDR); 1.5871 + } 1.5872 + TAILQ_FOREACH_SAFE(aack, &asoc->asconf_ack_sent, next, naack) { 1.5873 + /*sa_ignore FREED_MEMORY*/ 1.5874 + TAILQ_REMOVE(&asoc->asconf_ack_sent, aack, next); 1.5875 + if (aack->data != NULL) { 1.5876 + sctp_m_freem(aack->data); 1.5877 + } 1.5878 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asconf_ack), aack); 1.5879 + } 1.5880 + /* clean up auth stuff */ 1.5881 + if (asoc->local_hmacs) 1.5882 + sctp_free_hmaclist(asoc->local_hmacs); 1.5883 + if (asoc->peer_hmacs) 1.5884 + sctp_free_hmaclist(asoc->peer_hmacs); 1.5885 + 1.5886 + if (asoc->local_auth_chunks) 1.5887 + sctp_free_chunklist(asoc->local_auth_chunks); 1.5888 + if (asoc->peer_auth_chunks) 1.5889 + sctp_free_chunklist(asoc->peer_auth_chunks); 1.5890 + 1.5891 + sctp_free_authinfo(&asoc->authinfo); 1.5892 + 1.5893 + LIST_FOREACH_SAFE(shared_key, &asoc->shared_keys, next, nshared_key) { 1.5894 + LIST_REMOVE(shared_key, next); 1.5895 + sctp_free_sharedkey(shared_key); 1.5896 + /*sa_ignore FREED_MEMORY*/ 1.5897 + } 1.5898 + 1.5899 + /* Insert new items here :> */ 1.5900 + 1.5901 + /* Get rid of LOCK */ 1.5902 + SCTP_TCB_UNLOCK(stcb); 1.5903 + SCTP_TCB_LOCK_DESTROY(stcb); 1.5904 + SCTP_TCB_SEND_LOCK_DESTROY(stcb); 1.5905 + if (from_inpcbfree == SCTP_NORMAL_PROC) { 1.5906 + SCTP_INP_INFO_WUNLOCK(); 1.5907 + SCTP_INP_RLOCK(inp); 1.5908 + } 1.5909 +#if defined(__APPLE__) /* TEMP CODE */ 1.5910 + stcb->freed_from_where = 
from_location; 1.5911 +#endif 1.5912 +#ifdef SCTP_TRACK_FREED_ASOCS 1.5913 + if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 1.5914 + /* now clean up the tasoc itself */ 1.5915 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb); 1.5916 + SCTP_DECR_ASOC_COUNT(); 1.5917 + } else { 1.5918 + LIST_INSERT_HEAD(&inp->sctp_asoc_free_list, stcb, sctp_tcblist); 1.5919 + } 1.5920 +#else 1.5921 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb); 1.5922 + SCTP_DECR_ASOC_COUNT(); 1.5923 +#endif 1.5924 + if (from_inpcbfree == SCTP_NORMAL_PROC) { 1.5925 + if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 1.5926 + /* If its NOT the inp_free calling us AND 1.5927 + * sctp_close as been called, we 1.5928 + * call back... 1.5929 + */ 1.5930 + SCTP_INP_RUNLOCK(inp); 1.5931 + /* This will start the kill timer (if we are 1.5932 + * the last one) since we hold an increment yet. But 1.5933 + * this is the only safe way to do this 1.5934 + * since otherwise if the socket closes 1.5935 + * at the same time we are here we might 1.5936 + * collide in the cleanup. 1.5937 + */ 1.5938 + sctp_inpcb_free(inp, 1.5939 + SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE, 1.5940 + SCTP_CALLED_DIRECTLY_NOCMPSET); 1.5941 + SCTP_INP_DECR_REF(inp); 1.5942 + goto out_of; 1.5943 + } else { 1.5944 + /* The socket is still open. */ 1.5945 + SCTP_INP_DECR_REF(inp); 1.5946 + } 1.5947 + } 1.5948 + if (from_inpcbfree == SCTP_NORMAL_PROC) { 1.5949 + SCTP_INP_RUNLOCK(inp); 1.5950 + } 1.5951 + out_of: 1.5952 + /* destroyed the asoc */ 1.5953 +#ifdef SCTP_LOG_CLOSING 1.5954 + sctp_log_closing(inp, NULL, 11); 1.5955 +#endif 1.5956 + return (1); 1.5957 +} 1.5958 + 1.5959 + 1.5960 + 1.5961 +/* 1.5962 + * determine if a destination is "reachable" based upon the addresses bound 1.5963 + * to the current endpoint (e.g. only v4 or v6 currently bound) 1.5964 + */ 1.5965 +/* 1.5966 + * FIX: if we allow assoc-level bindx(), then this needs to be fixed to use 1.5967 + * assoc level v4/v6 flags, as the assoc *may* not have the same address 1.5968 + * types bound as its endpoint 1.5969 + */ 1.5970 +int 1.5971 +sctp_destination_is_reachable(struct sctp_tcb *stcb, struct sockaddr *destaddr) 1.5972 +{ 1.5973 + struct sctp_inpcb *inp; 1.5974 + int answer; 1.5975 + 1.5976 + /* 1.5977 + * No locks here, the TCB, in all cases is already locked and an 1.5978 + * assoc is up. There is either a INP lock by the caller applied (in 1.5979 + * asconf case when deleting an address) or NOT in the HB case, 1.5980 + * however if HB then the INP increment is up and the INP will not 1.5981 + * be removed (on top of the fact that we have a TCB lock). So we 1.5982 + * only want to read the sctp_flags, which is either bound-all or 1.5983 + * not.. no protection needed since once an assoc is up you can't be 1.5984 + * changing your binding. 1.5985 + */ 1.5986 + inp = stcb->sctp_ep; 1.5987 + if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1.5988 + /* if bound all, destination is not restricted */ 1.5989 + /* 1.5990 + * RRS: Question during lock work: Is this correct? If you 1.5991 + * are bound-all you still might need to obey the V4--V6 1.5992 + * flags??? IMO this bound-all stuff needs to be removed! 
1.5993 + */ 1.5994 + return (1); 1.5995 + } 1.5996 + /* NOTE: all "scope" checks are done when local addresses are added */ 1.5997 + switch (destaddr->sa_family) { 1.5998 +#ifdef INET6 1.5999 + case AF_INET6: 1.6000 +#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)) 1.6001 + answer = inp->inp_vflag & INP_IPV6; 1.6002 +#else 1.6003 + answer = inp->ip_inp.inp.inp_vflag & INP_IPV6; 1.6004 +#endif 1.6005 + break; 1.6006 +#endif 1.6007 +#ifdef INET 1.6008 + case AF_INET: 1.6009 +#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)) 1.6010 + answer = inp->inp_vflag & INP_IPV4; 1.6011 +#else 1.6012 + answer = inp->ip_inp.inp.inp_vflag & INP_IPV4; 1.6013 +#endif 1.6014 + break; 1.6015 +#endif 1.6016 +#if defined(__Userspace__) 1.6017 + case AF_CONN: 1.6018 + answer = inp->ip_inp.inp.inp_vflag & INP_CONN; 1.6019 + break; 1.6020 +#endif 1.6021 + default: 1.6022 + /* invalid family, so it's unreachable */ 1.6023 + answer = 0; 1.6024 + break; 1.6025 + } 1.6026 + return (answer); 1.6027 +} 1.6028 + 1.6029 +/* 1.6030 + * update the inp_vflags on an endpoint 1.6031 + */ 1.6032 +static void 1.6033 +sctp_update_ep_vflag(struct sctp_inpcb *inp) 1.6034 +{ 1.6035 + struct sctp_laddr *laddr; 1.6036 + 1.6037 + /* first clear the flag */ 1.6038 +#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)) 1.6039 + inp->inp_vflag = 0; 1.6040 +#else 1.6041 + inp->ip_inp.inp.inp_vflag = 0; 1.6042 +#endif 1.6043 + /* set the flag based on addresses on the ep list */ 1.6044 + LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1.6045 + if (laddr->ifa == NULL) { 1.6046 + SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n", 1.6047 + __FUNCTION__); 1.6048 + continue; 1.6049 + } 1.6050 + 1.6051 + if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) { 1.6052 + continue; 1.6053 + } 1.6054 + switch (laddr->ifa->address.sa.sa_family) { 1.6055 +#ifdef INET6 1.6056 + case AF_INET6: 1.6057 +#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)) 1.6058 + inp->inp_vflag |= INP_IPV6; 1.6059 +#else 1.6060 + inp->ip_inp.inp.inp_vflag |= INP_IPV6; 1.6061 +#endif 1.6062 + break; 1.6063 +#endif 1.6064 +#ifdef INET 1.6065 + case AF_INET: 1.6066 +#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)) 1.6067 + inp->inp_vflag |= INP_IPV4; 1.6068 +#else 1.6069 + inp->ip_inp.inp.inp_vflag |= INP_IPV4; 1.6070 +#endif 1.6071 + break; 1.6072 +#endif 1.6073 +#if defined(__Userspace__) 1.6074 + case AF_CONN: 1.6075 + inp->ip_inp.inp.inp_vflag |= INP_CONN; 1.6076 + break; 1.6077 +#endif 1.6078 + default: 1.6079 + break; 1.6080 + } 1.6081 + } 1.6082 +} 1.6083 + 1.6084 +/* 1.6085 + * Add the address to the endpoint local address list There is nothing to be 1.6086 + * done if we are bound to all addresses 1.6087 + */ 1.6088 +void 1.6089 +sctp_add_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa, uint32_t action) 1.6090 +{ 1.6091 + struct sctp_laddr *laddr; 1.6092 + int fnd, error = 0; 1.6093 + 1.6094 + fnd = 0; 1.6095 + 1.6096 + if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1.6097 + /* You are already bound to all. You have it already */ 1.6098 + return; 1.6099 + } 1.6100 +#ifdef INET6 1.6101 + if (ifa->address.sa.sa_family == AF_INET6) { 1.6102 + if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { 1.6103 + /* Can't bind a non-useable addr. 
*/ 1.6104 + return; 1.6105 + } 1.6106 + } 1.6107 +#endif 1.6108 + /* first, is it already present? */ 1.6109 + LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1.6110 + if (laddr->ifa == ifa) { 1.6111 + fnd = 1; 1.6112 + break; 1.6113 + } 1.6114 + } 1.6115 + 1.6116 + if (fnd == 0) { 1.6117 + /* Not in the ep list */ 1.6118 + error = sctp_insert_laddr(&inp->sctp_addr_list, ifa, action); 1.6119 + if (error != 0) 1.6120 + return; 1.6121 + inp->laddr_count++; 1.6122 + /* update inp_vflag flags */ 1.6123 + switch (ifa->address.sa.sa_family) { 1.6124 +#ifdef INET6 1.6125 + case AF_INET6: 1.6126 +#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)) 1.6127 + inp->inp_vflag |= INP_IPV6; 1.6128 +#else 1.6129 + inp->ip_inp.inp.inp_vflag |= INP_IPV6; 1.6130 +#endif 1.6131 + break; 1.6132 +#endif 1.6133 +#ifdef INET 1.6134 + case AF_INET: 1.6135 +#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)) 1.6136 + inp->inp_vflag |= INP_IPV4; 1.6137 +#else 1.6138 + inp->ip_inp.inp.inp_vflag |= INP_IPV4; 1.6139 +#endif 1.6140 + break; 1.6141 +#endif 1.6142 +#if defined(__Userspace__) 1.6143 + case AF_CONN: 1.6144 + inp->ip_inp.inp.inp_vflag |= INP_CONN; 1.6145 + break; 1.6146 +#endif 1.6147 + default: 1.6148 + break; 1.6149 + } 1.6150 + } 1.6151 + return; 1.6152 +} 1.6153 + 1.6154 + 1.6155 +/* 1.6156 + * select a new (hopefully reachable) destination net (should only be used 1.6157 + * when we deleted an ep addr that is the only usable source address to reach 1.6158 + * the destination net) 1.6159 + */ 1.6160 +static void 1.6161 +sctp_select_primary_destination(struct sctp_tcb *stcb) 1.6162 +{ 1.6163 + struct sctp_nets *net; 1.6164 + 1.6165 + TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1.6166 + /* for now, we'll just pick the first reachable one we find */ 1.6167 + if (net->dest_state & SCTP_ADDR_UNCONFIRMED) 1.6168 + continue; 1.6169 + if (sctp_destination_is_reachable(stcb, 1.6170 + (struct sockaddr *)&net->ro._l_addr)) { 1.6171 + /* found a reachable destination */ 1.6172 + stcb->asoc.primary_destination = net; 1.6173 + } 1.6174 + } 1.6175 + /* I can't there from here! ...we're gonna die shortly... */ 1.6176 +} 1.6177 + 1.6178 + 1.6179 +/* 1.6180 + * Delete the address from the endpoint local address list There is nothing 1.6181 + * to be done if we are bound to all addresses 1.6182 + */ 1.6183 +void 1.6184 +sctp_del_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa) 1.6185 +{ 1.6186 + struct sctp_laddr *laddr; 1.6187 + int fnd; 1.6188 + 1.6189 + fnd = 0; 1.6190 + if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) { 1.6191 + /* You are already bound to all. 
You have it already */ 1.6192 + return; 1.6193 + } 1.6194 + LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 1.6195 + if (laddr->ifa == ifa) { 1.6196 + fnd = 1; 1.6197 + break; 1.6198 + } 1.6199 + } 1.6200 + if (fnd && (inp->laddr_count < 2)) { 1.6201 + /* can't delete unless there are at LEAST 2 addresses */ 1.6202 + return; 1.6203 + } 1.6204 + if (fnd) { 1.6205 + /* 1.6206 + * clean up any use of this address go through our 1.6207 + * associations and clear any last_used_address that match 1.6208 + * this one for each assoc, see if a new primary_destination 1.6209 + * is needed 1.6210 + */ 1.6211 + struct sctp_tcb *stcb; 1.6212 + 1.6213 + /* clean up "next_addr_touse" */ 1.6214 + if (inp->next_addr_touse == laddr) 1.6215 + /* delete this address */ 1.6216 + inp->next_addr_touse = NULL; 1.6217 + 1.6218 + /* clean up "last_used_address" */ 1.6219 + LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 1.6220 + struct sctp_nets *net; 1.6221 + SCTP_TCB_LOCK(stcb); 1.6222 + if (stcb->asoc.last_used_address == laddr) 1.6223 + /* delete this address */ 1.6224 + stcb->asoc.last_used_address = NULL; 1.6225 + /* Now spin through all the nets and purge any ref to laddr */ 1.6226 + TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1.6227 + if (net->ro._s_addr && 1.6228 + (net->ro._s_addr->ifa == laddr->ifa)) { 1.6229 + /* Yep, purge src address selected */ 1.6230 + sctp_rtentry_t *rt; 1.6231 + 1.6232 + /* delete this address if cached */ 1.6233 + rt = net->ro.ro_rt; 1.6234 + if (rt != NULL) { 1.6235 + RTFREE(rt); 1.6236 + net->ro.ro_rt = NULL; 1.6237 + } 1.6238 + sctp_free_ifa(net->ro._s_addr); 1.6239 + net->ro._s_addr = NULL; 1.6240 + net->src_addr_selected = 0; 1.6241 + } 1.6242 + } 1.6243 + SCTP_TCB_UNLOCK(stcb); 1.6244 + } /* for each tcb */ 1.6245 + /* remove it from the ep list */ 1.6246 + sctp_remove_laddr(laddr); 1.6247 + inp->laddr_count--; 1.6248 + /* update inp_vflag flags */ 1.6249 + sctp_update_ep_vflag(inp); 1.6250 + } 1.6251 + return; 1.6252 +} 1.6253 + 1.6254 +/* 1.6255 + * Add the address to the TCB local address restricted list. 1.6256 + * This is a "pending" address list (eg. addresses waiting for an 1.6257 + * ASCONF-ACK response) and cannot be used as a valid source address. 1.6258 + */ 1.6259 +void 1.6260 +sctp_add_local_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa) 1.6261 +{ 1.6262 + struct sctp_laddr *laddr; 1.6263 + struct sctpladdr *list; 1.6264 + 1.6265 + /* 1.6266 + * Assumes TCB is locked.. and possibly the INP. May need to 1.6267 + * confirm/fix that if we need it and is not the case. 1.6268 + */ 1.6269 + list = &stcb->asoc.sctp_restricted_addrs; 1.6270 + 1.6271 +#ifdef INET6 1.6272 + if (ifa->address.sa.sa_family == AF_INET6) { 1.6273 + if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) { 1.6274 + /* Can't bind a non-existent addr. */ 1.6275 + return; 1.6276 + } 1.6277 + } 1.6278 +#endif 1.6279 + /* does the address already exist? 
*/ 1.6280 + LIST_FOREACH(laddr, list, sctp_nxt_addr) { 1.6281 + if (laddr->ifa == ifa) { 1.6282 + return; 1.6283 + } 1.6284 + } 1.6285 + 1.6286 + /* add to the list */ 1.6287 + (void)sctp_insert_laddr(list, ifa, 0); 1.6288 + return; 1.6289 +} 1.6290 + 1.6291 +/* 1.6292 + * insert an laddr entry with the given ifa for the desired list 1.6293 + */ 1.6294 +int 1.6295 +sctp_insert_laddr(struct sctpladdr *list, struct sctp_ifa *ifa, uint32_t act) 1.6296 +{ 1.6297 + struct sctp_laddr *laddr; 1.6298 + 1.6299 + laddr = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 1.6300 + if (laddr == NULL) { 1.6301 + /* out of memory? */ 1.6302 + SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL); 1.6303 + return (EINVAL); 1.6304 + } 1.6305 + SCTP_INCR_LADDR_COUNT(); 1.6306 + bzero(laddr, sizeof(*laddr)); 1.6307 + (void)SCTP_GETTIME_TIMEVAL(&laddr->start_time); 1.6308 + laddr->ifa = ifa; 1.6309 + laddr->action = act; 1.6310 + atomic_add_int(&ifa->refcount, 1); 1.6311 + /* insert it */ 1.6312 + LIST_INSERT_HEAD(list, laddr, sctp_nxt_addr); 1.6313 + 1.6314 + return (0); 1.6315 +} 1.6316 + 1.6317 +/* 1.6318 + * Remove an laddr entry from the local address list (on an assoc) 1.6319 + */ 1.6320 +void 1.6321 +sctp_remove_laddr(struct sctp_laddr *laddr) 1.6322 +{ 1.6323 + 1.6324 + /* remove from the list */ 1.6325 + LIST_REMOVE(laddr, sctp_nxt_addr); 1.6326 + sctp_free_ifa(laddr->ifa); 1.6327 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), laddr); 1.6328 + SCTP_DECR_LADDR_COUNT(); 1.6329 +} 1.6330 + 1.6331 +/* 1.6332 + * Remove a local address from the TCB local address restricted list 1.6333 + */ 1.6334 +void 1.6335 +sctp_del_local_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa) 1.6336 +{ 1.6337 + struct sctp_inpcb *inp; 1.6338 + struct sctp_laddr *laddr; 1.6339 + 1.6340 + /* 1.6341 + * This is called by asconf work. It is assumed that a) The TCB is 1.6342 + * locked and b) The INP is locked. This is true in as much as I can 1.6343 + * trace through the entry asconf code where I did these locks. 1.6344 + * Again, the ASCONF code is a bit different in that it does lock 1.6345 + * the INP during its work often times. This must be since we don't 1.6346 + * want other proc's looking up things while what they are looking 1.6347 + * up is changing :-D 1.6348 + */ 1.6349 + 1.6350 + inp = stcb->sctp_ep; 1.6351 + /* if subset bound and don't allow ASCONF's, can't delete last */ 1.6352 + if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && 1.6353 + sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF)) { 1.6354 + if (stcb->sctp_ep->laddr_count < 2) { 1.6355 + /* can't delete last address */ 1.6356 + return; 1.6357 + } 1.6358 + } 1.6359 + LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) { 1.6360 + /* remove the address if it exists */ 1.6361 + if (laddr->ifa == NULL) 1.6362 + continue; 1.6363 + if (laddr->ifa == ifa) { 1.6364 + sctp_remove_laddr(laddr); 1.6365 + return; 1.6366 + } 1.6367 + } 1.6368 + 1.6369 + /* address not found! 
*/ 1.6370 + return; 1.6371 +} 1.6372 + 1.6373 +#if defined(__FreeBSD__) 1.6374 +/* 1.6375 + * Temporarily remove for __APPLE__ until we use the Tiger equivalents 1.6376 + */ 1.6377 +/* sysctl */ 1.6378 +static int sctp_max_number_of_assoc = SCTP_MAX_NUM_OF_ASOC; 1.6379 +static int sctp_scale_up_for_address = SCTP_SCALE_FOR_ADDR; 1.6380 +#endif /* FreeBSD || APPLE */ 1.6381 + 1.6382 + 1.6383 + 1.6384 +#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP) 1.6385 +struct sctp_mcore_ctrl *sctp_mcore_workers = NULL; 1.6386 +int *sctp_cpuarry = NULL; 1.6387 +void 1.6388 +sctp_queue_to_mcore(struct mbuf *m, int off, int cpu_to_use) 1.6389 +{ 1.6390 + /* Queue a packet to a processor for the specified core */ 1.6391 + struct sctp_mcore_queue *qent; 1.6392 + struct sctp_mcore_ctrl *wkq; 1.6393 + int need_wake = 0; 1.6394 + if (sctp_mcore_workers == NULL) { 1.6395 + /* Something went way bad during setup */ 1.6396 + sctp_input_with_port(m, off, 0); 1.6397 + return; 1.6398 + } 1.6399 + SCTP_MALLOC(qent, struct sctp_mcore_queue *, 1.6400 + (sizeof(struct sctp_mcore_queue)), 1.6401 + SCTP_M_MCORE); 1.6402 + if (qent == NULL) { 1.6403 + /* This is trouble */ 1.6404 + sctp_input_with_port(m, off, 0); 1.6405 + return; 1.6406 + } 1.6407 +#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 1.6408 + qent->vn = curvnet; 1.6409 +#endif 1.6410 + qent->m = m; 1.6411 + qent->off = off; 1.6412 + qent->v6 = 0; 1.6413 + wkq = &sctp_mcore_workers[cpu_to_use]; 1.6414 + SCTP_MCORE_QLOCK(wkq); 1.6415 + 1.6416 + TAILQ_INSERT_TAIL(&wkq->que, qent, next); 1.6417 + if (wkq->running == 0) { 1.6418 + need_wake = 1; 1.6419 + } 1.6420 + SCTP_MCORE_QUNLOCK(wkq); 1.6421 + if (need_wake) { 1.6422 + wakeup(&wkq->running); 1.6423 + } 1.6424 +} 1.6425 + 1.6426 +static void 1.6427 +sctp_mcore_thread(void *arg) 1.6428 +{ 1.6429 + 1.6430 + struct sctp_mcore_ctrl *wkq; 1.6431 + struct sctp_mcore_queue *qent; 1.6432 + 1.6433 + wkq = (struct sctp_mcore_ctrl *)arg; 1.6434 + struct mbuf *m; 1.6435 + int off, v6; 1.6436 + 1.6437 + /* Wait for first tickle */ 1.6438 + SCTP_MCORE_LOCK(wkq); 1.6439 + wkq->running = 0; 1.6440 + msleep(&wkq->running, 1.6441 + &wkq->core_mtx, 1.6442 + 0, "wait for pkt", 0); 1.6443 + SCTP_MCORE_UNLOCK(wkq); 1.6444 + 1.6445 + /* Bind to our cpu */ 1.6446 + thread_lock(curthread); 1.6447 + sched_bind(curthread, wkq->cpuid); 1.6448 + thread_unlock(curthread); 1.6449 + 1.6450 + /* Now lets start working */ 1.6451 + SCTP_MCORE_LOCK(wkq); 1.6452 + /* Now grab lock and go */ 1.6453 + for (;;) { 1.6454 + SCTP_MCORE_QLOCK(wkq); 1.6455 + skip_sleep: 1.6456 + wkq->running = 1; 1.6457 + qent = TAILQ_FIRST(&wkq->que); 1.6458 + if (qent) { 1.6459 + TAILQ_REMOVE(&wkq->que, qent, next); 1.6460 + SCTP_MCORE_QUNLOCK(wkq); 1.6461 +#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 1.6462 + CURVNET_SET(qent->vn); 1.6463 +#endif 1.6464 + m = qent->m; 1.6465 + off = qent->off; 1.6466 + v6 = qent->v6; 1.6467 + SCTP_FREE(qent, SCTP_M_MCORE); 1.6468 + if (v6 == 0) { 1.6469 + sctp_input_with_port(m, off, 0); 1.6470 + } else { 1.6471 + SCTP_PRINTF("V6 not yet supported\n"); 1.6472 + sctp_m_freem(m); 1.6473 + } 1.6474 +#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 1.6475 + CURVNET_RESTORE(); 1.6476 +#endif 1.6477 + SCTP_MCORE_QLOCK(wkq); 1.6478 + } 1.6479 + wkq->running = 0; 1.6480 + if (!TAILQ_EMPTY(&wkq->que)) { 1.6481 + goto skip_sleep; 1.6482 + } 1.6483 + SCTP_MCORE_QUNLOCK(wkq); 1.6484 + msleep(&wkq->running, 1.6485 + &wkq->core_mtx, 1.6486 + 0, "wait for pkt", 0); 1.6487 + } 1.6488 +} 1.6489 + 1.6490 
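/*
 * Illustrative sketch, not part of the committed file: the per-CPU worker
 * pool above is intended to be fed from the packet input path.  A
 * dispatcher derives a CPU from the packet's flow id and hands the mbuf to
 * sctp_queue_to_mcore().  The hypothetical helper below only shows that
 * calling convention, using the sctp_cpuarry[] mapping that
 * sctp_startup_mcore_threads() builds further down.
 */
static void
example_mcore_dispatch(struct mbuf *m, int off, uint32_t flowid)
{
	int cpu_to_use;

	/* hypothetical: map the flow id onto one of the started workers */
	cpu_to_use = sctp_cpuarry[flowid % mp_ncpus];
	sctp_queue_to_mcore(m, off, cpu_to_use);
}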
+static void 1.6491 +sctp_startup_mcore_threads(void) 1.6492 +{ 1.6493 + int i, cpu; 1.6494 + 1.6495 + if (mp_ncpus == 1) 1.6496 + return; 1.6497 + 1.6498 + if (sctp_mcore_workers != NULL) { 1.6499 + /* Already been here in some previous 1.6500 + * vnet? 1.6501 + */ 1.6502 + return; 1.6503 + } 1.6504 + SCTP_MALLOC(sctp_mcore_workers, struct sctp_mcore_ctrl *, 1.6505 + ((mp_maxid+1) * sizeof(struct sctp_mcore_ctrl)), 1.6506 + SCTP_M_MCORE); 1.6507 + if (sctp_mcore_workers == NULL) { 1.6508 + /* TSNH I hope */ 1.6509 + return; 1.6510 + } 1.6511 + memset(sctp_mcore_workers, 0 , ((mp_maxid+1) * 1.6512 + sizeof(struct sctp_mcore_ctrl))); 1.6513 + /* Init the structures */ 1.6514 + for (i = 0; i<=mp_maxid; i++) { 1.6515 + TAILQ_INIT(&sctp_mcore_workers[i].que); 1.6516 + SCTP_MCORE_LOCK_INIT(&sctp_mcore_workers[i]); 1.6517 + SCTP_MCORE_QLOCK_INIT(&sctp_mcore_workers[i]); 1.6518 + sctp_mcore_workers[i].cpuid = i; 1.6519 + } 1.6520 + if (sctp_cpuarry == NULL) { 1.6521 + SCTP_MALLOC(sctp_cpuarry, int *, 1.6522 + (mp_ncpus * sizeof(int)), 1.6523 + SCTP_M_MCORE); 1.6524 + i = 0; 1.6525 + CPU_FOREACH(cpu) { 1.6526 + sctp_cpuarry[i] = cpu; 1.6527 + i++; 1.6528 + } 1.6529 + } 1.6530 + 1.6531 + /* Now start them all */ 1.6532 + CPU_FOREACH(cpu) { 1.6533 +#if __FreeBSD_version <= 701000 1.6534 + (void)kthread_create(sctp_mcore_thread, 1.6535 + (void *)&sctp_mcore_workers[cpu], 1.6536 + &sctp_mcore_workers[cpu].thread_proc, 1.6537 + RFPROC, 1.6538 + SCTP_KTHREAD_PAGES, 1.6539 + SCTP_MCORE_NAME); 1.6540 + 1.6541 +#else 1.6542 + (void)kproc_create(sctp_mcore_thread, 1.6543 + (void *)&sctp_mcore_workers[cpu], 1.6544 + &sctp_mcore_workers[cpu].thread_proc, 1.6545 + RFPROC, 1.6546 + SCTP_KTHREAD_PAGES, 1.6547 + SCTP_MCORE_NAME); 1.6548 +#endif 1.6549 + 1.6550 + } 1.6551 +} 1.6552 +#endif 1.6553 +#if defined(__FreeBSD__) && __FreeBSD_cc_version >= 1100000 1.6554 +static struct mbuf * 1.6555 +sctp_netisr_hdlr(struct mbuf *m, uintptr_t source) 1.6556 +{ 1.6557 + struct ip *ip; 1.6558 + struct sctphdr *sh; 1.6559 + int offset; 1.6560 + uint32_t flowid, tag; 1.6561 + 1.6562 + /* 1.6563 + * No flow id built by lower layers fix it so we 1.6564 + * create one. 1.6565 + */ 1.6566 + ip = mtod(m, struct ip *); 1.6567 + offset = (ip->ip_hl << 2) + sizeof(struct sctphdr); 1.6568 + if (SCTP_BUF_LEN(m) < offset) { 1.6569 + if ((m = m_pullup(m, offset)) == NULL) { 1.6570 + SCTP_STAT_INCR(sctps_hdrops); 1.6571 + return (NULL); 1.6572 + } 1.6573 + ip = mtod(m, struct ip *); 1.6574 + } 1.6575 + sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 1.6576 + tag = htonl(sh->v_tag); 1.6577 + flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port); 1.6578 + m->m_pkthdr.flowid = flowid; 1.6579 + m->m_flags |= M_FLOWID; 1.6580 + return (m); 1.6581 +} 1.6582 +#endif 1.6583 + 1.6584 +void 1.6585 +sctp_pcb_init() 1.6586 +{ 1.6587 + /* 1.6588 + * SCTP initialization for the PCB structures should be called by 1.6589 + * the sctp_init() funciton. 
1.6590 + */ 1.6591 + int i; 1.6592 + struct timeval tv; 1.6593 + 1.6594 + if (SCTP_BASE_VAR(sctp_pcb_initialized) != 0) { 1.6595 + /* error I was called twice */ 1.6596 + return; 1.6597 + } 1.6598 + SCTP_BASE_VAR(sctp_pcb_initialized) = 1; 1.6599 + 1.6600 +#if defined(SCTP_LOCAL_TRACE_BUF) 1.6601 +#if defined(__Windows__) 1.6602 + if (SCTP_BASE_SYSCTL(sctp_log) != NULL) { 1.6603 + bzero(SCTP_BASE_SYSCTL(sctp_log), sizeof(struct sctp_log)); 1.6604 + } 1.6605 +#else 1.6606 + bzero(&SCTP_BASE_SYSCTL(sctp_log), sizeof(struct sctp_log)); 1.6607 +#endif 1.6608 +#endif 1.6609 +#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT) 1.6610 + SCTP_MALLOC(SCTP_BASE_STATS, struct sctpstat *, 1.6611 + ((mp_maxid+1) * sizeof(struct sctpstat)), 1.6612 + SCTP_M_MCORE); 1.6613 +#endif 1.6614 + (void)SCTP_GETTIME_TIMEVAL(&tv); 1.6615 +#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT) 1.6616 + bzero(SCTP_BASE_STATS, (sizeof(struct sctpstat) * (mp_maxid+1))); 1.6617 + SCTP_BASE_STATS[PCPU_GET(cpuid)].sctps_discontinuitytime.tv_sec = (uint32_t)tv.tv_sec; 1.6618 + SCTP_BASE_STATS[PCPU_GET(cpuid)].sctps_discontinuitytime.tv_usec = (uint32_t)tv.tv_usec; 1.6619 +#else 1.6620 + bzero(&SCTP_BASE_STATS, sizeof(struct sctpstat)); 1.6621 + SCTP_BASE_STAT(sctps_discontinuitytime).tv_sec = (uint32_t)tv.tv_sec; 1.6622 + SCTP_BASE_STAT(sctps_discontinuitytime).tv_usec = (uint32_t)tv.tv_usec; 1.6623 +#endif 1.6624 + /* init the empty list of (All) Endpoints */ 1.6625 + LIST_INIT(&SCTP_BASE_INFO(listhead)); 1.6626 +#if defined(__APPLE__) 1.6627 + LIST_INIT(&SCTP_BASE_INFO(inplisthead)); 1.6628 +#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION) || defined(APPLE_MOUNTAINLION) 1.6629 + SCTP_BASE_INFO(sctbinfo).listhead = &SCTP_BASE_INFO(inplisthead); 1.6630 + SCTP_BASE_INFO(sctbinfo).mtx_grp_attr = lck_grp_attr_alloc_init(); 1.6631 + lck_grp_attr_setdefault(SCTP_BASE_INFO(sctbinfo).mtx_grp_attr); 1.6632 + SCTP_BASE_INFO(sctbinfo).mtx_grp = lck_grp_alloc_init("sctppcb", SCTP_BASE_INFO(sctbinfo).mtx_grp_attr); 1.6633 + SCTP_BASE_INFO(sctbinfo).mtx_attr = lck_attr_alloc_init(); 1.6634 + lck_attr_setdefault(SCTP_BASE_INFO(sctbinfo).mtx_attr); 1.6635 +#else 1.6636 + SCTP_BASE_INFO(sctbinfo).ipi_listhead = &SCTP_BASE_INFO(inplisthead); 1.6637 + SCTP_BASE_INFO(sctbinfo).ipi_lock_grp_attr = lck_grp_attr_alloc_init(); 1.6638 + lck_grp_attr_setdefault(SCTP_BASE_INFO(sctbinfo).ipi_lock_grp_attr); 1.6639 + SCTP_BASE_INFO(sctbinfo).ipi_lock_grp = lck_grp_alloc_init("sctppcb", SCTP_BASE_INFO(sctbinfo).ipi_lock_grp_attr); 1.6640 + SCTP_BASE_INFO(sctbinfo).ipi_lock_attr = lck_attr_alloc_init(); 1.6641 + lck_attr_setdefault(SCTP_BASE_INFO(sctbinfo).ipi_lock_attr); 1.6642 +#endif 1.6643 +#if !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION) 1.6644 + SCTP_BASE_INFO(sctbinfo).ipi_gc = sctp_gc; 1.6645 + in_pcbinfo_attach(&SCTP_BASE_INFO(sctbinfo)); 1.6646 +#endif 1.6647 +#endif 1.6648 + 1.6649 + 1.6650 + /* init the hash table of endpoints */ 1.6651 +#if defined(__FreeBSD__) 1.6652 +#if defined(__FreeBSD_cc_version) && __FreeBSD_cc_version >= 440000 1.6653 + TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", &SCTP_BASE_SYSCTL(sctp_hashtblsize)); 1.6654 + TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", &SCTP_BASE_SYSCTL(sctp_pcbtblsize)); 1.6655 + TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", &SCTP_BASE_SYSCTL(sctp_chunkscale)); 1.6656 +#else 1.6657 + TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", SCTP_TCBHASHSIZE, 1.6658 
+ SCTP_BASE_SYSCTL(sctp_hashtblsize)); 1.6659 + TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", SCTP_PCBHASHSIZE, 1.6660 + SCTP_BASE_SYSCTL(sctp_pcbtblsize)); 1.6661 + TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", SCTP_CHUNKQUEUE_SCALE, 1.6662 + SCTP_BASE_SYSCTL(sctp_chunkscale)); 1.6663 +#endif 1.6664 +#endif 1.6665 + SCTP_BASE_INFO(sctp_asochash) = SCTP_HASH_INIT((SCTP_BASE_SYSCTL(sctp_hashtblsize) * 31), 1.6666 + &SCTP_BASE_INFO(hashasocmark)); 1.6667 + SCTP_BASE_INFO(sctp_ephash) = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_hashtblsize), 1.6668 + &SCTP_BASE_INFO(hashmark)); 1.6669 + SCTP_BASE_INFO(sctp_tcpephash) = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_hashtblsize), 1.6670 + &SCTP_BASE_INFO(hashtcpmark)); 1.6671 + SCTP_BASE_INFO(hashtblsize) = SCTP_BASE_SYSCTL(sctp_hashtblsize); 1.6672 + 1.6673 + 1.6674 + SCTP_BASE_INFO(sctp_vrfhash) = SCTP_HASH_INIT(SCTP_SIZE_OF_VRF_HASH, 1.6675 + &SCTP_BASE_INFO(hashvrfmark)); 1.6676 + 1.6677 + SCTP_BASE_INFO(vrf_ifn_hash) = SCTP_HASH_INIT(SCTP_VRF_IFN_HASH_SIZE, 1.6678 + &SCTP_BASE_INFO(vrf_ifn_hashmark)); 1.6679 + /* init the zones */ 1.6680 + /* 1.6681 + * FIX ME: Should check for NULL returns, but if it does fail we are 1.6682 + * doomed to panic anyways... add later maybe. 1.6683 + */ 1.6684 + SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_ep), "sctp_ep", 1.6685 + sizeof(struct sctp_inpcb), maxsockets); 1.6686 + 1.6687 + SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asoc), "sctp_asoc", 1.6688 + sizeof(struct sctp_tcb), sctp_max_number_of_assoc); 1.6689 + 1.6690 + SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_laddr), "sctp_laddr", 1.6691 + sizeof(struct sctp_laddr), 1.6692 + (sctp_max_number_of_assoc * sctp_scale_up_for_address)); 1.6693 + 1.6694 + SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_net), "sctp_raddr", 1.6695 + sizeof(struct sctp_nets), 1.6696 + (sctp_max_number_of_assoc * sctp_scale_up_for_address)); 1.6697 + 1.6698 + SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_chunk), "sctp_chunk", 1.6699 + sizeof(struct sctp_tmit_chunk), 1.6700 + (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale))); 1.6701 + 1.6702 + SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_readq), "sctp_readq", 1.6703 + sizeof(struct sctp_queued_to_read), 1.6704 + (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale))); 1.6705 + 1.6706 + SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_strmoq), "sctp_stream_msg_out", 1.6707 + sizeof(struct sctp_stream_queue_pending), 1.6708 + (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale))); 1.6709 + 1.6710 + SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asconf), "sctp_asconf", 1.6711 + sizeof(struct sctp_asconf), 1.6712 + (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale))); 1.6713 + 1.6714 + SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asconf_ack), "sctp_asconf_ack", 1.6715 + sizeof(struct sctp_asconf_ack), 1.6716 + (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale))); 1.6717 + 1.6718 + 1.6719 + /* Master Lock INIT for info structure */ 1.6720 + SCTP_INP_INFO_LOCK_INIT(); 1.6721 + SCTP_STATLOG_INIT_LOCK(); 1.6722 + 1.6723 + SCTP_IPI_COUNT_INIT(); 1.6724 + SCTP_IPI_ADDR_INIT(); 1.6725 +#ifdef SCTP_PACKET_LOGGING 1.6726 + SCTP_IP_PKTLOG_INIT(); 1.6727 +#endif 1.6728 + LIST_INIT(&SCTP_BASE_INFO(addr_wq)); 1.6729 + 1.6730 + SCTP_WQ_ADDR_INIT(); 1.6731 + /* not sure if we need all the counts */ 1.6732 + SCTP_BASE_INFO(ipi_count_ep) = 0; 1.6733 + /* assoc/tcb zone info */ 1.6734 + SCTP_BASE_INFO(ipi_count_asoc) = 0; 1.6735 + /* local addrlist zone info */ 1.6736 + SCTP_BASE_INFO(ipi_count_laddr) = 0; 1.6737 + /* remote addrlist zone info */ 1.6738 + 
SCTP_BASE_INFO(ipi_count_raddr) = 0; 1.6739 + /* chunk info */ 1.6740 + SCTP_BASE_INFO(ipi_count_chunk) = 0; 1.6741 + 1.6742 + /* socket queue zone info */ 1.6743 + SCTP_BASE_INFO(ipi_count_readq) = 0; 1.6744 + 1.6745 + /* stream out queue count */ 1.6746 + SCTP_BASE_INFO(ipi_count_strmoq) = 0; 1.6747 + 1.6748 + SCTP_BASE_INFO(ipi_free_strmoq) = 0; 1.6749 + SCTP_BASE_INFO(ipi_free_chunks) = 0; 1.6750 + 1.6751 + SCTP_OS_TIMER_INIT(&SCTP_BASE_INFO(addr_wq_timer.timer)); 1.6752 + 1.6753 + /* Init the TIMEWAIT list */ 1.6754 + for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) { 1.6755 + LIST_INIT(&SCTP_BASE_INFO(vtag_timewait)[i]); 1.6756 + } 1.6757 +#if defined(SCTP_PROCESS_LEVEL_LOCKS) 1.6758 +#if defined(__Userspace_os_Windows) 1.6759 + InitializeConditionVariable(&sctp_it_ctl.iterator_wakeup); 1.6760 +#else 1.6761 + (void)pthread_cond_init(&sctp_it_ctl.iterator_wakeup, NULL); 1.6762 +#endif 1.6763 +#endif 1.6764 + sctp_startup_iterator(); 1.6765 + 1.6766 +#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP) 1.6767 + sctp_startup_mcore_threads(); 1.6768 +#endif 1.6769 + 1.6770 +#ifndef __Panda__ 1.6771 + /* 1.6772 + * INIT the default VRF which for BSD is the only one, other O/S's 1.6773 + * may have more. But initially they must start with one and then 1.6774 + * add the VRF's as addresses are added. 1.6775 + */ 1.6776 + sctp_init_vrf_list(SCTP_DEFAULT_VRF); 1.6777 +#endif 1.6778 +#if defined(__FreeBSD__) && __FreeBSD_cc_version >= 1100000 1.6779 + if (ip_register_flow_handler(sctp_netisr_hdlr, IPPROTO_SCTP)) { 1.6780 + SCTP_PRINTF("***SCTP- Error can't register netisr handler***\n"); 1.6781 + } 1.6782 +#endif 1.6783 +#if defined(_SCTP_NEEDS_CALLOUT_) || defined(_USER_SCTP_NEEDS_CALLOUT_) 1.6784 + /* allocate the lock for the callout/timer queue */ 1.6785 + SCTP_TIMERQ_LOCK_INIT(); 1.6786 + TAILQ_INIT(&SCTP_BASE_INFO(callqueue)); 1.6787 +#endif 1.6788 +#if defined(__Userspace__) 1.6789 + mbuf_init(NULL); 1.6790 + atomic_init(); 1.6791 +#if defined(INET) || defined(INET6) 1.6792 + recv_thread_init(); 1.6793 +#endif 1.6794 +#endif 1.6795 +} 1.6796 + 1.6797 +/* 1.6798 + * Assumes that the SCTP_BASE_INFO() lock is NOT held. 1.6799 + */ 1.6800 +void 1.6801 +sctp_pcb_finish(void) 1.6802 +{ 1.6803 + struct sctp_vrflist *vrf_bucket; 1.6804 + struct sctp_vrf *vrf, *nvrf; 1.6805 + struct sctp_ifn *ifn, *nifn; 1.6806 + struct sctp_ifa *ifa, *nifa; 1.6807 + struct sctpvtaghead *chain; 1.6808 + struct sctp_tagblock *twait_block, *prev_twait_block; 1.6809 + struct sctp_laddr *wi, *nwi; 1.6810 + int i; 1.6811 + struct sctp_iterator *it, *nit; 1.6812 + 1.6813 +#if !defined(__FreeBSD__) 1.6814 + /* Notify the iterator to exit.
*/ 1.6815 + SCTP_IPI_ITERATOR_WQ_LOCK(); 1.6816 + sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_MUST_EXIT; 1.6817 + sctp_wakeup_iterator(); 1.6818 + SCTP_IPI_ITERATOR_WQ_UNLOCK(); 1.6819 +#endif 1.6820 +#if defined(__APPLE__) 1.6821 +#if !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION) 1.6822 + in_pcbinfo_detach(&SCTP_BASE_INFO(sctbinfo)); 1.6823 +#endif 1.6824 + SCTP_IPI_ITERATOR_WQ_LOCK(); 1.6825 + do { 1.6826 + msleep(&sctp_it_ctl.iterator_flags, 1.6827 + sctp_it_ctl.ipi_iterator_wq_mtx, 1.6828 + 0, "waiting_for_work", 0); 1.6829 + } while ((sctp_it_ctl.iterator_flags & SCTP_ITERATOR_EXITED) == 0); 1.6830 + thread_deallocate(sctp_it_ctl.thread_proc); 1.6831 + SCTP_IPI_ITERATOR_WQ_UNLOCK(); 1.6832 +#endif 1.6833 +#if defined(__Windows__) 1.6834 + if (sctp_it_ctl.iterator_thread_obj != NULL) { 1.6835 + NTSTATUS status = STATUS_SUCCESS; 1.6836 + 1.6837 + KeSetEvent(&sctp_it_ctl.iterator_wakeup[1], IO_NO_INCREMENT, FALSE); 1.6838 + status = KeWaitForSingleObject(sctp_it_ctl.iterator_thread_obj, 1.6839 + Executive, 1.6840 + KernelMode, 1.6841 + FALSE, 1.6842 + NULL); 1.6843 + ObDereferenceObject(sctp_it_ctl.iterator_thread_obj); 1.6844 + } 1.6845 +#endif 1.6846 +#if defined(__Userspace__) 1.6847 + if (sctp_it_ctl.thread_proc) { 1.6848 +#if defined(__Userspace_os_Windows) 1.6849 + WaitForSingleObject(sctp_it_ctl.thread_proc, INFINITE); 1.6850 + CloseHandle(sctp_it_ctl.thread_proc); 1.6851 + sctp_it_ctl.thread_proc = NULL; 1.6852 +#else 1.6853 + pthread_join(sctp_it_ctl.thread_proc, NULL); 1.6854 + sctp_it_ctl.thread_proc = 0; 1.6855 +#endif 1.6856 + } 1.6857 +#endif 1.6858 +#if defined(SCTP_PROCESS_LEVEL_LOCKS) 1.6859 +#if defined(__Userspace_os_Windows) 1.6860 + DeleteConditionVariable(&sctp_it_ctl.iterator_wakeup); 1.6861 +#else 1.6862 + pthread_cond_destroy(&sctp_it_ctl.iterator_wakeup); 1.6863 +#endif 1.6864 +#endif 1.6865 + /* In FreeBSD the iterator thread never exits 1.6866 + * but we do clean up. 1.6867 + * The only way FreeBSD reaches here is if we have VRF's 1.6868 + * but we still add the ifdef to make it compile on old versions. 
1.6869 + */ 1.6870 + SCTP_IPI_ITERATOR_WQ_LOCK(); 1.6871 + TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) { 1.6872 +#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 1.6873 + if (it->vn != curvnet) { 1.6874 + continue; 1.6875 + } 1.6876 +#endif 1.6877 + TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr); 1.6878 + if (it->function_atend != NULL) { 1.6879 + (*it->function_atend) (it->pointer, it->val); 1.6880 + } 1.6881 + SCTP_FREE(it,SCTP_M_ITER); 1.6882 + } 1.6883 + SCTP_IPI_ITERATOR_WQ_UNLOCK(); 1.6884 +#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 1.6885 + SCTP_ITERATOR_LOCK(); 1.6886 + if ((sctp_it_ctl.cur_it) && 1.6887 + (sctp_it_ctl.cur_it->vn == curvnet)) { 1.6888 + sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_IT; 1.6889 + } 1.6890 + SCTP_ITERATOR_UNLOCK(); 1.6891 +#endif 1.6892 +#if !defined(__FreeBSD__) 1.6893 + SCTP_IPI_ITERATOR_WQ_DESTROY(); 1.6894 + SCTP_ITERATOR_LOCK_DESTROY(); 1.6895 +#endif 1.6896 + SCTP_OS_TIMER_STOP(&SCTP_BASE_INFO(addr_wq_timer.timer)); 1.6897 + SCTP_WQ_ADDR_LOCK(); 1.6898 + LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) { 1.6899 + LIST_REMOVE(wi, sctp_nxt_addr); 1.6900 + SCTP_DECR_LADDR_COUNT(); 1.6901 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), wi); 1.6902 + } 1.6903 + SCTP_WQ_ADDR_UNLOCK(); 1.6904 + 1.6905 + /* 1.6906 + * free the vrf/ifn/ifa lists and hashes (be sure address monitor 1.6907 + * is destroyed first). 1.6908 + */ 1.6909 + vrf_bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(SCTP_DEFAULT_VRFID & SCTP_BASE_INFO(hashvrfmark))]; 1.6910 + LIST_FOREACH_SAFE(vrf, vrf_bucket, next_vrf, nvrf) { 1.6911 + LIST_FOREACH_SAFE(ifn, &vrf->ifnlist, next_ifn, nifn) { 1.6912 + LIST_FOREACH_SAFE(ifa, &ifn->ifalist, next_ifa, nifa) { 1.6913 + /* free the ifa */ 1.6914 + LIST_REMOVE(ifa, next_bucket); 1.6915 + LIST_REMOVE(ifa, next_ifa); 1.6916 + SCTP_FREE(ifa, SCTP_M_IFA); 1.6917 + } 1.6918 + /* free the ifn */ 1.6919 + LIST_REMOVE(ifn, next_bucket); 1.6920 + LIST_REMOVE(ifn, next_ifn); 1.6921 + SCTP_FREE(ifn, SCTP_M_IFN); 1.6922 + } 1.6923 + SCTP_HASH_FREE(vrf->vrf_addr_hash, vrf->vrf_addr_hashmark); 1.6924 + /* free the vrf */ 1.6925 + LIST_REMOVE(vrf, next_vrf); 1.6926 + SCTP_FREE(vrf, SCTP_M_VRF); 1.6927 + } 1.6928 + /* free the vrf hashes */ 1.6929 + SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_vrfhash), SCTP_BASE_INFO(hashvrfmark)); 1.6930 + SCTP_HASH_FREE(SCTP_BASE_INFO(vrf_ifn_hash), SCTP_BASE_INFO(vrf_ifn_hashmark)); 1.6931 +#if defined(__Userspace__) && !defined(__Userspace_os_Windows) 1.6932 + /* free memory allocated by getifaddrs call */ 1.6933 +#if defined(INET) || defined(INET6) 1.6934 + freeifaddrs(g_interfaces); 1.6935 +#endif 1.6936 +#endif 1.6937 + 1.6938 + /* free the TIMEWAIT list elements malloc'd in the function 1.6939 + * sctp_add_vtag_to_timewait()... 
1.6940 + */ 1.6941 + for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) { 1.6942 + chain = &SCTP_BASE_INFO(vtag_timewait)[i]; 1.6943 + if (!LIST_EMPTY(chain)) { 1.6944 + prev_twait_block = NULL; 1.6945 + LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) { 1.6946 + if (prev_twait_block) { 1.6947 + SCTP_FREE(prev_twait_block, SCTP_M_TIMW); 1.6948 + } 1.6949 + prev_twait_block = twait_block; 1.6950 + } 1.6951 + SCTP_FREE(prev_twait_block, SCTP_M_TIMW); 1.6952 + } 1.6953 + } 1.6954 + 1.6955 + /* free the locks and mutexes */ 1.6956 +#if defined(__APPLE__) 1.6957 + SCTP_TIMERQ_LOCK_DESTROY(); 1.6958 +#endif 1.6959 +#ifdef SCTP_PACKET_LOGGING 1.6960 + SCTP_IP_PKTLOG_DESTROY(); 1.6961 +#endif 1.6962 + SCTP_IPI_ADDR_DESTROY(); 1.6963 +#if defined(__APPLE__) 1.6964 + SCTP_IPI_COUNT_DESTROY(); 1.6965 +#endif 1.6966 + SCTP_STATLOG_DESTROY(); 1.6967 + SCTP_INP_INFO_LOCK_DESTROY(); 1.6968 + 1.6969 + SCTP_WQ_ADDR_DESTROY(); 1.6970 + 1.6971 +#if defined(__APPLE__) 1.6972 +#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION) || defined(APPLE_MOUNTAINLION) 1.6973 + lck_grp_attr_free(SCTP_BASE_INFO(sctbinfo).mtx_grp_attr); 1.6974 + lck_grp_free(SCTP_BASE_INFO(sctbinfo).mtx_grp); 1.6975 + lck_attr_free(SCTP_BASE_INFO(sctbinfo).mtx_attr); 1.6976 +#else 1.6977 + lck_grp_attr_free(SCTP_BASE_INFO(sctbinfo).ipi_lock_grp_attr); 1.6978 + lck_grp_free(SCTP_BASE_INFO(sctbinfo).ipi_lock_grp); 1.6979 + lck_attr_free(SCTP_BASE_INFO(sctbinfo).ipi_lock_attr); 1.6980 +#endif 1.6981 +#endif 1.6982 +#if defined(__Userspace__) 1.6983 + SCTP_TIMERQ_LOCK_DESTROY(); 1.6984 + SCTP_ZONE_DESTROY(zone_mbuf); 1.6985 + SCTP_ZONE_DESTROY(zone_clust); 1.6986 + SCTP_ZONE_DESTROY(zone_ext_refcnt); 1.6987 +#endif 1.6988 +#if defined(__Windows__) || defined(__FreeBSD__) || defined(__Userspace__) 1.6989 + SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_ep)); 1.6990 + SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asoc)); 1.6991 + SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_laddr)); 1.6992 + SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_net)); 1.6993 + SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_chunk)); 1.6994 + SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_readq)); 1.6995 + SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_strmoq)); 1.6996 + SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asconf)); 1.6997 + SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asconf_ack)); 1.6998 +#endif 1.6999 + /* Get rid of other stuff to */ 1.7000 + if (SCTP_BASE_INFO(sctp_asochash) != NULL) 1.7001 + SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_asochash), SCTP_BASE_INFO(hashasocmark)); 1.7002 + if (SCTP_BASE_INFO(sctp_ephash) != NULL) 1.7003 + SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_ephash), SCTP_BASE_INFO(hashmark)); 1.7004 + if (SCTP_BASE_INFO(sctp_tcpephash) != NULL) 1.7005 + SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_tcpephash), SCTP_BASE_INFO(hashtcpmark)); 1.7006 +#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT) 1.7007 + SCTP_FREE(SCTP_BASE_STATS, SCTP_M_MCORE); 1.7008 +#endif 1.7009 +} 1.7010 + 1.7011 + 1.7012 +int 1.7013 +sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m, 1.7014 + int offset, int limit, 1.7015 + struct sockaddr *src, struct sockaddr *dst, 1.7016 + struct sockaddr *altsa) 1.7017 +{ 1.7018 + /* 1.7019 + * grub through the INIT pulling addresses and loading them to the 1.7020 + * nets structure in the asoc. The from address in the mbuf should 1.7021 + * also be loaded (if it is not already). 
This routine can be called 1.7022 + * with either INIT or INIT-ACK's as long as the m points to the IP 1.7023 + * packet and the offset points to the beginning of the parameters. 1.7024 + */ 1.7025 + struct sctp_inpcb *inp; 1.7026 + struct sctp_nets *net, *nnet, *net_tmp; 1.7027 + struct sctp_paramhdr *phdr, parm_buf; 1.7028 + struct sctp_tcb *stcb_tmp; 1.7029 + uint16_t ptype, plen; 1.7030 + struct sockaddr *sa; 1.7031 + uint8_t random_store[SCTP_PARAM_BUFFER_SIZE]; 1.7032 + struct sctp_auth_random *p_random = NULL; 1.7033 + uint16_t random_len = 0; 1.7034 + uint8_t hmacs_store[SCTP_PARAM_BUFFER_SIZE]; 1.7035 + struct sctp_auth_hmac_algo *hmacs = NULL; 1.7036 + uint16_t hmacs_len = 0; 1.7037 + uint8_t saw_asconf = 0; 1.7038 + uint8_t saw_asconf_ack = 0; 1.7039 + uint8_t chunks_store[SCTP_PARAM_BUFFER_SIZE]; 1.7040 + struct sctp_auth_chunk_list *chunks = NULL; 1.7041 + uint16_t num_chunks = 0; 1.7042 + sctp_key_t *new_key; 1.7043 + uint32_t keylen; 1.7044 + int got_random = 0, got_hmacs = 0, got_chklist = 0; 1.7045 + uint8_t ecn_allowed; 1.7046 +#ifdef INET 1.7047 + struct sockaddr_in sin; 1.7048 +#endif 1.7049 +#ifdef INET6 1.7050 + struct sockaddr_in6 sin6; 1.7051 +#endif 1.7052 + 1.7053 + /* First get the destination address setup too. */ 1.7054 +#ifdef INET 1.7055 + memset(&sin, 0, sizeof(sin)); 1.7056 + sin.sin_family = AF_INET; 1.7057 +#ifdef HAVE_SIN_LEN 1.7058 + sin.sin_len = sizeof(sin); 1.7059 +#endif 1.7060 + sin.sin_port = stcb->rport; 1.7061 +#endif 1.7062 +#ifdef INET6 1.7063 + memset(&sin6, 0, sizeof(sin6)); 1.7064 + sin6.sin6_family = AF_INET6; 1.7065 +#ifdef HAVE_SIN6_LEN 1.7066 + sin6.sin6_len = sizeof(struct sockaddr_in6); 1.7067 +#endif 1.7068 + sin6.sin6_port = stcb->rport; 1.7069 +#endif 1.7070 + if (altsa) { 1.7071 + sa = altsa; 1.7072 + } else { 1.7073 + sa = src; 1.7074 + } 1.7075 + /* Turn off ECN until we get through all params */ 1.7076 + ecn_allowed = 0; 1.7077 + TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1.7078 + /* mark all addresses that we have currently on the list */ 1.7079 + net->dest_state |= SCTP_ADDR_NOT_IN_ASSOC; 1.7080 + } 1.7081 + /* does the source address already exist? if so skip it */ 1.7082 + inp = stcb->sctp_ep; 1.7083 + atomic_add_int(&stcb->asoc.refcnt, 1); 1.7084 + stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net_tmp, dst, stcb); 1.7085 + atomic_add_int(&stcb->asoc.refcnt, -1); 1.7086 + 1.7087 + if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || inp == NULL) { 1.7088 + /* we must add the source address */ 1.7089 + /* no scope set here since we have a tcb already. 
*/ 1.7090 + switch (sa->sa_family) { 1.7091 +#ifdef INET 1.7092 + case AF_INET: 1.7093 + if (stcb->asoc.scope.ipv4_addr_legal) { 1.7094 + if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_2)) { 1.7095 + return (-1); 1.7096 + } 1.7097 + } 1.7098 + break; 1.7099 +#endif 1.7100 +#ifdef INET6 1.7101 + case AF_INET6: 1.7102 + if (stcb->asoc.scope.ipv6_addr_legal) { 1.7103 + if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_3)) { 1.7104 + return (-2); 1.7105 + } 1.7106 + } 1.7107 + break; 1.7108 +#endif 1.7109 +#if defined(__Userspace__) 1.7110 + case AF_CONN: 1.7111 + if (stcb->asoc.scope.conn_addr_legal) { 1.7112 + if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_3)) { 1.7113 + return (-2); 1.7114 + } 1.7115 + } 1.7116 + break; 1.7117 +#endif 1.7118 + default: 1.7119 + break; 1.7120 + } 1.7121 + } else { 1.7122 + if (net_tmp != NULL && stcb_tmp == stcb) { 1.7123 + net_tmp->dest_state &= ~SCTP_ADDR_NOT_IN_ASSOC; 1.7124 + } else if (stcb_tmp != stcb) { 1.7125 + /* It belongs to another association? */ 1.7126 + if (stcb_tmp) 1.7127 + SCTP_TCB_UNLOCK(stcb_tmp); 1.7128 + return (-3); 1.7129 + } 1.7130 + } 1.7131 + if (stcb->asoc.state == 0) { 1.7132 + /* the assoc was freed? */ 1.7133 + return (-4); 1.7134 + } 1.7135 + /* 1.7136 + * peer must explicitly turn this on. This may have been initialized 1.7137 + * to be "on" in order to allow local addr changes while INIT's are 1.7138 + * in flight. 1.7139 + */ 1.7140 + stcb->asoc.peer_supports_asconf = 0; 1.7141 + /* now we must go through each of the params. */ 1.7142 + phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf)); 1.7143 + while (phdr) { 1.7144 + ptype = ntohs(phdr->param_type); 1.7145 + plen = ntohs(phdr->param_length); 1.7146 + /* 1.7147 + * SCTP_PRINTF("ptype => %0x, plen => %d\n", (uint32_t)ptype, 1.7148 + * (int)plen); 1.7149 + */ 1.7150 + if (offset + plen > limit) { 1.7151 + break; 1.7152 + } 1.7153 + if (plen == 0) { 1.7154 + break; 1.7155 + } 1.7156 +#ifdef INET 1.7157 + if (ptype == SCTP_IPV4_ADDRESS) { 1.7158 + if (stcb->asoc.scope.ipv4_addr_legal) { 1.7159 + struct sctp_ipv4addr_param *p4, p4_buf; 1.7160 + 1.7161 + /* ok get the v4 address and check/add */ 1.7162 + phdr = sctp_get_next_param(m, offset, 1.7163 + (struct sctp_paramhdr *)&p4_buf, 1.7164 + sizeof(p4_buf)); 1.7165 + if (plen != sizeof(struct sctp_ipv4addr_param) || 1.7166 + phdr == NULL) { 1.7167 + return (-5); 1.7168 + } 1.7169 + p4 = (struct sctp_ipv4addr_param *)phdr; 1.7170 + sin.sin_addr.s_addr = p4->addr; 1.7171 + if (IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) { 1.7172 + /* Skip multi-cast addresses */ 1.7173 + goto next_param; 1.7174 + } 1.7175 + if ((sin.sin_addr.s_addr == INADDR_BROADCAST) || 1.7176 + (sin.sin_addr.s_addr == INADDR_ANY)) { 1.7177 + goto next_param; 1.7178 + } 1.7179 + sa = (struct sockaddr *)&sin; 1.7180 + inp = stcb->sctp_ep; 1.7181 + atomic_add_int(&stcb->asoc.refcnt, 1); 1.7182 + stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net, 1.7183 + dst, stcb); 1.7184 + atomic_add_int(&stcb->asoc.refcnt, -1); 1.7185 + 1.7186 + if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || 1.7187 + inp == NULL) { 1.7188 + /* we must add the source address */ 1.7189 + /* 1.7190 + * no scope set since we have a tcb 1.7191 + * already 1.7192 + */ 1.7193 + 1.7194 + /* 1.7195 + * we must validate the state again 1.7196 + * here 1.7197 + */ 1.7198 + add_it_now: 1.7199 + if (stcb->asoc.state == 0) { 1.7200 + /* the assoc was freed? 
*/ 1.7201 + return (-7); 1.7202 + } 1.7203 + if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_4)) { 1.7204 + return (-8); 1.7205 + } 1.7206 + } else if (stcb_tmp == stcb) { 1.7207 + if (stcb->asoc.state == 0) { 1.7208 + /* the assoc was freed? */ 1.7209 + return (-10); 1.7210 + } 1.7211 + if (net != NULL) { 1.7212 + /* clear flag */ 1.7213 + net->dest_state &= 1.7214 + ~SCTP_ADDR_NOT_IN_ASSOC; 1.7215 + } 1.7216 + } else { 1.7217 + /* 1.7218 + * strange, address is in another 1.7219 + * assoc? straighten out locks. 1.7220 + */ 1.7221 + if (stcb_tmp) { 1.7222 + if (SCTP_GET_STATE(&stcb_tmp->asoc) & SCTP_STATE_COOKIE_WAIT) { 1.7223 + /* in setup state we abort this guy */ 1.7224 + sctp_abort_an_association(stcb_tmp->sctp_ep, 1.7225 + stcb_tmp, NULL, SCTP_SO_NOT_LOCKED); 1.7226 + goto add_it_now; 1.7227 + } 1.7228 + SCTP_TCB_UNLOCK(stcb_tmp); 1.7229 + } 1.7230 + 1.7231 + if (stcb->asoc.state == 0) { 1.7232 + /* the assoc was freed? */ 1.7233 + return (-12); 1.7234 + } 1.7235 + return (-13); 1.7236 + } 1.7237 + } 1.7238 + } else 1.7239 +#endif 1.7240 +#ifdef INET6 1.7241 + if (ptype == SCTP_IPV6_ADDRESS) { 1.7242 + if (stcb->asoc.scope.ipv6_addr_legal) { 1.7243 + /* ok get the v6 address and check/add */ 1.7244 + struct sctp_ipv6addr_param *p6, p6_buf; 1.7245 + 1.7246 + phdr = sctp_get_next_param(m, offset, 1.7247 + (struct sctp_paramhdr *)&p6_buf, 1.7248 + sizeof(p6_buf)); 1.7249 + if (plen != sizeof(struct sctp_ipv6addr_param) || 1.7250 + phdr == NULL) { 1.7251 + return (-14); 1.7252 + } 1.7253 + p6 = (struct sctp_ipv6addr_param *)phdr; 1.7254 + memcpy((caddr_t)&sin6.sin6_addr, p6->addr, 1.7255 + sizeof(p6->addr)); 1.7256 + if (IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) { 1.7257 + /* Skip multi-cast addresses */ 1.7258 + goto next_param; 1.7259 + } 1.7260 + if (IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) { 1.7261 + /* Link local make no sense without scope */ 1.7262 + goto next_param; 1.7263 + } 1.7264 + sa = (struct sockaddr *)&sin6; 1.7265 + inp = stcb->sctp_ep; 1.7266 + atomic_add_int(&stcb->asoc.refcnt, 1); 1.7267 + stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net, 1.7268 + dst, stcb); 1.7269 + atomic_add_int(&stcb->asoc.refcnt, -1); 1.7270 + if (stcb_tmp == NULL && 1.7271 + (inp == stcb->sctp_ep || inp == NULL)) { 1.7272 + /* 1.7273 + * we must validate the state again 1.7274 + * here 1.7275 + */ 1.7276 + add_it_now6: 1.7277 + if (stcb->asoc.state == 0) { 1.7278 + /* the assoc was freed? */ 1.7279 + return (-16); 1.7280 + } 1.7281 + /* 1.7282 + * we must add the address, no scope 1.7283 + * set 1.7284 + */ 1.7285 + if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_5)) { 1.7286 + return (-17); 1.7287 + } 1.7288 + } else if (stcb_tmp == stcb) { 1.7289 + /* 1.7290 + * we must validate the state again 1.7291 + * here 1.7292 + */ 1.7293 + if (stcb->asoc.state == 0) { 1.7294 + /* the assoc was freed? */ 1.7295 + return (-19); 1.7296 + } 1.7297 + if (net != NULL) { 1.7298 + /* clear flag */ 1.7299 + net->dest_state &= 1.7300 + ~SCTP_ADDR_NOT_IN_ASSOC; 1.7301 + } 1.7302 + } else { 1.7303 + /* 1.7304 + * strange, address is in another 1.7305 + * assoc? straighten out locks. 
1.7306 + */ 1.7307 + if (stcb_tmp) 1.7308 + if (SCTP_GET_STATE(&stcb_tmp->asoc) & SCTP_STATE_COOKIE_WAIT) { 1.7309 + /* in setup state we abort this guy */ 1.7310 + sctp_abort_an_association(stcb_tmp->sctp_ep, 1.7311 + stcb_tmp, NULL, SCTP_SO_NOT_LOCKED); 1.7312 + goto add_it_now6; 1.7313 + } 1.7314 + SCTP_TCB_UNLOCK(stcb_tmp); 1.7315 + 1.7316 + if (stcb->asoc.state == 0) { 1.7317 + /* the assoc was freed? */ 1.7318 + return (-21); 1.7319 + } 1.7320 + return (-22); 1.7321 + } 1.7322 + } 1.7323 + } else 1.7324 +#endif 1.7325 + if (ptype == SCTP_ECN_CAPABLE) { 1.7326 + ecn_allowed = 1; 1.7327 + } else if (ptype == SCTP_ULP_ADAPTATION) { 1.7328 + if (stcb->asoc.state != SCTP_STATE_OPEN) { 1.7329 + struct sctp_adaptation_layer_indication ai, *aip; 1.7330 + 1.7331 + phdr = sctp_get_next_param(m, offset, 1.7332 + (struct sctp_paramhdr *)&ai, sizeof(ai)); 1.7333 + aip = (struct sctp_adaptation_layer_indication *)phdr; 1.7334 + if (aip) { 1.7335 + stcb->asoc.peers_adaptation = ntohl(aip->indication); 1.7336 + stcb->asoc.adaptation_needed = 1; 1.7337 + } 1.7338 + } 1.7339 + } else if (ptype == SCTP_SET_PRIM_ADDR) { 1.7340 + struct sctp_asconf_addr_param lstore, *fee; 1.7341 + int lptype; 1.7342 + struct sockaddr *lsa = NULL; 1.7343 +#ifdef INET 1.7344 + struct sctp_asconf_addrv4_param *fii; 1.7345 +#endif 1.7346 + 1.7347 + stcb->asoc.peer_supports_asconf = 1; 1.7348 + if (plen > sizeof(lstore)) { 1.7349 + return (-23); 1.7350 + } 1.7351 + phdr = sctp_get_next_param(m, offset, 1.7352 + (struct sctp_paramhdr *)&lstore, 1.7353 + min(plen,sizeof(lstore))); 1.7354 + if (phdr == NULL) { 1.7355 + return (-24); 1.7356 + } 1.7357 + fee = (struct sctp_asconf_addr_param *)phdr; 1.7358 + lptype = ntohs(fee->addrp.ph.param_type); 1.7359 + switch (lptype) { 1.7360 +#ifdef INET 1.7361 + case SCTP_IPV4_ADDRESS: 1.7362 + if (plen != 1.7363 + sizeof(struct sctp_asconf_addrv4_param)) { 1.7364 + SCTP_PRINTF("Sizeof setprim in init/init ack not %d but %d - ignored\n", 1.7365 + (int)sizeof(struct sctp_asconf_addrv4_param), 1.7366 + plen); 1.7367 + } else { 1.7368 + fii = (struct sctp_asconf_addrv4_param *)fee; 1.7369 + sin.sin_addr.s_addr = fii->addrp.addr; 1.7370 + lsa = (struct sockaddr *)&sin; 1.7371 + } 1.7372 + break; 1.7373 +#endif 1.7374 +#ifdef INET6 1.7375 + case SCTP_IPV6_ADDRESS: 1.7376 + if (plen != 1.7377 + sizeof(struct sctp_asconf_addr_param)) { 1.7378 + SCTP_PRINTF("Sizeof setprim (v6) in init/init ack not %d but %d - ignored\n", 1.7379 + (int)sizeof(struct sctp_asconf_addr_param), 1.7380 + plen); 1.7381 + } else { 1.7382 + memcpy(sin6.sin6_addr.s6_addr, 1.7383 + fee->addrp.addr, 1.7384 + sizeof(fee->addrp.addr)); 1.7385 + lsa = (struct sockaddr *)&sin6; 1.7386 + } 1.7387 + break; 1.7388 +#endif 1.7389 + default: 1.7390 + break; 1.7391 + } 1.7392 + if (lsa) { 1.7393 + (void)sctp_set_primary_addr(stcb, sa, NULL); 1.7394 + } 1.7395 + } else if (ptype == SCTP_HAS_NAT_SUPPORT) { 1.7396 + stcb->asoc.peer_supports_nat = 1; 1.7397 + } else if (ptype == SCTP_PRSCTP_SUPPORTED) { 1.7398 + /* Peer supports pr-sctp */ 1.7399 + stcb->asoc.peer_supports_prsctp = 1; 1.7400 + } else if (ptype == SCTP_SUPPORTED_CHUNK_EXT) { 1.7401 + /* A supported extension chunk */ 1.7402 + struct sctp_supported_chunk_types_param *pr_supported; 1.7403 + uint8_t local_store[SCTP_PARAM_BUFFER_SIZE]; 1.7404 + int num_ent, i; 1.7405 + 1.7406 + phdr = sctp_get_next_param(m, offset, 1.7407 + (struct sctp_paramhdr *)&local_store, min(sizeof(local_store),plen)); 1.7408 + if (phdr == NULL) { 1.7409 + return (-25); 1.7410 + } 1.7411 + 
stcb->asoc.peer_supports_asconf = 0; 1.7412 + stcb->asoc.peer_supports_prsctp = 0; 1.7413 + stcb->asoc.peer_supports_pktdrop = 0; 1.7414 + stcb->asoc.peer_supports_strreset = 0; 1.7415 + stcb->asoc.peer_supports_nr_sack = 0; 1.7416 + stcb->asoc.peer_supports_auth = 0; 1.7417 + pr_supported = (struct sctp_supported_chunk_types_param *)phdr; 1.7418 + num_ent = plen - sizeof(struct sctp_paramhdr); 1.7419 + for (i = 0; i < num_ent; i++) { 1.7420 + switch (pr_supported->chunk_types[i]) { 1.7421 + case SCTP_ASCONF: 1.7422 + case SCTP_ASCONF_ACK: 1.7423 + stcb->asoc.peer_supports_asconf = 1; 1.7424 + break; 1.7425 + case SCTP_FORWARD_CUM_TSN: 1.7426 + stcb->asoc.peer_supports_prsctp = 1; 1.7427 + break; 1.7428 + case SCTP_PACKET_DROPPED: 1.7429 + stcb->asoc.peer_supports_pktdrop = 1; 1.7430 + break; 1.7431 + case SCTP_NR_SELECTIVE_ACK: 1.7432 + stcb->asoc.peer_supports_nr_sack = 1; 1.7433 + break; 1.7434 + case SCTP_STREAM_RESET: 1.7435 + stcb->asoc.peer_supports_strreset = 1; 1.7436 + break; 1.7437 + case SCTP_AUTHENTICATION: 1.7438 + stcb->asoc.peer_supports_auth = 1; 1.7439 + break; 1.7440 + default: 1.7441 + /* one I have not learned yet */ 1.7442 + break; 1.7443 + 1.7444 + } 1.7445 + } 1.7446 + } else if (ptype == SCTP_RANDOM) { 1.7447 + if (plen > sizeof(random_store)) 1.7448 + break; 1.7449 + if (got_random) { 1.7450 + /* already processed a RANDOM */ 1.7451 + goto next_param; 1.7452 + } 1.7453 + phdr = sctp_get_next_param(m, offset, 1.7454 + (struct sctp_paramhdr *)random_store, 1.7455 + min(sizeof(random_store),plen)); 1.7456 + if (phdr == NULL) 1.7457 + return (-26); 1.7458 + p_random = (struct sctp_auth_random *)phdr; 1.7459 + random_len = plen - sizeof(*p_random); 1.7460 + /* enforce the random length */ 1.7461 + if (random_len != SCTP_AUTH_RANDOM_SIZE_REQUIRED) { 1.7462 + SCTPDBG(SCTP_DEBUG_AUTH1, "SCTP: invalid RANDOM len\n"); 1.7463 + return (-27); 1.7464 + } 1.7465 + got_random = 1; 1.7466 + } else if (ptype == SCTP_HMAC_LIST) { 1.7467 + int num_hmacs; 1.7468 + int i; 1.7469 + 1.7470 + if (plen > sizeof(hmacs_store)) 1.7471 + break; 1.7472 + if (got_hmacs) { 1.7473 + /* already processed a HMAC list */ 1.7474 + goto next_param; 1.7475 + } 1.7476 + phdr = sctp_get_next_param(m, offset, 1.7477 + (struct sctp_paramhdr *)hmacs_store, 1.7478 + min(plen,sizeof(hmacs_store))); 1.7479 + if (phdr == NULL) 1.7480 + return (-28); 1.7481 + hmacs = (struct sctp_auth_hmac_algo *)phdr; 1.7482 + hmacs_len = plen - sizeof(*hmacs); 1.7483 + num_hmacs = hmacs_len / sizeof(hmacs->hmac_ids[0]); 1.7484 + /* validate the hmac list */ 1.7485 + if (sctp_verify_hmac_param(hmacs, num_hmacs)) { 1.7486 + return (-29); 1.7487 + } 1.7488 + if (stcb->asoc.peer_hmacs != NULL) 1.7489 + sctp_free_hmaclist(stcb->asoc.peer_hmacs); 1.7490 + stcb->asoc.peer_hmacs = sctp_alloc_hmaclist(num_hmacs); 1.7491 + if (stcb->asoc.peer_hmacs != NULL) { 1.7492 + for (i = 0; i < num_hmacs; i++) { 1.7493 + (void)sctp_auth_add_hmacid(stcb->asoc.peer_hmacs, 1.7494 + ntohs(hmacs->hmac_ids[i])); 1.7495 + } 1.7496 + } 1.7497 + got_hmacs = 1; 1.7498 + } else if (ptype == SCTP_CHUNK_LIST) { 1.7499 + int i; 1.7500 + 1.7501 + if (plen > sizeof(chunks_store)) 1.7502 + break; 1.7503 + if (got_chklist) { 1.7504 + /* already processed a Chunks list */ 1.7505 + goto next_param; 1.7506 + } 1.7507 + phdr = sctp_get_next_param(m, offset, 1.7508 + (struct sctp_paramhdr *)chunks_store, 1.7509 + min(plen,sizeof(chunks_store))); 1.7510 + if (phdr == NULL) 1.7511 + return (-30); 1.7512 + chunks = (struct sctp_auth_chunk_list *)phdr; 1.7513 + num_chunks = 
plen - sizeof(*chunks); 1.7514 + if (stcb->asoc.peer_auth_chunks != NULL) 1.7515 + sctp_clear_chunklist(stcb->asoc.peer_auth_chunks); 1.7516 + else 1.7517 + stcb->asoc.peer_auth_chunks = sctp_alloc_chunklist(); 1.7518 + for (i = 0; i < num_chunks; i++) { 1.7519 + (void)sctp_auth_add_chunk(chunks->chunk_types[i], 1.7520 + stcb->asoc.peer_auth_chunks); 1.7521 + /* record asconf/asconf-ack if listed */ 1.7522 + if (chunks->chunk_types[i] == SCTP_ASCONF) 1.7523 + saw_asconf = 1; 1.7524 + if (chunks->chunk_types[i] == SCTP_ASCONF_ACK) 1.7525 + saw_asconf_ack = 1; 1.7526 + 1.7527 + } 1.7528 + got_chklist = 1; 1.7529 + } else if ((ptype == SCTP_HEARTBEAT_INFO) || 1.7530 + (ptype == SCTP_STATE_COOKIE) || 1.7531 + (ptype == SCTP_UNRECOG_PARAM) || 1.7532 + (ptype == SCTP_COOKIE_PRESERVE) || 1.7533 + (ptype == SCTP_SUPPORTED_ADDRTYPE) || 1.7534 + (ptype == SCTP_ADD_IP_ADDRESS) || 1.7535 + (ptype == SCTP_DEL_IP_ADDRESS) || 1.7536 + (ptype == SCTP_ERROR_CAUSE_IND) || 1.7537 + (ptype == SCTP_SUCCESS_REPORT)) { 1.7538 + /* don't care */ ; 1.7539 + } else { 1.7540 + if ((ptype & 0x8000) == 0x0000) { 1.7541 + /* 1.7542 + * must stop processing the rest of the 1.7543 + * param's. Any report bits were handled 1.7544 + * with the call to 1.7545 + * sctp_arethere_unrecognized_parameters() 1.7546 + * when the INIT or INIT-ACK was first seen. 1.7547 + */ 1.7548 + break; 1.7549 + } 1.7550 + } 1.7551 + 1.7552 + next_param: 1.7553 + offset += SCTP_SIZE32(plen); 1.7554 + if (offset >= limit) { 1.7555 + break; 1.7556 + } 1.7557 + phdr = sctp_get_next_param(m, offset, &parm_buf, 1.7558 + sizeof(parm_buf)); 1.7559 + } 1.7560 + /* Now check to see if we need to purge any addresses */ 1.7561 + TAILQ_FOREACH_SAFE(net, &stcb->asoc.nets, sctp_next, nnet) { 1.7562 + if ((net->dest_state & SCTP_ADDR_NOT_IN_ASSOC) == 1.7563 + SCTP_ADDR_NOT_IN_ASSOC) { 1.7564 + /* This address has been removed from the asoc */ 1.7565 + /* remove and free it */ 1.7566 + stcb->asoc.numnets--; 1.7567 + TAILQ_REMOVE(&stcb->asoc.nets, net, sctp_next); 1.7568 + sctp_free_remote_addr(net); 1.7569 + if (net == stcb->asoc.primary_destination) { 1.7570 + stcb->asoc.primary_destination = NULL; 1.7571 + sctp_select_primary_destination(stcb); 1.7572 + } 1.7573 + } 1.7574 + } 1.7575 + if (ecn_allowed == 0) { 1.7576 + stcb->asoc.ecn_allowed = 0; 1.7577 + } 1.7578 + /* validate authentication required parameters */ 1.7579 + if (got_random && got_hmacs) { 1.7580 + stcb->asoc.peer_supports_auth = 1; 1.7581 + } else { 1.7582 + stcb->asoc.peer_supports_auth = 0; 1.7583 + } 1.7584 + if (!stcb->asoc.peer_supports_auth && got_chklist) { 1.7585 + /* peer does not support auth but sent a chunks list? */ 1.7586 + return (-31); 1.7587 + } 1.7588 + if (!SCTP_BASE_SYSCTL(sctp_asconf_auth_nochk) && stcb->asoc.peer_supports_asconf && 1.7589 + !stcb->asoc.peer_supports_auth) { 1.7590 + /* peer supports asconf but not auth? 
*/ 1.7591 + return (-32); 1.7592 + } else if ((stcb->asoc.peer_supports_asconf) && (stcb->asoc.peer_supports_auth) && 1.7593 + ((saw_asconf == 0) || (saw_asconf_ack == 0))) { 1.7594 + return (-33); 1.7595 + } 1.7596 + /* concatenate the full random key */ 1.7597 + keylen = sizeof(*p_random) + random_len + sizeof(*hmacs) + hmacs_len; 1.7598 + if (chunks != NULL) { 1.7599 + keylen += sizeof(*chunks) + num_chunks; 1.7600 + } 1.7601 + new_key = sctp_alloc_key(keylen); 1.7602 + if (new_key != NULL) { 1.7603 + /* copy in the RANDOM */ 1.7604 + if (p_random != NULL) { 1.7605 + keylen = sizeof(*p_random) + random_len; 1.7606 + bcopy(p_random, new_key->key, keylen); 1.7607 + } 1.7608 + /* append in the AUTH chunks */ 1.7609 + if (chunks != NULL) { 1.7610 + bcopy(chunks, new_key->key + keylen, 1.7611 + sizeof(*chunks) + num_chunks); 1.7612 + keylen += sizeof(*chunks) + num_chunks; 1.7613 + } 1.7614 + /* append in the HMACs */ 1.7615 + if (hmacs != NULL) { 1.7616 + bcopy(hmacs, new_key->key + keylen, 1.7617 + sizeof(*hmacs) + hmacs_len); 1.7618 + } 1.7619 + } else { 1.7620 + /* failed to get memory for the key */ 1.7621 + return (-34); 1.7622 + } 1.7623 + if (stcb->asoc.authinfo.peer_random != NULL) 1.7624 + sctp_free_key(stcb->asoc.authinfo.peer_random); 1.7625 + stcb->asoc.authinfo.peer_random = new_key; 1.7626 + sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.assoc_keyid); 1.7627 + sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.recv_keyid); 1.7628 + 1.7629 + return (0); 1.7630 +} 1.7631 + 1.7632 +int 1.7633 +sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa, 1.7634 + struct sctp_nets *net) 1.7635 +{ 1.7636 + /* make sure the requested primary address exists in the assoc */ 1.7637 + if (net == NULL && sa) 1.7638 + net = sctp_findnet(stcb, sa); 1.7639 + 1.7640 + if (net == NULL) { 1.7641 + /* didn't find the requested primary address! */ 1.7642 + return (-1); 1.7643 + } else { 1.7644 + /* set the primary address */ 1.7645 + if (net->dest_state & SCTP_ADDR_UNCONFIRMED) { 1.7646 + /* Must be confirmed, so queue to set */ 1.7647 + net->dest_state |= SCTP_ADDR_REQ_PRIMARY; 1.7648 + return (0); 1.7649 + } 1.7650 + stcb->asoc.primary_destination = net; 1.7651 + if (!(net->dest_state & SCTP_ADDR_PF) && (stcb->asoc.alternate)) { 1.7652 + sctp_free_remote_addr(stcb->asoc.alternate); 1.7653 + stcb->asoc.alternate = NULL; 1.7654 + } 1.7655 + net = TAILQ_FIRST(&stcb->asoc.nets); 1.7656 + if (net != stcb->asoc.primary_destination) { 1.7657 + /* first one on the list is NOT the primary 1.7658 + * sctp_cmpaddr() is much more efficient if 1.7659 + * the primary is the first on the list, make it 1.7660 + * so. 1.7661 + */ 1.7662 + TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); 1.7663 + TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); 1.7664 + } 1.7665 + return (0); 1.7666 + } 1.7667 +} 1.7668 + 1.7669 +int 1.7670 +sctp_is_vtag_good(uint32_t tag, uint16_t lport, uint16_t rport, struct timeval *now) 1.7671 +{ 1.7672 + /* 1.7673 + * This function serves two purposes. It will see if a TAG can be 1.7674 + * re-used and return 1 for yes it is ok and 0 for don't use that 1.7675 + * tag. A secondary function it will do is purge out old tags that 1.7676 + * can be removed. 
1.7677 + */ 1.7678 + struct sctpvtaghead *chain; 1.7679 + struct sctp_tagblock *twait_block; 1.7680 + struct sctpasochead *head; 1.7681 + struct sctp_tcb *stcb; 1.7682 + int i; 1.7683 + 1.7684 + SCTP_INP_INFO_RLOCK(); 1.7685 + head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag, 1.7686 + SCTP_BASE_INFO(hashasocmark))]; 1.7687 + if (head == NULL) { 1.7688 + /* invalid vtag */ 1.7689 + goto skip_vtag_check; 1.7690 + } 1.7691 + LIST_FOREACH(stcb, head, sctp_asocs) { 1.7692 + /* We choose not to lock anything here. TCB's can't be 1.7693 + * removed since we have the read lock, so they can't 1.7694 + * be freed on us, same thing for the INP. I may 1.7695 + * be wrong with this assumption, but we will go 1.7696 + * with it for now :-) 1.7697 + */ 1.7698 + if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) { 1.7699 + continue; 1.7700 + } 1.7701 + if (stcb->asoc.my_vtag == tag) { 1.7702 + /* candidate */ 1.7703 + if (stcb->rport != rport) { 1.7704 + continue; 1.7705 + } 1.7706 + if (stcb->sctp_ep->sctp_lport != lport) { 1.7707 + continue; 1.7708 + } 1.7709 + /* Its a used tag set */ 1.7710 + SCTP_INP_INFO_RUNLOCK(); 1.7711 + return (0); 1.7712 + } 1.7713 + } 1.7714 +skip_vtag_check: 1.7715 + 1.7716 + chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)]; 1.7717 + /* Now what about timed wait ? */ 1.7718 + LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) { 1.7719 + /* 1.7720 + * Block(s) are present, lets see if we have this tag in the 1.7721 + * list 1.7722 + */ 1.7723 + for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) { 1.7724 + if (twait_block->vtag_block[i].v_tag == 0) { 1.7725 + /* not used */ 1.7726 + continue; 1.7727 + } else if ((long)twait_block->vtag_block[i].tv_sec_at_expire < 1.7728 + now->tv_sec) { 1.7729 + /* Audit expires this guy */ 1.7730 + twait_block->vtag_block[i].tv_sec_at_expire = 0; 1.7731 + twait_block->vtag_block[i].v_tag = 0; 1.7732 + twait_block->vtag_block[i].lport = 0; 1.7733 + twait_block->vtag_block[i].rport = 0; 1.7734 + } else if ((twait_block->vtag_block[i].v_tag == tag) && 1.7735 + (twait_block->vtag_block[i].lport == lport) && 1.7736 + (twait_block->vtag_block[i].rport == rport)) { 1.7737 + /* Bad tag, sorry :< */ 1.7738 + SCTP_INP_INFO_RUNLOCK(); 1.7739 + return (0); 1.7740 + } 1.7741 + } 1.7742 + } 1.7743 + SCTP_INP_INFO_RUNLOCK(); 1.7744 + return (1); 1.7745 +} 1.7746 + 1.7747 +static void 1.7748 +sctp_drain_mbufs(struct sctp_tcb *stcb) 1.7749 +{ 1.7750 + /* 1.7751 + * We must hunt this association for MBUF's past the cumack (i.e. 1.7752 + * out of order data that we can renege on). 1.7753 + */ 1.7754 + struct sctp_association *asoc; 1.7755 + struct sctp_tmit_chunk *chk, *nchk; 1.7756 + uint32_t cumulative_tsn_p1; 1.7757 + struct sctp_queued_to_read *ctl, *nctl; 1.7758 + int cnt, strmat; 1.7759 + uint32_t gap, i; 1.7760 + int fnd = 0; 1.7761 + 1.7762 + /* We look for anything larger than the cum-ack + 1 */ 1.7763 + 1.7764 + asoc = &stcb->asoc; 1.7765 + if (asoc->cumulative_tsn == asoc->highest_tsn_inside_map) { 1.7766 + /* none we can reneg on. 
*/ 1.7767 + return; 1.7768 + } 1.7769 + SCTP_STAT_INCR(sctps_protocol_drains_done); 1.7770 + cumulative_tsn_p1 = asoc->cumulative_tsn + 1; 1.7771 + cnt = 0; 1.7772 + /* First look in the re-assembly queue */ 1.7773 + TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) { 1.7774 + if (SCTP_TSN_GT(chk->rec.data.TSN_seq, cumulative_tsn_p1)) { 1.7775 + /* Yep it is above cum-ack */ 1.7776 + cnt++; 1.7777 + SCTP_CALC_TSN_TO_GAP(gap, chk->rec.data.TSN_seq, asoc->mapping_array_base_tsn); 1.7778 + asoc->size_on_reasm_queue = sctp_sbspace_sub(asoc->size_on_reasm_queue, chk->send_size); 1.7779 + sctp_ucount_decr(asoc->cnt_on_reasm_queue); 1.7780 + SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); 1.7781 + TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next); 1.7782 + if (chk->data) { 1.7783 + sctp_m_freem(chk->data); 1.7784 + chk->data = NULL; 1.7785 + } 1.7786 + sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1.7787 + } 1.7788 + } 1.7789 + /* Ok that was fun, now we will drain all the inbound streams? */ 1.7790 + for (strmat = 0; strmat < asoc->streamincnt; strmat++) { 1.7791 + TAILQ_FOREACH_SAFE(ctl, &asoc->strmin[strmat].inqueue, next, nctl) { 1.7792 + if (SCTP_TSN_GT(ctl->sinfo_tsn, cumulative_tsn_p1)) { 1.7793 + /* Yep it is above cum-ack */ 1.7794 + cnt++; 1.7795 + SCTP_CALC_TSN_TO_GAP(gap, ctl->sinfo_tsn, asoc->mapping_array_base_tsn); 1.7796 + asoc->size_on_all_streams = sctp_sbspace_sub(asoc->size_on_all_streams, ctl->length); 1.7797 + sctp_ucount_decr(asoc->cnt_on_all_streams); 1.7798 + SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); 1.7799 + TAILQ_REMOVE(&asoc->strmin[strmat].inqueue, ctl, next); 1.7800 + if (ctl->data) { 1.7801 + sctp_m_freem(ctl->data); 1.7802 + ctl->data = NULL; 1.7803 + } 1.7804 + sctp_free_remote_addr(ctl->whoFrom); 1.7805 + SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), ctl); 1.7806 + SCTP_DECR_READQ_COUNT(); 1.7807 + } 1.7808 + } 1.7809 + } 1.7810 + if (cnt) { 1.7811 + /* We must back down to see what the new highest is */ 1.7812 + for (i = asoc->highest_tsn_inside_map; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) { 1.7813 + SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn); 1.7814 + if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) { 1.7815 + asoc->highest_tsn_inside_map = i; 1.7816 + fnd = 1; 1.7817 + break; 1.7818 + } 1.7819 + } 1.7820 + if (!fnd) { 1.7821 + asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1; 1.7822 + } 1.7823 + 1.7824 + /* 1.7825 + * Question, should we go through the delivery queue? The only 1.7826 + * reason things are on here is the app not reading OR a p-d-api up. 1.7827 + * An attacker COULD send enough in to initiate the PD-API and then 1.7828 + * send a bunch of stuff to other streams... these would wind up on 1.7829 + * the delivery queue.. and then we would not get to them. But in 1.7830 + * order to do this I then have to back-track and un-deliver 1.7831 + * sequence numbers in streams.. el-yucko. I think for now we will 1.7832 + * NOT look at the delivery queue and leave it to be something to 1.7833 + * consider later. An alternative would be to abort the P-D-API with 1.7834 + * a notification and then deliver the data.... Or another method 1.7835 + * might be to keep track of how many times the situation occurs and 1.7836 + * if we see a possible attack underway just abort the association. 
1.7837 + */ 1.7838 +#ifdef SCTP_DEBUG 1.7839 + SCTPDBG(SCTP_DEBUG_PCB1, "Freed %d chunks from reneg harvest\n", cnt); 1.7840 +#endif 1.7841 + /* 1.7842 + * Now do we need to find a new 1.7843 + * asoc->highest_tsn_inside_map? 1.7844 + */ 1.7845 + asoc->last_revoke_count = cnt; 1.7846 + (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 1.7847 + /*sa_ignore NO_NULL_CHK*/ 1.7848 + sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 1.7849 + sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_DRAIN, SCTP_SO_NOT_LOCKED); 1.7850 + } 1.7851 + /* 1.7852 + * Another issue, in un-setting the TSN's in the mapping array we 1.7853 + * DID NOT adjust the highest_tsn marker. This will cause one of two 1.7854 + * things to occur. It may cause us to do extra work in checking for 1.7855 + * our mapping array movement. More importantly it may cause us to 1.7856 + * SACK every datagram. This may not be a bad thing though since we 1.7857 + * will recover once we get our cum-ack above and all this stuff we 1.7858 + * dumped recovered. 1.7859 + */ 1.7860 +} 1.7861 + 1.7862 +void 1.7863 +sctp_drain() 1.7864 +{ 1.7865 + /* 1.7866 + * We must walk the PCB lists for ALL associations here. The system 1.7867 + * is LOW on MBUF's and needs help. This is where reneging will 1.7868 + * occur. We really hope this does NOT happen! 1.7869 + */ 1.7870 +#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 1.7871 + VNET_ITERATOR_DECL(vnet_iter); 1.7872 +#else 1.7873 + struct sctp_inpcb *inp; 1.7874 + struct sctp_tcb *stcb; 1.7875 + 1.7876 + SCTP_STAT_INCR(sctps_protocol_drain_calls); 1.7877 + if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 1.7878 + return; 1.7879 + } 1.7880 +#endif 1.7881 +#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 1.7882 + VNET_LIST_RLOCK_NOSLEEP(); 1.7883 + VNET_FOREACH(vnet_iter) { 1.7884 + CURVNET_SET(vnet_iter); 1.7885 + struct sctp_inpcb *inp; 1.7886 + struct sctp_tcb *stcb; 1.7887 +#endif 1.7888 + 1.7889 +#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 1.7890 + SCTP_STAT_INCR(sctps_protocol_drain_calls); 1.7891 + if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 1.7892 +#ifdef VIMAGE 1.7893 + continue; 1.7894 +#else 1.7895 + return; 1.7896 +#endif 1.7897 + } 1.7898 +#endif 1.7899 + SCTP_INP_INFO_RLOCK(); 1.7900 + LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) { 1.7901 + /* For each endpoint */ 1.7902 + SCTP_INP_RLOCK(inp); 1.7903 + LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 1.7904 + /* For each association */ 1.7905 + SCTP_TCB_LOCK(stcb); 1.7906 + sctp_drain_mbufs(stcb); 1.7907 + SCTP_TCB_UNLOCK(stcb); 1.7908 + } 1.7909 + SCTP_INP_RUNLOCK(inp); 1.7910 + } 1.7911 + SCTP_INP_INFO_RUNLOCK(); 1.7912 +#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 1.7913 + CURVNET_RESTORE(); 1.7914 + } 1.7915 + VNET_LIST_RUNLOCK_NOSLEEP(); 1.7916 +#endif 1.7917 +} 1.7918 + 1.7919 +/* 1.7920 + * start a new iterator 1.7921 + * iterates through all endpoints and associations based on the pcb_state 1.7922 + * flags and asoc_state. "af" (mandatory) is executed for all matching 1.7923 + * assocs and "ef" (optional) is executed when the iterator completes. 1.7924 + * "inpf" (optional) is executed for each new endpoint as it is being 1.7925 + * iterated through. inpe (optional) is called when the inp completes 1.7926 + * its way through all the stcbs. 
1.7927 + */ 1.7928 +int 1.7929 +sctp_initiate_iterator(inp_func inpf, 1.7930 + asoc_func af, 1.7931 + inp_func inpe, 1.7932 + uint32_t pcb_state, 1.7933 + uint32_t pcb_features, 1.7934 + uint32_t asoc_state, 1.7935 + void *argp, 1.7936 + uint32_t argi, 1.7937 + end_func ef, 1.7938 + struct sctp_inpcb *s_inp, 1.7939 + uint8_t chunk_output_off) 1.7940 +{ 1.7941 + struct sctp_iterator *it = NULL; 1.7942 + 1.7943 + if (af == NULL) { 1.7944 + return (-1); 1.7945 + } 1.7946 + SCTP_MALLOC(it, struct sctp_iterator *, sizeof(struct sctp_iterator), 1.7947 + SCTP_M_ITER); 1.7948 + if (it == NULL) { 1.7949 + SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOMEM); 1.7950 + return (ENOMEM); 1.7951 + } 1.7952 + memset(it, 0, sizeof(*it)); 1.7953 + it->function_assoc = af; 1.7954 + it->function_inp = inpf; 1.7955 + if (inpf) 1.7956 + it->done_current_ep = 0; 1.7957 + else 1.7958 + it->done_current_ep = 1; 1.7959 + it->function_atend = ef; 1.7960 + it->pointer = argp; 1.7961 + it->val = argi; 1.7962 + it->pcb_flags = pcb_state; 1.7963 + it->pcb_features = pcb_features; 1.7964 + it->asoc_state = asoc_state; 1.7965 + it->function_inp_end = inpe; 1.7966 + it->no_chunk_output = chunk_output_off; 1.7967 +#if defined(__FreeBSD__) && __FreeBSD_version >= 801000 1.7968 + it->vn = curvnet; 1.7969 +#endif 1.7970 + if (s_inp) { 1.7971 + /* Assume lock is held here */ 1.7972 + it->inp = s_inp; 1.7973 + SCTP_INP_INCR_REF(it->inp); 1.7974 + it->iterator_flags = SCTP_ITERATOR_DO_SINGLE_INP; 1.7975 + } else { 1.7976 + SCTP_INP_INFO_RLOCK(); 1.7977 + it->inp = LIST_FIRST(&SCTP_BASE_INFO(listhead)); 1.7978 + if (it->inp) { 1.7979 + SCTP_INP_INCR_REF(it->inp); 1.7980 + } 1.7981 + SCTP_INP_INFO_RUNLOCK(); 1.7982 + it->iterator_flags = SCTP_ITERATOR_DO_ALL_INP; 1.7983 + 1.7984 + } 1.7985 + SCTP_IPI_ITERATOR_WQ_LOCK(); 1.7986 + 1.7987 + TAILQ_INSERT_TAIL(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr); 1.7988 + if (sctp_it_ctl.iterator_running == 0) { 1.7989 + sctp_wakeup_iterator(); 1.7990 + } 1.7991 + SCTP_IPI_ITERATOR_WQ_UNLOCK(); 1.7992 + /* sa_ignore MEMLEAK {memory is put on the tailq for the iterator} */ 1.7993 + return (0); 1.7994 +}
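Editor's sketch, not part of the patch above: the block comment before sctp_initiate_iterator describes its callback arguments, but no caller appears in this file. The fragment below is only an illustration of how the API could plausibly be driven, assuming the asoc_func/end_func callback typedefs declared in sctp_pcb.h (per-association hook taking (inp, stcb, ptr, val), completion hook taking (ptr, val)); the function and callback names here are invented for the example and the real in-tree callers differ.

static void
example_per_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr, uint32_t val)
{
	/* "af": invoked for every association matching the requested state flags. */
	SCTP_PRINTF("visited stcb %p on inp %p (val=%u)\n", (void *)stcb, (void *)inp, val);
}

static void
example_done(void *ptr, uint32_t val)
{
	/* "ef": invoked once, after the iterator has walked every endpoint. */
	SCTP_PRINTF("iterator pass complete (val=%u)\n", val);
}

static int
example_start_walk(void)
{
	/* No pcb_state/pcb_features/asoc_state filtering and s_inp == NULL, so every
	 * association on every endpoint is visited; a non-zero return means the
	 * iterator could not be queued (bad arguments or ENOMEM). */
	return (sctp_initiate_iterator(NULL,               /* inpf: per-endpoint hook (optional) */
	                               example_per_assoc,  /* af:   per-association hook (mandatory) */
	                               NULL,               /* inpe: endpoint-finished hook (optional) */
	                               0, 0, 0,            /* pcb_state, pcb_features, asoc_state */
	                               NULL, 0,            /* argp, argi handed to the hooks */
	                               example_done,       /* ef:   completion hook (optional) */
	                               NULL,               /* s_inp: NULL => iterate all endpoints */
	                               0));                /* chunk_output_off */
}

As the cleanup loop in sctp_pcb_finish above shows, a queued iterator is dequeued and freed by the stack itself (with its ef hook still called), so the submitter only allocates the request via sctp_initiate_iterator and never frees it.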