1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/netwerk/sctp/src/netinet/sctp_timer.c Wed Dec 31 06:09:35 2014 +0100 1.3 @@ -0,0 +1,1623 @@ 1.4 +/*- 1.5 + * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. 1.6 + * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 1.7 + * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 1.8 + * 1.9 + * Redistribution and use in source and binary forms, with or without 1.10 + * modification, are permitted provided that the following conditions are met: 1.11 + * 1.12 + * a) Redistributions of source code must retain the above copyright notice, 1.13 + * this list of conditions and the following disclaimer. 1.14 + * 1.15 + * b) Redistributions in binary form must reproduce the above copyright 1.16 + * notice, this list of conditions and the following disclaimer in 1.17 + * the documentation and/or other materials provided with the distribution. 1.18 + * 1.19 + * c) Neither the name of Cisco Systems, Inc. nor the names of its 1.20 + * contributors may be used to endorse or promote products derived 1.21 + * from this software without specific prior written permission. 1.22 + * 1.23 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 1.24 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 1.25 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 1.26 + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 1.27 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 1.28 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 1.29 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 1.30 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 1.31 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 1.32 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 1.33 + * THE POSSIBILITY OF SUCH DAMAGE. 1.34 + */ 1.35 + 1.36 +#ifdef __FreeBSD__ 1.37 +#include <sys/cdefs.h> 1.38 +__FBSDID("$FreeBSD: head/sys/netinet/sctp_timer.c 257359 2013-10-29 20:04:50Z tuexen $"); 1.39 +#endif 1.40 + 1.41 +#define _IP_VHL 1.42 +#include <netinet/sctp_os.h> 1.43 +#include <netinet/sctp_pcb.h> 1.44 +#ifdef INET6 1.45 +#if defined(__Userspace_os_FreeBSD) 1.46 +#include <netinet6/sctp6_var.h> 1.47 +#endif 1.48 +#endif 1.49 +#include <netinet/sctp_var.h> 1.50 +#include <netinet/sctp_sysctl.h> 1.51 +#include <netinet/sctp_timer.h> 1.52 +#include <netinet/sctputil.h> 1.53 +#include <netinet/sctp_output.h> 1.54 +#include <netinet/sctp_header.h> 1.55 +#include <netinet/sctp_indata.h> 1.56 +#include <netinet/sctp_asconf.h> 1.57 +#include <netinet/sctp_input.h> 1.58 +#include <netinet/sctp.h> 1.59 +#include <netinet/sctp_uio.h> 1.60 +#if !defined(__Userspace_os_Windows) 1.61 +#include <netinet/udp.h> 1.62 +#endif 1.63 + 1.64 +#if defined(__APPLE__) 1.65 +#define APPLE_FILE_NO 6 1.66 +#endif 1.67 + 1.68 +void 1.69 +sctp_audit_retranmission_queue(struct sctp_association *asoc) 1.70 +{ 1.71 + struct sctp_tmit_chunk *chk; 1.72 + 1.73 + SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n", 1.74 + asoc->sent_queue_retran_cnt, 1.75 + asoc->sent_queue_cnt); 1.76 + asoc->sent_queue_retran_cnt = 0; 1.77 + asoc->sent_queue_cnt = 0; 1.78 + TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 1.79 + if (chk->sent 
== SCTP_DATAGRAM_RESEND) { 1.80 + sctp_ucount_incr(asoc->sent_queue_retran_cnt); 1.81 + } 1.82 + asoc->sent_queue_cnt++; 1.83 + } 1.84 + TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 1.85 + if (chk->sent == SCTP_DATAGRAM_RESEND) { 1.86 + sctp_ucount_incr(asoc->sent_queue_retran_cnt); 1.87 + } 1.88 + } 1.89 + TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) { 1.90 + if (chk->sent == SCTP_DATAGRAM_RESEND) { 1.91 + sctp_ucount_incr(asoc->sent_queue_retran_cnt); 1.92 + } 1.93 + } 1.94 + SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n", 1.95 + asoc->sent_queue_retran_cnt, 1.96 + asoc->sent_queue_cnt); 1.97 +} 1.98 + 1.99 +int 1.100 +sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1.101 + struct sctp_nets *net, uint16_t threshold) 1.102 +{ 1.103 + if (net) { 1.104 + net->error_count++; 1.105 + SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n", 1.106 + (void *)net, net->error_count, 1.107 + net->failure_threshold); 1.108 + if (net->error_count > net->failure_threshold) { 1.109 + /* We had a threshold failure */ 1.110 + if (net->dest_state & SCTP_ADDR_REACHABLE) { 1.111 + net->dest_state &= ~SCTP_ADDR_REACHABLE; 1.112 + net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY; 1.113 + net->dest_state &= ~SCTP_ADDR_PF; 1.114 + sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 1.115 + stcb, 0, 1.116 + (void *)net, SCTP_SO_NOT_LOCKED); 1.117 + } 1.118 + } else if ((net->pf_threshold < net->failure_threshold) && 1.119 + (net->error_count > net->pf_threshold)) { 1.120 + if (!(net->dest_state & SCTP_ADDR_PF)) { 1.121 + net->dest_state |= SCTP_ADDR_PF; 1.122 + net->last_active = sctp_get_tick_count(); 1.123 + sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED); 1.124 + sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_TIMER + SCTP_LOC_3); 1.125 + sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 1.126 + } 1.127 + } 1.128 + } 1.129 + if (stcb == NULL) 1.130 + return (0); 
1.131 + 1.132 + if (net) { 1.133 + if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) { 1.134 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 1.135 + sctp_misc_ints(SCTP_THRESHOLD_INCR, 1.136 + stcb->asoc.overall_error_count, 1.137 + (stcb->asoc.overall_error_count+1), 1.138 + SCTP_FROM_SCTP_TIMER, 1.139 + __LINE__); 1.140 + } 1.141 + stcb->asoc.overall_error_count++; 1.142 + } 1.143 + } else { 1.144 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 1.145 + sctp_misc_ints(SCTP_THRESHOLD_INCR, 1.146 + stcb->asoc.overall_error_count, 1.147 + (stcb->asoc.overall_error_count+1), 1.148 + SCTP_FROM_SCTP_TIMER, 1.149 + __LINE__); 1.150 + } 1.151 + stcb->asoc.overall_error_count++; 1.152 + } 1.153 + SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n", 1.154 + (void *)&stcb->asoc, stcb->asoc.overall_error_count, 1.155 + (uint32_t)threshold, 1.156 + ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state)); 1.157 + /* 1.158 + * We specifically do not do >= to give the assoc one more change 1.159 + * before we fail it. 
1.160 + */ 1.161 + if (stcb->asoc.overall_error_count > threshold) { 1.162 + /* Abort notification sends a ULP notify */ 1.163 + struct mbuf *oper; 1.164 + 1.165 + oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1.166 + 0, M_NOWAIT, 1, MT_DATA); 1.167 + if (oper) { 1.168 + struct sctp_paramhdr *ph; 1.169 + uint32_t *ippp; 1.170 + 1.171 + SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 1.172 + sizeof(uint32_t); 1.173 + ph = mtod(oper, struct sctp_paramhdr *); 1.174 + ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1.175 + ph->param_length = htons(SCTP_BUF_LEN(oper)); 1.176 + ippp = (uint32_t *) (ph + 1); 1.177 + *ippp = htonl(SCTP_FROM_SCTP_TIMER+SCTP_LOC_1); 1.178 + } 1.179 + inp->last_abort_code = SCTP_FROM_SCTP_TIMER+SCTP_LOC_1; 1.180 + sctp_abort_an_association(inp, stcb, oper, SCTP_SO_NOT_LOCKED); 1.181 + return (1); 1.182 + } 1.183 + return (0); 1.184 +} 1.185 + 1.186 +/* 1.187 + * sctp_find_alternate_net() returns a non-NULL pointer as long 1.188 + * the argument net is non-NULL. 1.189 + */ 1.190 +struct sctp_nets * 1.191 +sctp_find_alternate_net(struct sctp_tcb *stcb, 1.192 + struct sctp_nets *net, 1.193 + int mode) 1.194 +{ 1.195 + /* Find and return an alternate network if possible */ 1.196 + struct sctp_nets *alt, *mnet, *min_errors_net = NULL , *max_cwnd_net = NULL; 1.197 + int once; 1.198 + /* JRS 5/14/07 - Initialize min_errors to an impossible value. */ 1.199 + int min_errors = -1; 1.200 + uint32_t max_cwnd = 0; 1.201 + 1.202 + if (stcb->asoc.numnets == 1) { 1.203 + /* No others but net */ 1.204 + return (TAILQ_FIRST(&stcb->asoc.nets)); 1.205 + } 1.206 + /* 1.207 + * JRS 5/14/07 - If mode is set to 2, use the CMT PF find alternate net algorithm. 1.208 + * This algorithm chooses the active destination (not in PF state) with the largest 1.209 + * cwnd value. If all destinations are in PF state, unreachable, or unconfirmed, choose 1.210 + * the desination that is in PF state with the lowest error count. 
In case of a tie, 1.211 + * choose the destination that was most recently active. 1.212 + */ 1.213 + if (mode == 2) { 1.214 + TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) { 1.215 + /* JRS 5/14/07 - If the destination is unreachable or unconfirmed, skip it. */ 1.216 + if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) || 1.217 + (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) { 1.218 + continue; 1.219 + } 1.220 + /* 1.221 + * JRS 5/14/07 - If the destination is reachable but in PF state, compare 1.222 + * the error count of the destination to the minimum error count seen thus far. 1.223 + * Store the destination with the lower error count. If the error counts are 1.224 + * equal, store the destination that was most recently active. 1.225 + */ 1.226 + if (mnet->dest_state & SCTP_ADDR_PF) { 1.227 + /* 1.228 + * JRS 5/14/07 - If the destination under consideration is the current 1.229 + * destination, work as if the error count is one higher. The 1.230 + * actual error count will not be incremented until later in the 1.231 + * t3 handler. 
1.232 + */ 1.233 + if (mnet == net) { 1.234 + if (min_errors == -1) { 1.235 + min_errors = mnet->error_count + 1; 1.236 + min_errors_net = mnet; 1.237 + } else if (mnet->error_count + 1 < min_errors) { 1.238 + min_errors = mnet->error_count + 1; 1.239 + min_errors_net = mnet; 1.240 + } else if (mnet->error_count + 1 == min_errors 1.241 + && mnet->last_active > min_errors_net->last_active) { 1.242 + min_errors_net = mnet; 1.243 + min_errors = mnet->error_count + 1; 1.244 + } 1.245 + continue; 1.246 + } else { 1.247 + if (min_errors == -1) { 1.248 + min_errors = mnet->error_count; 1.249 + min_errors_net = mnet; 1.250 + } else if (mnet->error_count < min_errors) { 1.251 + min_errors = mnet->error_count; 1.252 + min_errors_net = mnet; 1.253 + } else if (mnet->error_count == min_errors 1.254 + && mnet->last_active > min_errors_net->last_active) { 1.255 + min_errors_net = mnet; 1.256 + min_errors = mnet->error_count; 1.257 + } 1.258 + continue; 1.259 + } 1.260 + } 1.261 + /* 1.262 + * JRS 5/14/07 - If the destination is reachable and not in PF state, compare the 1.263 + * cwnd of the destination to the highest cwnd seen thus far. Store the 1.264 + * destination with the higher cwnd value. If the cwnd values are equal, 1.265 + * randomly choose one of the two destinations. 
1.266 + */ 1.267 + if (max_cwnd < mnet->cwnd) { 1.268 + max_cwnd_net = mnet; 1.269 + max_cwnd = mnet->cwnd; 1.270 + } else if (max_cwnd == mnet->cwnd) { 1.271 + uint32_t rndval; 1.272 + uint8_t this_random; 1.273 + 1.274 + if (stcb->asoc.hb_random_idx > 3) { 1.275 + rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 1.276 + memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values)); 1.277 + this_random = stcb->asoc.hb_random_values[0]; 1.278 + stcb->asoc.hb_random_idx++; 1.279 + stcb->asoc.hb_ect_randombit = 0; 1.280 + } else { 1.281 + this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 1.282 + stcb->asoc.hb_random_idx++; 1.283 + stcb->asoc.hb_ect_randombit = 0; 1.284 + } 1.285 + if (this_random % 2 == 1) { 1.286 + max_cwnd_net = mnet; 1.287 + max_cwnd = mnet->cwnd; /* Useless? */ 1.288 + } 1.289 + } 1.290 + } 1.291 + if (max_cwnd_net == NULL) { 1.292 + if (min_errors_net == NULL) { 1.293 + return (net); 1.294 + } 1.295 + return (min_errors_net); 1.296 + } else { 1.297 + return (max_cwnd_net); 1.298 + } 1.299 + } /* JRS 5/14/07 - If mode is set to 1, use the CMT policy for choosing an alternate net. 
*/ 1.300 + else if (mode == 1) { 1.301 + TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) { 1.302 + if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) || 1.303 + (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) { 1.304 + /* 1.305 + * will skip ones that are not-reachable or 1.306 + * unconfirmed 1.307 + */ 1.308 + continue; 1.309 + } 1.310 + if (max_cwnd < mnet->cwnd) { 1.311 + max_cwnd_net = mnet; 1.312 + max_cwnd = mnet->cwnd; 1.313 + } else if (max_cwnd == mnet->cwnd) { 1.314 + uint32_t rndval; 1.315 + uint8_t this_random; 1.316 + 1.317 + if (stcb->asoc.hb_random_idx > 3) { 1.318 + rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 1.319 + memcpy(stcb->asoc.hb_random_values, &rndval, 1.320 + sizeof(stcb->asoc.hb_random_values)); 1.321 + this_random = stcb->asoc.hb_random_values[0]; 1.322 + stcb->asoc.hb_random_idx = 0; 1.323 + stcb->asoc.hb_ect_randombit = 0; 1.324 + } else { 1.325 + this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 1.326 + stcb->asoc.hb_random_idx++; 1.327 + stcb->asoc.hb_ect_randombit = 0; 1.328 + } 1.329 + if (this_random % 2) { 1.330 + max_cwnd_net = mnet; 1.331 + max_cwnd = mnet->cwnd; 1.332 + } 1.333 + } 1.334 + } 1.335 + if (max_cwnd_net) { 1.336 + return (max_cwnd_net); 1.337 + } 1.338 + } 1.339 + mnet = net; 1.340 + once = 0; 1.341 + 1.342 + if (mnet == NULL) { 1.343 + mnet = TAILQ_FIRST(&stcb->asoc.nets); 1.344 + if (mnet == NULL) { 1.345 + return (NULL); 1.346 + } 1.347 + } 1.348 + do { 1.349 + alt = TAILQ_NEXT(mnet, sctp_next); 1.350 + if (alt == NULL) 1.351 + { 1.352 + once++; 1.353 + if (once > 1) { 1.354 + break; 1.355 + } 1.356 + alt = TAILQ_FIRST(&stcb->asoc.nets); 1.357 + if (alt == NULL) { 1.358 + return (NULL); 1.359 + } 1.360 + } 1.361 + if (alt->ro.ro_rt == NULL) { 1.362 + if (alt->ro._s_addr) { 1.363 + sctp_free_ifa(alt->ro._s_addr); 1.364 + alt->ro._s_addr = NULL; 1.365 + } 1.366 + alt->src_addr_selected = 0; 1.367 + } 1.368 + /*sa_ignore NO_NULL_CHK*/ 1.369 + if (((alt->dest_state 
& SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) && 1.370 + (alt->ro.ro_rt != NULL) && 1.371 + (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))) { 1.372 + /* Found a reachable address */ 1.373 + break; 1.374 + } 1.375 + mnet = alt; 1.376 + } while (alt != NULL); 1.377 + 1.378 + if (alt == NULL) { 1.379 + /* Case where NO insv network exists (dormant state) */ 1.380 + /* we rotate destinations */ 1.381 + once = 0; 1.382 + mnet = net; 1.383 + do { 1.384 + if (mnet == NULL) { 1.385 + return (TAILQ_FIRST(&stcb->asoc.nets)); 1.386 + } 1.387 + alt = TAILQ_NEXT(mnet, sctp_next); 1.388 + if (alt == NULL) { 1.389 + once++; 1.390 + if (once > 1) { 1.391 + break; 1.392 + } 1.393 + alt = TAILQ_FIRST(&stcb->asoc.nets); 1.394 + } 1.395 + /*sa_ignore NO_NULL_CHK*/ 1.396 + if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) && 1.397 + (alt != net)) { 1.398 + /* Found an alternate address */ 1.399 + break; 1.400 + } 1.401 + mnet = alt; 1.402 + } while (alt != NULL); 1.403 + } 1.404 + if (alt == NULL) { 1.405 + return (net); 1.406 + } 1.407 + return (alt); 1.408 +} 1.409 + 1.410 +static void 1.411 +sctp_backoff_on_timeout(struct sctp_tcb *stcb, 1.412 + struct sctp_nets *net, 1.413 + int win_probe, 1.414 + int num_marked, int num_abandoned) 1.415 +{ 1.416 + if (net->RTO == 0) { 1.417 + net->RTO = stcb->asoc.minrto; 1.418 + } 1.419 + net->RTO <<= 1; 1.420 + if (net->RTO > stcb->asoc.maxrto) { 1.421 + net->RTO = stcb->asoc.maxrto; 1.422 + } 1.423 + if ((win_probe == 0) && (num_marked || num_abandoned)) { 1.424 + /* We don't apply penalty to window probe scenarios */ 1.425 + /* JRS - Use the congestion control given in the CC module */ 1.426 + stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net); 1.427 + } 1.428 +} 1.429 + 1.430 +#ifndef INVARIANTS 1.431 +static void 1.432 +sctp_recover_sent_list(struct sctp_tcb *stcb) 1.433 +{ 1.434 + struct sctp_tmit_chunk *chk, *nchk; 1.435 + struct sctp_association *asoc; 1.436 + 1.437 + asoc = &stcb->asoc; 1.438 + TAILQ_FOREACH_SAFE(chk, 
&asoc->sent_queue, sctp_next, nchk) { 1.439 + if (SCTP_TSN_GE(asoc->last_acked_seq, chk->rec.data.TSN_seq)) { 1.440 + SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n", 1.441 + (void *)chk, chk->rec.data.TSN_seq, asoc->last_acked_seq); 1.442 + if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 1.443 + if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) { 1.444 + asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--; 1.445 + } 1.446 + } 1.447 + TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 1.448 + if (PR_SCTP_ENABLED(chk->flags)) { 1.449 + if (asoc->pr_sctp_cnt != 0) 1.450 + asoc->pr_sctp_cnt--; 1.451 + } 1.452 + if (chk->data) { 1.453 + /*sa_ignore NO_NULL_CHK*/ 1.454 + sctp_free_bufspace(stcb, asoc, chk, 1); 1.455 + sctp_m_freem(chk->data); 1.456 + chk->data = NULL; 1.457 + if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(chk->flags)) { 1.458 + asoc->sent_queue_cnt_removeable--; 1.459 + } 1.460 + } 1.461 + asoc->sent_queue_cnt--; 1.462 + sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1.463 + } 1.464 + } 1.465 + SCTP_PRINTF("after recover order is as follows\n"); 1.466 + TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 1.467 + SCTP_PRINTF("chk:%p TSN:%x\n", (void *)chk, chk->rec.data.TSN_seq); 1.468 + } 1.469 +} 1.470 +#endif 1.471 + 1.472 +static int 1.473 +sctp_mark_all_for_resend(struct sctp_tcb *stcb, 1.474 + struct sctp_nets *net, 1.475 + struct sctp_nets *alt, 1.476 + int window_probe, 1.477 + int *num_marked, 1.478 + int *num_abandoned) 1.479 +{ 1.480 + 1.481 + /* 1.482 + * Mark all chunks (well not all) that were sent to *net for 1.483 + * retransmission. Move them to alt for there destination as well... 1.484 + * We only mark chunks that have been outstanding long enough to 1.485 + * have received feed-back. 
1.486 + */ 1.487 + struct sctp_tmit_chunk *chk, *nchk; 1.488 + struct sctp_nets *lnets; 1.489 + struct timeval now, min_wait, tv; 1.490 + int cur_rto; 1.491 + int cnt_abandoned; 1.492 + int audit_tf, num_mk, fir; 1.493 + unsigned int cnt_mk; 1.494 + uint32_t orig_flight, orig_tf; 1.495 + uint32_t tsnlast, tsnfirst; 1.496 + int recovery_cnt = 0; 1.497 + 1.498 + 1.499 + /* none in flight now */ 1.500 + audit_tf = 0; 1.501 + fir = 0; 1.502 + /* 1.503 + * figure out how long a data chunk must be pending before we can 1.504 + * mark it .. 1.505 + */ 1.506 + (void)SCTP_GETTIME_TIMEVAL(&now); 1.507 + /* get cur rto in micro-seconds */ 1.508 + cur_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 1.509 + cur_rto *= 1000; 1.510 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 1.511 + sctp_log_fr(cur_rto, 1.512 + stcb->asoc.peers_rwnd, 1.513 + window_probe, 1.514 + SCTP_FR_T3_MARK_TIME); 1.515 + sctp_log_fr(net->flight_size, 0, 0, SCTP_FR_CWND_REPORT); 1.516 + sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT); 1.517 + } 1.518 + tv.tv_sec = cur_rto / 1000000; 1.519 + tv.tv_usec = cur_rto % 1000000; 1.520 +#ifndef __FreeBSD__ 1.521 + timersub(&now, &tv, &min_wait); 1.522 +#else 1.523 + min_wait = now; 1.524 + timevalsub(&min_wait, &tv); 1.525 +#endif 1.526 + if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) { 1.527 + /* 1.528 + * if we hit here, we don't have enough seconds on the clock 1.529 + * to account for the RTO. We just let the lower seconds be 1.530 + * the bounds and don't worry about it. This may mean we 1.531 + * will mark a lot more than we should. 
1.532 + */ 1.533 + min_wait.tv_sec = min_wait.tv_usec = 0; 1.534 + } 1.535 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 1.536 + sctp_log_fr(cur_rto, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME); 1.537 + sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME); 1.538 + } 1.539 + /* 1.540 + * Our rwnd will be incorrect here since we are not adding back the 1.541 + * cnt * mbuf but we will fix that down below. 1.542 + */ 1.543 + orig_flight = net->flight_size; 1.544 + orig_tf = stcb->asoc.total_flight; 1.545 + 1.546 + net->fast_retran_ip = 0; 1.547 + /* Now on to each chunk */ 1.548 + cnt_abandoned = 0; 1.549 + num_mk = cnt_mk = 0; 1.550 + tsnfirst = tsnlast = 0; 1.551 +#ifndef INVARIANTS 1.552 + start_again: 1.553 +#endif 1.554 + TAILQ_FOREACH_SAFE(chk, &stcb->asoc.sent_queue, sctp_next, nchk) { 1.555 + if (SCTP_TSN_GE(stcb->asoc.last_acked_seq, chk->rec.data.TSN_seq)) { 1.556 + /* Strange case our list got out of order? */ 1.557 + SCTP_PRINTF("Our list is out of order? last_acked:%x chk:%x\n", 1.558 + (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.TSN_seq); 1.559 + recovery_cnt++; 1.560 +#ifdef INVARIANTS 1.561 + panic("last acked >= chk on sent-Q"); 1.562 +#else 1.563 + SCTP_PRINTF("Recover attempts a restart cnt:%d\n", recovery_cnt); 1.564 + sctp_recover_sent_list(stcb); 1.565 + if (recovery_cnt < 10) { 1.566 + goto start_again; 1.567 + } else { 1.568 + SCTP_PRINTF("Recovery fails %d times??\n", recovery_cnt); 1.569 + } 1.570 +#endif 1.571 + } 1.572 + if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) { 1.573 + /* 1.574 + * found one to mark: If it is less than 1.575 + * DATAGRAM_ACKED it MUST not be a skipped or marked 1.576 + * TSN but instead one that is either already set 1.577 + * for retransmission OR one that needs 1.578 + * retransmission. 
1.579 + */ 1.580 + 1.581 + /* validate its been outstanding long enough */ 1.582 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 1.583 + sctp_log_fr(chk->rec.data.TSN_seq, 1.584 + chk->sent_rcv_time.tv_sec, 1.585 + chk->sent_rcv_time.tv_usec, 1.586 + SCTP_FR_T3_MARK_TIME); 1.587 + } 1.588 + if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) { 1.589 + /* 1.590 + * we have reached a chunk that was sent 1.591 + * some seconds past our min.. forget it we 1.592 + * will find no more to send. 1.593 + */ 1.594 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 1.595 + sctp_log_fr(0, 1.596 + chk->sent_rcv_time.tv_sec, 1.597 + chk->sent_rcv_time.tv_usec, 1.598 + SCTP_FR_T3_STOPPED); 1.599 + } 1.600 + continue; 1.601 + } else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) && 1.602 + (window_probe == 0)) { 1.603 + /* 1.604 + * we must look at the micro seconds to 1.605 + * know. 1.606 + */ 1.607 + if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) { 1.608 + /* 1.609 + * ok it was sent after our boundary 1.610 + * time. 1.611 + */ 1.612 + continue; 1.613 + } 1.614 + } 1.615 + if (stcb->asoc.peer_supports_prsctp && PR_SCTP_TTL_ENABLED(chk->flags)) { 1.616 + /* Is it expired? */ 1.617 +#ifndef __FreeBSD__ 1.618 + if (timercmp(&now, &chk->rec.data.timetodrop, >)) { 1.619 +#else 1.620 + if (timevalcmp(&now, &chk->rec.data.timetodrop, >)) { 1.621 +#endif 1.622 + /* Yes so drop it */ 1.623 + if (chk->data) { 1.624 + (void)sctp_release_pr_sctp_chunk(stcb, 1.625 + chk, 1.626 + 1, 1.627 + SCTP_SO_NOT_LOCKED); 1.628 + cnt_abandoned++; 1.629 + } 1.630 + continue; 1.631 + } 1.632 + } 1.633 + if (stcb->asoc.peer_supports_prsctp && PR_SCTP_RTX_ENABLED(chk->flags)) { 1.634 + /* Has it been retransmitted tv_sec times? 
*/ 1.635 + if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) { 1.636 + if (chk->data) { 1.637 + (void)sctp_release_pr_sctp_chunk(stcb, 1.638 + chk, 1.639 + 1, 1.640 + SCTP_SO_NOT_LOCKED); 1.641 + cnt_abandoned++; 1.642 + } 1.643 + continue; 1.644 + } 1.645 + } 1.646 + if (chk->sent < SCTP_DATAGRAM_RESEND) { 1.647 + sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1.648 + num_mk++; 1.649 + if (fir == 0) { 1.650 + fir = 1; 1.651 + tsnfirst = chk->rec.data.TSN_seq; 1.652 + } 1.653 + tsnlast = chk->rec.data.TSN_seq; 1.654 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 1.655 + sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count, 1.656 + 0, SCTP_FR_T3_MARKED); 1.657 + } 1.658 + 1.659 + if (chk->rec.data.chunk_was_revoked) { 1.660 + /* deflate the cwnd */ 1.661 + chk->whoTo->cwnd -= chk->book_size; 1.662 + chk->rec.data.chunk_was_revoked = 0; 1.663 + } 1.664 + net->marked_retrans++; 1.665 + stcb->asoc.marked_retrans++; 1.666 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 1.667 + sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO, 1.668 + chk->whoTo->flight_size, 1.669 + chk->book_size, 1.670 + (uintptr_t)chk->whoTo, 1.671 + chk->rec.data.TSN_seq); 1.672 + } 1.673 + sctp_flight_size_decrease(chk); 1.674 + sctp_total_flight_decrease(stcb, chk); 1.675 + stcb->asoc.peers_rwnd += chk->send_size; 1.676 + stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 1.677 + } 1.678 + chk->sent = SCTP_DATAGRAM_RESEND; 1.679 + SCTP_STAT_INCR(sctps_markedretrans); 1.680 + 1.681 + /* reset the TSN for striking and other FR stuff */ 1.682 + chk->rec.data.doing_fast_retransmit = 0; 1.683 + /* Clear any time so NO RTT is being done */ 1.684 + 1.685 + if (chk->do_rtt) { 1.686 + if (chk->whoTo->rto_needed == 0) { 1.687 + chk->whoTo->rto_needed = 1; 1.688 + } 1.689 + } 1.690 + chk->do_rtt = 0; 1.691 + if (alt != net) { 1.692 + sctp_free_remote_addr(chk->whoTo); 1.693 + chk->no_fr_allowed = 1; 1.694 + chk->whoTo = alt; 1.695 + 
atomic_add_int(&alt->ref_count, 1); 1.696 + } else { 1.697 + chk->no_fr_allowed = 0; 1.698 + if (TAILQ_EMPTY(&stcb->asoc.send_queue)) { 1.699 + chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq; 1.700 + } else { 1.701 + chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq; 1.702 + } 1.703 + } 1.704 + /* CMT: Do not allow FRs on retransmitted TSNs. 1.705 + */ 1.706 + if (stcb->asoc.sctp_cmt_on_off > 0) { 1.707 + chk->no_fr_allowed = 1; 1.708 + } 1.709 +#ifdef THIS_SHOULD_NOT_BE_DONE 1.710 + } else if (chk->sent == SCTP_DATAGRAM_ACKED) { 1.711 + /* remember highest acked one */ 1.712 + could_be_sent = chk; 1.713 +#endif 1.714 + } 1.715 + if (chk->sent == SCTP_DATAGRAM_RESEND) { 1.716 + cnt_mk++; 1.717 + } 1.718 + } 1.719 + if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) { 1.720 + /* we did not subtract the same things? */ 1.721 + audit_tf = 1; 1.722 + } 1.723 + 1.724 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 1.725 + sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT); 1.726 + } 1.727 +#ifdef SCTP_DEBUG 1.728 + if (num_mk) { 1.729 + SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n", 1.730 + tsnlast); 1.731 + SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%ld\n", 1.732 + num_mk, (u_long)stcb->asoc.peers_rwnd); 1.733 + SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n", 1.734 + tsnlast); 1.735 + SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%d\n", 1.736 + num_mk, 1.737 + (int)stcb->asoc.peers_rwnd); 1.738 + } 1.739 +#endif 1.740 + *num_marked = num_mk; 1.741 + *num_abandoned = cnt_abandoned; 1.742 + /* Now check for a ECN Echo that may be stranded And 1.743 + * include the cnt_mk'd to have all resends in the 1.744 + * control queue. 
1.745 + */ 1.746 + TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 1.747 + if (chk->sent == SCTP_DATAGRAM_RESEND) { 1.748 + cnt_mk++; 1.749 + } 1.750 + if ((chk->whoTo == net) && 1.751 + (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 1.752 + sctp_free_remote_addr(chk->whoTo); 1.753 + chk->whoTo = alt; 1.754 + if (chk->sent != SCTP_DATAGRAM_RESEND) { 1.755 + chk->sent = SCTP_DATAGRAM_RESEND; 1.756 + sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1.757 + cnt_mk++; 1.758 + } 1.759 + atomic_add_int(&alt->ref_count, 1); 1.760 + } 1.761 + } 1.762 +#ifdef THIS_SHOULD_NOT_BE_DONE 1.763 + if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) { 1.764 + /* fix it so we retransmit the highest acked anyway */ 1.765 + sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1.766 + cnt_mk++; 1.767 + could_be_sent->sent = SCTP_DATAGRAM_RESEND; 1.768 + } 1.769 +#endif 1.770 + if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) { 1.771 +#ifdef INVARIANTS 1.772 + SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n", 1.773 + cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk); 1.774 +#endif 1.775 +#ifndef SCTP_AUDITING_ENABLED 1.776 + stcb->asoc.sent_queue_retran_cnt = cnt_mk; 1.777 +#endif 1.778 + } 1.779 + if (audit_tf) { 1.780 + SCTPDBG(SCTP_DEBUG_TIMER4, 1.781 + "Audit total flight due to negative value net:%p\n", 1.782 + (void *)net); 1.783 + stcb->asoc.total_flight = 0; 1.784 + stcb->asoc.total_flight_count = 0; 1.785 + /* Clear all networks flight size */ 1.786 + TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) { 1.787 + lnets->flight_size = 0; 1.788 + SCTPDBG(SCTP_DEBUG_TIMER4, 1.789 + "Net:%p c-f cwnd:%d ssthresh:%d\n", 1.790 + (void *)lnets, lnets->cwnd, lnets->ssthresh); 1.791 + } 1.792 + TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1.793 + if (chk->sent < SCTP_DATAGRAM_RESEND) { 1.794 + if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 1.795 + sctp_misc_ints(SCTP_FLIGHT_LOG_UP, 1.796 
				       chk->whoTo->flight_size,
				       chk->book_size,
				       (uintptr_t)chk->whoTo,
				       chk->rec.data.TSN_seq);
				}

				sctp_flight_size_increase(chk);
				sctp_total_flight_increase(stcb, chk);
			}
		}
	}
	/* We return 1 if we only have a window probe outstanding */
	return (0);
}


/*
 * T3-rtx (retransmission) timer expiry handler.
 * Performs threshold management (possibly destroying the association),
 * picks an alternate destination, marks outstanding data for
 * retransmission, backs off the RTO/cwnd and, for PR-SCTP, advances the
 * peer ack point. Returns 1 if the association was destroyed, 0 otherwise.
 */
int
sctp_t3rxt_timer(struct sctp_inpcb *inp,
                 struct sctp_tcb *stcb,
                 struct sctp_nets *net)
{
	struct sctp_nets *alt;
	int win_probe, num_mk, num_abandoned;

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
		struct sctp_nets *lnet;

		/* Log cwnd of every net; flag 1 marks the net that timed out. */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			if (net == lnet) {
				sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
			} else {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
			}
		}
	}
	/* Find an alternate and mark those for retransmission */
	if ((stcb->asoc.peers_rwnd == 0) &&
	    (stcb->asoc.total_flight < net->mtu)) {
		/* Peer's window is closed and less than one MTU is in flight:
		 * this timeout is for a zero-window probe, not real loss. */
		SCTP_STAT_INCR(sctps_timowindowprobe);
		win_probe = 1;
	} else {
		win_probe = 0;
	}

	if (win_probe == 0) {
		/* We don't do normal threshold management on window probes */
		if (sctp_threshold_management(inp, stcb, net,
		                              stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		} else {
			if (net != stcb->asoc.primary_destination) {
				/* send a immediate HB if our RTO is stale */
				struct timeval now;
				unsigned int ms_goneby;

				(void)SCTP_GETTIME_TIMEVAL(&now);
				if (net->last_sent_time.tv_sec) {
					ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
				} else {
					ms_goneby = 0;
				}
				if ((net->dest_state & SCTP_ADDR_PF) == 0) {
					if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
						/*
						 * no recent feed back in an RTO or
						 * more, request a RTT update
						 */
						sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
					}
				}
			}
		}
	} else {
		/*
		 * For a window probe we don't penalize the net's but only
		 * the association. This may fail it if SACKs are not coming
		 * back. If sack's are coming with rwnd locked at 0, we will
		 * continue to hold things waiting for rwnd to raise
		 */
		if (sctp_threshold_management(inp, stcb, NULL,
		                              stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		}
	}
	if (stcb->asoc.sctp_cmt_on_off > 0) {
		if (net->pf_threshold < net->failure_threshold) {
			alt = sctp_find_alternate_net(stcb, net, 2);
		} else {
			/*
			 * CMT: Using RTX_SSTHRESH policy for CMT.
			 * If CMT is being used, then pick dest with
			 * largest ssthresh for any retransmission.
			 */
			alt = sctp_find_alternate_net(stcb, net, 1);
			/*
			 * CUCv2: If a different dest is picked for
			 * the retransmission, then new
			 * (rtx-)pseudo_cumack needs to be tracked
			 * for orig dest. Let CUCv2 track new (rtx-)
			 * pseudo-cumack always.
			 */
			net->find_pseudo_cumack = 1;
			net->find_rtx_pseudo_cumack = 1;
		}
	} else {
		alt = sctp_find_alternate_net(stcb, net, 0);
	}

	num_mk = 0;
	num_abandoned = 0;
	(void)sctp_mark_all_for_resend(stcb, net, alt, win_probe,
	                               &num_mk, &num_abandoned);
	/* FR Loss recovery just ended with the T3. */
	stcb->asoc.fast_retran_loss_recovery = 0;

	/* CMT FR loss recovery ended with the T3 */
	net->fast_retran_loss_recovery = 0;
	if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
	    (net->flight_size == 0)) {
		(*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net);
	}

	/*
	 * setup the sat loss recovery that prevents satellite cwnd advance.
	 */
	stcb->asoc.sat_t3_loss_recovery = 1;
	stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;

	/* Backoff the timer and cwnd */
	sctp_backoff_on_timeout(stcb, net, win_probe, num_mk, num_abandoned);
	if ((!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
	    (net->dest_state & SCTP_ADDR_PF)) {
		/* Move all pending over too */
		sctp_move_chunks_from_net(stcb, net);

		/* Get the address that failed, to
		 * force a new src address selecton and
		 * a route allocation.
		 */
		if (net->ro._s_addr) {
			sctp_free_ifa(net->ro._s_addr);
			net->ro._s_addr = NULL;
		}
		net->src_addr_selected = 0;

		/* Force a route allocation too */
		if (net->ro.ro_rt) {
			RTFREE(net->ro.ro_rt);
			net->ro.ro_rt = NULL;
		}

		/* Was it our primary? */
		if ((stcb->asoc.primary_destination == net) && (alt != net)) {
			/*
			 * Yes, note it as such and find an alternate note:
			 * this means HB code must use this to resent the
			 * primary if it goes active AND if someone does a
			 * change-primary then this flag must be cleared
			 * from any net structures.
			 */
			if (stcb->asoc.alternate) {
				sctp_free_remote_addr(stcb->asoc.alternate);
			}
			stcb->asoc.alternate = alt;
			atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
		}
	}
	/*
	 * Special case for cookie-echo'ed case, we don't do output but must
	 * await the COOKIE-ACK before retransmission
	 */
	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/*
		 * Here we just reset the timer and start again since we
		 * have not established the asoc
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
		return (0);
	}
	if (stcb->asoc.peer_supports_prsctp) {
		struct sctp_tmit_chunk *lchk;

		lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (SCTP_TSN_GT(stcb->asoc.advanced_peer_ack_point, stcb->asoc.last_acked_seq)) {
			send_forward_tsn(stcb, &stcb->asoc);
			if (lchk) {
				/* Assure a timer is up */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
			}
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
	}
	return (0);
}

/*
 * T1-init timer expiry handler: retransmit the INIT chunk, possibly on an
 * alternate destination, after threshold management and RTO backoff.
 * Returns 1 if the association was destroyed, 0 otherwise.
 */
int
sctp_t1init_timer(struct sctp_inpcb *inp,
                  struct sctp_tcb *stcb,
                  struct sctp_nets *net)
{
	/* bump the thresholds */
	if (stcb->asoc.delayed_connection) {
		/*
		 * special hook for delayed connection. The library did NOT
		 * complete the rest of its sends.
		 */
		stcb->asoc.delayed_connection = 0;
		sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	}
	if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) {
		/* Only retransmit INIT while still waiting for the INIT-ACK. */
		return (0);
	}
	if (sctp_threshold_management(inp, stcb, net,
	                              stcb->asoc.max_init_times)) {
		/* Association was destroyed */
		return (1);
	}
	stcb->asoc.dropped_special_cnt = 0;
	sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0, 0);
	if (stcb->asoc.initial_init_rto_max < net->RTO) {
		/* Cap the backed-off RTO during association setup. */
		net->RTO = stcb->asoc.initial_init_rto_max;
	}
	if (stcb->asoc.numnets > 1) {
		/* If we have more than one addr use it */
		struct sctp_nets *alt;

		alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0);
		if (alt != stcb->asoc.primary_destination) {
			sctp_move_chunks_from_net(stcb, stcb->asoc.primary_destination);
			stcb->asoc.primary_destination = alt;
		}
	}
	/* Send out a new init */
	sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
	return (0);
}

/*
 * For cookie and asconf we actually need to find and mark for resend, then
 * increment the resend counter (after all the threshold management stuff of
 * course).
 */
/*
 * Cookie timer expiry handler: locate the queued COOKIE-ECHO chunk and mark
 * it for retransmission on a (possibly alternate) destination.
 * Returns 1 if the association was destroyed, 0 otherwise.
 */
int
sctp_cookie_timer(struct sctp_inpcb *inp,
                  struct sctp_tcb *stcb,
                  struct sctp_nets *net SCTP_UNUSED)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *cookie;

	/* first before all else we must find the cookie */
	TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
		if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
			break;
		}
	}
	if (cookie == NULL) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
			/* FOOBAR! */
			/* Timer fired in COOKIE_ECHOED but no cookie is queued:
			 * protocol violation, abort the association. */
			struct mbuf *oper;

			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
			                             0, M_NOWAIT, 1, MT_DATA);
			if (oper) {
				struct sctp_paramhdr *ph;
				uint32_t *ippp;

				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
				    sizeof(uint32_t);
				ph = mtod(oper, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
				ph->param_length = htons(SCTP_BUF_LEN(oper));
				ippp = (uint32_t *) (ph + 1);
				*ippp = htonl(SCTP_FROM_SCTP_TIMER+SCTP_LOC_3);
			}
			inp->last_abort_code = SCTP_FROM_SCTP_TIMER+SCTP_LOC_4;
			sctp_abort_an_association(inp, stcb, oper, SCTP_SO_NOT_LOCKED);
		} else {
#ifdef INVARIANTS
			panic("Cookie timer expires in wrong state?");
#else
			SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc));
			return (0);
#endif
		}
		return (0);
	}
	/* Ok we found the cookie, threshold management next */
	if (sctp_threshold_management(inp, stcb, cookie->whoTo,
	                              stcb->asoc.max_init_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * cleared theshold management now lets backoff the address & select
	 * an alternate
	 */
	stcb->asoc.dropped_special_cnt = 0;
	sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0, 0);
	alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
	if (alt != cookie->whoTo) {
		/* Swap the destination; drop old ref, take one on the new. */
		sctp_free_remote_addr(cookie->whoTo);
		cookie->whoTo = alt;
		atomic_add_int(&alt->ref_count, 1);
	}
	/* Now mark the retran info */
	if (cookie->sent != SCTP_DATAGRAM_RESEND) {
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	}
	cookie->sent = SCTP_DATAGRAM_RESEND;
	/*
	 * Now call the output routine to kick out the cookie again, Note we
	 * don't mark any chunks for retran so that FR will need to kick in
	 * to move these (or a send timer).
	 */
	return (0);
}

/*
 * Stream-reset timer expiry handler: re-mark the outstanding STREAM-RESET
 * request for retransmission on an alternate destination and restart the
 * timer. Returns 1 if the association was destroyed, 0 otherwise.
 */
int
sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
                    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *strrst = NULL, *chk = NULL;

	if (stcb->asoc.stream_reset_outstanding == 0) {
		return (0);
	}
	/* find the existing STRRESET, we use the seq number we sent out on */
	(void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst);
	if (strrst == NULL) {
		return (0);
	}
	/* do threshold management */
	if (sctp_threshold_management(inp, stcb, strrst->whoTo,
	                              stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * cleared theshold management now lets backoff the address & select
	 * an alternate
	 */
	sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0, 0);
	alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0);
	sctp_free_remote_addr(strrst->whoTo);
	strrst->whoTo = alt;
	atomic_add_int(&alt->ref_count, 1);

	/* See if a ECN Echo is also stranded */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
			/* Re-home the ECN-ECHO to the alternate as well. */
			sctp_free_remote_addr(chk->whoTo);
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			chk->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}
	}
	if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
		/*
		 * If the address went un-reachable, we need to move to
		 * alternates for ALL chk's in queue
		 */
		sctp_move_chunks_from_net(stcb, net);
	}
	/* mark the retran info */
	if (strrst->sent != SCTP_DATAGRAM_RESEND)
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	strrst->sent = SCTP_DATAGRAM_RESEND;

	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
	return (0);
}

/*
 * ASCONF timer expiry handler: either send the first ASCONF or retransmit
 * the outstanding one on an alternate destination. A peer that never
 * responds is marked ASCONF-incapable and cleaned up.
 * Returns 1 if the association was destroyed, 0 otherwise.
 */
int
sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
                  struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *asconf, *chk;

	/* is this a first send, or a retransmission? */
	if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) {
		/* compose a new ASCONF chunk and send it */
		sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
	} else {
		/*
		 * Retransmission of the existing ASCONF is needed
		 */

		/* find the existing ASCONF */
		asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue);
		if (asconf == NULL) {
			return (0);
		}
		/* do threshold management */
		if (sctp_threshold_management(inp, stcb, asconf->whoTo,
		                              stcb->asoc.max_send_times)) {
			/* Assoc is over */
			return (1);
		}
		if (asconf->snd_count > stcb->asoc.max_send_times) {
			/*
			 * Something is rotten: our peer is not responding to
			 * ASCONFs but apparently is to other chunks. i.e. it
			 * is not properly handling the chunk type upper bits.
			 * Mark this peer as ASCONF incapable and cleanup.
			 */
			SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n");
			sctp_asconf_cleanup(stcb, net);
			return (0);
		}
		/*
		 * cleared threshold management, so now backoff the net and
		 * select an alternate
		 */
		sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0, 0);
		alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0);
		if (asconf->whoTo != alt) {
			sctp_free_remote_addr(asconf->whoTo);
			asconf->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}

		/* See if an ECN Echo is also stranded */
		TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
			if ((chk->whoTo == net) &&
			    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = alt;
				if (chk->sent != SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				}
				atomic_add_int(&alt->ref_count, 1);
			}
		}
		/* Move every queued ASCONF to the alternate and mark for resend. */
		TAILQ_FOREACH(chk, &stcb->asoc.asconf_send_queue, sctp_next) {
			if (chk->whoTo != alt) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
			if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT)
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			chk->sent = SCTP_DATAGRAM_RESEND;
		}
		if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
			/*
			 * If the address went un-reachable, we need to move
			 * to the alternate for ALL chunks in queue
			 */
			sctp_move_chunks_from_net(stcb, net);
		}
		/* mark the retran info */
		if (asconf->sent != SCTP_DATAGRAM_RESEND)
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
		asconf->sent = SCTP_DATAGRAM_RESEND;

		/* send another ASCONF if any and we can do */
		sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED);
	}
	return (0);
}

/* Mobility adaptation */
/*
 * Delete-primary timer: release the stored deleted primary destination and
 * clear the mobility PRIM_DELETED feature flag.
 */
void
sctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
                       struct sctp_nets *net SCTP_UNUSED)
{
	if (stcb->asoc.deleted_primary == NULL) {
		SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n");
		sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
		return;
	}
	SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: finished to keep deleted primary ");
	SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa);
	sctp_free_remote_addr(stcb->asoc.deleted_primary);
	stcb->asoc.deleted_primary = NULL;
	sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
	return;
}

/*
 * For the shutdown and shutdown-ack, we do not keep one around on the
 * control queue. This means we must generate a new one and call the general
 * chunk output routine, AFTER having done threshold management.
 * It is assumed that net is non-NULL.
 */
int
sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
                    struct sctp_nets *net)
{
	struct sctp_nets *alt;

	/* first threshold managment */
	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
	/* second select an alternative */
	alt = sctp_find_alternate_net(stcb, net, 0);

	/* third generate a shutdown into the queue for out net */
	sctp_send_shutdown(stcb, alt);

	/* fourth restart timer */
	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
	return (0);
}

/*
 * SHUTDOWN-ACK retransmission timer; same four steps as sctp_shutdown_timer
 * but emits a SHUTDOWN-ACK. Returns 1 if the association was destroyed.
 */
int
sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
                       struct sctp_nets *net)
{
	struct sctp_nets *alt;

	/* first threshold managment */
	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
	/* second select an alternative */
	alt = sctp_find_alternate_net(stcb, net, 0);

	/* third generate a shutdown into the queue for out net */
	sctp_send_shutdown_ack(stcb, alt);

	/* fourth restart timer */
	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
	return (0);
}

/*
 * Sanity/audit pass over the per-stream output queues, used when data is
 * reported queued but the send/sent queues are empty: fixes up scheduler
 * bookkeeping and kicks the output path if chunks are stuck.
 */
static void
sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
                                  struct sctp_tcb *stcb)
{
	struct sctp_stream_queue_pending *sp;
	unsigned int i, chks_in_queue = 0;
	int being_filled = 0;
	/*
	 * This function is ONLY called when the send/sent queues are empty.
	 */
	if ((stcb == NULL) || (inp == NULL))
		return;

	if (stcb->asoc.sent_queue_retran_cnt) {
		SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n",
		            stcb->asoc.sent_queue_retran_cnt);
		stcb->asoc.sent_queue_retran_cnt = 0;
	}
	if (stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) {
		/* No stream scheduler information, initialize scheduler */
		stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 0);
		if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) {
			/* yep, we lost a stream or two */
			SCTP_PRINTF("Found additional streams NOT managed by scheduler, corrected\n");
		} else {
			/* no streams lost */
			stcb->asoc.total_output_queue_size = 0;
		}
	}
	/* Check to see if some data queued, if so report it */
	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
		if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
			TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
				if (sp->msg_is_complete)
					being_filled++;
				chks_in_queue++;
			}
		}
	}
	if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
		SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
		            stcb->asoc.stream_queue_cnt, chks_in_queue);
	}
	if (chks_in_queue) {
		/* call the output queue function */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
		    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
			/*
			 * Probably should go in and make it go back through
			 * and add fragments allowed
			 */
			if (being_filled == 0) {
				SCTP_PRINTF("Still nothing moved %d chunks are stuck\n",
				            chks_in_queue);
			}
		}
	} else {
		SCTP_PRINTF("Found no chunks on any queue tot:%lu\n",
		            (u_long)stcb->asoc.total_output_queue_size);
		stcb->asoc.total_output_queue_size = 0;
	}
}

/*
 * Heartbeat timer expiry handler: applies backoff/threshold management when
 * the last HB went unanswered, audits stream queues if output bookkeeping
 * looks inconsistent, and sends a new HEARTBEAT when one is due.
 * Returns 1 if the association was destroyed, 0 otherwise.
 */
int
sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
                     struct sctp_nets *net)
{
	uint8_t net_was_pf;

	if (net->dest_state & SCTP_ADDR_PF) {
		net_was_pf = 1;
	} else {
		net_was_pf = 0;
	}
	if (net->hb_responded == 0) {
		if (net->ro._s_addr) {
			/* Invalidate the src address if we did not get
			 * a response last time.
			 */
			sctp_free_ifa(net->ro._s_addr);
			net->ro._s_addr = NULL;
			net->src_addr_selected = 0;
		}
		sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
		if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
			/* Assoc is over */
			return (1);
		}
	}
	/* Zero PBA, if it needs it */
	if (net->partial_bytes_acked) {
		net->partial_bytes_acked = 0;
	}
	if ((stcb->asoc.total_output_queue_size > 0) &&
	    (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
	    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
		sctp_audit_stream_queues_for_size(inp, stcb);
	}
	if (!(net->dest_state & SCTP_ADDR_NOHB) &&
	    !((net_was_pf == 0) && (net->dest_state & SCTP_ADDR_PF))) {
		/* when move to PF during threshold mangement, a HB has been
		   queued in that routine */
		uint32_t ms_gone_by;

		if ((net->last_sent_time.tv_sec > 0) ||
		    (net->last_sent_time.tv_usec > 0)) {
#ifdef __FreeBSD__
			struct timeval diff;

SCTP_GETTIME_TIMEVAL(&diff); 1.1456 + timevalsub(&diff, &net->last_sent_time); 1.1457 +#else 1.1458 + struct timeval diff, now; 1.1459 + 1.1460 + SCTP_GETTIME_TIMEVAL(&now); 1.1461 + timersub(&now, &net->last_sent_time, &diff); 1.1462 +#endif 1.1463 + ms_gone_by = (uint32_t)(diff.tv_sec * 1000) + 1.1464 + (uint32_t)(diff.tv_usec / 1000); 1.1465 + } else { 1.1466 + ms_gone_by = 0xffffffff; 1.1467 + } 1.1468 + if ((ms_gone_by >= net->heart_beat_delay) || 1.1469 + (net->dest_state & SCTP_ADDR_PF)) { 1.1470 + sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED); 1.1471 + } 1.1472 + } 1.1473 + return (0); 1.1474 +} 1.1475 + 1.1476 +void 1.1477 +sctp_pathmtu_timer(struct sctp_inpcb *inp, 1.1478 + struct sctp_tcb *stcb, 1.1479 + struct sctp_nets *net) 1.1480 +{ 1.1481 + uint32_t next_mtu, mtu; 1.1482 + 1.1483 + next_mtu = sctp_get_next_mtu(net->mtu); 1.1484 + 1.1485 + if ((next_mtu > net->mtu) && (net->port == 0)) { 1.1486 + if ((net->src_addr_selected == 0) || 1.1487 + (net->ro._s_addr == NULL) || 1.1488 + (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) { 1.1489 + if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) { 1.1490 + sctp_free_ifa(net->ro._s_addr); 1.1491 + net->ro._s_addr = NULL; 1.1492 + net->src_addr_selected = 0; 1.1493 + } else if (net->ro._s_addr == NULL) { 1.1494 +#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE) 1.1495 + if (net->ro._l_addr.sa.sa_family == AF_INET6) { 1.1496 + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 1.1497 + /* KAME hack: embed scopeid */ 1.1498 +#if defined(__APPLE__) 1.1499 +#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) 1.1500 + (void)in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL); 1.1501 +#else 1.1502 + (void)in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL); 1.1503 +#endif 1.1504 +#elif defined(SCTP_KAME) 1.1505 + (void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)); 1.1506 +#else 1.1507 + (void)in6_embedscope(&sin6->sin6_addr, sin6); 
1.1508 +#endif 1.1509 + } 1.1510 +#endif 1.1511 + 1.1512 + net->ro._s_addr = sctp_source_address_selection(inp, 1.1513 + stcb, 1.1514 + (sctp_route_t *)&net->ro, 1.1515 + net, 0, stcb->asoc.vrf_id); 1.1516 +#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE) 1.1517 + if (net->ro._l_addr.sa.sa_family == AF_INET6) { 1.1518 + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 1.1519 +#ifdef SCTP_KAME 1.1520 + (void)sa6_recoverscope(sin6); 1.1521 +#else 1.1522 + (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL); 1.1523 +#endif /* SCTP_KAME */ 1.1524 + } 1.1525 +#endif /* INET6 */ 1.1526 + } 1.1527 + if (net->ro._s_addr) 1.1528 + net->src_addr_selected = 1; 1.1529 + } 1.1530 + if (net->ro._s_addr) { 1.1531 + mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_rt); 1.1532 + if (net->port) { 1.1533 + mtu -= sizeof(struct udphdr); 1.1534 + } 1.1535 + if (mtu > next_mtu) { 1.1536 + net->mtu = next_mtu; 1.1537 + } 1.1538 + } 1.1539 + } 1.1540 + /* restart the timer */ 1.1541 + sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 1.1542 +} 1.1543 + 1.1544 +void 1.1545 +sctp_autoclose_timer(struct sctp_inpcb *inp, 1.1546 + struct sctp_tcb *stcb, 1.1547 + struct sctp_nets *net) 1.1548 +{ 1.1549 + struct timeval tn, *tim_touse; 1.1550 + struct sctp_association *asoc; 1.1551 + int ticks_gone_by; 1.1552 + 1.1553 + (void)SCTP_GETTIME_TIMEVAL(&tn); 1.1554 + if (stcb->asoc.sctp_autoclose_ticks && 1.1555 + sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1.1556 + /* Auto close is on */ 1.1557 + asoc = &stcb->asoc; 1.1558 + /* pick the time to use */ 1.1559 + if (asoc->time_last_rcvd.tv_sec > 1.1560 + asoc->time_last_sent.tv_sec) { 1.1561 + tim_touse = &asoc->time_last_rcvd; 1.1562 + } else { 1.1563 + tim_touse = &asoc->time_last_sent; 1.1564 + } 1.1565 + /* Now has long enough transpired to autoclose? 
*/ 1.1566 + ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec); 1.1567 + if ((ticks_gone_by > 0) && 1.1568 + (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) { 1.1569 + /* 1.1570 + * autoclose time has hit, call the output routine, 1.1571 + * which should do nothing just to be SURE we don't 1.1572 + * have hanging data. We can then safely check the 1.1573 + * queues and know that we are clear to send 1.1574 + * shutdown 1.1575 + */ 1.1576 + sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 1.1577 + /* Are we clean? */ 1.1578 + if (TAILQ_EMPTY(&asoc->send_queue) && 1.1579 + TAILQ_EMPTY(&asoc->sent_queue)) { 1.1580 + /* 1.1581 + * there is nothing queued to send, so I'm 1.1582 + * done... 1.1583 + */ 1.1584 + if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1.1585 + /* only send SHUTDOWN 1st time thru */ 1.1586 + struct sctp_nets *netp; 1.1587 + 1.1588 + if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 1.1589 + (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1.1590 + SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1.1591 + } 1.1592 + SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 1.1593 + SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 1.1594 + sctp_stop_timers_for_shutdown(stcb); 1.1595 + if (stcb->asoc.alternate) { 1.1596 + netp = stcb->asoc.alternate; 1.1597 + } else { 1.1598 + netp = stcb->asoc.primary_destination; 1.1599 + } 1.1600 + sctp_send_shutdown(stcb, netp); 1.1601 + sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 1.1602 + stcb->sctp_ep, stcb, 1.1603 + netp); 1.1604 + sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1.1605 + stcb->sctp_ep, stcb, 1.1606 + netp); 1.1607 + } 1.1608 + } 1.1609 + } else { 1.1610 + /* 1.1611 + * No auto close at this time, reset t-o to check 1.1612 + * later 1.1613 + */ 1.1614 + int tmp; 1.1615 + 1.1616 + /* fool the timer startup to use the time left */ 1.1617 + tmp = asoc->sctp_autoclose_ticks; 1.1618 + asoc->sctp_autoclose_ticks -= ticks_gone_by; 1.1619 + 
sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, 1.1620 + net); 1.1621 + /* restore the real tick value */ 1.1622 + asoc->sctp_autoclose_ticks = tmp; 1.1623 + } 1.1624 + } 1.1625 +} 1.1626 +