michael@0 | 1 | /*- |
michael@0 | 2 | * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. |
michael@0 | 3 | * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. |
michael@0 | 4 | * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. |
michael@0 | 5 | * |
michael@0 | 6 | * Redistribution and use in source and binary forms, with or without |
michael@0 | 7 | * modification, are permitted provided that the following conditions are met: |
michael@0 | 8 | * |
michael@0 | 9 | * a) Redistributions of source code must retain the above copyright notice, |
michael@0 | 10 | * this list of conditions and the following disclaimer. |
michael@0 | 11 | * |
michael@0 | 12 | * b) Redistributions in binary form must reproduce the above copyright |
michael@0 | 13 | * notice, this list of conditions and the following disclaimer in |
michael@0 | 14 | * the documentation and/or other materials provided with the distribution. |
michael@0 | 15 | * |
michael@0 | 16 | * c) Neither the name of Cisco Systems, Inc. nor the names of its |
michael@0 | 17 | * contributors may be used to endorse or promote products derived |
michael@0 | 18 | * from this software without specific prior written permission. |
michael@0 | 19 | * |
michael@0 | 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
michael@0 | 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, |
michael@0 | 22 | * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
michael@0 | 23 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
michael@0 | 24 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
michael@0 | 25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
michael@0 | 26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
michael@0 | 27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
michael@0 | 28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
michael@0 | 29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
michael@0 | 30 | * THE POSSIBILITY OF SUCH DAMAGE. |
michael@0 | 31 | */ |
michael@0 | 32 | |
michael@0 | 33 | #ifdef __FreeBSD__ |
michael@0 | 34 | #include <sys/cdefs.h> |
michael@0 | 35 | __FBSDID("$FreeBSD: head/sys/netinet/sctp_timer.c 257359 2013-10-29 20:04:50Z tuexen $"); |
michael@0 | 36 | #endif |
michael@0 | 37 | |
michael@0 | 38 | #define _IP_VHL |
michael@0 | 39 | #include <netinet/sctp_os.h> |
michael@0 | 40 | #include <netinet/sctp_pcb.h> |
michael@0 | 41 | #ifdef INET6 |
michael@0 | 42 | #if defined(__Userspace_os_FreeBSD) |
michael@0 | 43 | #include <netinet6/sctp6_var.h> |
michael@0 | 44 | #endif |
michael@0 | 45 | #endif |
michael@0 | 46 | #include <netinet/sctp_var.h> |
michael@0 | 47 | #include <netinet/sctp_sysctl.h> |
michael@0 | 48 | #include <netinet/sctp_timer.h> |
michael@0 | 49 | #include <netinet/sctputil.h> |
michael@0 | 50 | #include <netinet/sctp_output.h> |
michael@0 | 51 | #include <netinet/sctp_header.h> |
michael@0 | 52 | #include <netinet/sctp_indata.h> |
michael@0 | 53 | #include <netinet/sctp_asconf.h> |
michael@0 | 54 | #include <netinet/sctp_input.h> |
michael@0 | 55 | #include <netinet/sctp.h> |
michael@0 | 56 | #include <netinet/sctp_uio.h> |
michael@0 | 57 | #if !defined(__Userspace_os_Windows) |
michael@0 | 58 | #include <netinet/udp.h> |
michael@0 | 59 | #endif |
michael@0 | 60 | |
michael@0 | 61 | #if defined(__APPLE__) |
michael@0 | 62 | #define APPLE_FILE_NO 6 |
michael@0 | 63 | #endif |
michael@0 | 64 | |
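/*
 * Recount sent_queue_cnt and sent_queue_retran_cnt from scratch by walking
 * the sent, control and ASCONF send queues, resynchronizing the cached
 * counters with the chunks actually queued.
 */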
michael@0 | 65 | void |
michael@0 | 66 | sctp_audit_retranmission_queue(struct sctp_association *asoc) |
michael@0 | 67 | { |
michael@0 | 68 | struct sctp_tmit_chunk *chk; |
michael@0 | 69 | |
michael@0 | 70 | SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n", |
michael@0 | 71 | asoc->sent_queue_retran_cnt, |
michael@0 | 72 | asoc->sent_queue_cnt); |
michael@0 | 73 | asoc->sent_queue_retran_cnt = 0; |
michael@0 | 74 | asoc->sent_queue_cnt = 0; |
michael@0 | 75 | TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { |
michael@0 | 76 | if (chk->sent == SCTP_DATAGRAM_RESEND) { |
michael@0 | 77 | sctp_ucount_incr(asoc->sent_queue_retran_cnt); |
michael@0 | 78 | } |
michael@0 | 79 | asoc->sent_queue_cnt++; |
michael@0 | 80 | } |
michael@0 | 81 | TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { |
michael@0 | 82 | if (chk->sent == SCTP_DATAGRAM_RESEND) { |
michael@0 | 83 | sctp_ucount_incr(asoc->sent_queue_retran_cnt); |
michael@0 | 84 | } |
michael@0 | 85 | } |
michael@0 | 86 | TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) { |
michael@0 | 87 | if (chk->sent == SCTP_DATAGRAM_RESEND) { |
michael@0 | 88 | sctp_ucount_incr(asoc->sent_queue_retran_cnt); |
michael@0 | 89 | } |
michael@0 | 90 | } |
michael@0 | 91 | SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n", |
michael@0 | 92 | asoc->sent_queue_retran_cnt, |
michael@0 | 93 | asoc->sent_queue_cnt); |
michael@0 | 94 | } |
michael@0 | 95 | |
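/*
 * Bump the error counter of the path (if one is given) and, unless that
 * path is unconfirmed, of the association as a whole.  A path crossing its
 * own thresholds is moved into PF state or marked unreachable; once the
 * association's overall error count exceeds the supplied threshold the
 * association is aborted and 1 is returned, otherwise 0.
 */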
michael@0 | 96 | int |
michael@0 | 97 | sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb, |
michael@0 | 98 | struct sctp_nets *net, uint16_t threshold) |
michael@0 | 99 | { |
michael@0 | 100 | if (net) { |
michael@0 | 101 | net->error_count++; |
michael@0 | 102 | SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n", |
michael@0 | 103 | (void *)net, net->error_count, |
michael@0 | 104 | net->failure_threshold); |
michael@0 | 105 | if (net->error_count > net->failure_threshold) { |
michael@0 | 106 | /* We had a threshold failure */ |
michael@0 | 107 | if (net->dest_state & SCTP_ADDR_REACHABLE) { |
michael@0 | 108 | net->dest_state &= ~SCTP_ADDR_REACHABLE; |
michael@0 | 109 | net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY; |
michael@0 | 110 | net->dest_state &= ~SCTP_ADDR_PF; |
michael@0 | 111 | sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, |
michael@0 | 112 | stcb, 0, |
michael@0 | 113 | (void *)net, SCTP_SO_NOT_LOCKED); |
michael@0 | 114 | } |
michael@0 | 115 | } else if ((net->pf_threshold < net->failure_threshold) && |
michael@0 | 116 | (net->error_count > net->pf_threshold)) { |
michael@0 | 117 | if (!(net->dest_state & SCTP_ADDR_PF)) { |
michael@0 | 118 | net->dest_state |= SCTP_ADDR_PF; |
michael@0 | 119 | net->last_active = sctp_get_tick_count(); |
michael@0 | 120 | sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED); |
michael@0 | 121 | sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_TIMER + SCTP_LOC_3); |
michael@0 | 122 | sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); |
michael@0 | 123 | } |
michael@0 | 124 | } |
michael@0 | 125 | } |
michael@0 | 126 | if (stcb == NULL) |
michael@0 | 127 | return (0); |
michael@0 | 128 | |
michael@0 | 129 | if (net) { |
michael@0 | 130 | if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) { |
michael@0 | 131 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { |
michael@0 | 132 | sctp_misc_ints(SCTP_THRESHOLD_INCR, |
michael@0 | 133 | stcb->asoc.overall_error_count, |
michael@0 | 134 | (stcb->asoc.overall_error_count+1), |
michael@0 | 135 | SCTP_FROM_SCTP_TIMER, |
michael@0 | 136 | __LINE__); |
michael@0 | 137 | } |
michael@0 | 138 | stcb->asoc.overall_error_count++; |
michael@0 | 139 | } |
michael@0 | 140 | } else { |
michael@0 | 141 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { |
michael@0 | 142 | sctp_misc_ints(SCTP_THRESHOLD_INCR, |
michael@0 | 143 | stcb->asoc.overall_error_count, |
michael@0 | 144 | (stcb->asoc.overall_error_count+1), |
michael@0 | 145 | SCTP_FROM_SCTP_TIMER, |
michael@0 | 146 | __LINE__); |
michael@0 | 147 | } |
michael@0 | 148 | stcb->asoc.overall_error_count++; |
michael@0 | 149 | } |
michael@0 | 150 | SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n", |
michael@0 | 151 | (void *)&stcb->asoc, stcb->asoc.overall_error_count, |
michael@0 | 152 | (uint32_t)threshold, |
michael@0 | 153 | ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state)); |
michael@0 | 154 | /* |
michael@0 | 155 | * We specifically do not do >= to give the assoc one more chance |
michael@0 | 156 | * before we fail it. |
michael@0 | 157 | */ |
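/*
 * For example, with a threshold of 5 the association survives error
 * counts 1 through 5 and is only aborted on the timeout that pushes
 * overall_error_count to 6.
 */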
michael@0 | 158 | if (stcb->asoc.overall_error_count > threshold) { |
michael@0 | 159 | /* Abort notification sends a ULP notify */ |
michael@0 | 160 | struct mbuf *oper; |
michael@0 | 161 | |
michael@0 | 162 | oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), |
michael@0 | 163 | 0, M_NOWAIT, 1, MT_DATA); |
michael@0 | 164 | if (oper) { |
michael@0 | 165 | struct sctp_paramhdr *ph; |
michael@0 | 166 | uint32_t *ippp; |
michael@0 | 167 | |
michael@0 | 168 | SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + |
michael@0 | 169 | sizeof(uint32_t); |
michael@0 | 170 | ph = mtod(oper, struct sctp_paramhdr *); |
michael@0 | 171 | ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
michael@0 | 172 | ph->param_length = htons(SCTP_BUF_LEN(oper)); |
michael@0 | 173 | ippp = (uint32_t *) (ph + 1); |
michael@0 | 174 | *ippp = htonl(SCTP_FROM_SCTP_TIMER+SCTP_LOC_1); |
michael@0 | 175 | } |
michael@0 | 176 | inp->last_abort_code = SCTP_FROM_SCTP_TIMER+SCTP_LOC_1; |
michael@0 | 177 | sctp_abort_an_association(inp, stcb, oper, SCTP_SO_NOT_LOCKED); |
michael@0 | 178 | return (1); |
michael@0 | 179 | } |
michael@0 | 180 | return (0); |
michael@0 | 181 | } |
michael@0 | 182 | |
michael@0 | 183 | /* |
michael@0 | 184 | * sctp_find_alternate_net() returns a non-NULL pointer as long |
michael@0 | 185 | * as the argument net is non-NULL. |
michael@0 | 186 | */ |
michael@0 | 187 | struct sctp_nets * |
michael@0 | 188 | sctp_find_alternate_net(struct sctp_tcb *stcb, |
michael@0 | 189 | struct sctp_nets *net, |
michael@0 | 190 | int mode) |
michael@0 | 191 | { |
michael@0 | 192 | /* Find and return an alternate network if possible */ |
michael@0 | 193 | struct sctp_nets *alt, *mnet, *min_errors_net = NULL , *max_cwnd_net = NULL; |
michael@0 | 194 | int once; |
michael@0 | 195 | /* JRS 5/14/07 - Initialize min_errors to an impossible value. */ |
michael@0 | 196 | int min_errors = -1; |
michael@0 | 197 | uint32_t max_cwnd = 0; |
michael@0 | 198 | |
michael@0 | 199 | if (stcb->asoc.numnets == 1) { |
michael@0 | 200 | /* No others but net */ |
michael@0 | 201 | return (TAILQ_FIRST(&stcb->asoc.nets)); |
michael@0 | 202 | } |
michael@0 | 203 | /* |
michael@0 | 204 | * JRS 5/14/07 - If mode is set to 2, use the CMT PF find alternate net algorithm. |
michael@0 | 205 | * This algorithm chooses the active destination (not in PF state) with the largest |
michael@0 | 206 | * cwnd value. If all destinations are in PF state, unreachable, or unconfirmed, choose |
michael@0 | 207 | * the destination that is in PF state with the lowest error count. In case of a tie, |
michael@0 | 208 | * choose the destination that was most recently active. |
michael@0 | 209 | */ |
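/*
 * Illustration (hypothetical values): with destination A active with cwnd
 * 8000, destination B active with cwnd 12000 and destination C in PF state,
 * B is returned because it has the largest cwnd among reachable, confirmed,
 * non-PF destinations; C would only be considered if no such destination
 * existed, in which case the PF destination with the fewest errors wins.
 */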
michael@0 | 210 | if (mode == 2) { |
michael@0 | 211 | TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) { |
michael@0 | 212 | /* JRS 5/14/07 - If the destination is unreachable or unconfirmed, skip it. */ |
michael@0 | 213 | if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) || |
michael@0 | 214 | (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) { |
michael@0 | 215 | continue; |
michael@0 | 216 | } |
michael@0 | 217 | /* |
michael@0 | 218 | * JRS 5/14/07 - If the destination is reachable but in PF state, compare |
michael@0 | 219 | * the error count of the destination to the minimum error count seen thus far. |
michael@0 | 220 | * Store the destination with the lower error count. If the error counts are |
michael@0 | 221 | * equal, store the destination that was most recently active. |
michael@0 | 222 | */ |
michael@0 | 223 | if (mnet->dest_state & SCTP_ADDR_PF) { |
michael@0 | 224 | /* |
michael@0 | 225 | * JRS 5/14/07 - If the destination under consideration is the current |
michael@0 | 226 | * destination, work as if the error count is one higher. The |
michael@0 | 227 | * actual error count will not be incremented until later in the |
michael@0 | 228 | * t3 handler. |
michael@0 | 229 | */ |
michael@0 | 230 | if (mnet == net) { |
michael@0 | 231 | if (min_errors == -1) { |
michael@0 | 232 | min_errors = mnet->error_count + 1; |
michael@0 | 233 | min_errors_net = mnet; |
michael@0 | 234 | } else if (mnet->error_count + 1 < min_errors) { |
michael@0 | 235 | min_errors = mnet->error_count + 1; |
michael@0 | 236 | min_errors_net = mnet; |
michael@0 | 237 | } else if (mnet->error_count + 1 == min_errors |
michael@0 | 238 | && mnet->last_active > min_errors_net->last_active) { |
michael@0 | 239 | min_errors_net = mnet; |
michael@0 | 240 | min_errors = mnet->error_count + 1; |
michael@0 | 241 | } |
michael@0 | 242 | continue; |
michael@0 | 243 | } else { |
michael@0 | 244 | if (min_errors == -1) { |
michael@0 | 245 | min_errors = mnet->error_count; |
michael@0 | 246 | min_errors_net = mnet; |
michael@0 | 247 | } else if (mnet->error_count < min_errors) { |
michael@0 | 248 | min_errors = mnet->error_count; |
michael@0 | 249 | min_errors_net = mnet; |
michael@0 | 250 | } else if (mnet->error_count == min_errors |
michael@0 | 251 | && mnet->last_active > min_errors_net->last_active) { |
michael@0 | 252 | min_errors_net = mnet; |
michael@0 | 253 | min_errors = mnet->error_count; |
michael@0 | 254 | } |
michael@0 | 255 | continue; |
michael@0 | 256 | } |
michael@0 | 257 | } |
michael@0 | 258 | /* |
michael@0 | 259 | * JRS 5/14/07 - If the destination is reachable and not in PF state, compare the |
michael@0 | 260 | * cwnd of the destination to the highest cwnd seen thus far. Store the |
michael@0 | 261 | * destination with the higher cwnd value. If the cwnd values are equal, |
michael@0 | 262 | * randomly choose one of the two destinations. |
michael@0 | 263 | */ |
michael@0 | 264 | if (max_cwnd < mnet->cwnd) { |
michael@0 | 265 | max_cwnd_net = mnet; |
michael@0 | 266 | max_cwnd = mnet->cwnd; |
michael@0 | 267 | } else if (max_cwnd == mnet->cwnd) { |
michael@0 | 268 | uint32_t rndval; |
michael@0 | 269 | uint8_t this_random; |
michael@0 | 270 | |
michael@0 | 271 | if (stcb->asoc.hb_random_idx > 3) { |
michael@0 | 272 | rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); |
michael@0 | 273 | memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values)); |
michael@0 | 274 | this_random = stcb->asoc.hb_random_values[0]; |
michael@0 | 275 | stcb->asoc.hb_random_idx++; |
michael@0 | 276 | stcb->asoc.hb_ect_randombit = 0; |
michael@0 | 277 | } else { |
michael@0 | 278 | this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; |
michael@0 | 279 | stcb->asoc.hb_random_idx++; |
michael@0 | 280 | stcb->asoc.hb_ect_randombit = 0; |
michael@0 | 281 | } |
michael@0 | 282 | if (this_random % 2 == 1) { |
michael@0 | 283 | max_cwnd_net = mnet; |
michael@0 | 284 | max_cwnd = mnet->cwnd; /* Useless? */ |
michael@0 | 285 | } |
michael@0 | 286 | } |
michael@0 | 287 | } |
michael@0 | 288 | if (max_cwnd_net == NULL) { |
michael@0 | 289 | if (min_errors_net == NULL) { |
michael@0 | 290 | return (net); |
michael@0 | 291 | } |
michael@0 | 292 | return (min_errors_net); |
michael@0 | 293 | } else { |
michael@0 | 294 | return (max_cwnd_net); |
michael@0 | 295 | } |
michael@0 | 296 | } /* JRS 5/14/07 - If mode is set to 1, use the CMT policy for choosing an alternate net. */ |
michael@0 | 297 | else if (mode == 1) { |
michael@0 | 298 | TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) { |
michael@0 | 299 | if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) || |
michael@0 | 300 | (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) { |
michael@0 | 301 | /* |
michael@0 | 302 | * will skip ones that are not-reachable or |
michael@0 | 303 | * unconfirmed |
michael@0 | 304 | */ |
michael@0 | 305 | continue; |
michael@0 | 306 | } |
michael@0 | 307 | if (max_cwnd < mnet->cwnd) { |
michael@0 | 308 | max_cwnd_net = mnet; |
michael@0 | 309 | max_cwnd = mnet->cwnd; |
michael@0 | 310 | } else if (max_cwnd == mnet->cwnd) { |
michael@0 | 311 | uint32_t rndval; |
michael@0 | 312 | uint8_t this_random; |
michael@0 | 313 | |
michael@0 | 314 | if (stcb->asoc.hb_random_idx > 3) { |
michael@0 | 315 | rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); |
michael@0 | 316 | memcpy(stcb->asoc.hb_random_values, &rndval, |
michael@0 | 317 | sizeof(stcb->asoc.hb_random_values)); |
michael@0 | 318 | this_random = stcb->asoc.hb_random_values[0]; |
michael@0 | 319 | stcb->asoc.hb_random_idx = 0; |
michael@0 | 320 | stcb->asoc.hb_ect_randombit = 0; |
michael@0 | 321 | } else { |
michael@0 | 322 | this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; |
michael@0 | 323 | stcb->asoc.hb_random_idx++; |
michael@0 | 324 | stcb->asoc.hb_ect_randombit = 0; |
michael@0 | 325 | } |
michael@0 | 326 | if (this_random % 2) { |
michael@0 | 327 | max_cwnd_net = mnet; |
michael@0 | 328 | max_cwnd = mnet->cwnd; |
michael@0 | 329 | } |
michael@0 | 330 | } |
michael@0 | 331 | } |
michael@0 | 332 | if (max_cwnd_net) { |
michael@0 | 333 | return (max_cwnd_net); |
michael@0 | 334 | } |
michael@0 | 335 | } |
michael@0 | 336 | mnet = net; |
michael@0 | 337 | once = 0; |
michael@0 | 338 | |
michael@0 | 339 | if (mnet == NULL) { |
michael@0 | 340 | mnet = TAILQ_FIRST(&stcb->asoc.nets); |
michael@0 | 341 | if (mnet == NULL) { |
michael@0 | 342 | return (NULL); |
michael@0 | 343 | } |
michael@0 | 344 | } |
michael@0 | 345 | do { |
michael@0 | 346 | alt = TAILQ_NEXT(mnet, sctp_next); |
michael@0 | 347 | if (alt == NULL) |
michael@0 | 348 | { |
michael@0 | 349 | once++; |
michael@0 | 350 | if (once > 1) { |
michael@0 | 351 | break; |
michael@0 | 352 | } |
michael@0 | 353 | alt = TAILQ_FIRST(&stcb->asoc.nets); |
michael@0 | 354 | if (alt == NULL) { |
michael@0 | 355 | return (NULL); |
michael@0 | 356 | } |
michael@0 | 357 | } |
michael@0 | 358 | if (alt->ro.ro_rt == NULL) { |
michael@0 | 359 | if (alt->ro._s_addr) { |
michael@0 | 360 | sctp_free_ifa(alt->ro._s_addr); |
michael@0 | 361 | alt->ro._s_addr = NULL; |
michael@0 | 362 | } |
michael@0 | 363 | alt->src_addr_selected = 0; |
michael@0 | 364 | } |
michael@0 | 365 | /*sa_ignore NO_NULL_CHK*/ |
michael@0 | 366 | if (((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) && |
michael@0 | 367 | (alt->ro.ro_rt != NULL) && |
michael@0 | 368 | (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))) { |
michael@0 | 369 | /* Found a reachable address */ |
michael@0 | 370 | break; |
michael@0 | 371 | } |
michael@0 | 372 | mnet = alt; |
michael@0 | 373 | } while (alt != NULL); |
michael@0 | 374 | |
michael@0 | 375 | if (alt == NULL) { |
michael@0 | 376 | /* Case where NO in-service network exists (dormant state) */ |
michael@0 | 377 | /* we rotate destinations */ |
michael@0 | 378 | once = 0; |
michael@0 | 379 | mnet = net; |
michael@0 | 380 | do { |
michael@0 | 381 | if (mnet == NULL) { |
michael@0 | 382 | return (TAILQ_FIRST(&stcb->asoc.nets)); |
michael@0 | 383 | } |
michael@0 | 384 | alt = TAILQ_NEXT(mnet, sctp_next); |
michael@0 | 385 | if (alt == NULL) { |
michael@0 | 386 | once++; |
michael@0 | 387 | if (once > 1) { |
michael@0 | 388 | break; |
michael@0 | 389 | } |
michael@0 | 390 | alt = TAILQ_FIRST(&stcb->asoc.nets); |
michael@0 | 391 | } |
michael@0 | 392 | /*sa_ignore NO_NULL_CHK*/ |
michael@0 | 393 | if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) && |
michael@0 | 394 | (alt != net)) { |
michael@0 | 395 | /* Found an alternate address */ |
michael@0 | 396 | break; |
michael@0 | 397 | } |
michael@0 | 398 | mnet = alt; |
michael@0 | 399 | } while (alt != NULL); |
michael@0 | 400 | } |
michael@0 | 401 | if (alt == NULL) { |
michael@0 | 402 | return (net); |
michael@0 | 403 | } |
michael@0 | 404 | return (alt); |
michael@0 | 405 | } |
michael@0 | 406 | |
michael@0 | 407 | static void |
michael@0 | 408 | sctp_backoff_on_timeout(struct sctp_tcb *stcb, |
michael@0 | 409 | struct sctp_nets *net, |
michael@0 | 410 | int win_probe, |
michael@0 | 411 | int num_marked, int num_abandoned) |
michael@0 | 412 | { |
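/*
 * Exponential backoff: the path RTO is doubled on every timeout and
 * clamped to the association's maxrto (e.g. 1s, 2s, 4s, ... up to
 * maxrto).  The congestion control penalty below is skipped for pure
 * window probes and when nothing was marked or abandoned.
 */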
michael@0 | 413 | if (net->RTO == 0) { |
michael@0 | 414 | net->RTO = stcb->asoc.minrto; |
michael@0 | 415 | } |
michael@0 | 416 | net->RTO <<= 1; |
michael@0 | 417 | if (net->RTO > stcb->asoc.maxrto) { |
michael@0 | 418 | net->RTO = stcb->asoc.maxrto; |
michael@0 | 419 | } |
michael@0 | 420 | if ((win_probe == 0) && (num_marked || num_abandoned)) { |
michael@0 | 421 | /* We don't apply penalty to window probe scenarios */ |
michael@0 | 422 | /* JRS - Use the congestion control given in the CC module */ |
michael@0 | 423 | stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net); |
michael@0 | 424 | } |
michael@0 | 425 | } |
michael@0 | 426 | |
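/*
 * Emergency cleanup used when the sent queue is found to be out of order:
 * every chunk whose TSN is already covered by last_acked_seq is released
 * and the remaining queue is dumped for inspection.  Only compiled when
 * INVARIANTS is not defined; with INVARIANTS the caller panics instead.
 */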
michael@0 | 427 | #ifndef INVARIANTS |
michael@0 | 428 | static void |
michael@0 | 429 | sctp_recover_sent_list(struct sctp_tcb *stcb) |
michael@0 | 430 | { |
michael@0 | 431 | struct sctp_tmit_chunk *chk, *nchk; |
michael@0 | 432 | struct sctp_association *asoc; |
michael@0 | 433 | |
michael@0 | 434 | asoc = &stcb->asoc; |
michael@0 | 435 | TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { |
michael@0 | 436 | if (SCTP_TSN_GE(asoc->last_acked_seq, chk->rec.data.TSN_seq)) { |
michael@0 | 437 | SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n", |
michael@0 | 438 | (void *)chk, chk->rec.data.TSN_seq, asoc->last_acked_seq); |
michael@0 | 439 | if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { |
michael@0 | 440 | if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) { |
michael@0 | 441 | asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--; |
michael@0 | 442 | } |
michael@0 | 443 | } |
michael@0 | 444 | TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); |
michael@0 | 445 | if (PR_SCTP_ENABLED(chk->flags)) { |
michael@0 | 446 | if (asoc->pr_sctp_cnt != 0) |
michael@0 | 447 | asoc->pr_sctp_cnt--; |
michael@0 | 448 | } |
michael@0 | 449 | if (chk->data) { |
michael@0 | 450 | /*sa_ignore NO_NULL_CHK*/ |
michael@0 | 451 | sctp_free_bufspace(stcb, asoc, chk, 1); |
michael@0 | 452 | sctp_m_freem(chk->data); |
michael@0 | 453 | chk->data = NULL; |
michael@0 | 454 | if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(chk->flags)) { |
michael@0 | 455 | asoc->sent_queue_cnt_removeable--; |
michael@0 | 456 | } |
michael@0 | 457 | } |
michael@0 | 458 | asoc->sent_queue_cnt--; |
michael@0 | 459 | sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); |
michael@0 | 460 | } |
michael@0 | 461 | } |
michael@0 | 462 | SCTP_PRINTF("after recover order is as follows\n"); |
michael@0 | 463 | TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { |
michael@0 | 464 | SCTP_PRINTF("chk:%p TSN:%x\n", (void *)chk, chk->rec.data.TSN_seq); |
michael@0 | 465 | } |
michael@0 | 466 | } |
michael@0 | 467 | #endif |
michael@0 | 468 | |
michael@0 | 469 | static int |
michael@0 | 470 | sctp_mark_all_for_resend(struct sctp_tcb *stcb, |
michael@0 | 471 | struct sctp_nets *net, |
michael@0 | 472 | struct sctp_nets *alt, |
michael@0 | 473 | int window_probe, |
michael@0 | 474 | int *num_marked, |
michael@0 | 475 | int *num_abandoned) |
michael@0 | 476 | { |
michael@0 | 477 | |
michael@0 | 478 | /* |
michael@0 | 479 | * Mark all chunks (well not all) that were sent to *net for |
michael@0 | 480 | * retransmission. Move them to alt for their destination as well... |
michael@0 | 481 | * We only mark chunks that have been outstanding long enough to |
michael@0 | 482 | * have received feed-back. |
michael@0 | 483 | */ |
michael@0 | 484 | struct sctp_tmit_chunk *chk, *nchk; |
michael@0 | 485 | struct sctp_nets *lnets; |
michael@0 | 486 | struct timeval now, min_wait, tv; |
michael@0 | 487 | int cur_rto; |
michael@0 | 488 | int cnt_abandoned; |
michael@0 | 489 | int audit_tf, num_mk, fir; |
michael@0 | 490 | unsigned int cnt_mk; |
michael@0 | 491 | uint32_t orig_flight, orig_tf; |
michael@0 | 492 | uint32_t tsnlast, tsnfirst; |
michael@0 | 493 | int recovery_cnt = 0; |
michael@0 | 494 | |
michael@0 | 495 | |
michael@0 | 496 | /* none in flight now */ |
michael@0 | 497 | audit_tf = 0; |
michael@0 | 498 | fir = 0; |
michael@0 | 499 | /* |
michael@0 | 500 | * figure out how long a data chunk must be pending before we can |
michael@0 | 501 | * mark it .. |
michael@0 | 502 | */ |
michael@0 | 503 | (void)SCTP_GETTIME_TIMEVAL(&now); |
michael@0 | 504 | /* get cur rto in micro-seconds */ |
michael@0 | 505 | cur_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; |
michael@0 | 506 | cur_rto *= 1000; |
michael@0 | 507 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
michael@0 | 508 | sctp_log_fr(cur_rto, |
michael@0 | 509 | stcb->asoc.peers_rwnd, |
michael@0 | 510 | window_probe, |
michael@0 | 511 | SCTP_FR_T3_MARK_TIME); |
michael@0 | 512 | sctp_log_fr(net->flight_size, 0, 0, SCTP_FR_CWND_REPORT); |
michael@0 | 513 | sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT); |
michael@0 | 514 | } |
michael@0 | 515 | tv.tv_sec = cur_rto / 1000000; |
michael@0 | 516 | tv.tv_usec = cur_rto % 1000000; |
michael@0 | 517 | #ifndef __FreeBSD__ |
michael@0 | 518 | timersub(&now, &tv, &min_wait); |
michael@0 | 519 | #else |
michael@0 | 520 | min_wait = now; |
michael@0 | 521 | timevalsub(&min_wait, &tv); |
michael@0 | 522 | #endif |
michael@0 | 523 | if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) { |
michael@0 | 524 | /* |
michael@0 | 525 | * if we hit here, we don't have enough seconds on the clock |
michael@0 | 526 | * to account for the RTO. We just let the lower seconds be |
michael@0 | 527 | * the bounds and don't worry about it. This may mean we |
michael@0 | 528 | * will mark a lot more than we should. |
michael@0 | 529 | */ |
michael@0 | 530 | min_wait.tv_sec = min_wait.tv_usec = 0; |
michael@0 | 531 | } |
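/*
 * min_wait now holds (now - RTO): only chunks sent before this
 * boundary have been outstanding for roughly a full RTO and are
 * eligible to be marked for retransmission below; newer chunks are
 * skipped unless this is a window probe.
 */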
michael@0 | 532 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
michael@0 | 533 | sctp_log_fr(cur_rto, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME); |
michael@0 | 534 | sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME); |
michael@0 | 535 | } |
michael@0 | 536 | /* |
michael@0 | 537 | * Our rwnd will be incorrect here since we are not adding back the |
michael@0 | 538 | * cnt * mbuf but we will fix that down below. |
michael@0 | 539 | */ |
michael@0 | 540 | orig_flight = net->flight_size; |
michael@0 | 541 | orig_tf = stcb->asoc.total_flight; |
michael@0 | 542 | |
michael@0 | 543 | net->fast_retran_ip = 0; |
michael@0 | 544 | /* Now on to each chunk */ |
michael@0 | 545 | cnt_abandoned = 0; |
michael@0 | 546 | num_mk = cnt_mk = 0; |
michael@0 | 547 | tsnfirst = tsnlast = 0; |
michael@0 | 548 | #ifndef INVARIANTS |
michael@0 | 549 | start_again: |
michael@0 | 550 | #endif |
michael@0 | 551 | TAILQ_FOREACH_SAFE(chk, &stcb->asoc.sent_queue, sctp_next, nchk) { |
michael@0 | 552 | if (SCTP_TSN_GE(stcb->asoc.last_acked_seq, chk->rec.data.TSN_seq)) { |
michael@0 | 553 | /* Strange case our list got out of order? */ |
michael@0 | 554 | SCTP_PRINTF("Our list is out of order? last_acked:%x chk:%x\n", |
michael@0 | 555 | (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.TSN_seq); |
michael@0 | 556 | recovery_cnt++; |
michael@0 | 557 | #ifdef INVARIANTS |
michael@0 | 558 | panic("last acked >= chk on sent-Q"); |
michael@0 | 559 | #else |
michael@0 | 560 | SCTP_PRINTF("Recover attempts a restart cnt:%d\n", recovery_cnt); |
michael@0 | 561 | sctp_recover_sent_list(stcb); |
michael@0 | 562 | if (recovery_cnt < 10) { |
michael@0 | 563 | goto start_again; |
michael@0 | 564 | } else { |
michael@0 | 565 | SCTP_PRINTF("Recovery fails %d times??\n", recovery_cnt); |
michael@0 | 566 | } |
michael@0 | 567 | #endif |
michael@0 | 568 | } |
michael@0 | 569 | if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) { |
michael@0 | 570 | /* |
michael@0 | 571 | * found one to mark: If it is less than |
michael@0 | 572 | * DATAGRAM_ACKED it MUST not be a skipped or marked |
michael@0 | 573 | * TSN but instead one that is either already set |
michael@0 | 574 | * for retransmission OR one that needs |
michael@0 | 575 | * retransmission. |
michael@0 | 576 | */ |
michael@0 | 577 | |
michael@0 | 578 | /* validate its been outstanding long enough */ |
michael@0 | 579 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
michael@0 | 580 | sctp_log_fr(chk->rec.data.TSN_seq, |
michael@0 | 581 | chk->sent_rcv_time.tv_sec, |
michael@0 | 582 | chk->sent_rcv_time.tv_usec, |
michael@0 | 583 | SCTP_FR_T3_MARK_TIME); |
michael@0 | 584 | } |
michael@0 | 585 | if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) { |
michael@0 | 586 | /* |
michael@0 | 587 | * we have reached a chunk that was sent |
michael@0 | 588 | * some seconds past our min. Forget it, we |
michael@0 | 589 | * will find no more to send. |
michael@0 | 590 | */ |
michael@0 | 591 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
michael@0 | 592 | sctp_log_fr(0, |
michael@0 | 593 | chk->sent_rcv_time.tv_sec, |
michael@0 | 594 | chk->sent_rcv_time.tv_usec, |
michael@0 | 595 | SCTP_FR_T3_STOPPED); |
michael@0 | 596 | } |
michael@0 | 597 | continue; |
michael@0 | 598 | } else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) && |
michael@0 | 599 | (window_probe == 0)) { |
michael@0 | 600 | /* |
michael@0 | 601 | * we must look at the microseconds to |
michael@0 | 602 | * know. |
michael@0 | 603 | */ |
michael@0 | 604 | if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) { |
michael@0 | 605 | /* |
michael@0 | 606 | * ok it was sent after our boundary |
michael@0 | 607 | * time. |
michael@0 | 608 | */ |
michael@0 | 609 | continue; |
michael@0 | 610 | } |
michael@0 | 611 | } |
michael@0 | 612 | if (stcb->asoc.peer_supports_prsctp && PR_SCTP_TTL_ENABLED(chk->flags)) { |
michael@0 | 613 | /* Is it expired? */ |
michael@0 | 614 | #ifndef __FreeBSD__ |
michael@0 | 615 | if (timercmp(&now, &chk->rec.data.timetodrop, >)) { |
michael@0 | 616 | #else |
michael@0 | 617 | if (timevalcmp(&now, &chk->rec.data.timetodrop, >)) { |
michael@0 | 618 | #endif |
michael@0 | 619 | /* Yes so drop it */ |
michael@0 | 620 | if (chk->data) { |
michael@0 | 621 | (void)sctp_release_pr_sctp_chunk(stcb, |
michael@0 | 622 | chk, |
michael@0 | 623 | 1, |
michael@0 | 624 | SCTP_SO_NOT_LOCKED); |
michael@0 | 625 | cnt_abandoned++; |
michael@0 | 626 | } |
michael@0 | 627 | continue; |
michael@0 | 628 | } |
michael@0 | 629 | } |
michael@0 | 630 | if (stcb->asoc.peer_supports_prsctp && PR_SCTP_RTX_ENABLED(chk->flags)) { |
michael@0 | 631 | /* Has it been retransmitted tv_sec times? */ |
michael@0 | 632 | if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) { |
michael@0 | 633 | if (chk->data) { |
michael@0 | 634 | (void)sctp_release_pr_sctp_chunk(stcb, |
michael@0 | 635 | chk, |
michael@0 | 636 | 1, |
michael@0 | 637 | SCTP_SO_NOT_LOCKED); |
michael@0 | 638 | cnt_abandoned++; |
michael@0 | 639 | } |
michael@0 | 640 | continue; |
michael@0 | 641 | } |
michael@0 | 642 | } |
michael@0 | 643 | if (chk->sent < SCTP_DATAGRAM_RESEND) { |
michael@0 | 644 | sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); |
michael@0 | 645 | num_mk++; |
michael@0 | 646 | if (fir == 0) { |
michael@0 | 647 | fir = 1; |
michael@0 | 648 | tsnfirst = chk->rec.data.TSN_seq; |
michael@0 | 649 | } |
michael@0 | 650 | tsnlast = chk->rec.data.TSN_seq; |
michael@0 | 651 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
michael@0 | 652 | sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count, |
michael@0 | 653 | 0, SCTP_FR_T3_MARKED); |
michael@0 | 654 | } |
michael@0 | 655 | |
michael@0 | 656 | if (chk->rec.data.chunk_was_revoked) { |
michael@0 | 657 | /* deflate the cwnd */ |
michael@0 | 658 | chk->whoTo->cwnd -= chk->book_size; |
michael@0 | 659 | chk->rec.data.chunk_was_revoked = 0; |
michael@0 | 660 | } |
michael@0 | 661 | net->marked_retrans++; |
michael@0 | 662 | stcb->asoc.marked_retrans++; |
michael@0 | 663 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
michael@0 | 664 | sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO, |
michael@0 | 665 | chk->whoTo->flight_size, |
michael@0 | 666 | chk->book_size, |
michael@0 | 667 | (uintptr_t)chk->whoTo, |
michael@0 | 668 | chk->rec.data.TSN_seq); |
michael@0 | 669 | } |
michael@0 | 670 | sctp_flight_size_decrease(chk); |
michael@0 | 671 | sctp_total_flight_decrease(stcb, chk); |
michael@0 | 672 | stcb->asoc.peers_rwnd += chk->send_size; |
michael@0 | 673 | stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); |
michael@0 | 674 | } |
michael@0 | 675 | chk->sent = SCTP_DATAGRAM_RESEND; |
michael@0 | 676 | SCTP_STAT_INCR(sctps_markedretrans); |
michael@0 | 677 | |
michael@0 | 678 | /* reset the TSN for striking and other FR stuff */ |
michael@0 | 679 | chk->rec.data.doing_fast_retransmit = 0; |
michael@0 | 680 | /* Clear any time so NO RTT is being done */ |
michael@0 | 681 | |
michael@0 | 682 | if (chk->do_rtt) { |
michael@0 | 683 | if (chk->whoTo->rto_needed == 0) { |
michael@0 | 684 | chk->whoTo->rto_needed = 1; |
michael@0 | 685 | } |
michael@0 | 686 | } |
michael@0 | 687 | chk->do_rtt = 0; |
michael@0 | 688 | if (alt != net) { |
michael@0 | 689 | sctp_free_remote_addr(chk->whoTo); |
michael@0 | 690 | chk->no_fr_allowed = 1; |
michael@0 | 691 | chk->whoTo = alt; |
michael@0 | 692 | atomic_add_int(&alt->ref_count, 1); |
michael@0 | 693 | } else { |
michael@0 | 694 | chk->no_fr_allowed = 0; |
michael@0 | 695 | if (TAILQ_EMPTY(&stcb->asoc.send_queue)) { |
michael@0 | 696 | chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq; |
michael@0 | 697 | } else { |
michael@0 | 698 | chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq; |
michael@0 | 699 | } |
michael@0 | 700 | } |
michael@0 | 701 | /* CMT: Do not allow FRs on retransmitted TSNs. |
michael@0 | 702 | */ |
michael@0 | 703 | if (stcb->asoc.sctp_cmt_on_off > 0) { |
michael@0 | 704 | chk->no_fr_allowed = 1; |
michael@0 | 705 | } |
michael@0 | 706 | #ifdef THIS_SHOULD_NOT_BE_DONE |
michael@0 | 707 | } else if (chk->sent == SCTP_DATAGRAM_ACKED) { |
michael@0 | 708 | /* remember highest acked one */ |
michael@0 | 709 | could_be_sent = chk; |
michael@0 | 710 | #endif |
michael@0 | 711 | } |
michael@0 | 712 | if (chk->sent == SCTP_DATAGRAM_RESEND) { |
michael@0 | 713 | cnt_mk++; |
michael@0 | 714 | } |
michael@0 | 715 | } |
michael@0 | 716 | if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) { |
michael@0 | 717 | /* we did not subtract the same things? */ |
michael@0 | 718 | audit_tf = 1; |
michael@0 | 719 | } |
michael@0 | 720 | |
michael@0 | 721 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
michael@0 | 722 | sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT); |
michael@0 | 723 | } |
michael@0 | 724 | #ifdef SCTP_DEBUG |
michael@0 | 725 | if (num_mk) { |
michael@0 | 726 | SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n", |
michael@0 | 727 | tsnlast); |
michael@0 | 728 | SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%ld\n", |
michael@0 | 729 | num_mk, (u_long)stcb->asoc.peers_rwnd); |
michael@0 | 730 | SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n", |
michael@0 | 731 | tsnlast); |
michael@0 | 732 | SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%d\n", |
michael@0 | 733 | num_mk, |
michael@0 | 734 | (int)stcb->asoc.peers_rwnd); |
michael@0 | 735 | } |
michael@0 | 736 | #endif |
michael@0 | 737 | *num_marked = num_mk; |
michael@0 | 738 | *num_abandoned = cnt_abandoned; |
michael@0 | 739 | /* Now check for an ECN Echo that may be stranded, and |
michael@0 | 740 | * include the cnt_mk'd to have all resends in the |
michael@0 | 741 | * control queue. |
michael@0 | 742 | */ |
michael@0 | 743 | TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { |
michael@0 | 744 | if (chk->sent == SCTP_DATAGRAM_RESEND) { |
michael@0 | 745 | cnt_mk++; |
michael@0 | 746 | } |
michael@0 | 747 | if ((chk->whoTo == net) && |
michael@0 | 748 | (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { |
michael@0 | 749 | sctp_free_remote_addr(chk->whoTo); |
michael@0 | 750 | chk->whoTo = alt; |
michael@0 | 751 | if (chk->sent != SCTP_DATAGRAM_RESEND) { |
michael@0 | 752 | chk->sent = SCTP_DATAGRAM_RESEND; |
michael@0 | 753 | sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); |
michael@0 | 754 | cnt_mk++; |
michael@0 | 755 | } |
michael@0 | 756 | atomic_add_int(&alt->ref_count, 1); |
michael@0 | 757 | } |
michael@0 | 758 | } |
michael@0 | 759 | #ifdef THIS_SHOULD_NOT_BE_DONE |
michael@0 | 760 | if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) { |
michael@0 | 761 | /* fix it so we retransmit the highest acked anyway */ |
michael@0 | 762 | sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); |
michael@0 | 763 | cnt_mk++; |
michael@0 | 764 | could_be_sent->sent = SCTP_DATAGRAM_RESEND; |
michael@0 | 765 | } |
michael@0 | 766 | #endif |
michael@0 | 767 | if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) { |
michael@0 | 768 | #ifdef INVARIANTS |
michael@0 | 769 | SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n", |
michael@0 | 770 | cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk); |
michael@0 | 771 | #endif |
michael@0 | 772 | #ifndef SCTP_AUDITING_ENABLED |
michael@0 | 773 | stcb->asoc.sent_queue_retran_cnt = cnt_mk; |
michael@0 | 774 | #endif |
michael@0 | 775 | } |
michael@0 | 776 | if (audit_tf) { |
michael@0 | 777 | SCTPDBG(SCTP_DEBUG_TIMER4, |
michael@0 | 778 | "Audit total flight due to negative value net:%p\n", |
michael@0 | 779 | (void *)net); |
michael@0 | 780 | stcb->asoc.total_flight = 0; |
michael@0 | 781 | stcb->asoc.total_flight_count = 0; |
michael@0 | 782 | /* Clear all networks flight size */ |
michael@0 | 783 | TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) { |
michael@0 | 784 | lnets->flight_size = 0; |
michael@0 | 785 | SCTPDBG(SCTP_DEBUG_TIMER4, |
michael@0 | 786 | "Net:%p c-f cwnd:%d ssthresh:%d\n", |
michael@0 | 787 | (void *)lnets, lnets->cwnd, lnets->ssthresh); |
michael@0 | 788 | } |
michael@0 | 789 | TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { |
michael@0 | 790 | if (chk->sent < SCTP_DATAGRAM_RESEND) { |
michael@0 | 791 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { |
michael@0 | 792 | sctp_misc_ints(SCTP_FLIGHT_LOG_UP, |
michael@0 | 793 | chk->whoTo->flight_size, |
michael@0 | 794 | chk->book_size, |
michael@0 | 795 | (uintptr_t)chk->whoTo, |
michael@0 | 796 | chk->rec.data.TSN_seq); |
michael@0 | 797 | } |
michael@0 | 798 | |
michael@0 | 799 | sctp_flight_size_increase(chk); |
michael@0 | 800 | sctp_total_flight_increase(stcb, chk); |
michael@0 | 801 | } |
michael@0 | 802 | } |
michael@0 | 803 | } |
michael@0 | 804 | /* We return 1 if we only have a window probe outstanding */ |
michael@0 | 805 | return (0); |
michael@0 | 806 | } |
michael@0 | 807 | |
michael@0 | 808 | |
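/*
 * T3-rxt timer: apply threshold management (per path or, for a window
 * probe, per association), pick an alternate destination, mark eligible
 * chunks for retransmission via sctp_mark_all_for_resend(), back off the
 * RTO and cwnd, and handle the COOKIE-ECHOED and PR-SCTP special cases.
 * Returns 1 if the association was destroyed.
 */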
michael@0 | 809 | int |
michael@0 | 810 | sctp_t3rxt_timer(struct sctp_inpcb *inp, |
michael@0 | 811 | struct sctp_tcb *stcb, |
michael@0 | 812 | struct sctp_nets *net) |
michael@0 | 813 | { |
michael@0 | 814 | struct sctp_nets *alt; |
michael@0 | 815 | int win_probe, num_mk, num_abandoned; |
michael@0 | 816 | |
michael@0 | 817 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { |
michael@0 | 818 | sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT); |
michael@0 | 819 | } |
michael@0 | 820 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { |
michael@0 | 821 | struct sctp_nets *lnet; |
michael@0 | 822 | |
michael@0 | 823 | TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { |
michael@0 | 824 | if (net == lnet) { |
michael@0 | 825 | sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3); |
michael@0 | 826 | } else { |
michael@0 | 827 | sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3); |
michael@0 | 828 | } |
michael@0 | 829 | } |
michael@0 | 830 | } |
michael@0 | 831 | /* Find an alternate and mark those for retransmission */ |
michael@0 | 832 | if ((stcb->asoc.peers_rwnd == 0) && |
michael@0 | 833 | (stcb->asoc.total_flight < net->mtu)) { |
michael@0 | 834 | SCTP_STAT_INCR(sctps_timowindowprobe); |
michael@0 | 835 | win_probe = 1; |
michael@0 | 836 | } else { |
michael@0 | 837 | win_probe = 0; |
michael@0 | 838 | } |
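/*
 * A peer rwnd of zero with less than one MTU in flight means this
 * timeout is for a window probe, so below the error threshold is
 * charged to the association only, not to the individual path.
 */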
michael@0 | 839 | |
michael@0 | 840 | if (win_probe == 0) { |
michael@0 | 841 | /* We don't do normal threshold management on window probes */ |
michael@0 | 842 | if (sctp_threshold_management(inp, stcb, net, |
michael@0 | 843 | stcb->asoc.max_send_times)) { |
michael@0 | 844 | /* Association was destroyed */ |
michael@0 | 845 | return (1); |
michael@0 | 846 | } else { |
michael@0 | 847 | if (net != stcb->asoc.primary_destination) { |
michael@0 | 848 | /* send an immediate HB if our RTO is stale */ |
michael@0 | 849 | struct timeval now; |
michael@0 | 850 | unsigned int ms_goneby; |
michael@0 | 851 | |
michael@0 | 852 | (void)SCTP_GETTIME_TIMEVAL(&now); |
michael@0 | 853 | if (net->last_sent_time.tv_sec) { |
michael@0 | 854 | ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000; |
michael@0 | 855 | } else { |
michael@0 | 856 | ms_goneby = 0; |
michael@0 | 857 | } |
michael@0 | 858 | if ((net->dest_state & SCTP_ADDR_PF) == 0) { |
michael@0 | 859 | if ((ms_goneby > net->RTO) || (net->RTO == 0)) { |
michael@0 | 860 | /* |
michael@0 | 861 | * no recent feedback in an RTO or |
michael@0 | 862 | * more, request an RTT update |
michael@0 | 863 | */ |
michael@0 | 864 | sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED); |
michael@0 | 865 | } |
michael@0 | 866 | } |
michael@0 | 867 | } |
michael@0 | 868 | } |
michael@0 | 869 | } else { |
michael@0 | 870 | /* |
michael@0 | 871 | * For a window probe we don't penalize the nets but only |
michael@0 | 872 | * the association. This may fail it if SACKs are not coming |
michael@0 | 873 | * back. If SACKs are coming with rwnd locked at 0, we will |
michael@0 | 874 | * continue to hold things waiting for rwnd to rise |
michael@0 | 875 | */ |
michael@0 | 876 | if (sctp_threshold_management(inp, stcb, NULL, |
michael@0 | 877 | stcb->asoc.max_send_times)) { |
michael@0 | 878 | /* Association was destroyed */ |
michael@0 | 879 | return (1); |
michael@0 | 880 | } |
michael@0 | 881 | } |
michael@0 | 882 | if (stcb->asoc.sctp_cmt_on_off > 0) { |
michael@0 | 883 | if (net->pf_threshold < net->failure_threshold) { |
michael@0 | 884 | alt = sctp_find_alternate_net(stcb, net, 2); |
michael@0 | 885 | } else { |
michael@0 | 886 | /* |
michael@0 | 887 | * CMT: Using RTX_SSTHRESH policy for CMT. |
michael@0 | 888 | * If CMT is being used, then pick dest with |
michael@0 | 889 | * largest ssthresh for any retransmission. |
michael@0 | 890 | */ |
michael@0 | 891 | alt = sctp_find_alternate_net(stcb, net, 1); |
michael@0 | 892 | /* |
michael@0 | 893 | * CUCv2: If a different dest is picked for |
michael@0 | 894 | * the retransmission, then new |
michael@0 | 895 | * (rtx-)pseudo_cumack needs to be tracked |
michael@0 | 896 | * for orig dest. Let CUCv2 track new (rtx-) |
michael@0 | 897 | * pseudo-cumack always. |
michael@0 | 898 | */ |
michael@0 | 899 | net->find_pseudo_cumack = 1; |
michael@0 | 900 | net->find_rtx_pseudo_cumack = 1; |
michael@0 | 901 | } |
michael@0 | 902 | } else { |
michael@0 | 903 | alt = sctp_find_alternate_net(stcb, net, 0); |
michael@0 | 904 | } |
michael@0 | 905 | |
michael@0 | 906 | num_mk = 0; |
michael@0 | 907 | num_abandoned = 0; |
michael@0 | 908 | (void)sctp_mark_all_for_resend(stcb, net, alt, win_probe, |
michael@0 | 909 | &num_mk, &num_abandoned); |
michael@0 | 910 | /* FR Loss recovery just ended with the T3. */ |
michael@0 | 911 | stcb->asoc.fast_retran_loss_recovery = 0; |
michael@0 | 912 | |
michael@0 | 913 | /* CMT FR loss recovery ended with the T3 */ |
michael@0 | 914 | net->fast_retran_loss_recovery = 0; |
michael@0 | 915 | if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) && |
michael@0 | 916 | (net->flight_size == 0)) { |
michael@0 | 917 | (*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net); |
michael@0 | 918 | } |
michael@0 | 919 | |
michael@0 | 920 | /* |
michael@0 | 921 | * setup the sat loss recovery that prevents satellite cwnd advance. |
michael@0 | 922 | */ |
michael@0 | 923 | stcb->asoc.sat_t3_loss_recovery = 1; |
michael@0 | 924 | stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq; |
michael@0 | 925 | |
michael@0 | 926 | /* Backoff the timer and cwnd */ |
michael@0 | 927 | sctp_backoff_on_timeout(stcb, net, win_probe, num_mk, num_abandoned); |
michael@0 | 928 | if ((!(net->dest_state & SCTP_ADDR_REACHABLE)) || |
michael@0 | 929 | (net->dest_state & SCTP_ADDR_PF)) { |
michael@0 | 930 | /* Move all pending over too */ |
michael@0 | 931 | sctp_move_chunks_from_net(stcb, net); |
michael@0 | 932 | |
michael@0 | 933 | /* Get the address that failed, to |
michael@0 | 934 | * force a new src address selection and |
michael@0 | 935 | * a route allocation. |
michael@0 | 936 | */ |
michael@0 | 937 | if (net->ro._s_addr) { |
michael@0 | 938 | sctp_free_ifa(net->ro._s_addr); |
michael@0 | 939 | net->ro._s_addr = NULL; |
michael@0 | 940 | } |
michael@0 | 941 | net->src_addr_selected = 0; |
michael@0 | 942 | |
michael@0 | 943 | /* Force a route allocation too */ |
michael@0 | 944 | if (net->ro.ro_rt) { |
michael@0 | 945 | RTFREE(net->ro.ro_rt); |
michael@0 | 946 | net->ro.ro_rt = NULL; |
michael@0 | 947 | } |
michael@0 | 948 | |
michael@0 | 949 | /* Was it our primary? */ |
michael@0 | 950 | if ((stcb->asoc.primary_destination == net) && (alt != net)) { |
michael@0 | 951 | /* |
michael@0 | 952 | * Yes, note it as such and find an alternate. Note: |
michael@0 | 953 | * this means HB code must use this to resend the |
michael@0 | 954 | * primary if it goes active AND if someone does a |
michael@0 | 955 | * change-primary then this flag must be cleared |
michael@0 | 956 | * from any net structures. |
michael@0 | 957 | */ |
michael@0 | 958 | if (stcb->asoc.alternate) { |
michael@0 | 959 | sctp_free_remote_addr(stcb->asoc.alternate); |
michael@0 | 960 | } |
michael@0 | 961 | stcb->asoc.alternate = alt; |
michael@0 | 962 | atomic_add_int(&stcb->asoc.alternate->ref_count, 1); |
michael@0 | 963 | } |
michael@0 | 964 | } |
michael@0 | 965 | /* |
michael@0 | 966 | * Special case for cookie-echo'ed case, we don't do output but must |
michael@0 | 967 | * await the COOKIE-ACK before retransmission |
michael@0 | 968 | */ |
michael@0 | 969 | if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { |
michael@0 | 970 | /* |
michael@0 | 971 | * Here we just reset the timer and start again since we |
michael@0 | 972 | * have not established the asoc |
michael@0 | 973 | */ |
michael@0 | 974 | sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); |
michael@0 | 975 | return (0); |
michael@0 | 976 | } |
michael@0 | 977 | if (stcb->asoc.peer_supports_prsctp) { |
michael@0 | 978 | struct sctp_tmit_chunk *lchk; |
michael@0 | 979 | |
michael@0 | 980 | lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc); |
michael@0 | 981 | /* C3. See if we need to send a Fwd-TSN */ |
michael@0 | 982 | if (SCTP_TSN_GT(stcb->asoc.advanced_peer_ack_point, stcb->asoc.last_acked_seq)) { |
michael@0 | 983 | send_forward_tsn(stcb, &stcb->asoc); |
michael@0 | 984 | if (lchk) { |
michael@0 | 985 | /* Assure a timer is up */ |
michael@0 | 986 | sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo); |
michael@0 | 987 | } |
michael@0 | 988 | } |
michael@0 | 989 | } |
michael@0 | 990 | if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { |
michael@0 | 991 | sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX); |
michael@0 | 992 | } |
michael@0 | 993 | return (0); |
michael@0 | 994 | } |
michael@0 | 995 | |
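/*
 * T1-init timer: if the association is still in COOKIE-WAIT, apply
 * threshold management, back off the primary path (clamped to
 * initial_init_rto_max), possibly switch to an alternate primary, and
 * resend the INIT.  A pending delayed connection just triggers the INIT
 * immediately.  Returns 1 if the association was destroyed.
 */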
michael@0 | 996 | int |
michael@0 | 997 | sctp_t1init_timer(struct sctp_inpcb *inp, |
michael@0 | 998 | struct sctp_tcb *stcb, |
michael@0 | 999 | struct sctp_nets *net) |
michael@0 | 1000 | { |
michael@0 | 1001 | /* bump the thresholds */ |
michael@0 | 1002 | if (stcb->asoc.delayed_connection) { |
michael@0 | 1003 | /* |
michael@0 | 1004 | * special hook for delayed connection. The library did NOT |
michael@0 | 1005 | * complete the rest of its sends. |
michael@0 | 1006 | */ |
michael@0 | 1007 | stcb->asoc.delayed_connection = 0; |
michael@0 | 1008 | sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED); |
michael@0 | 1009 | return (0); |
michael@0 | 1010 | } |
michael@0 | 1011 | if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) { |
michael@0 | 1012 | return (0); |
michael@0 | 1013 | } |
michael@0 | 1014 | if (sctp_threshold_management(inp, stcb, net, |
michael@0 | 1015 | stcb->asoc.max_init_times)) { |
michael@0 | 1016 | /* Association was destroyed */ |
michael@0 | 1017 | return (1); |
michael@0 | 1018 | } |
michael@0 | 1019 | stcb->asoc.dropped_special_cnt = 0; |
michael@0 | 1020 | sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0, 0); |
michael@0 | 1021 | if (stcb->asoc.initial_init_rto_max < net->RTO) { |
michael@0 | 1022 | net->RTO = stcb->asoc.initial_init_rto_max; |
michael@0 | 1023 | } |
michael@0 | 1024 | if (stcb->asoc.numnets > 1) { |
michael@0 | 1025 | /* If we have more than one addr use it */ |
michael@0 | 1026 | struct sctp_nets *alt; |
michael@0 | 1027 | |
michael@0 | 1028 | alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0); |
michael@0 | 1029 | if (alt != stcb->asoc.primary_destination) { |
michael@0 | 1030 | sctp_move_chunks_from_net(stcb, stcb->asoc.primary_destination); |
michael@0 | 1031 | stcb->asoc.primary_destination = alt; |
michael@0 | 1032 | } |
michael@0 | 1033 | } |
michael@0 | 1034 | /* Send out a new init */ |
michael@0 | 1035 | sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED); |
michael@0 | 1036 | return (0); |
michael@0 | 1037 | } |
michael@0 | 1038 | |
michael@0 | 1039 | /* |
michael@0 | 1040 | * For cookie and asconf we actually need to find and mark for resend, then |
michael@0 | 1041 | * increment the resend counter (after all the threshold management stuff of |
michael@0 | 1042 | * course). |
michael@0 | 1043 | */ |
michael@0 | 1044 | int |
michael@0 | 1045 | sctp_cookie_timer(struct sctp_inpcb *inp, |
michael@0 | 1046 | struct sctp_tcb *stcb, |
michael@0 | 1047 | struct sctp_nets *net SCTP_UNUSED) |
michael@0 | 1048 | { |
michael@0 | 1049 | struct sctp_nets *alt; |
michael@0 | 1050 | struct sctp_tmit_chunk *cookie; |
michael@0 | 1051 | |
michael@0 | 1052 | /* first before all else we must find the cookie */ |
michael@0 | 1053 | TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) { |
michael@0 | 1054 | if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) { |
michael@0 | 1055 | break; |
michael@0 | 1056 | } |
michael@0 | 1057 | } |
michael@0 | 1058 | if (cookie == NULL) { |
michael@0 | 1059 | if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { |
michael@0 | 1060 | /* FOOBAR! */ |
michael@0 | 1061 | struct mbuf *oper; |
michael@0 | 1062 | |
michael@0 | 1063 | oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), |
michael@0 | 1064 | 0, M_NOWAIT, 1, MT_DATA); |
michael@0 | 1065 | if (oper) { |
michael@0 | 1066 | struct sctp_paramhdr *ph; |
michael@0 | 1067 | uint32_t *ippp; |
michael@0 | 1068 | |
michael@0 | 1069 | SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + |
michael@0 | 1070 | sizeof(uint32_t); |
michael@0 | 1071 | ph = mtod(oper, struct sctp_paramhdr *); |
michael@0 | 1072 | ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); |
michael@0 | 1073 | ph->param_length = htons(SCTP_BUF_LEN(oper)); |
michael@0 | 1074 | ippp = (uint32_t *) (ph + 1); |
michael@0 | 1075 | *ippp = htonl(SCTP_FROM_SCTP_TIMER+SCTP_LOC_3); |
michael@0 | 1076 | } |
michael@0 | 1077 | inp->last_abort_code = SCTP_FROM_SCTP_TIMER+SCTP_LOC_4; |
michael@0 | 1078 | sctp_abort_an_association(inp, stcb, oper, SCTP_SO_NOT_LOCKED); |
michael@0 | 1079 | } else { |
michael@0 | 1080 | #ifdef INVARIANTS |
michael@0 | 1081 | panic("Cookie timer expires in wrong state?"); |
michael@0 | 1082 | #else |
michael@0 | 1083 | SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc)); |
michael@0 | 1084 | return (0); |
michael@0 | 1085 | #endif |
michael@0 | 1086 | } |
michael@0 | 1087 | return (0); |
michael@0 | 1088 | } |
michael@0 | 1089 | /* Ok we found the cookie, threshold management next */ |
michael@0 | 1090 | if (sctp_threshold_management(inp, stcb, cookie->whoTo, |
michael@0 | 1091 | stcb->asoc.max_init_times)) { |
michael@0 | 1092 | /* Assoc is over */ |
michael@0 | 1093 | return (1); |
michael@0 | 1094 | } |
michael@0 | 1095 | /* |
michael@0 | 1096 | * cleared threshold management, now let's back off the address & select |
michael@0 | 1097 | * an alternate |
michael@0 | 1098 | */ |
michael@0 | 1099 | stcb->asoc.dropped_special_cnt = 0; |
michael@0 | 1100 | sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0, 0); |
michael@0 | 1101 | alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0); |
michael@0 | 1102 | if (alt != cookie->whoTo) { |
michael@0 | 1103 | sctp_free_remote_addr(cookie->whoTo); |
michael@0 | 1104 | cookie->whoTo = alt; |
michael@0 | 1105 | atomic_add_int(&alt->ref_count, 1); |
michael@0 | 1106 | } |
michael@0 | 1107 | /* Now mark the retran info */ |
michael@0 | 1108 | if (cookie->sent != SCTP_DATAGRAM_RESEND) { |
michael@0 | 1109 | sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); |
michael@0 | 1110 | } |
michael@0 | 1111 | cookie->sent = SCTP_DATAGRAM_RESEND; |
michael@0 | 1112 | /* |
michael@0 | 1113 | * Now call the output routine to kick out the cookie again. Note we |
michael@0 | 1114 | * don't mark any chunks for retran so that FR will need to kick in |
michael@0 | 1115 | * to move these (or a send timer). |
michael@0 | 1116 | */ |
michael@0 | 1117 | return (0); |
michael@0 | 1118 | } |
michael@0 | 1119 | |
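/*
 * Stream-reset timer: after threshold management, back off the path the
 * outstanding STRRESET request was sent on, move it (and any stranded ECN
 * Echo) to an alternate destination, mark it for retransmission and
 * restart the timer.
 */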
michael@0 | 1120 | int |
michael@0 | 1121 | sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, |
michael@0 | 1122 | struct sctp_nets *net) |
michael@0 | 1123 | { |
michael@0 | 1124 | struct sctp_nets *alt; |
michael@0 | 1125 | struct sctp_tmit_chunk *strrst = NULL, *chk = NULL; |
michael@0 | 1126 | |
michael@0 | 1127 | if (stcb->asoc.stream_reset_outstanding == 0) { |
michael@0 | 1128 | return (0); |
michael@0 | 1129 | } |
michael@0 | 1130 | /* find the existing STRRESET, we use the seq number we sent out on */ |
michael@0 | 1131 | (void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst); |
michael@0 | 1132 | if (strrst == NULL) { |
michael@0 | 1133 | return (0); |
michael@0 | 1134 | } |
michael@0 | 1135 | /* do threshold management */ |
michael@0 | 1136 | if (sctp_threshold_management(inp, stcb, strrst->whoTo, |
michael@0 | 1137 | stcb->asoc.max_send_times)) { |
michael@0 | 1138 | /* Assoc is over */ |
michael@0 | 1139 | return (1); |
michael@0 | 1140 | } |
michael@0 | 1141 | /* |
michael@0 | 1142 | * cleared threshold management, now let's back off the address & select |
michael@0 | 1143 | * an alternate |
michael@0 | 1144 | */ |
michael@0 | 1145 | sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0, 0); |
michael@0 | 1146 | alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0); |
michael@0 | 1147 | sctp_free_remote_addr(strrst->whoTo); |
michael@0 | 1148 | strrst->whoTo = alt; |
michael@0 | 1149 | atomic_add_int(&alt->ref_count, 1); |
michael@0 | 1150 | |
michael@0 | 1151 | /* See if a ECN Echo is also stranded */ |
michael@0 | 1152 | TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { |
michael@0 | 1153 | if ((chk->whoTo == net) && |
michael@0 | 1154 | (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { |
michael@0 | 1155 | sctp_free_remote_addr(chk->whoTo); |
michael@0 | 1156 | if (chk->sent != SCTP_DATAGRAM_RESEND) { |
michael@0 | 1157 | chk->sent = SCTP_DATAGRAM_RESEND; |
michael@0 | 1158 | sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); |
michael@0 | 1159 | } |
michael@0 | 1160 | chk->whoTo = alt; |
michael@0 | 1161 | atomic_add_int(&alt->ref_count, 1); |
michael@0 | 1162 | } |
michael@0 | 1163 | } |
michael@0 | 1164 | if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { |
michael@0 | 1165 | /* |
michael@0 | 1166 | * If the address went un-reachable, we need to move to |
michael@0 | 1167 | * alternates for ALL chk's in queue |
michael@0 | 1168 | */ |
michael@0 | 1169 | sctp_move_chunks_from_net(stcb, net); |
michael@0 | 1170 | } |
michael@0 | 1171 | /* mark the retran info */ |
michael@0 | 1172 | if (strrst->sent != SCTP_DATAGRAM_RESEND) |
michael@0 | 1173 | sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); |
michael@0 | 1174 | strrst->sent = SCTP_DATAGRAM_RESEND; |
michael@0 | 1175 | |
michael@0 | 1176 | /* restart the timer */ |
michael@0 | 1177 | sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo); |
michael@0 | 1178 | return (0); |
michael@0 | 1179 | } |
michael@0 | 1180 | |
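/*
 * ASCONF timer: send a fresh ASCONF if none is queued; otherwise apply
 * threshold management, give up on ASCONF entirely if the peer keeps
 * ignoring it, and move the queued ASCONFs (plus any stranded ECN Echo)
 * to an alternate destination marked for retransmission.
 */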
michael@0 | 1181 | int |
michael@0 | 1182 | sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, |
michael@0 | 1183 | struct sctp_nets *net) |
michael@0 | 1184 | { |
michael@0 | 1185 | struct sctp_nets *alt; |
michael@0 | 1186 | struct sctp_tmit_chunk *asconf, *chk; |
michael@0 | 1187 | |
michael@0 | 1188 | /* is this a first send, or a retransmission? */ |
michael@0 | 1189 | if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) { |
michael@0 | 1190 | /* compose a new ASCONF chunk and send it */ |
michael@0 | 1191 | sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED); |
michael@0 | 1192 | } else { |
michael@0 | 1193 | /* |
michael@0 | 1194 | * Retransmission of the existing ASCONF is needed |
michael@0 | 1195 | */ |
michael@0 | 1196 | |
michael@0 | 1197 | /* find the existing ASCONF */ |
michael@0 | 1198 | asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue); |
michael@0 | 1199 | if (asconf == NULL) { |
michael@0 | 1200 | return (0); |
michael@0 | 1201 | } |
michael@0 | 1202 | /* do threshold management */ |
michael@0 | 1203 | if (sctp_threshold_management(inp, stcb, asconf->whoTo, |
michael@0 | 1204 | stcb->asoc.max_send_times)) { |
michael@0 | 1205 | /* Assoc is over */ |
michael@0 | 1206 | return (1); |
michael@0 | 1207 | } |
michael@0 | 1208 | if (asconf->snd_count > stcb->asoc.max_send_times) { |
michael@0 | 1209 | /* |
michael@0 | 1210 | * Something is rotten: our peer is not responding to |
michael@0 | 1211 | * ASCONFs but apparently is to other chunks, i.e. it |
michael@0 | 1212 | * is not properly handling the upper bits of the chunk type. |
michael@0 | 1213 | * Mark this peer as ASCONF-incapable and clean up. |
michael@0 | 1214 | */ |
michael@0 | 1215 | SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n"); |
michael@0 | 1216 | sctp_asconf_cleanup(stcb, net); |
michael@0 | 1217 | return (0); |
michael@0 | 1218 | } |
michael@0 | 1219 | /* |
michael@0 | 1220 | * cleared threshold management, so now back off the net and |
michael@0 | 1221 | * select an alternate |
michael@0 | 1222 | */ |
michael@0 | 1223 | sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0, 0); |
michael@0 | 1224 | alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0); |
michael@0 | 1225 | if (asconf->whoTo != alt) { |
michael@0 | 1226 | sctp_free_remote_addr(asconf->whoTo); |
michael@0 | 1227 | asconf->whoTo = alt; |
michael@0 | 1228 | atomic_add_int(&alt->ref_count, 1); |
michael@0 | 1229 | } |
michael@0 | 1230 | |
michael@0 | 1231 | /* See if an ECN Echo is also stranded */ |
michael@0 | 1232 | TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { |
michael@0 | 1233 | if ((chk->whoTo == net) && |
michael@0 | 1234 | (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { |
michael@0 | 1235 | sctp_free_remote_addr(chk->whoTo); |
michael@0 | 1236 | chk->whoTo = alt; |
michael@0 | 1237 | if (chk->sent != SCTP_DATAGRAM_RESEND) { |
michael@0 | 1238 | chk->sent = SCTP_DATAGRAM_RESEND; |
michael@0 | 1239 | sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); |
michael@0 | 1240 | } |
michael@0 | 1241 | atomic_add_int(&alt->ref_count, 1); |
michael@0 | 1242 | } |
michael@0 | 1243 | } |
michael@0 | 1244 | TAILQ_FOREACH(chk, &stcb->asoc.asconf_send_queue, sctp_next) { |
michael@0 | 1245 | if (chk->whoTo != alt) { |
michael@0 | 1246 | sctp_free_remote_addr(chk->whoTo); |
michael@0 | 1247 | chk->whoTo = alt; |
michael@0 | 1248 | atomic_add_int(&alt->ref_count, 1); |
michael@0 | 1249 | } |
michael@0 | 1250 | if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT) |
michael@0 | 1251 | sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); |
michael@0 | 1252 | chk->sent = SCTP_DATAGRAM_RESEND; |
michael@0 | 1253 | } |
michael@0 | 1254 | if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { |
michael@0 | 1255 | /* |
michael@0 | 1256 | * If the address went unreachable, we need to move |
michael@0 | 1257 | * to the alternate for ALL chunks in the queue |
michael@0 | 1258 | */ |
michael@0 | 1259 | sctp_move_chunks_from_net(stcb, net); |
michael@0 | 1260 | } |
michael@0 | 1261 | /* mark the retran info */ |
michael@0 | 1262 | if (asconf->sent != SCTP_DATAGRAM_RESEND) |
michael@0 | 1263 | sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); |
michael@0 | 1264 | asconf->sent = SCTP_DATAGRAM_RESEND; |
michael@0 | 1265 | |
michael@0 | 1266 | /* send another ASCONF if there is one and we are able to */ |
michael@0 | 1267 | sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED); |
michael@0 | 1268 | } |
michael@0 | 1269 | return (0); |
michael@0 | 1270 | } |
michael@0 | 1271 | |
michael@0 | 1272 | /* Mobility adaptation */ |
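michael@0 |      | /* |
michael@0 |      | * The timer that held on to the deleted primary address has expired: |
michael@0 |      | * release the cached deleted_primary and clear the PRIM_DELETED |
michael@0 |      | * mobility flag. |
michael@0 |      | */ |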
michael@0 | 1273 | void |
michael@0 | 1274 | sctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, |
michael@0 | 1275 | struct sctp_nets *net SCTP_UNUSED) |
michael@0 | 1276 | { |
michael@0 | 1277 | if (stcb->asoc.deleted_primary == NULL) { |
michael@0 | 1278 | SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n"); |
michael@0 | 1279 | sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); |
michael@0 | 1280 | return; |
michael@0 | 1281 | } |
michael@0 | 1282 | SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: finished to keep deleted primary "); |
michael@0 | 1283 | SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa); |
michael@0 | 1284 | sctp_free_remote_addr(stcb->asoc.deleted_primary); |
michael@0 | 1285 | stcb->asoc.deleted_primary = NULL; |
michael@0 | 1286 | sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); |
michael@0 | 1287 | return; |
michael@0 | 1288 | } |
michael@0 | 1289 | |
michael@0 | 1290 | /* |
michael@0 | 1291 | * For the shutdown and shutdown-ack, we do not keep one around on the |
michael@0 | 1292 | * control queue. This means we must generate a new one and call the general |
michael@0 | 1293 | * chunk output routine, AFTER having done threshold management. |
michael@0 | 1294 | * It is assumed that net is non-NULL. |
michael@0 | 1295 | */ |
michael@0 | 1296 | int |
michael@0 | 1297 | sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, |
michael@0 | 1298 | struct sctp_nets *net) |
michael@0 | 1299 | { |
michael@0 | 1300 | struct sctp_nets *alt; |
michael@0 | 1301 | |
michael@0 | 1302 | /* first, do threshold management */ |
michael@0 | 1303 | if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { |
michael@0 | 1304 | /* Assoc is over */ |
michael@0 | 1305 | return (1); |
michael@0 | 1306 | } |
michael@0 | 1307 | sctp_backoff_on_timeout(stcb, net, 1, 0, 0); |
michael@0 | 1308 | /* second, select an alternate */ |
michael@0 | 1309 | alt = sctp_find_alternate_net(stcb, net, 0); |
michael@0 | 1310 | |
michael@0 | 1311 | /* third, generate a SHUTDOWN into the queue for our net */ |
michael@0 | 1312 | sctp_send_shutdown(stcb, alt); |
michael@0 | 1313 | |
michael@0 | 1314 | /* fourth, restart the timer */ |
michael@0 | 1315 | sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt); |
michael@0 | 1316 | return (0); |
michael@0 | 1317 | } |
michael@0 | 1318 | |
michael@0 | 1319 | int |
michael@0 | 1320 | sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, |
michael@0 | 1321 | struct sctp_nets *net) |
michael@0 | 1322 | { |
michael@0 | 1323 | struct sctp_nets *alt; |
michael@0 | 1324 | |
michael@0 | 1325 | /* first, do threshold management */ |
michael@0 | 1326 | if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { |
michael@0 | 1327 | /* Assoc is over */ |
michael@0 | 1328 | return (1); |
michael@0 | 1329 | } |
michael@0 | 1330 | sctp_backoff_on_timeout(stcb, net, 1, 0, 0); |
michael@0 | 1331 | /* second, select an alternate */ |
michael@0 | 1332 | alt = sctp_find_alternate_net(stcb, net, 0); |
michael@0 | 1333 | |
michael@0 | 1334 | /* third, generate a SHUTDOWN-ACK into the queue for our net */ |
michael@0 | 1335 | sctp_send_shutdown_ack(stcb, alt); |
michael@0 | 1336 | |
michael@0 | 1337 | /* fourth, restart the timer */ |
michael@0 | 1338 | sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt); |
michael@0 | 1339 | return (0); |
michael@0 | 1340 | } |
michael@0 | 1341 | |
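michael@0 |      | /* |
michael@0 |      | * Audit the stream output queues when the send/sent queues are empty but |
michael@0 |      | * total_output_queue_size still claims data is pending: re-sync the counters |
michael@0 |      | * with the stream scheduler and, if chunks really are queued, kick the |
michael@0 |      | * output routine. |
michael@0 |      | */ |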
michael@0 | 1342 | static void |
michael@0 | 1343 | sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp, |
michael@0 | 1344 | struct sctp_tcb *stcb) |
michael@0 | 1345 | { |
michael@0 | 1346 | struct sctp_stream_queue_pending *sp; |
michael@0 | 1347 | unsigned int i, chks_in_queue = 0; |
michael@0 | 1348 | int being_filled = 0; |
michael@0 | 1349 | /* |
michael@0 | 1350 | * This function is ONLY called when the send/sent queues are empty. |
michael@0 | 1351 | */ |
michael@0 | 1352 | if ((stcb == NULL) || (inp == NULL)) |
michael@0 | 1353 | return; |
michael@0 | 1354 | |
michael@0 | 1355 | if (stcb->asoc.sent_queue_retran_cnt) { |
michael@0 | 1356 | SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n", |
michael@0 | 1357 | stcb->asoc.sent_queue_retran_cnt); |
michael@0 | 1358 | stcb->asoc.sent_queue_retran_cnt = 0; |
michael@0 | 1359 | } |
michael@0 | 1360 | if (stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) { |
michael@0 | 1361 | /* No stream scheduler information, initialize scheduler */ |
michael@0 | 1362 | stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 0); |
michael@0 | 1363 | if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) { |
michael@0 | 1364 | /* yep, we lost a stream or two */ |
michael@0 | 1365 | SCTP_PRINTF("Found additional streams NOT managed by scheduler, corrected\n"); |
michael@0 | 1366 | } else { |
michael@0 | 1367 | /* no streams lost */ |
michael@0 | 1368 | stcb->asoc.total_output_queue_size = 0; |
michael@0 | 1369 | } |
michael@0 | 1370 | } |
michael@0 | 1371 | /* Check to see if some data is queued; if so, report it */ |
michael@0 | 1372 | for (i = 0; i < stcb->asoc.streamoutcnt; i++) { |
michael@0 | 1373 | if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) { |
michael@0 | 1374 | TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) { |
michael@0 | 1375 | if (sp->msg_is_complete) |
michael@0 | 1376 | being_filled++; |
michael@0 | 1377 | chks_in_queue++; |
michael@0 | 1378 | } |
michael@0 | 1379 | } |
michael@0 | 1380 | } |
michael@0 | 1381 | if (chks_in_queue != stcb->asoc.stream_queue_cnt) { |
michael@0 | 1382 | SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n", |
michael@0 | 1383 | stcb->asoc.stream_queue_cnt, chks_in_queue); |
michael@0 | 1384 | } |
michael@0 | 1385 | if (chks_in_queue) { |
michael@0 | 1386 | /* call the output queue function */ |
michael@0 | 1387 | sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); |
michael@0 | 1388 | if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) && |
michael@0 | 1389 | (TAILQ_EMPTY(&stcb->asoc.sent_queue))) { |
michael@0 | 1390 | /* |
michael@0 | 1391 | * Probably should go in and make it go back through |
michael@0 | 1392 | * and add fragments allowed |
michael@0 | 1393 | */ |
michael@0 | 1394 | if (being_filled == 0) { |
michael@0 | 1395 | SCTP_PRINTF("Still nothing moved %d chunks are stuck\n", |
michael@0 | 1396 | chks_in_queue); |
michael@0 | 1397 | } |
michael@0 | 1398 | } |
michael@0 | 1399 | } else { |
michael@0 | 1400 | SCTP_PRINTF("Found no chunks on any queue tot:%lu\n", |
michael@0 | 1401 | (u_long)stcb->asoc.total_output_queue_size); |
michael@0 | 1402 | stcb->asoc.total_output_queue_size = 0; |
michael@0 | 1403 | } |
michael@0 | 1404 | } |
michael@0 | 1405 | |
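michael@0 |      | /* |
michael@0 |      | * Heartbeat timer expired: if the last heartbeat got no response, drop the |
michael@0 |      | * cached source address, back off the path and apply threshold management. |
michael@0 |      | * Then, unless heartbeats are disabled on this path (or one was just queued |
michael@0 |      | * when the path moved to PF), send a heartbeat if the HB interval has |
michael@0 |      | * elapsed or the path is potentially failed. Returns 1 if the association |
michael@0 |      | * was dropped, 0 otherwise. |
michael@0 |      | */ |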
michael@0 | 1406 | int |
michael@0 | 1407 | sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, |
michael@0 | 1408 | struct sctp_nets *net) |
michael@0 | 1409 | { |
michael@0 | 1410 | uint8_t net_was_pf; |
michael@0 | 1411 | |
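michael@0 |      | /* Remember whether this path was already potentially failed (PF) before |
michael@0 |      | threshold management below possibly moves it there */ |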
michael@0 | 1412 | if (net->dest_state & SCTP_ADDR_PF) { |
michael@0 | 1413 | net_was_pf = 1; |
michael@0 | 1414 | } else { |
michael@0 | 1415 | net_was_pf = 0; |
michael@0 | 1416 | } |
michael@0 | 1417 | if (net->hb_responded == 0) { |
michael@0 | 1418 | if (net->ro._s_addr) { |
michael@0 | 1419 | /* Invalidate the src address if we did not get |
michael@0 | 1420 | * a response last time. |
michael@0 | 1421 | */ |
michael@0 | 1422 | sctp_free_ifa(net->ro._s_addr); |
michael@0 | 1423 | net->ro._s_addr = NULL; |
michael@0 | 1424 | net->src_addr_selected = 0; |
michael@0 | 1425 | } |
michael@0 | 1426 | sctp_backoff_on_timeout(stcb, net, 1, 0, 0); |
michael@0 | 1427 | if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { |
michael@0 | 1428 | /* Assoc is over */ |
michael@0 | 1429 | return (1); |
michael@0 | 1430 | } |
michael@0 | 1431 | } |
michael@0 | 1432 | /* Zero PBA, if it needs it */ |
michael@0 | 1433 | if (net->partial_bytes_acked) { |
michael@0 | 1434 | net->partial_bytes_acked = 0; |
michael@0 | 1435 | } |
michael@0 | 1436 | if ((stcb->asoc.total_output_queue_size > 0) && |
michael@0 | 1437 | (TAILQ_EMPTY(&stcb->asoc.send_queue)) && |
michael@0 | 1438 | (TAILQ_EMPTY(&stcb->asoc.sent_queue))) { |
michael@0 | 1439 | sctp_audit_stream_queues_for_size(inp, stcb); |
michael@0 | 1440 | } |
michael@0 | 1441 | if (!(net->dest_state & SCTP_ADDR_NOHB) && |
michael@0 | 1442 | !((net_was_pf == 0) && (net->dest_state & SCTP_ADDR_PF))) { |
michael@0 | 1443 | /* when the net was moved to PF during threshold management, an HB has |
michael@0 | 1444 | already been queued by that routine */ |
michael@0 | 1445 | uint32_t ms_gone_by; |
michael@0 | 1446 | |
michael@0 | 1447 | if ((net->last_sent_time.tv_sec > 0) || |
michael@0 | 1448 | (net->last_sent_time.tv_usec > 0)) { |
michael@0 | 1449 | #ifdef __FreeBSD__ |
michael@0 | 1450 | struct timeval diff; |
michael@0 | 1451 | |
michael@0 | 1452 | SCTP_GETTIME_TIMEVAL(&diff); |
michael@0 | 1453 | timevalsub(&diff, &net->last_sent_time); |
michael@0 | 1454 | #else |
michael@0 | 1455 | struct timeval diff, now; |
michael@0 | 1456 | |
michael@0 | 1457 | SCTP_GETTIME_TIMEVAL(&now); |
michael@0 | 1458 | timersub(&now, &net->last_sent_time, &diff); |
michael@0 | 1459 | #endif |
michael@0 | 1460 | ms_gone_by = (uint32_t)(diff.tv_sec * 1000) + |
michael@0 | 1461 | (uint32_t)(diff.tv_usec / 1000); |
michael@0 | 1462 | } else { |
michael@0 | 1463 | ms_gone_by = 0xffffffff; |
michael@0 | 1464 | } |
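michael@0 |      | /* Send a heartbeat if the HB interval has elapsed or the path is in PF */ |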
michael@0 | 1465 | if ((ms_gone_by >= net->heart_beat_delay) || |
michael@0 | 1466 | (net->dest_state & SCTP_ADDR_PF)) { |
michael@0 | 1467 | sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED); |
michael@0 | 1468 | } |
michael@0 | 1469 | } |
michael@0 | 1470 | return (0); |
michael@0 | 1471 | } |
michael@0 | 1472 | |
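michael@0 |      | /* |
michael@0 |      | * Path-MTU raise timer: see whether the path MTU can be stepped up to the |
michael@0 |      | * next value in the MTU table, limited by what the route of the selected |
michael@0 |      | * source address allows, and then restart the timer. |
michael@0 |      | */ |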
michael@0 | 1473 | void |
michael@0 | 1474 | sctp_pathmtu_timer(struct sctp_inpcb *inp, |
michael@0 | 1475 | struct sctp_tcb *stcb, |
michael@0 | 1476 | struct sctp_nets *net) |
michael@0 | 1477 | { |
michael@0 | 1478 | uint32_t next_mtu, mtu; |
michael@0 | 1479 | |
michael@0 | 1480 | next_mtu = sctp_get_next_mtu(net->mtu); |
michael@0 | 1481 | |
michael@0 | 1482 | if ((next_mtu > net->mtu) && (net->port == 0)) { |
michael@0 | 1483 | if ((net->src_addr_selected == 0) || |
michael@0 | 1484 | (net->ro._s_addr == NULL) || |
michael@0 | 1485 | (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) { |
michael@0 | 1486 | if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) { |
michael@0 | 1487 | sctp_free_ifa(net->ro._s_addr); |
michael@0 | 1488 | net->ro._s_addr = NULL; |
michael@0 | 1489 | net->src_addr_selected = 0; |
michael@0 | 1490 | } else if (net->ro._s_addr == NULL) { |
michael@0 | 1491 | #if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE) |
michael@0 | 1492 | if (net->ro._l_addr.sa.sa_family == AF_INET6) { |
michael@0 | 1493 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; |
michael@0 | 1494 | /* KAME hack: embed scopeid */ |
michael@0 | 1495 | #if defined(__APPLE__) |
michael@0 | 1496 | #if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) |
michael@0 | 1497 | (void)in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL); |
michael@0 | 1498 | #else |
michael@0 | 1499 | (void)in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL); |
michael@0 | 1500 | #endif |
michael@0 | 1501 | #elif defined(SCTP_KAME) |
michael@0 | 1502 | (void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)); |
michael@0 | 1503 | #else |
michael@0 | 1504 | (void)in6_embedscope(&sin6->sin6_addr, sin6); |
michael@0 | 1505 | #endif |
michael@0 | 1506 | } |
michael@0 | 1507 | #endif |
michael@0 | 1508 | |
michael@0 | 1509 | net->ro._s_addr = sctp_source_address_selection(inp, |
michael@0 | 1510 | stcb, |
michael@0 | 1511 | (sctp_route_t *)&net->ro, |
michael@0 | 1512 | net, 0, stcb->asoc.vrf_id); |
michael@0 | 1513 | #if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE) |
michael@0 | 1514 | if (net->ro._l_addr.sa.sa_family == AF_INET6) { |
michael@0 | 1515 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; |
michael@0 | 1516 | #ifdef SCTP_KAME |
michael@0 | 1517 | (void)sa6_recoverscope(sin6); |
michael@0 | 1518 | #else |
michael@0 | 1519 | (void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL); |
michael@0 | 1520 | #endif /* SCTP_KAME */ |
michael@0 | 1521 | } |
michael@0 | 1522 | #endif /* INET6 */ |
michael@0 | 1523 | } |
michael@0 | 1524 | if (net->ro._s_addr) |
michael@0 | 1525 | net->src_addr_selected = 1; |
michael@0 | 1526 | } |
michael@0 | 1527 | if (net->ro._s_addr) { |
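michael@0 |      | /* Gather the MTU the route allows; with UDP encapsulation (net->port != 0) |
michael@0 |      | leave room for the UDP header */ |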
michael@0 | 1528 | mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_rt); |
michael@0 | 1529 | if (net->port) { |
michael@0 | 1530 | mtu -= sizeof(struct udphdr); |
michael@0 | 1531 | } |
michael@0 | 1532 | if (mtu > next_mtu) { |
michael@0 | 1533 | net->mtu = next_mtu; |
michael@0 | 1534 | } |
michael@0 | 1535 | } |
michael@0 | 1536 | } |
michael@0 | 1537 | /* restart the timer */ |
michael@0 | 1538 | sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); |
michael@0 | 1539 | } |
michael@0 | 1540 | |
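michael@0 |      | /* |
michael@0 |      | * Autoclose timer: if the feature is enabled and no data has been sent or |
michael@0 |      | * received for the configured number of ticks, flush the output path and, |
michael@0 |      | * once the send and sent queues are empty, move to SHUTDOWN-SENT and send a |
michael@0 |      | * SHUTDOWN. If not enough idle time has passed, re-arm the timer with the |
michael@0 |      | * time that is left. |
michael@0 |      | */ |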
michael@0 | 1541 | void |
michael@0 | 1542 | sctp_autoclose_timer(struct sctp_inpcb *inp, |
michael@0 | 1543 | struct sctp_tcb *stcb, |
michael@0 | 1544 | struct sctp_nets *net) |
michael@0 | 1545 | { |
michael@0 | 1546 | struct timeval tn, *tim_touse; |
michael@0 | 1547 | struct sctp_association *asoc; |
michael@0 | 1548 | int ticks_gone_by; |
michael@0 | 1549 | |
michael@0 | 1550 | (void)SCTP_GETTIME_TIMEVAL(&tn); |
michael@0 | 1551 | if (stcb->asoc.sctp_autoclose_ticks && |
michael@0 | 1552 | sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { |
michael@0 | 1553 | /* Auto close is on */ |
michael@0 | 1554 | asoc = &stcb->asoc; |
michael@0 | 1555 | /* pick the time to use */ |
michael@0 | 1556 | if (asoc->time_last_rcvd.tv_sec > |
michael@0 | 1557 | asoc->time_last_sent.tv_sec) { |
michael@0 | 1558 | tim_touse = &asoc->time_last_rcvd; |
michael@0 | 1559 | } else { |
michael@0 | 1560 | tim_touse = &asoc->time_last_sent; |
michael@0 | 1561 | } |
michael@0 | 1562 | /* Has enough time now transpired to autoclose? */ |
michael@0 | 1563 | ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec); |
michael@0 | 1564 | if ((ticks_gone_by > 0) && |
michael@0 | 1565 | (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) { |
michael@0 | 1566 | /* |
michael@0 | 1567 | * autoclose time has hit, call the output routine, |
michael@0 | 1568 | * which should do nothing just to be SURE we don't |
michael@0 | 1569 | * have hanging data. We can then safely check the |
michael@0 | 1570 | * queues and know that we are clear to send |
michael@0 | 1571 | * shutdown |
michael@0 | 1572 | */ |
michael@0 | 1573 | sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); |
michael@0 | 1574 | /* Are we clean? */ |
michael@0 | 1575 | if (TAILQ_EMPTY(&asoc->send_queue) && |
michael@0 | 1576 | TAILQ_EMPTY(&asoc->sent_queue)) { |
michael@0 | 1577 | /* |
michael@0 | 1578 | * there is nothing queued to send, so I'm |
michael@0 | 1579 | * done... |
michael@0 | 1580 | */ |
michael@0 | 1581 | if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { |
michael@0 | 1582 | /* only send SHUTDOWN 1st time thru */ |
michael@0 | 1583 | struct sctp_nets *netp; |
michael@0 | 1584 | |
michael@0 | 1585 | if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || |
michael@0 | 1586 | (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { |
michael@0 | 1587 | SCTP_STAT_DECR_GAUGE32(sctps_currestab); |
michael@0 | 1588 | } |
michael@0 | 1589 | SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); |
michael@0 | 1590 | SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); |
michael@0 | 1591 | sctp_stop_timers_for_shutdown(stcb); |
michael@0 | 1592 | if (stcb->asoc.alternate) { |
michael@0 | 1593 | netp = stcb->asoc.alternate; |
michael@0 | 1594 | } else { |
michael@0 | 1595 | netp = stcb->asoc.primary_destination; |
michael@0 | 1596 | } |
michael@0 | 1597 | sctp_send_shutdown(stcb, netp); |
michael@0 | 1598 | sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, |
michael@0 | 1599 | stcb->sctp_ep, stcb, |
michael@0 | 1600 | netp); |
michael@0 | 1601 | sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, |
michael@0 | 1602 | stcb->sctp_ep, stcb, |
michael@0 | 1603 | netp); |
michael@0 | 1604 | } |
michael@0 | 1605 | } |
michael@0 | 1606 | } else { |
michael@0 | 1607 | /* |
michael@0 | 1608 | * No auto close at this time; reset the timeout to check |
michael@0 | 1609 | * again later |
michael@0 | 1610 | */ |
michael@0 | 1611 | int tmp; |
michael@0 | 1612 | |
michael@0 | 1613 | /* fool the timer startup into using the time left */ |
michael@0 | 1614 | tmp = asoc->sctp_autoclose_ticks; |
michael@0 | 1615 | asoc->sctp_autoclose_ticks -= ticks_gone_by; |
michael@0 | 1616 | sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, |
michael@0 | 1617 | net); |
michael@0 | 1618 | /* restore the real tick value */ |
michael@0 | 1619 | asoc->sctp_autoclose_ticks = tmp; |
michael@0 | 1620 | } |
michael@0 | 1621 | } |
michael@0 | 1622 | } |
michael@0 | 1623 |