Thu, 15 Jan 2015 15:59:08 +0100
Implement a real Private Browsing Mode condition by changing the API/ABI;
this resolves Tor bug #9701 and complies with the disk-avoidance requirement
documented at https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
michael@0 | 1 | /*- |
michael@0 | 2 | * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. |
michael@0 | 3 | * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. |
michael@0 | 4 | * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. |
michael@0 | 5 | * |
michael@0 | 6 | * Redistribution and use in source and binary forms, with or without |
michael@0 | 7 | * modification, are permitted provided that the following conditions are met: |
michael@0 | 8 | * |
michael@0 | 9 | * a) Redistributions of source code must retain the above copyright notice, |
michael@0 | 10 | * this list of conditions and the following disclaimer. |
michael@0 | 11 | * |
michael@0 | 12 | * b) Redistributions in binary form must reproduce the above copyright |
michael@0 | 13 | * notice, this list of conditions and the following disclaimer in |
michael@0 | 14 | * the documentation and/or other materials provided with the distribution. |
michael@0 | 15 | * |
michael@0 | 16 | * c) Neither the name of Cisco Systems, Inc. nor the names of its |
michael@0 | 17 | * contributors may be used to endorse or promote products derived |
michael@0 | 18 | * from this software without specific prior written permission. |
michael@0 | 19 | * |
michael@0 | 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
michael@0 | 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, |
michael@0 | 22 | * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
michael@0 | 23 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
michael@0 | 24 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
michael@0 | 25 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
michael@0 | 26 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
michael@0 | 27 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
michael@0 | 28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
michael@0 | 29 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
michael@0 | 30 | * THE POSSIBILITY OF SUCH DAMAGE. |
michael@0 | 31 | */ |
michael@0 | 32 | |
michael@0 | 33 | #ifdef __FreeBSD__ |
michael@0 | 34 | #include <sys/cdefs.h> |
michael@0 | 35 | __FBSDID("$FreeBSD: head/sys/netinet/sctp_structs.h 255190 2013-09-03 19:31:59Z tuexen $"); |
michael@0 | 36 | #endif |
michael@0 | 37 | |
michael@0 | 38 | #ifndef _NETINET_SCTP_STRUCTS_H_ |
michael@0 | 39 | #define _NETINET_SCTP_STRUCTS_H_ |
michael@0 | 40 | |
michael@0 | 41 | #include <netinet/sctp_os.h> |
michael@0 | 42 | #include <netinet/sctp_header.h> |
michael@0 | 43 | #include <netinet/sctp_auth.h> |
michael@0 | 44 | |
michael@0 | 45 | struct sctp_timer { |
michael@0 | 46 | sctp_os_timer_t timer; |
michael@0 | 47 | |
michael@0 | 48 | int type; |
michael@0 | 49 | /* |
michael@0 | 50 | * Depending on the timer type these will be setup and cast with the |
michael@0 | 51 | * appropriate entity. |
michael@0 | 52 | */ |
michael@0 | 53 | void *ep; |
michael@0 | 54 | void *tcb; |
michael@0 | 55 | void *net; |
michael@0 | 56 | #if defined(__FreeBSD__) && __FreeBSD_version >= 800000 |
michael@0 | 57 | void *vnet; |
michael@0 | 58 | #endif |
michael@0 | 59 | |
michael@0 | 60 | /* for sanity checking */ |
michael@0 | 61 | void *self; |
michael@0 | 62 | uint32_t ticks; |
michael@0 | 63 | uint32_t stopped_from; |
michael@0 | 64 | }; |
michael@0 | 65 | |
michael@0 | 66 | |
/* Debug/bookkeeping record tying an endpoint to a source line and tick count. */
struct sctp_foo_stuff {
	struct sctp_inpcb *inp;
	uint32_t lineno;
	uint32_t ticks;
	int updown;
};
michael@0 | 73 | |
michael@0 | 74 | |
/*
 * This is the information we track on each interface that we know about from
 * the distant end.
 */
TAILQ_HEAD(sctpnetlisthead, sctp_nets);

/*
 * One pending stream-reset request: the TSN it applies to plus a flexible
 * array of the affected stream numbers.
 */
struct sctp_stream_reset_list {
	TAILQ_ENTRY(sctp_stream_reset_list) next_resp;
	uint32_t tsn;
	uint32_t number_entries;
	uint16_t list_of_streams[];	/* number_entries elements follow */
};

TAILQ_HEAD(sctp_resethead, sctp_stream_reset_list);
michael@0 | 89 | |
/*
 * Users of the iterator need to malloc an iterator with a call to
 * sctp_initiate_iterator(inp_func, assoc_func, inp_func, pcb_flags,
 * pcb_features, asoc_state, void-ptr-arg, uint32-arg, end_func, inp);
 *
 * Use the following defines if you don't care what pcb flags are on the EP
 * and/or you don't care what state the association is in.
 *
 * Note that if you specify an INP as the last argument then ONLY each
 * association of that single INP will be executed upon.  The pcb flags
 * STILL apply, so if the inp you specify has different pcb_flags than what
 * you put in pcb_flags nothing will happen.  Use SCTP_PCB_ANY_FLAGS to make
 * sure the inp you specify gets treated.
 */
#define SCTP_PCB_ANY_FLAGS	0x00000000
#define SCTP_PCB_ANY_FEATURES	0x00000000
#define SCTP_ASOC_ANY_STATE	0x00000000

/* Per-association, per-endpoint and completion callbacks for the iterator. */
typedef void (*asoc_func) (struct sctp_inpcb *, struct sctp_tcb *, void *ptr,
			   uint32_t val);
typedef int (*inp_func) (struct sctp_inpcb *, void *ptr, uint32_t val);
typedef void (*end_func) (void *ptr, uint32_t val);
michael@0 | 112 | |
#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
/* What's on the mcore control struct: one queued inbound packet. */
struct sctp_mcore_queue {
	TAILQ_ENTRY(sctp_mcore_queue) next;
#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
	struct vnet *vn;
#endif
	struct mbuf *m;
	int off;
	int v6;
};

TAILQ_HEAD(sctp_mcore_qhead, sctp_mcore_queue);

/* Per-CPU worker state for multi-core SCTP input processing. */
struct sctp_mcore_ctrl {
	SCTP_PROCESS_STRUCT thread_proc;
	struct sctp_mcore_qhead que;
	struct mtx core_mtx;
	struct mtx que_mtx;
	int running;
	int cpuid;
};

#endif
michael@0 | 138 | |
michael@0 | 139 | |
michael@0 | 140 | struct sctp_iterator { |
michael@0 | 141 | TAILQ_ENTRY(sctp_iterator) sctp_nxt_itr; |
michael@0 | 142 | #if defined(__FreeBSD__) && __FreeBSD_version >= 801000 |
michael@0 | 143 | struct vnet *vn; |
michael@0 | 144 | #endif |
michael@0 | 145 | struct sctp_timer tmr; |
michael@0 | 146 | struct sctp_inpcb *inp; /* current endpoint */ |
michael@0 | 147 | struct sctp_tcb *stcb; /* current* assoc */ |
michael@0 | 148 | struct sctp_inpcb *next_inp; /* special hook to skip to */ |
michael@0 | 149 | asoc_func function_assoc; /* per assoc function */ |
michael@0 | 150 | inp_func function_inp; /* per endpoint function */ |
michael@0 | 151 | inp_func function_inp_end; /* end INP function */ |
michael@0 | 152 | end_func function_atend; /* iterator completion function */ |
michael@0 | 153 | void *pointer; /* pointer for apply func to use */ |
michael@0 | 154 | uint32_t val; /* value for apply func to use */ |
michael@0 | 155 | uint32_t pcb_flags; /* endpoint flags being checked */ |
michael@0 | 156 | uint32_t pcb_features; /* endpoint features being checked */ |
michael@0 | 157 | uint32_t asoc_state; /* assoc state being checked */ |
michael@0 | 158 | uint32_t iterator_flags; |
michael@0 | 159 | uint8_t no_chunk_output; |
michael@0 | 160 | uint8_t done_current_ep; |
michael@0 | 161 | }; |
/* iterator_flags values */
#define SCTP_ITERATOR_DO_ALL_INP	0x00000001
#define SCTP_ITERATOR_DO_SINGLE_INP	0x00000002

TAILQ_HEAD(sctpiterators, sctp_iterator);
michael@0 | 168 | |
michael@0 | 169 | struct sctp_copy_all { |
michael@0 | 170 | struct sctp_inpcb *inp; /* ep */ |
michael@0 | 171 | struct mbuf *m; |
michael@0 | 172 | struct sctp_sndrcvinfo sndrcv; |
michael@0 | 173 | int sndlen; |
michael@0 | 174 | int cnt_sent; |
michael@0 | 175 | int cnt_failed; |
michael@0 | 176 | }; |
michael@0 | 177 | |
michael@0 | 178 | struct sctp_asconf_iterator { |
michael@0 | 179 | struct sctpladdr list_of_work; |
michael@0 | 180 | int cnt; |
michael@0 | 181 | }; |
michael@0 | 182 | |
michael@0 | 183 | struct iterator_control { |
michael@0 | 184 | #if defined(__FreeBSD__) |
michael@0 | 185 | struct mtx ipi_iterator_wq_mtx; |
michael@0 | 186 | struct mtx it_mtx; |
michael@0 | 187 | #elif defined(__APPLE__) |
michael@0 | 188 | lck_mtx_t *ipi_iterator_wq_mtx; |
michael@0 | 189 | lck_mtx_t *it_mtx; |
michael@0 | 190 | #elif defined(SCTP_PROCESS_LEVEL_LOCKS) |
michael@0 | 191 | #if defined(__Userspace__) |
michael@0 | 192 | userland_mutex_t ipi_iterator_wq_mtx; |
michael@0 | 193 | userland_mutex_t it_mtx; |
michael@0 | 194 | userland_cond_t iterator_wakeup; |
michael@0 | 195 | #else |
michael@0 | 196 | pthread_mutex_t ipi_iterator_wq_mtx; |
michael@0 | 197 | pthread_mutex_t it_mtx; |
michael@0 | 198 | pthread_cond_t iterator_wakeup; |
michael@0 | 199 | #endif |
michael@0 | 200 | #elif defined(__Windows__) |
michael@0 | 201 | struct spinlock it_lock; |
michael@0 | 202 | struct spinlock ipi_iterator_wq_lock; |
michael@0 | 203 | KEVENT iterator_wakeup[2]; |
michael@0 | 204 | PFILE_OBJECT iterator_thread_obj; |
michael@0 | 205 | #else |
michael@0 | 206 | void *it_mtx; |
michael@0 | 207 | #endif |
michael@0 | 208 | #if !defined(__Windows__) |
michael@0 | 209 | #if !defined(__Userspace__) |
michael@0 | 210 | SCTP_PROCESS_STRUCT thread_proc; |
michael@0 | 211 | #else |
michael@0 | 212 | userland_thread_t thread_proc; |
michael@0 | 213 | #endif |
michael@0 | 214 | #endif |
michael@0 | 215 | struct sctpiterators iteratorhead; |
michael@0 | 216 | struct sctp_iterator *cur_it; |
michael@0 | 217 | uint32_t iterator_running; |
michael@0 | 218 | uint32_t iterator_flags; |
michael@0 | 219 | }; |
/* iterator_control.iterator_flags values */
#if !defined(__FreeBSD__)
#define SCTP_ITERATOR_MUST_EXIT		0x00000001
#define SCTP_ITERATOR_EXITED		0x00000002
#endif
#define SCTP_ITERATOR_STOP_CUR_IT	0x00000004
#define SCTP_ITERATOR_STOP_CUR_INP	0x00000008
michael@0 | 226 | |
michael@0 | 227 | struct sctp_net_route { |
michael@0 | 228 | sctp_rtentry_t *ro_rt; |
michael@0 | 229 | #if defined(__FreeBSD__) |
michael@0 | 230 | #if __FreeBSD_version >= 800000 |
michael@0 | 231 | void *ro_lle; |
michael@0 | 232 | #endif |
michael@0 | 233 | #if __FreeBSD_version >= 900000 |
michael@0 | 234 | void *ro_ia; |
michael@0 | 235 | int ro_flags; |
michael@0 | 236 | #endif |
michael@0 | 237 | #endif |
michael@0 | 238 | #if defined(__APPLE__) |
michael@0 | 239 | #if !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION) |
michael@0 | 240 | struct ifaddr *ro_srcia; |
michael@0 | 241 | #endif |
michael@0 | 242 | #if !defined(APPLE_LEOPARD) |
michael@0 | 243 | uint32_t ro_flags; |
michael@0 | 244 | #endif |
michael@0 | 245 | #endif |
michael@0 | 246 | union sctp_sockstore _l_addr; /* remote peer addr */ |
michael@0 | 247 | struct sctp_ifa *_s_addr; /* our selected src addr */ |
michael@0 | 248 | }; |
michael@0 | 249 | |
/* Per-destination state for the H-TCP congestion-control module. */
struct htcp {
	uint16_t alpha;		/* Fixed point arith, << 7 */
	uint8_t beta;		/* Fixed point arith, << 7 */
	uint8_t modeswitch;	/* Delay modeswitch until we had at least one
				 * congestion event */
	uint32_t last_cong;	/* Time since last congestion event end */
	uint32_t undo_last_cong;
	uint16_t bytes_acked;
	uint32_t bytecount;
	uint32_t minRTT;
	uint32_t maxRTT;

	uint32_t undo_maxRTT;
	uint32_t undo_old_maxB;

	/* Bandwidth estimation */
	uint32_t minB;
	uint32_t maxB;
	uint32_t old_maxB;
	uint32_t Bi;
	uint32_t lasttime;
};
michael@0 | 271 | |
/* Per-destination state for the RTCC (rate/bandwidth tracking) cc module. */
struct rtcc_cc {
	struct timeval tls;		/* The time we started the sending */
	uint64_t lbw;			/* Our last estimated bw */
	uint64_t lbw_rtt;		/* RTT at bw estimate */
	uint64_t bw_bytes;		/* The total bytes since this sending began */
	uint64_t bw_tot_time;		/* The total time since sending began */
	uint64_t new_tot_time;		/* temp holding the new value */
	uint64_t bw_bytes_at_last_rttc;	/* What bw_bytes was at last rtt calc */
	uint32_t cwnd_at_bw_set;	/* Cwnd at last bw saved - lbw */
	uint32_t vol_reduce;		/* cnt of voluntary reductions */
	uint16_t steady_step;		/* The number required to be in steady state */
	uint16_t step_cnt;		/* The current number */
	uint8_t ret_from_eq;		/* When all things are equal what do I return
					 * 0/1 - 1 no cc advance */
	uint8_t use_dccc_ecn;		/* Flag to enable DCCC ECN */
	uint8_t tls_needs_set;		/* Flag to indicate we need to set tls;
					 * 0 or 1 means set at send, 2 not */
	uint8_t last_step_state;	/* Last state if steady state stepdown is on */
	uint8_t rtt_set_this_sack;	/* Flag saying this sack had RTT calc on it */
	uint8_t last_inst_ind;		/* Last saved inst indication */
};
michael@0 | 291 | |
michael@0 | 292 | |
michael@0 | 293 | struct sctp_nets { |
michael@0 | 294 | TAILQ_ENTRY(sctp_nets) sctp_next; /* next link */ |
michael@0 | 295 | |
michael@0 | 296 | /* |
michael@0 | 297 | * Things on the top half may be able to be split into a common |
michael@0 | 298 | * structure shared by all. |
michael@0 | 299 | */ |
michael@0 | 300 | struct sctp_timer pmtu_timer; |
michael@0 | 301 | struct sctp_timer hb_timer; |
michael@0 | 302 | |
michael@0 | 303 | /* |
michael@0 | 304 | * The following two in combination equate to a route entry for v6 |
michael@0 | 305 | * or v4. |
michael@0 | 306 | */ |
michael@0 | 307 | struct sctp_net_route ro; |
michael@0 | 308 | |
michael@0 | 309 | /* mtu discovered so far */ |
michael@0 | 310 | uint32_t mtu; |
michael@0 | 311 | uint32_t ssthresh; /* not sure about this one for split */ |
michael@0 | 312 | uint32_t last_cwr_tsn; |
michael@0 | 313 | uint32_t cwr_window_tsn; |
michael@0 | 314 | uint32_t ecn_ce_pkt_cnt; |
michael@0 | 315 | uint32_t lost_cnt; |
michael@0 | 316 | /* smoothed average things for RTT and RTO itself */ |
michael@0 | 317 | int lastsa; |
michael@0 | 318 | int lastsv; |
michael@0 | 319 | uint64_t rtt; /* last measured rtt value in us */ |
michael@0 | 320 | unsigned int RTO; |
michael@0 | 321 | |
michael@0 | 322 | /* This is used for SHUTDOWN/SHUTDOWN-ACK/SEND or INIT timers */ |
michael@0 | 323 | struct sctp_timer rxt_timer; |
michael@0 | 324 | |
michael@0 | 325 | /* last time in seconds I sent to it */ |
michael@0 | 326 | struct timeval last_sent_time; |
michael@0 | 327 | union cc_control_data { |
michael@0 | 328 | struct htcp htcp_ca; /* JRS - struct used in HTCP algorithm */ |
michael@0 | 329 | struct rtcc_cc rtcc; /* rtcc module cc stuff */ |
michael@0 | 330 | } cc_mod; |
michael@0 | 331 | int ref_count; |
michael@0 | 332 | |
michael@0 | 333 | /* Congestion stats per destination */ |
michael@0 | 334 | /* |
michael@0 | 335 | * flight size variables and such, sorry Vern, I could not avoid |
michael@0 | 336 | * this if I wanted performance :> |
michael@0 | 337 | */ |
michael@0 | 338 | uint32_t flight_size; |
michael@0 | 339 | uint32_t cwnd; /* actual cwnd */ |
michael@0 | 340 | uint32_t prev_cwnd; /* cwnd before any processing */ |
michael@0 | 341 | uint32_t ecn_prev_cwnd; /* ECN prev cwnd at first ecn_echo seen in new window */ |
michael@0 | 342 | uint32_t partial_bytes_acked; /* in CA tracks when to incr a MTU */ |
michael@0 | 343 | /* tracking variables to avoid the aloc/free in sack processing */ |
michael@0 | 344 | unsigned int net_ack; |
michael@0 | 345 | unsigned int net_ack2; |
michael@0 | 346 | |
michael@0 | 347 | /* |
michael@0 | 348 | * JRS - 5/8/07 - Variable to track last time |
michael@0 | 349 | * a destination was active for CMT PF |
michael@0 | 350 | */ |
michael@0 | 351 | uint32_t last_active; |
michael@0 | 352 | |
michael@0 | 353 | /* |
michael@0 | 354 | * CMT variables (iyengar@cis.udel.edu) |
michael@0 | 355 | */ |
michael@0 | 356 | uint32_t this_sack_highest_newack; /* tracks highest TSN newly |
michael@0 | 357 | * acked for a given dest in |
michael@0 | 358 | * the current SACK. Used in |
michael@0 | 359 | * SFR and HTNA algos */ |
michael@0 | 360 | uint32_t pseudo_cumack; /* CMT CUC algorithm. Maintains next expected |
michael@0 | 361 | * pseudo-cumack for this destination */ |
michael@0 | 362 | uint32_t rtx_pseudo_cumack; /* CMT CUC algorithm. Maintains next |
michael@0 | 363 | * expected pseudo-cumack for this |
michael@0 | 364 | * destination */ |
michael@0 | 365 | |
michael@0 | 366 | /* CMT fast recovery variables */ |
michael@0 | 367 | uint32_t fast_recovery_tsn; |
michael@0 | 368 | uint32_t heartbeat_random1; |
michael@0 | 369 | uint32_t heartbeat_random2; |
michael@0 | 370 | #ifdef INET6 |
michael@0 | 371 | uint32_t flowlabel; |
michael@0 | 372 | #endif |
michael@0 | 373 | uint8_t dscp; |
michael@0 | 374 | |
michael@0 | 375 | struct timeval start_time; /* time when this net was created */ |
michael@0 | 376 | uint32_t marked_retrans; /* number or DATA chunks marked for |
michael@0 | 377 | timer based retransmissions */ |
michael@0 | 378 | uint32_t marked_fastretrans; |
michael@0 | 379 | uint32_t heart_beat_delay; /* Heart Beat delay in ms */ |
michael@0 | 380 | |
michael@0 | 381 | /* if this guy is ok or not ... status */ |
michael@0 | 382 | uint16_t dest_state; |
michael@0 | 383 | /* number of timeouts to consider the destination unreachable */ |
michael@0 | 384 | uint16_t failure_threshold; |
michael@0 | 385 | /* number of timeouts to consider the destination potentially failed */ |
michael@0 | 386 | uint16_t pf_threshold; |
michael@0 | 387 | /* error stats on the destination */ |
michael@0 | 388 | uint16_t error_count; |
michael@0 | 389 | /* UDP port number in case of UDP tunneling */ |
michael@0 | 390 | uint16_t port; |
michael@0 | 391 | |
michael@0 | 392 | uint8_t fast_retran_loss_recovery; |
michael@0 | 393 | uint8_t will_exit_fast_recovery; |
michael@0 | 394 | /* Flags that probably can be combined into dest_state */ |
michael@0 | 395 | uint8_t fast_retran_ip; /* fast retransmit in progress */ |
michael@0 | 396 | uint8_t hb_responded; |
michael@0 | 397 | uint8_t saw_newack; /* CMT's SFR algorithm flag */ |
michael@0 | 398 | uint8_t src_addr_selected; /* if we split we move */ |
michael@0 | 399 | uint8_t indx_of_eligible_next_to_use; |
michael@0 | 400 | uint8_t addr_is_local; /* its a local address (if known) could move |
michael@0 | 401 | * in split */ |
michael@0 | 402 | |
michael@0 | 403 | /* |
michael@0 | 404 | * CMT variables (iyengar@cis.udel.edu) |
michael@0 | 405 | */ |
michael@0 | 406 | uint8_t find_pseudo_cumack; /* CMT CUC algorithm. Flag used to |
michael@0 | 407 | * find a new pseudocumack. This flag |
michael@0 | 408 | * is set after a new pseudo-cumack |
michael@0 | 409 | * has been received and indicates |
michael@0 | 410 | * that the sender should find the |
michael@0 | 411 | * next pseudo-cumack expected for |
michael@0 | 412 | * this destination */ |
michael@0 | 413 | uint8_t find_rtx_pseudo_cumack; /* CMT CUCv2 algorithm. Flag used to |
michael@0 | 414 | * find a new rtx-pseudocumack. This |
michael@0 | 415 | * flag is set after a new |
michael@0 | 416 | * rtx-pseudo-cumack has been received |
michael@0 | 417 | * and indicates that the sender |
michael@0 | 418 | * should find the next |
michael@0 | 419 | * rtx-pseudo-cumack expected for this |
michael@0 | 420 | * destination */ |
michael@0 | 421 | uint8_t new_pseudo_cumack; /* CMT CUC algorithm. Flag used to |
michael@0 | 422 | * indicate if a new pseudo-cumack or |
michael@0 | 423 | * rtx-pseudo-cumack has been received */ |
michael@0 | 424 | uint8_t window_probe; /* Doing a window probe? */ |
michael@0 | 425 | uint8_t RTO_measured; /* Have we done the first measure */ |
michael@0 | 426 | uint8_t last_hs_used; /* index into the last HS table entry we used */ |
michael@0 | 427 | uint8_t lan_type; |
michael@0 | 428 | uint8_t rto_needed; |
michael@0 | 429 | #if defined(__FreeBSD__) |
michael@0 | 430 | uint32_t flowid; |
michael@0 | 431 | #ifdef INVARIANTS |
michael@0 | 432 | uint8_t flowidset; |
michael@0 | 433 | #endif |
michael@0 | 434 | #endif |
michael@0 | 435 | }; |
michael@0 | 436 | |
michael@0 | 437 | |
/* Per-DATA-chunk transmit record: TSN, stream info and retransmit state. */
struct sctp_data_chunkrec {
	uint32_t TSN_seq;	/* the TSN of this transmit */
	uint16_t stream_seq;	/* the stream sequence number of this transmit */
	uint16_t stream_number;	/* the stream number of this guy */
	uint32_t payloadtype;
	uint32_t context;	/* from send */
	uint32_t cwnd_at_send;
	/*
	 * part of the Highest sacked algorithm to be able to stroke counts
	 * on ones that are FR'd.
	 */
	uint32_t fast_retran_tsn;	/* sending_seq at the time of FR */
	struct timeval timetodrop;	/* time we drop it from queue */
	uint8_t doing_fast_retransmit;
	uint8_t rcv_flags;	/* flags pulled from data chunk on inbound;
				 * for outbound holds sending flags for
				 * PR-SCTP. */
	uint8_t state_flags;
	uint8_t chunk_was_revoked;
	uint8_t fwd_tsn_cnt;
};
michael@0 | 459 | |
TAILQ_HEAD(sctpchunk_listhead, sctp_tmit_chunk);

/* The lower byte is used to enumerate PR_SCTP policies */
#define CHUNK_FLAGS_PR_SCTP_TTL	SCTP_PR_SCTP_TTL
#define CHUNK_FLAGS_PR_SCTP_BUF	SCTP_PR_SCTP_BUF
#define CHUNK_FLAGS_PR_SCTP_RTX	SCTP_PR_SCTP_RTX

/* The upper byte is used as a bit mask */
#define CHUNK_FLAGS_FRAGMENT_OK	0x0100
michael@0 | 469 | |
/* Identifies a control chunk type and whether it may carry bundled data. */
struct chk_id {
	uint16_t id;
	uint16_t can_take_data;
};
michael@0 | 474 | |
michael@0 | 475 | |
michael@0 | 476 | struct sctp_tmit_chunk { |
michael@0 | 477 | union { |
michael@0 | 478 | struct sctp_data_chunkrec data; |
michael@0 | 479 | struct chk_id chunk_id; |
michael@0 | 480 | } rec; |
michael@0 | 481 | struct sctp_association *asoc; /* bp to asoc this belongs to */ |
michael@0 | 482 | struct timeval sent_rcv_time; /* filled in if RTT being calculated */ |
michael@0 | 483 | struct mbuf *data; /* pointer to mbuf chain of data */ |
michael@0 | 484 | struct mbuf *last_mbuf; /* pointer to last mbuf in chain */ |
michael@0 | 485 | struct sctp_nets *whoTo; |
michael@0 | 486 | TAILQ_ENTRY(sctp_tmit_chunk) sctp_next; /* next link */ |
michael@0 | 487 | int32_t sent; /* the send status */ |
michael@0 | 488 | uint16_t snd_count; /* number of times I sent */ |
michael@0 | 489 | uint16_t flags; /* flags, such as FRAGMENT_OK */ |
michael@0 | 490 | uint16_t send_size; |
michael@0 | 491 | uint16_t book_size; |
michael@0 | 492 | uint16_t mbcnt; |
michael@0 | 493 | uint16_t auth_keyid; |
michael@0 | 494 | uint8_t holds_key_ref; /* flag if auth keyid refcount is held */ |
michael@0 | 495 | uint8_t pad_inplace; |
michael@0 | 496 | uint8_t do_rtt; |
michael@0 | 497 | uint8_t book_size_scale; |
michael@0 | 498 | uint8_t no_fr_allowed; |
michael@0 | 499 | uint8_t copy_by_ref; |
michael@0 | 500 | uint8_t window_probe; |
michael@0 | 501 | }; |
michael@0 | 502 | |
michael@0 | 503 | /* |
michael@0 | 504 | * The first part of this structure MUST be the entire sinfo structure. Maybe |
michael@0 | 505 | * I should have made it a sub structure... we can circle back later and do |
michael@0 | 506 | * that if we want. |
michael@0 | 507 | */ |
michael@0 | 508 | struct sctp_queued_to_read { /* sinfo structure Pluse more */ |
michael@0 | 509 | uint16_t sinfo_stream; /* off the wire */ |
michael@0 | 510 | uint16_t sinfo_ssn; /* off the wire */ |
michael@0 | 511 | uint16_t sinfo_flags; /* SCTP_UNORDERED from wire use SCTP_EOF for |
michael@0 | 512 | * EOR */ |
michael@0 | 513 | uint32_t sinfo_ppid; /* off the wire */ |
michael@0 | 514 | uint32_t sinfo_context; /* pick this up from assoc def context? */ |
michael@0 | 515 | uint32_t sinfo_timetolive; /* not used by kernel */ |
michael@0 | 516 | uint32_t sinfo_tsn; /* Use this in reassembly as first TSN */ |
michael@0 | 517 | uint32_t sinfo_cumtsn; /* Use this in reassembly as last TSN */ |
michael@0 | 518 | sctp_assoc_t sinfo_assoc_id; /* our assoc id */ |
michael@0 | 519 | /* Non sinfo stuff */ |
michael@0 | 520 | uint32_t length; /* length of data */ |
michael@0 | 521 | uint32_t held_length; /* length held in sb */ |
michael@0 | 522 | struct sctp_nets *whoFrom; /* where it came from */ |
michael@0 | 523 | struct mbuf *data; /* front of the mbuf chain of data with |
michael@0 | 524 | * PKT_HDR */ |
michael@0 | 525 | struct mbuf *tail_mbuf; /* used for multi-part data */ |
michael@0 | 526 | struct mbuf *aux_data; /* used to hold/cache control if o/s does not take it from us */ |
michael@0 | 527 | struct sctp_tcb *stcb; /* assoc, used for window update */ |
michael@0 | 528 | TAILQ_ENTRY(sctp_queued_to_read) next; |
michael@0 | 529 | uint16_t port_from; |
michael@0 | 530 | uint16_t spec_flags; /* Flags to hold the notification field */ |
michael@0 | 531 | uint8_t do_not_ref_stcb; |
michael@0 | 532 | uint8_t end_added; |
michael@0 | 533 | uint8_t pdapi_aborted; |
michael@0 | 534 | uint8_t some_taken; |
michael@0 | 535 | }; |
michael@0 | 536 | |
/*
 * This data structure will be on the outbound stream queues.  Data will be
 * pulled off from the front of the mbuf data and chunk-ified by the output
 * routines.  We will custom fit every chunk we pull to the send/sent queue
 * to make up the next full packet if we can.  An entry cannot be removed
 * from the stream_out queue until the msg_is_complete flag is set.  This
 * means at times data/tail_mbuf MIGHT be NULL.  If that occurs it happens
 * for one of two reasons: either the user is blocked on a send() call and
 * has not awoken to copy more data down, OR the user is in the explicit
 * MSG_EOR mode and wrote some data but has not completed sending.
 */
struct sctp_stream_queue_pending {
	struct mbuf *data;
	struct mbuf *tail_mbuf;
	struct timeval ts;
	struct sctp_nets *net;
	TAILQ_ENTRY(sctp_stream_queue_pending) next;
	TAILQ_ENTRY(sctp_stream_queue_pending) ss_next;
	uint32_t length;
	uint32_t timetolive;
	uint32_t ppid;
	uint32_t context;
	uint16_t sinfo_flags;
	uint16_t stream;
	uint16_t act_flags;
	uint16_t auth_keyid;
	uint8_t holds_key_ref;
	uint8_t msg_is_complete;
	uint8_t some_taken;
	uint8_t sender_all_done;
	uint8_t put_last_out;
	uint8_t discard_rest;
};
michael@0 | 577 | |
michael@0 | 578 | /* |
michael@0 | 579 | * this struct contains info that is used to track inbound stream data and |
michael@0 | 580 | * help with ordering. |
michael@0 | 581 | */ |
michael@0 | 582 | TAILQ_HEAD(sctpwheelunrel_listhead, sctp_stream_in); |
michael@0 | 583 | struct sctp_stream_in { |
michael@0 | 584 | struct sctp_readhead inqueue; |
michael@0 | 585 | uint16_t stream_no; |
michael@0 | 586 | uint16_t last_sequence_delivered; /* used for re-order */ |
michael@0 | 587 | uint8_t delivery_started; |
michael@0 | 588 | }; |
michael@0 | 589 | |
TAILQ_HEAD(sctpwheel_listhead, sctp_stream_out);
TAILQ_HEAD(sctplist_listhead, sctp_stream_queue_pending);

/* Round-robin scheduler: per-stream state is just the wheel linkage. */
struct ss_rr {
	/* next link in wheel */
	TAILQ_ENTRY(sctp_stream_out) next_spoke;
};
michael@0 | 598 | |
/* Priority scheduler: wheel linkage plus the stream's priority id. */
struct ss_prio {
	/* next link in wheel */
	TAILQ_ENTRY(sctp_stream_out) next_spoke;
	/* priority id */
	uint16_t priority;
};
michael@0 | 606 | |
/* Fair-bandwidth scheduler: wheel linkage plus a per-stream byte counter. */
struct ss_fb {
	/* next link in wheel */
	TAILQ_ENTRY(sctp_stream_out) next_spoke;
	/* stores message size */
	int32_t rounds;
};
michael@0 | 614 | |
michael@0 | 615 | /* |
michael@0 | 616 | * This union holds all data necessary for |
michael@0 | 617 | * different stream schedulers. |
michael@0 | 618 | */ |
michael@0 | 619 | union scheduling_data { |
michael@0 | 620 | struct sctpwheel_listhead out_wheel; |
michael@0 | 621 | struct sctplist_listhead out_list; |
michael@0 | 622 | }; |
michael@0 | 623 | |
michael@0 | 624 | /* |
michael@0 | 625 | * This union holds all parameters per stream |
michael@0 | 626 | * necessary for different stream schedulers. |
michael@0 | 627 | */ |
michael@0 | 628 | union scheduling_parameters { |
michael@0 | 629 | struct ss_rr rr; |
michael@0 | 630 | struct ss_prio prio; |
michael@0 | 631 | struct ss_fb fb; |
michael@0 | 632 | }; |
michael@0 | 633 | |
/* This struct is used to track the traffic on outbound streams */
struct sctp_stream_out {
	/* queue of messages pending on this stream */
	struct sctp_streamhead outqueue;
	/* per-stream state for the active stream scheduler */
	union scheduling_parameters ss_params;
	/* NOTE(review): presumably the count of this stream's chunks still
	 * sitting on the send/sent queues — confirm against the output path. */
	uint32_t chunks_on_queues;
	uint16_t stream_no;
	uint16_t next_sequence_send; /* next one I expect to send out */
	uint8_t last_msg_incomplete;
};
michael@0 | 643 | |
/* used to keep track of the addresses yet to try to add/delete */
TAILQ_HEAD(sctp_asconf_addrhead, sctp_asconf_addr);
struct sctp_asconf_addr {
	TAILQ_ENTRY(sctp_asconf_addr) next;
	/* the ASCONF add/delete-IP parameter to be sent for this address */
	struct sctp_asconf_addr_param ap;
	struct sctp_ifa *ifa;	/* save the ifa for add/del ip */
	uint8_t sent;		/* has this been sent yet? */
	uint8_t special_del;	/* not to be used in lookup */
};
michael@0 | 653 | |
/*
 * Address scoping flags (which address families/scopes are legal for
 * this association). Each field is a boolean flag.
 */
struct sctp_scoping {
	uint8_t ipv4_addr_legal;
	uint8_t ipv6_addr_legal;
#if defined(__Userspace__)
	/* userspace-only AF_CONN addresses */
	uint8_t conn_addr_legal;
#endif
	uint8_t loopback_scope;
	uint8_t ipv4_local_scope;
	uint8_t local_scope;
	uint8_t site_scope;
};
michael@0 | 665 | |
#define SCTP_TSN_LOG_SIZE 40

/*
 * One entry of the in/out TSN debug log kept per association when
 * SCTP_ASOCLOG_OF_TSNS is defined (see in_tsnlog/out_tsnlog below).
 */
struct sctp_tsn_log {
	void *stcb;		/* association the TSN belongs to */
	uint32_t tsn;
	uint16_t strm;		/* stream number */
	uint16_t seq;		/* stream sequence number */
	uint16_t sz;		/* chunk size */
	uint16_t flgs;		/* chunk flags */
	uint16_t in_pos;
	uint16_t in_out;
};
michael@0 | 678 | |
#define SCTP_FS_SPEC_LOG_SIZE 200
/*
 * One entry of the flight-size debug log kept per association when
 * SCTP_FS_SPEC_LOG is defined (see fslog below).
 */
struct sctp_fs_spec_log {
	uint32_t sent;
	uint32_t total_flight;
	uint32_t tsn;
	uint16_t book;		/* booked size */
	uint8_t incr;		/* flight increased */
	uint8_t decr;		/* flight decreased */
};
michael@0 | 688 | |
/* This struct is here to cut out the compatibility
 * pad that bulks up both the inp and stcb. The non
 * pad portion MUST stay in complete sync with
 * sctp_sndrcvinfo... i.e. if sinfo_xxxx is added
 * this must be done here too.
 */
struct sctp_nonpad_sndrcvinfo {
	uint16_t sinfo_stream;
	uint16_t sinfo_ssn;
	uint16_t sinfo_flags;
	uint32_t sinfo_ppid;
	uint32_t sinfo_context;
	uint32_t sinfo_timetolive;
	uint32_t sinfo_tsn;
	uint32_t sinfo_cumtsn;
	sctp_assoc_t sinfo_assoc_id;
	/* fields below are not part of the public sctp_sndrcvinfo prefix */
	uint16_t sinfo_keynumber;
	uint16_t sinfo_keynumber_valid;
};
michael@0 | 708 | |
/*
 * JRS - Structure to hold function pointers to the functions responsible
 * for congestion control. This is the pluggable-CC dispatch table; the
 * active module is recorded in sctp_association.congestion_control_module.
 */

struct sctp_cc_functions {
	void (*sctp_set_initial_cc_param)(struct sctp_tcb *stcb, struct sctp_nets *net);
	void (*sctp_cwnd_update_after_sack)(struct sctp_tcb *stcb,
		struct sctp_association *asoc,
		int accum_moved ,int reneged_all, int will_exit);
	void (*sctp_cwnd_update_exit_pf)(struct sctp_tcb *stcb, struct sctp_nets *net);
	void (*sctp_cwnd_update_after_fr)(struct sctp_tcb *stcb,
		struct sctp_association *asoc);
	void (*sctp_cwnd_update_after_timeout)(struct sctp_tcb *stcb,
		struct sctp_nets *net);
	void (*sctp_cwnd_update_after_ecn_echo)(struct sctp_tcb *stcb,
		struct sctp_nets *net, int in_window, int num_pkt_lost);
	void (*sctp_cwnd_update_after_packet_dropped)(struct sctp_tcb *stcb,
		struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
		uint32_t *bottle_bw, uint32_t *on_queue);
	void (*sctp_cwnd_update_after_output)(struct sctp_tcb *stcb,
		struct sctp_nets *net, int burst_limit);
	void (*sctp_cwnd_update_packet_transmitted)(struct sctp_tcb *stcb,
		struct sctp_nets *net);
	void (*sctp_cwnd_update_tsn_acknowledged)(struct sctp_nets *net,
		struct sctp_tmit_chunk *);
	void (*sctp_cwnd_new_transmission_begins)(struct sctp_tcb *stcb,
		struct sctp_nets *net);
	void (*sctp_cwnd_prepare_net_for_sack)(struct sctp_tcb *stcb,
		struct sctp_nets *net);
	int (*sctp_cwnd_socket_option)(struct sctp_tcb *stcb, int set, struct sctp_cc_option *);
	void (*sctp_rtt_calculated)(struct sctp_tcb *, struct sctp_nets *, struct timeval *);
};
michael@0 | 742 | |
/*
 * RS - Structure to hold function pointers to the functions responsible
 * for stream scheduling. This is the pluggable-scheduler dispatch table;
 * the active module is recorded in sctp_association.stream_scheduling_module.
 */
struct sctp_ss_functions {
	void (*sctp_ss_init)(struct sctp_tcb *stcb, struct sctp_association *asoc,
		int holds_lock);
	void (*sctp_ss_clear)(struct sctp_tcb *stcb, struct sctp_association *asoc,
		int clear_values, int holds_lock);
	void (*sctp_ss_init_stream)(struct sctp_stream_out *strq, struct sctp_stream_out *with_strq);
	void (*sctp_ss_add_to_stream)(struct sctp_tcb *stcb, struct sctp_association *asoc,
		struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp, int holds_lock);
	int (*sctp_ss_is_empty)(struct sctp_tcb *stcb, struct sctp_association *asoc);
	void (*sctp_ss_remove_from_stream)(struct sctp_tcb *stcb, struct sctp_association *asoc,
		struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp, int holds_lock);
	/* pick the next stream to send from; may return NULL when none */
	struct sctp_stream_out* (*sctp_ss_select_stream)(struct sctp_tcb *stcb,
		struct sctp_nets *net, struct sctp_association *asoc);
	void (*sctp_ss_scheduled)(struct sctp_tcb *stcb, struct sctp_nets *net,
		struct sctp_association *asoc, struct sctp_stream_out *strq, int moved_how_much);
	void (*sctp_ss_packet_done)(struct sctp_tcb *stcb, struct sctp_nets *net,
		struct sctp_association *asoc);
	/* get/set a per-stream scheduler value (e.g. priority); return status */
	int (*sctp_ss_get_value)(struct sctp_tcb *stcb, struct sctp_association *asoc,
		struct sctp_stream_out *strq, uint16_t *value);
	int (*sctp_ss_set_value)(struct sctp_tcb *stcb, struct sctp_association *asoc,
		struct sctp_stream_out *strq, uint16_t value);
};
michael@0 | 769 | |
/* used to save ASCONF chunks for retransmission */
TAILQ_HEAD(sctp_asconf_head, sctp_asconf);
struct sctp_asconf {
	TAILQ_ENTRY(sctp_asconf) next;
	uint32_t serial_number;	/* ASCONF sequence number of this chunk */
	uint16_t snd_count;	/* number of times this chunk was sent */
	struct mbuf *data;	/* the saved chunk itself */
	uint16_t len;
};
michael@0 | 779 | |
/* used to save ASCONF-ACK chunks for retransmission */
TAILQ_HEAD(sctp_asconf_ackhead, sctp_asconf_ack);
struct sctp_asconf_ack {
	TAILQ_ENTRY(sctp_asconf_ack) next;
	uint32_t serial_number;	/* serial of the ASCONF this ACKs */
	struct sctp_nets *last_sent_to;	/* destination it was last sent to */
	struct mbuf *data;	/* the saved chunk itself */
	uint16_t len;
};
michael@0 | 789 | |
/*
 * Here we have information about each individual association that we track.
 * We probably in production would be more dynamic. But for ease of
 * implementation we will have a fixed array that we hunt for in a linear
 * fashion.
 */
struct sctp_association {
	/* association state */
	int state;

	/* queue of pending addrs to add/delete */
	struct sctp_asconf_addrhead asconf_queue;

	struct timeval time_entered;	/* time we entered state */
	struct timeval time_last_rcvd;
	struct timeval time_last_sent;
	struct timeval time_last_sat_advance;
	/* default send parameters (non-padded copy of sctp_sndrcvinfo) */
	struct sctp_nonpad_sndrcvinfo def_send;

	/* timers and such */
	struct sctp_timer dack_timer;	/* Delayed ack timer */
	struct sctp_timer asconf_timer;	/* asconf */
	struct sctp_timer strreset_timer;	/* stream reset */
	struct sctp_timer shut_guard_timer;	/* shutdown guard */
	struct sctp_timer autoclose_timer;	/* automatic close timer */
	struct sctp_timer delayed_event_timer;	/* timer for delayed events */
	struct sctp_timer delete_prim_timer;	/* deleting primary dst */

	/* list of restricted local addresses */
	struct sctpladdr sctp_restricted_addrs;

	/* last local address pending deletion (waiting for an address add) */
	struct sctp_ifa *asconf_addr_del_pending;
	/* Deleted primary destination (used to stop timer) */
	struct sctp_nets *deleted_primary;

	struct sctpnetlisthead nets;	/* remote address list */

	/* Free chunk list */
	struct sctpchunk_listhead free_chunks;

	/* Control chunk queue */
	struct sctpchunk_listhead control_send_queue;

	/* ASCONF chunk queue */
	struct sctpchunk_listhead asconf_send_queue;

	/*
	 * Once a TSN hits the wire it is moved to the sent_queue. We
	 * maintain two counts here (don't know if any but retran_cnt is
	 * needed). The idea is that the sent_queue_retran_cnt reflects how
	 * many chunks have been marked for retransmission by either T3-rxt
	 * or FR.
	 */
	struct sctpchunk_listhead sent_queue;
	struct sctpchunk_listhead send_queue;

	/* re-assembly queue for fragmented chunks on the inbound path */
	struct sctpchunk_listhead reasmqueue;

	/* Scheduling queues */
	union scheduling_data ss_data;

	/* This pointer will be set to NULL
	 * most of the time. But when we have
	 * a fragmented message, where we could
	 * not get out all of the message at
	 * the last send then this will point
	 * to the stream to go get data from.
	 */
	struct sctp_stream_out *locked_on_sending;

	/* If an iterator is looking at me, this is it */
	struct sctp_iterator *stcb_starting_point_for_iterator;

	/* ASCONF save the last ASCONF-ACK so we can resend it if necessary */
	struct sctp_asconf_ackhead asconf_ack_sent;

	/*
	 * pointer to last stream reset queued to control queue by us with
	 * requests.
	 */
	struct sctp_tmit_chunk *str_reset;
	/*
	 * if Source Address Selection happening, this will rotate through
	 * the link list.
	 */
	struct sctp_laddr *last_used_address;

	/* stream arrays */
	struct sctp_stream_in *strmin;
	struct sctp_stream_out *strmout;
	uint8_t *mapping_array;
	/* primary destination to use */
	struct sctp_nets *primary_destination;
	struct sctp_nets *alternate;	/* If primary is down or PF */
	/* For CMT */
	struct sctp_nets *last_net_cmt_send_started;
	/* last place I got a data chunk from */
	struct sctp_nets *last_data_chunk_from;
	/* last place I got a control from */
	struct sctp_nets *last_control_chunk_from;

	/* circular looking for output selection */
	struct sctp_stream_out *last_out_stream;

	/*
	 * wait to the point the cum-ack passes req->send_reset_at_tsn for
	 * any req on the list.
	 */
	struct sctp_resethead resetHead;

	/* queue of chunks waiting to be sent into the local stack */
	struct sctp_readhead pending_reply_queue;

	/* JRS - the congestion control functions are in this struct */
	struct sctp_cc_functions cc_functions;
	/* JRS - value to store the currently loaded congestion control module */
	uint32_t congestion_control_module;
	/* RS - the stream scheduling functions are in this struct */
	struct sctp_ss_functions ss_functions;
	/* RS - value to store the currently loaded stream scheduling module */
	uint32_t stream_scheduling_module;

	uint32_t vrf_id;

	uint32_t cookie_preserve_req;
	/* ASCONF next seq I am sending out, inits at init-tsn */
	uint32_t asconf_seq_out;
	uint32_t asconf_seq_out_acked;
	/* ASCONF last received ASCONF from peer, starts at peer's TSN-1 */
	uint32_t asconf_seq_in;

	/* next seq I am sending in str reset messages */
	uint32_t str_reset_seq_out;
	/* next seq I am expecting in str reset messages */
	uint32_t str_reset_seq_in;

	/* various verification tag information */
	uint32_t my_vtag;	/* The tag to be used. if assoc is re-initiated
				 * by remote end, and I have unlocked this
				 * will be regenerated to a new random value. */
	uint32_t peer_vtag;	/* The peers last tag */

	uint32_t my_vtag_nonce;
	uint32_t peer_vtag_nonce;

	uint32_t assoc_id;

	/* This is the SCTP fragmentation threshold */
	uint32_t smallest_mtu;

	/*
	 * Special hook for Fast retransmit, allows us to track the highest
	 * TSN that is NEW in this SACK if gap ack blocks are present.
	 */
	uint32_t this_sack_highest_gap;

	/*
	 * The highest consecutive TSN that has been acked by peer on my
	 * sends
	 */
	uint32_t last_acked_seq;

	/* The next TSN that I will use in sending. */
	uint32_t sending_seq;

	/* Original seq number I used ??questionable to keep?? */
	uint32_t init_seq_number;


	/* The Advanced Peer Ack Point, as required by the PR-SCTP */
	/* (A1 in Section 4.2) */
	uint32_t advanced_peer_ack_point;

	/*
	 * The highest consecutive TSN at the bottom of the mapping array
	 * (for his sends).
	 */
	uint32_t cumulative_tsn;
	/*
	 * Used to track the mapping array and its offset bits. This MAY be
	 * lower then cumulative_tsn.
	 */
	uint32_t mapping_array_base_tsn;
	/*
	 * used to track highest TSN we have received and is listed in the
	 * mapping array.
	 */
	uint32_t highest_tsn_inside_map;

	/* EY - new NR variables used for nr_sack based on mapping_array*/
	uint8_t *nr_mapping_array;
	uint32_t highest_tsn_inside_nr_map;

	uint32_t fast_recovery_tsn;
	uint32_t sat_t3_recovery_tsn;
	uint32_t tsn_last_delivered;
	/*
	 * For the pd-api we should re-write this a bit more efficient. We
	 * could have multiple sctp_queued_to_read's that we are building at
	 * once. Now we only do this when we get ready to deliver to the
	 * socket buffer. Note that we depend on the fact that the struct is
	 * "stuck" on the read queue until we finish all the pd-api.
	 */
	struct sctp_queued_to_read *control_pdapi;

	uint32_t tsn_of_pdapi_last_delivered;
	uint32_t pdapi_ppid;
	uint32_t context;
	uint32_t last_reset_action[SCTP_MAX_RESET_PARAMS];
	uint32_t last_sending_seq[SCTP_MAX_RESET_PARAMS];
	uint32_t last_base_tsnsent[SCTP_MAX_RESET_PARAMS];
#ifdef SCTP_ASOCLOG_OF_TSNS
	/*
	 * special log - This adds considerable size
	 * to the asoc, but provides a log that you
	 * can use to detect problems via kgdb.
	 */
	struct sctp_tsn_log in_tsnlog[SCTP_TSN_LOG_SIZE];
	struct sctp_tsn_log out_tsnlog[SCTP_TSN_LOG_SIZE];
	uint32_t cumack_log[SCTP_TSN_LOG_SIZE];
	uint32_t cumack_logsnt[SCTP_TSN_LOG_SIZE];
	uint16_t tsn_in_at;
	uint16_t tsn_out_at;
	uint16_t tsn_in_wrapped;
	uint16_t tsn_out_wrapped;
	uint16_t cumack_log_at;
	uint16_t cumack_log_atsnt;
#endif /* SCTP_ASOCLOG_OF_TSNS */
#ifdef SCTP_FS_SPEC_LOG
	struct sctp_fs_spec_log fslog[SCTP_FS_SPEC_LOG_SIZE];
	uint16_t fs_index;
#endif

	/*
	 * window state information and smallest MTU that I use to bound
	 * segmentation
	 */
	uint32_t peers_rwnd;
	uint32_t my_rwnd;
	uint32_t my_last_reported_rwnd;
	uint32_t sctp_frag_point;

	uint32_t total_output_queue_size;

	uint32_t sb_cc;			/* shadow of sb_cc */
	uint32_t sb_send_resv;		/* amount reserved on a send */
	uint32_t my_rwnd_control_len;	/* shadow of sb_mbcnt used for rwnd control */
#ifdef INET6
	uint32_t default_flowlabel;
#endif
	uint32_t pr_sctp_cnt;
	int ctrl_queue_cnt;	/* could be removed  REM - NO IT CAN'T!! RRS */
	/*
	 * All outbound datagrams queue into this list from the individual
	 * stream queue. Here they get assigned a TSN and then await
	 * sending. The stream seq comes when it is first put in the
	 * individual str queue
	 */
	unsigned int stream_queue_cnt;
	unsigned int send_queue_cnt;
	unsigned int sent_queue_cnt;
	unsigned int sent_queue_cnt_removeable;
	/*
	 * Number on sent queue that are marked for retran until this value
	 * is 0 we only send one packet of retran'ed data.
	 */
	unsigned int sent_queue_retran_cnt;

	unsigned int size_on_reasm_queue;
	unsigned int cnt_on_reasm_queue;
	unsigned int fwd_tsn_cnt;
	/* amount of data (bytes) currently in flight (on all destinations) */
	unsigned int total_flight;
	/* Total book size in flight */
	unsigned int total_flight_count;	/* count of chunks used with
						 * book total */
	/* count of destination nets and list of destination nets */
	unsigned int numnets;

	/* Total error count on this association */
	unsigned int overall_error_count;

	unsigned int cnt_msg_on_sb;

	/* All stream count of chunks for delivery */
	unsigned int size_on_all_streams;
	unsigned int cnt_on_all_streams;

	/* Heart Beat delay in ms */
	uint32_t heart_beat_delay;

	/* autoclose */
	unsigned int sctp_autoclose_ticks;

	/* how many preopen streams we have */
	unsigned int pre_open_streams;

	/* How many streams I support coming into me */
	unsigned int max_inbound_streams;

	/* the cookie life I award for any cookie, in seconds */
	unsigned int cookie_life;
	/* time to delay acks for */
	unsigned int delayed_ack;
	unsigned int old_delayed_ack;
	unsigned int sack_freq;
	unsigned int data_pkts_seen;

	unsigned int numduptsns;
	int dup_tsns[SCTP_MAX_DUP_TSNS];
	unsigned int initial_init_rto_max;	/* initial RTO for INIT's */
	unsigned int initial_rto;	/* initial send RTO */
	unsigned int minrto;		/* per assoc RTO-MIN */
	unsigned int maxrto;		/* per assoc RTO-MAX */

	/* authentication fields */
	sctp_auth_chklist_t *local_auth_chunks;
	sctp_auth_chklist_t *peer_auth_chunks;
	sctp_hmaclist_t *local_hmacs;	/* local HMACs supported */
	sctp_hmaclist_t *peer_hmacs;	/* peer HMACs supported */
	struct sctp_keyhead shared_keys;	/* assoc's shared keys */
	sctp_authinfo_t authinfo;	/* randoms, cached keys */
	/*
	 * refcnt to block freeing when a sender or receiver is off copying
	 * user data in.
	 */
	uint32_t refcnt;
	uint32_t chunks_on_out_queue;	/* total chunks floating around,
					 * locked by send socket buffer */
	uint32_t peers_adaptation;
	uint16_t peer_hmac_id;	/* peer HMAC id to send */

	/*
	 * Being that we have no bag to collect stale cookies, and that we
	 * really would not want to anyway.. we will count them in this
	 * counter. We of course feed them to the pigeons right away (I have
	 * always thought of pigeons as flying rats).
	 */
	uint16_t stale_cookie_count;

	/*
	 * For the partial delivery API, if up, invoked this is what last
	 * TSN I delivered
	 */
	uint16_t str_of_pdapi;
	uint16_t ssn_of_pdapi;

	/* counts of actual built streams. Allocation may be more however */
	/* could re-arrange to optimize space here. */
	uint16_t streamincnt;
	uint16_t streamoutcnt;
	uint16_t strm_realoutsize;
	uint16_t strm_pending_add_size;
	/* my maximum number of retrans of INIT and SEND */
	/* copied from SCTP but should be individually settable */
	uint16_t max_init_times;
	uint16_t max_send_times;

	uint16_t def_net_failure;

	uint16_t def_net_pf_threshold;

	/*
	 * lock flag: 0 is ok to send, 1+ (duals as a retran count) is
	 * awaiting ACK
	 */
	uint16_t mapping_array_size;

	uint16_t last_strm_seq_delivered;
	uint16_t last_strm_no_delivered;

	uint16_t last_revoke_count;
	int16_t num_send_timers_up;

	uint16_t stream_locked_on;
	uint16_t ecn_echo_cnt_onq;

	uint16_t free_chunk_cnt;
	uint8_t stream_locked;
	uint8_t authenticated;	/* packet authenticated ok */
	/*
	 * This flag indicates that a SACK need to be sent.
	 * Initially this is 1 to send the first SACK immediately.
	 */
	uint8_t send_sack;

	/* max burst of new packets into the network */
	uint32_t max_burst;
	/* max burst of fast retransmit packets */
	uint32_t fr_max_burst;

	uint8_t sat_network;	/* RTT is in range of sat net or greater */
	uint8_t sat_network_lockout;	/* lockout code */
	uint8_t burst_limit_applied;	/* Burst limit in effect at last send? */
	/* NOTE(review): upstream places the "partial delivery api" comment
	 * on hb_random_values; it clearly describes
	 * fragmented_delivery_inprogress below. hb_random_* fields are
	 * presumably heartbeat randomization state — confirm in the HB code. */
	uint8_t hb_random_values[4];
	/* flag goes on when we are doing a partial delivery api */
	uint8_t fragmented_delivery_inprogress;
	uint8_t fragment_flags;
	uint8_t last_flags_delivered;
	uint8_t hb_ect_randombit;
	uint8_t hb_random_idx;
	uint8_t default_dscp;
	uint8_t asconf_del_pending;	/* asconf delete last addr pending */

	/*
	 * This value, plus all other ack'd but above cum-ack is added
	 * together to cross check against the bit that we have yet to
	 * define (probably in the SACK). When the cum-ack is updated, this
	 * sum is updated as well.
	 * NOTE(review): no field follows this comment; it appears to be a
	 * stale leftover from an earlier layout — verify before relying on it.
	 */

	/* Flag to tell if ECN is allowed */
	uint8_t ecn_allowed;

	/* Did the peer make the stream config (add out) request */
	uint8_t peer_req_out;

	/* flag to indicate if peer can do asconf */
	uint8_t peer_supports_asconf;
	/* EY - flag to indicate if peer can do nr_sack*/
	uint8_t peer_supports_nr_sack;
	/* pr-sctp support flag */
	uint8_t peer_supports_prsctp;
	/* peer authentication support flag */
	uint8_t peer_supports_auth;
	/* stream resets are supported by the peer */
	uint8_t peer_supports_strreset;
	uint8_t local_strreset_support;

	uint8_t peer_supports_nat;
	/*
	 * packet drop's are supported by the peer, we don't really care
	 * about this but we bookkeep it anyway.
	 */
	uint8_t peer_supports_pktdrop;

	/* address scoping flags for this association */
	struct sctp_scoping scope;
	/* flags to handle send alternate net tracking */
	uint8_t used_alt_onsack;
	uint8_t used_alt_asconfack;
	uint8_t fast_retran_loss_recovery;
	uint8_t sat_t3_loss_recovery;
	uint8_t dropped_special_cnt;
	uint8_t seen_a_sack_this_pkt;
	uint8_t stream_reset_outstanding;
	uint8_t stream_reset_out_is_outstanding;
	uint8_t delayed_connection;
	uint8_t ifp_had_enobuf;
	uint8_t saw_sack_with_frags;
	uint8_t saw_sack_with_nr_frags;
	uint8_t in_asocid_hash;
	uint8_t assoc_up_sent;
	uint8_t adaptation_needed;
	uint8_t adaptation_sent;
	/* CMT variables */
	uint8_t cmt_dac_pkts_rcvd;
	uint8_t sctp_cmt_on_off;
	uint8_t iam_blocking;
	uint8_t cookie_how[8];
	/* EY 05/05/08 - NR_SACK variable*/
	uint8_t sctp_nr_sack_on_off;
	/* JRS 5/21/07 - CMT PF variable */
	uint8_t sctp_cmt_pf;
	uint8_t use_precise_time;
	uint64_t sctp_features;
	uint16_t port;	/* remote UDP encapsulation port */
	/*
	 * The mapping array is used to track out of order sequences above
	 * last_acked_seq. 0 indicates packet missing 1 indicates packet
	 * rec'd. We slide it up every time we raise last_acked_seq and 0
	 * trailing locations out. If I get a TSN above the array
	 * mappingArraySz, I discard the datagram and let retransmit happen.
	 */
	uint32_t marked_retrans;
	/* timeout counters, by timer type */
	uint32_t timoinit;
	uint32_t timodata;
	uint32_t timosack;
	uint32_t timoshutdown;
	uint32_t timoheartbeat;
	uint32_t timocookie;
	uint32_t timoshutdownack;
	struct timeval start_time;
	struct timeval discontinuity_time;
};
michael@0 | 1276 | |
michael@0 | 1277 | #endif |