michael@0: /*- michael@0: * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. michael@0: * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. michael@0: * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. michael@0: * michael@0: * Redistribution and use in source and binary forms, with or without michael@0: * modification, are permitted provided that the following conditions are met: michael@0: * michael@0: * a) Redistributions of source code must retain the above copyright notice, michael@0: * this list of conditions and the following disclaimer. michael@0: * michael@0: * b) Redistributions in binary form must reproduce the above copyright michael@0: * notice, this list of conditions and the following disclaimer in michael@0: * the documentation and/or other materials provided with the distribution. michael@0: * michael@0: * c) Neither the name of Cisco Systems, Inc. nor the names of its michael@0: * contributors may be used to endorse or promote products derived michael@0: * from this software without specific prior written permission. michael@0: * michael@0: * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS michael@0: * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, michael@0: * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE michael@0: * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE michael@0: * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR michael@0: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF michael@0: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS michael@0: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN michael@0: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) michael@0: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF michael@0: * THE POSSIBILITY OF SUCH DAMAGE. michael@0: */ michael@0: michael@0: #ifdef __FreeBSD__ michael@0: #include michael@0: __FBSDID("$FreeBSD: head/sys/netinet/sctp_structs.h 255190 2013-09-03 19:31:59Z tuexen $"); michael@0: #endif michael@0: michael@0: #ifndef _NETINET_SCTP_STRUCTS_H_ michael@0: #define _NETINET_SCTP_STRUCTS_H_ michael@0: michael@0: #include michael@0: #include michael@0: #include michael@0: michael@0: struct sctp_timer { michael@0: sctp_os_timer_t timer; michael@0: michael@0: int type; michael@0: /* michael@0: * Depending on the timer type these will be setup and cast with the michael@0: * appropriate entity. michael@0: */ michael@0: void *ep; michael@0: void *tcb; michael@0: void *net; michael@0: #if defined(__FreeBSD__) && __FreeBSD_version >= 800000 michael@0: void *vnet; michael@0: #endif michael@0: michael@0: /* for sanity checking */ michael@0: void *self; michael@0: uint32_t ticks; michael@0: uint32_t stopped_from; michael@0: }; michael@0: michael@0: michael@0: struct sctp_foo_stuff { michael@0: struct sctp_inpcb *inp; michael@0: uint32_t lineno; michael@0: uint32_t ticks; michael@0: int updown; michael@0: }; michael@0: michael@0: michael@0: /* michael@0: * This is the information we track on each interface that we know about from michael@0: * the distant end. 
michael@0: */ michael@0: TAILQ_HEAD(sctpnetlisthead, sctp_nets); michael@0: michael@0: struct sctp_stream_reset_list { michael@0: TAILQ_ENTRY(sctp_stream_reset_list) next_resp; michael@0: uint32_t tsn; michael@0: uint32_t number_entries; michael@0: uint16_t list_of_streams[]; michael@0: }; michael@0: michael@0: TAILQ_HEAD(sctp_resethead, sctp_stream_reset_list); michael@0: michael@0: /* michael@0: * Users of the iterator need to malloc a iterator with a call to michael@0: * sctp_initiate_iterator(inp_func, assoc_func, inp_func, pcb_flags, pcb_features, michael@0: * asoc_state, void-ptr-arg, uint32-arg, end_func, inp); michael@0: * michael@0: * Use the following two defines if you don't care what pcb flags are on the EP michael@0: * and/or you don't care what state the association is in. michael@0: * michael@0: * Note that if you specify an INP as the last argument then ONLY each michael@0: * association of that single INP will be executed upon. Note that the pcb michael@0: * flags STILL apply so if the inp you specify has different pcb_flags then michael@0: * what you put in pcb_flags nothing will happen. use SCTP_PCB_ANY_FLAGS to michael@0: * assure the inp you specify gets treated. 
michael@0: */ michael@0: #define SCTP_PCB_ANY_FLAGS 0x00000000 michael@0: #define SCTP_PCB_ANY_FEATURES 0x00000000 michael@0: #define SCTP_ASOC_ANY_STATE 0x00000000 michael@0: michael@0: typedef void (*asoc_func) (struct sctp_inpcb *, struct sctp_tcb *, void *ptr, michael@0: uint32_t val); michael@0: typedef int (*inp_func) (struct sctp_inpcb *, void *ptr, uint32_t val); michael@0: typedef void (*end_func) (void *ptr, uint32_t val); michael@0: michael@0: #if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP) michael@0: /* whats on the mcore control struct */ michael@0: struct sctp_mcore_queue { michael@0: TAILQ_ENTRY(sctp_mcore_queue) next; michael@0: #if defined(__FreeBSD__) && __FreeBSD_version >= 801000 michael@0: struct vnet *vn; michael@0: #endif michael@0: struct mbuf *m; michael@0: int off; michael@0: int v6; michael@0: }; michael@0: michael@0: TAILQ_HEAD(sctp_mcore_qhead, sctp_mcore_queue); michael@0: michael@0: struct sctp_mcore_ctrl { michael@0: SCTP_PROCESS_STRUCT thread_proc; michael@0: struct sctp_mcore_qhead que; michael@0: struct mtx core_mtx; michael@0: struct mtx que_mtx; michael@0: int running; michael@0: int cpuid; michael@0: }; michael@0: michael@0: michael@0: #endif michael@0: michael@0: michael@0: struct sctp_iterator { michael@0: TAILQ_ENTRY(sctp_iterator) sctp_nxt_itr; michael@0: #if defined(__FreeBSD__) && __FreeBSD_version >= 801000 michael@0: struct vnet *vn; michael@0: #endif michael@0: struct sctp_timer tmr; michael@0: struct sctp_inpcb *inp; /* current endpoint */ michael@0: struct sctp_tcb *stcb; /* current* assoc */ michael@0: struct sctp_inpcb *next_inp; /* special hook to skip to */ michael@0: asoc_func function_assoc; /* per assoc function */ michael@0: inp_func function_inp; /* per endpoint function */ michael@0: inp_func function_inp_end; /* end INP function */ michael@0: end_func function_atend; /* iterator completion function */ michael@0: void *pointer; /* pointer for apply func to use */ michael@0: uint32_t 
val; /* value for apply func to use */ michael@0: uint32_t pcb_flags; /* endpoint flags being checked */ michael@0: uint32_t pcb_features; /* endpoint features being checked */ michael@0: uint32_t asoc_state; /* assoc state being checked */ michael@0: uint32_t iterator_flags; michael@0: uint8_t no_chunk_output; michael@0: uint8_t done_current_ep; michael@0: }; michael@0: /* iterator_flags values */ michael@0: #define SCTP_ITERATOR_DO_ALL_INP 0x00000001 michael@0: #define SCTP_ITERATOR_DO_SINGLE_INP 0x00000002 michael@0: michael@0: michael@0: TAILQ_HEAD(sctpiterators, sctp_iterator); michael@0: michael@0: struct sctp_copy_all { michael@0: struct sctp_inpcb *inp; /* ep */ michael@0: struct mbuf *m; michael@0: struct sctp_sndrcvinfo sndrcv; michael@0: int sndlen; michael@0: int cnt_sent; michael@0: int cnt_failed; michael@0: }; michael@0: michael@0: struct sctp_asconf_iterator { michael@0: struct sctpladdr list_of_work; michael@0: int cnt; michael@0: }; michael@0: michael@0: struct iterator_control { michael@0: #if defined(__FreeBSD__) michael@0: struct mtx ipi_iterator_wq_mtx; michael@0: struct mtx it_mtx; michael@0: #elif defined(__APPLE__) michael@0: lck_mtx_t *ipi_iterator_wq_mtx; michael@0: lck_mtx_t *it_mtx; michael@0: #elif defined(SCTP_PROCESS_LEVEL_LOCKS) michael@0: #if defined(__Userspace__) michael@0: userland_mutex_t ipi_iterator_wq_mtx; michael@0: userland_mutex_t it_mtx; michael@0: userland_cond_t iterator_wakeup; michael@0: #else michael@0: pthread_mutex_t ipi_iterator_wq_mtx; michael@0: pthread_mutex_t it_mtx; michael@0: pthread_cond_t iterator_wakeup; michael@0: #endif michael@0: #elif defined(__Windows__) michael@0: struct spinlock it_lock; michael@0: struct spinlock ipi_iterator_wq_lock; michael@0: KEVENT iterator_wakeup[2]; michael@0: PFILE_OBJECT iterator_thread_obj; michael@0: #else michael@0: void *it_mtx; michael@0: #endif michael@0: #if !defined(__Windows__) michael@0: #if !defined(__Userspace__) michael@0: SCTP_PROCESS_STRUCT thread_proc; 
michael@0: #else michael@0: userland_thread_t thread_proc; michael@0: #endif michael@0: #endif michael@0: struct sctpiterators iteratorhead; michael@0: struct sctp_iterator *cur_it; michael@0: uint32_t iterator_running; michael@0: uint32_t iterator_flags; michael@0: }; michael@0: #if !defined(__FreeBSD__) michael@0: #define SCTP_ITERATOR_MUST_EXIT 0x00000001 michael@0: #define SCTP_ITERATOR_EXITED 0x00000002 michael@0: #endif michael@0: #define SCTP_ITERATOR_STOP_CUR_IT 0x00000004 michael@0: #define SCTP_ITERATOR_STOP_CUR_INP 0x00000008 michael@0: michael@0: struct sctp_net_route { michael@0: sctp_rtentry_t *ro_rt; michael@0: #if defined(__FreeBSD__) michael@0: #if __FreeBSD_version >= 800000 michael@0: void *ro_lle; michael@0: #endif michael@0: #if __FreeBSD_version >= 900000 michael@0: void *ro_ia; michael@0: int ro_flags; michael@0: #endif michael@0: #endif michael@0: #if defined(__APPLE__) michael@0: #if !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION) michael@0: struct ifaddr *ro_srcia; michael@0: #endif michael@0: #if !defined(APPLE_LEOPARD) michael@0: uint32_t ro_flags; michael@0: #endif michael@0: #endif michael@0: union sctp_sockstore _l_addr; /* remote peer addr */ michael@0: struct sctp_ifa *_s_addr; /* our selected src addr */ michael@0: }; michael@0: michael@0: struct htcp { michael@0: uint16_t alpha; /* Fixed point arith, << 7 */ michael@0: uint8_t beta; /* Fixed point arith, << 7 */ michael@0: uint8_t modeswitch; /* Delay modeswitch until we had at least one congestion event */ michael@0: uint32_t last_cong; /* Time since last congestion event end */ michael@0: uint32_t undo_last_cong; michael@0: uint16_t bytes_acked; michael@0: uint32_t bytecount; michael@0: uint32_t minRTT; michael@0: uint32_t maxRTT; michael@0: michael@0: uint32_t undo_maxRTT; michael@0: uint32_t undo_old_maxB; michael@0: michael@0: /* Bandwidth estimation */ michael@0: uint32_t minB; michael@0: uint32_t maxB; 
michael@0: uint32_t old_maxB; michael@0: uint32_t Bi; michael@0: uint32_t lasttime; michael@0: }; michael@0: michael@0: struct rtcc_cc { michael@0: struct timeval tls; /* The time we started the sending */ michael@0: uint64_t lbw; /* Our last estimated bw */ michael@0: uint64_t lbw_rtt; /* RTT at bw estimate */ michael@0: uint64_t bw_bytes; /* The total bytes since this sending began */ michael@0: uint64_t bw_tot_time; /* The total time since sending began */ michael@0: uint64_t new_tot_time; /* temp holding the new value */ michael@0: uint64_t bw_bytes_at_last_rttc; /* What bw_bytes was at last rtt calc */ michael@0: uint32_t cwnd_at_bw_set; /* Cwnd at last bw saved - lbw */ michael@0: uint32_t vol_reduce; /* cnt of voluntary reductions */ michael@0: uint16_t steady_step; /* The number required to be in steady state*/ michael@0: uint16_t step_cnt; /* The current number */ michael@0: uint8_t ret_from_eq; /* When all things are equal what do I return 0/1 - 1 no cc advance */ michael@0: uint8_t use_dccc_ecn; /* Flag to enable DCCC ECN */ michael@0: uint8_t tls_needs_set; /* Flag to indicate we need to set tls 0 or 1 means set at send 2 not */ michael@0: uint8_t last_step_state; /* Last state if steady state stepdown is on */ michael@0: uint8_t rtt_set_this_sack; /* Flag saying this sack had RTT calc on it */ michael@0: uint8_t last_inst_ind; /* Last saved inst indication */ michael@0: }; michael@0: michael@0: michael@0: struct sctp_nets { michael@0: TAILQ_ENTRY(sctp_nets) sctp_next; /* next link */ michael@0: michael@0: /* michael@0: * Things on the top half may be able to be split into a common michael@0: * structure shared by all. michael@0: */ michael@0: struct sctp_timer pmtu_timer; michael@0: struct sctp_timer hb_timer; michael@0: michael@0: /* michael@0: * The following two in combination equate to a route entry for v6 michael@0: * or v4. 
michael@0: */ michael@0: struct sctp_net_route ro; michael@0: michael@0: /* mtu discovered so far */ michael@0: uint32_t mtu; michael@0: uint32_t ssthresh; /* not sure about this one for split */ michael@0: uint32_t last_cwr_tsn; michael@0: uint32_t cwr_window_tsn; michael@0: uint32_t ecn_ce_pkt_cnt; michael@0: uint32_t lost_cnt; michael@0: /* smoothed average things for RTT and RTO itself */ michael@0: int lastsa; michael@0: int lastsv; michael@0: uint64_t rtt; /* last measured rtt value in us */ michael@0: unsigned int RTO; michael@0: michael@0: /* This is used for SHUTDOWN/SHUTDOWN-ACK/SEND or INIT timers */ michael@0: struct sctp_timer rxt_timer; michael@0: michael@0: /* last time in seconds I sent to it */ michael@0: struct timeval last_sent_time; michael@0: union cc_control_data { michael@0: struct htcp htcp_ca; /* JRS - struct used in HTCP algorithm */ michael@0: struct rtcc_cc rtcc; /* rtcc module cc stuff */ michael@0: } cc_mod; michael@0: int ref_count; michael@0: michael@0: /* Congestion stats per destination */ michael@0: /* michael@0: * flight size variables and such, sorry Vern, I could not avoid michael@0: * this if I wanted performance :> michael@0: */ michael@0: uint32_t flight_size; michael@0: uint32_t cwnd; /* actual cwnd */ michael@0: uint32_t prev_cwnd; /* cwnd before any processing */ michael@0: uint32_t ecn_prev_cwnd; /* ECN prev cwnd at first ecn_echo seen in new window */ michael@0: uint32_t partial_bytes_acked; /* in CA tracks when to incr a MTU */ michael@0: /* tracking variables to avoid the aloc/free in sack processing */ michael@0: unsigned int net_ack; michael@0: unsigned int net_ack2; michael@0: michael@0: /* michael@0: * JRS - 5/8/07 - Variable to track last time michael@0: * a destination was active for CMT PF michael@0: */ michael@0: uint32_t last_active; michael@0: michael@0: /* michael@0: * CMT variables (iyengar@cis.udel.edu) michael@0: */ michael@0: uint32_t this_sack_highest_newack; /* tracks highest TSN newly michael@0: * 
acked for a given dest in michael@0: * the current SACK. Used in michael@0: * SFR and HTNA algos */ michael@0: uint32_t pseudo_cumack; /* CMT CUC algorithm. Maintains next expected michael@0: * pseudo-cumack for this destination */ michael@0: uint32_t rtx_pseudo_cumack; /* CMT CUC algorithm. Maintains next michael@0: * expected pseudo-cumack for this michael@0: * destination */ michael@0: michael@0: /* CMT fast recovery variables */ michael@0: uint32_t fast_recovery_tsn; michael@0: uint32_t heartbeat_random1; michael@0: uint32_t heartbeat_random2; michael@0: #ifdef INET6 michael@0: uint32_t flowlabel; michael@0: #endif michael@0: uint8_t dscp; michael@0: michael@0: struct timeval start_time; /* time when this net was created */ michael@0: uint32_t marked_retrans; /* number or DATA chunks marked for michael@0: timer based retransmissions */ michael@0: uint32_t marked_fastretrans; michael@0: uint32_t heart_beat_delay; /* Heart Beat delay in ms */ michael@0: michael@0: /* if this guy is ok or not ... 
status */ michael@0: uint16_t dest_state; michael@0: /* number of timeouts to consider the destination unreachable */ michael@0: uint16_t failure_threshold; michael@0: /* number of timeouts to consider the destination potentially failed */ michael@0: uint16_t pf_threshold; michael@0: /* error stats on the destination */ michael@0: uint16_t error_count; michael@0: /* UDP port number in case of UDP tunneling */ michael@0: uint16_t port; michael@0: michael@0: uint8_t fast_retran_loss_recovery; michael@0: uint8_t will_exit_fast_recovery; michael@0: /* Flags that probably can be combined into dest_state */ michael@0: uint8_t fast_retran_ip; /* fast retransmit in progress */ michael@0: uint8_t hb_responded; michael@0: uint8_t saw_newack; /* CMT's SFR algorithm flag */ michael@0: uint8_t src_addr_selected; /* if we split we move */ michael@0: uint8_t indx_of_eligible_next_to_use; michael@0: uint8_t addr_is_local; /* its a local address (if known) could move michael@0: * in split */ michael@0: michael@0: /* michael@0: * CMT variables (iyengar@cis.udel.edu) michael@0: */ michael@0: uint8_t find_pseudo_cumack; /* CMT CUC algorithm. Flag used to michael@0: * find a new pseudocumack. This flag michael@0: * is set after a new pseudo-cumack michael@0: * has been received and indicates michael@0: * that the sender should find the michael@0: * next pseudo-cumack expected for michael@0: * this destination */ michael@0: uint8_t find_rtx_pseudo_cumack; /* CMT CUCv2 algorithm. Flag used to michael@0: * find a new rtx-pseudocumack. This michael@0: * flag is set after a new michael@0: * rtx-pseudo-cumack has been received michael@0: * and indicates that the sender michael@0: * should find the next michael@0: * rtx-pseudo-cumack expected for this michael@0: * destination */ michael@0: uint8_t new_pseudo_cumack; /* CMT CUC algorithm. 
Flag used to michael@0: * indicate if a new pseudo-cumack or michael@0: * rtx-pseudo-cumack has been received */ michael@0: uint8_t window_probe; /* Doing a window probe? */ michael@0: uint8_t RTO_measured; /* Have we done the first measure */ michael@0: uint8_t last_hs_used; /* index into the last HS table entry we used */ michael@0: uint8_t lan_type; michael@0: uint8_t rto_needed; michael@0: #if defined(__FreeBSD__) michael@0: uint32_t flowid; michael@0: #ifdef INVARIANTS michael@0: uint8_t flowidset; michael@0: #endif michael@0: #endif michael@0: }; michael@0: michael@0: michael@0: struct sctp_data_chunkrec { michael@0: uint32_t TSN_seq; /* the TSN of this transmit */ michael@0: uint16_t stream_seq; /* the stream sequence number of this transmit */ michael@0: uint16_t stream_number; /* the stream number of this guy */ michael@0: uint32_t payloadtype; michael@0: uint32_t context; /* from send */ michael@0: uint32_t cwnd_at_send; michael@0: /* michael@0: * part of the Highest sacked algorithm to be able to stroke counts michael@0: * on ones that are FR'd. michael@0: */ michael@0: uint32_t fast_retran_tsn; /* sending_seq at the time of FR */ michael@0: struct timeval timetodrop; /* time we drop it from queue */ michael@0: uint8_t doing_fast_retransmit; michael@0: uint8_t rcv_flags; /* flags pulled from data chunk on inbound for michael@0: * outbound holds sending flags for PR-SCTP. 
michael@0: */ michael@0: uint8_t state_flags; michael@0: uint8_t chunk_was_revoked; michael@0: uint8_t fwd_tsn_cnt; michael@0: }; michael@0: michael@0: TAILQ_HEAD(sctpchunk_listhead, sctp_tmit_chunk); michael@0: michael@0: /* The lower byte is used to enumerate PR_SCTP policies */ michael@0: #define CHUNK_FLAGS_PR_SCTP_TTL SCTP_PR_SCTP_TTL michael@0: #define CHUNK_FLAGS_PR_SCTP_BUF SCTP_PR_SCTP_BUF michael@0: #define CHUNK_FLAGS_PR_SCTP_RTX SCTP_PR_SCTP_RTX michael@0: michael@0: /* The upper byte is used as a bit mask */ michael@0: #define CHUNK_FLAGS_FRAGMENT_OK 0x0100 michael@0: michael@0: struct chk_id { michael@0: uint16_t id; michael@0: uint16_t can_take_data; michael@0: }; michael@0: michael@0: michael@0: struct sctp_tmit_chunk { michael@0: union { michael@0: struct sctp_data_chunkrec data; michael@0: struct chk_id chunk_id; michael@0: } rec; michael@0: struct sctp_association *asoc; /* bp to asoc this belongs to */ michael@0: struct timeval sent_rcv_time; /* filled in if RTT being calculated */ michael@0: struct mbuf *data; /* pointer to mbuf chain of data */ michael@0: struct mbuf *last_mbuf; /* pointer to last mbuf in chain */ michael@0: struct sctp_nets *whoTo; michael@0: TAILQ_ENTRY(sctp_tmit_chunk) sctp_next; /* next link */ michael@0: int32_t sent; /* the send status */ michael@0: uint16_t snd_count; /* number of times I sent */ michael@0: uint16_t flags; /* flags, such as FRAGMENT_OK */ michael@0: uint16_t send_size; michael@0: uint16_t book_size; michael@0: uint16_t mbcnt; michael@0: uint16_t auth_keyid; michael@0: uint8_t holds_key_ref; /* flag if auth keyid refcount is held */ michael@0: uint8_t pad_inplace; michael@0: uint8_t do_rtt; michael@0: uint8_t book_size_scale; michael@0: uint8_t no_fr_allowed; michael@0: uint8_t copy_by_ref; michael@0: uint8_t window_probe; michael@0: }; michael@0: michael@0: /* michael@0: * The first part of this structure MUST be the entire sinfo structure. Maybe michael@0: * I should have made it a sub structure... 
we can circle back later and do michael@0: * that if we want. michael@0: */ michael@0: struct sctp_queued_to_read { /* sinfo structure Pluse more */ michael@0: uint16_t sinfo_stream; /* off the wire */ michael@0: uint16_t sinfo_ssn; /* off the wire */ michael@0: uint16_t sinfo_flags; /* SCTP_UNORDERED from wire use SCTP_EOF for michael@0: * EOR */ michael@0: uint32_t sinfo_ppid; /* off the wire */ michael@0: uint32_t sinfo_context; /* pick this up from assoc def context? */ michael@0: uint32_t sinfo_timetolive; /* not used by kernel */ michael@0: uint32_t sinfo_tsn; /* Use this in reassembly as first TSN */ michael@0: uint32_t sinfo_cumtsn; /* Use this in reassembly as last TSN */ michael@0: sctp_assoc_t sinfo_assoc_id; /* our assoc id */ michael@0: /* Non sinfo stuff */ michael@0: uint32_t length; /* length of data */ michael@0: uint32_t held_length; /* length held in sb */ michael@0: struct sctp_nets *whoFrom; /* where it came from */ michael@0: struct mbuf *data; /* front of the mbuf chain of data with michael@0: * PKT_HDR */ michael@0: struct mbuf *tail_mbuf; /* used for multi-part data */ michael@0: struct mbuf *aux_data; /* used to hold/cache control if o/s does not take it from us */ michael@0: struct sctp_tcb *stcb; /* assoc, used for window update */ michael@0: TAILQ_ENTRY(sctp_queued_to_read) next; michael@0: uint16_t port_from; michael@0: uint16_t spec_flags; /* Flags to hold the notification field */ michael@0: uint8_t do_not_ref_stcb; michael@0: uint8_t end_added; michael@0: uint8_t pdapi_aborted; michael@0: uint8_t some_taken; michael@0: }; michael@0: michael@0: /* This data structure will be on the outbound michael@0: * stream queues. Data will be pulled off from michael@0: * the front of the mbuf data and chunk-ified michael@0: * by the output routines. We will custom michael@0: * fit every chunk we pull to the send/sent michael@0: * queue to make up the next full packet michael@0: * if we can. 
An entry cannot be removed michael@0: * from the stream_out queue until michael@0: * the msg_is_complete flag is set. This michael@0: * means at times data/tail_mbuf MIGHT michael@0: * be NULL.. If that occurs it happens michael@0: * for one of two reasons. Either the user michael@0: * is blocked on a send() call and has not michael@0: * awoken to copy more data down... OR michael@0: * the user is in the explict MSG_EOR mode michael@0: * and wrote some data, but has not completed michael@0: * sending. michael@0: */ michael@0: struct sctp_stream_queue_pending { michael@0: struct mbuf *data; michael@0: struct mbuf *tail_mbuf; michael@0: struct timeval ts; michael@0: struct sctp_nets *net; michael@0: TAILQ_ENTRY (sctp_stream_queue_pending) next; michael@0: TAILQ_ENTRY (sctp_stream_queue_pending) ss_next; michael@0: uint32_t length; michael@0: uint32_t timetolive; michael@0: uint32_t ppid; michael@0: uint32_t context; michael@0: uint16_t sinfo_flags; michael@0: uint16_t stream; michael@0: uint16_t act_flags; michael@0: uint16_t auth_keyid; michael@0: uint8_t holds_key_ref; michael@0: uint8_t msg_is_complete; michael@0: uint8_t some_taken; michael@0: uint8_t sender_all_done; michael@0: uint8_t put_last_out; michael@0: uint8_t discard_rest; michael@0: }; michael@0: michael@0: /* michael@0: * this struct contains info that is used to track inbound stream data and michael@0: * help with ordering. 
michael@0: */ michael@0: TAILQ_HEAD(sctpwheelunrel_listhead, sctp_stream_in); michael@0: struct sctp_stream_in { michael@0: struct sctp_readhead inqueue; michael@0: uint16_t stream_no; michael@0: uint16_t last_sequence_delivered; /* used for re-order */ michael@0: uint8_t delivery_started; michael@0: }; michael@0: michael@0: TAILQ_HEAD(sctpwheel_listhead, sctp_stream_out); michael@0: TAILQ_HEAD(sctplist_listhead, sctp_stream_queue_pending); michael@0: michael@0: /* Round-robin schedulers */ michael@0: struct ss_rr { michael@0: /* next link in wheel */ michael@0: TAILQ_ENTRY(sctp_stream_out) next_spoke; michael@0: }; michael@0: michael@0: /* Priority scheduler */ michael@0: struct ss_prio { michael@0: /* next link in wheel */ michael@0: TAILQ_ENTRY(sctp_stream_out) next_spoke; michael@0: /* priority id */ michael@0: uint16_t priority; michael@0: }; michael@0: michael@0: /* Fair Bandwidth scheduler */ michael@0: struct ss_fb { michael@0: /* next link in wheel */ michael@0: TAILQ_ENTRY(sctp_stream_out) next_spoke; michael@0: /* stores message size */ michael@0: int32_t rounds; michael@0: }; michael@0: michael@0: /* michael@0: * This union holds all data necessary for michael@0: * different stream schedulers. michael@0: */ michael@0: union scheduling_data { michael@0: struct sctpwheel_listhead out_wheel; michael@0: struct sctplist_listhead out_list; michael@0: }; michael@0: michael@0: /* michael@0: * This union holds all parameters per stream michael@0: * necessary for different stream schedulers. 
michael@0: */ michael@0: union scheduling_parameters { michael@0: struct ss_rr rr; michael@0: struct ss_prio prio; michael@0: struct ss_fb fb; michael@0: }; michael@0: michael@0: /* This struct is used to track the traffic on outbound streams */ michael@0: struct sctp_stream_out { michael@0: struct sctp_streamhead outqueue; michael@0: union scheduling_parameters ss_params; michael@0: uint32_t chunks_on_queues; michael@0: uint16_t stream_no; michael@0: uint16_t next_sequence_send; /* next one I expect to send out */ michael@0: uint8_t last_msg_incomplete; michael@0: }; michael@0: michael@0: /* used to keep track of the addresses yet to try to add/delete */ michael@0: TAILQ_HEAD(sctp_asconf_addrhead, sctp_asconf_addr); michael@0: struct sctp_asconf_addr { michael@0: TAILQ_ENTRY(sctp_asconf_addr) next; michael@0: struct sctp_asconf_addr_param ap; michael@0: struct sctp_ifa *ifa; /* save the ifa for add/del ip */ michael@0: uint8_t sent; /* has this been sent yet? */ michael@0: uint8_t special_del; /* not to be used in lookup */ michael@0: }; michael@0: michael@0: struct sctp_scoping { michael@0: uint8_t ipv4_addr_legal; michael@0: uint8_t ipv6_addr_legal; michael@0: #if defined(__Userspace__) michael@0: uint8_t conn_addr_legal; michael@0: #endif michael@0: uint8_t loopback_scope; michael@0: uint8_t ipv4_local_scope; michael@0: uint8_t local_scope; michael@0: uint8_t site_scope; michael@0: }; michael@0: michael@0: #define SCTP_TSN_LOG_SIZE 40 michael@0: michael@0: struct sctp_tsn_log { michael@0: void *stcb; michael@0: uint32_t tsn; michael@0: uint16_t strm; michael@0: uint16_t seq; michael@0: uint16_t sz; michael@0: uint16_t flgs; michael@0: uint16_t in_pos; michael@0: uint16_t in_out; michael@0: }; michael@0: michael@0: #define SCTP_FS_SPEC_LOG_SIZE 200 michael@0: struct sctp_fs_spec_log { michael@0: uint32_t sent; michael@0: uint32_t total_flight; michael@0: uint32_t tsn; michael@0: uint16_t book; michael@0: uint8_t incr; michael@0: uint8_t decr; michael@0: }; 
/* This struct is here to cut out the compatibility
 * pad that bulks up both the inp and stcb. The non
 * pad portion MUST stay in complete sync with
 * sctp_sndrcvinfo... i.e. if sinfo_xxxx is added
 * this must be done here too.
 */
struct sctp_nonpad_sndrcvinfo {
    uint16_t sinfo_stream;
    uint16_t sinfo_ssn;
    uint16_t sinfo_flags;
    uint32_t sinfo_ppid;
    uint32_t sinfo_context;
    uint32_t sinfo_timetolive;
    uint32_t sinfo_tsn;
    uint32_t sinfo_cumtsn;
    sctp_assoc_t sinfo_assoc_id;
    uint16_t sinfo_keynumber;
    uint16_t sinfo_keynumber_valid;
};

/*
 * JRS - Structure to hold function pointers to the functions responsible
 * for congestion control.  One vtable per pluggable CC module.
 */

struct sctp_cc_functions {
    void (*sctp_set_initial_cc_param)(struct sctp_tcb *stcb, struct sctp_nets *net);
    void (*sctp_cwnd_update_after_sack)(struct sctp_tcb *stcb,
                                        struct sctp_association *asoc,
                                        int accum_moved, int reneged_all, int will_exit);
    void (*sctp_cwnd_update_exit_pf)(struct sctp_tcb *stcb, struct sctp_nets *net);
    void (*sctp_cwnd_update_after_fr)(struct sctp_tcb *stcb,
                                      struct sctp_association *asoc);
    void (*sctp_cwnd_update_after_timeout)(struct sctp_tcb *stcb,
                                           struct sctp_nets *net);
    void (*sctp_cwnd_update_after_ecn_echo)(struct sctp_tcb *stcb,
                                            struct sctp_nets *net, int in_window, int num_pkt_lost);
    void (*sctp_cwnd_update_after_packet_dropped)(struct sctp_tcb *stcb,
                                                  struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
                                                  uint32_t *bottle_bw, uint32_t *on_queue);
    void (*sctp_cwnd_update_after_output)(struct sctp_tcb *stcb,
                                          struct sctp_nets *net, int burst_limit);
    void (*sctp_cwnd_update_packet_transmitted)(struct sctp_tcb *stcb,
                                                struct sctp_nets *net);
    void (*sctp_cwnd_update_tsn_acknowledged)(struct sctp_nets *net,
                                              struct sctp_tmit_chunk *);
    void (*sctp_cwnd_new_transmission_begins)(struct sctp_tcb *stcb,
                                              struct sctp_nets *net);
    void (*sctp_cwnd_prepare_net_for_sack)(struct sctp_tcb *stcb,
                                           struct sctp_nets *net);
    int (*sctp_cwnd_socket_option)(struct sctp_tcb *stcb, int set, struct sctp_cc_option *);
    void (*sctp_rtt_calculated)(struct sctp_tcb *, struct sctp_nets *, struct timeval *);
};

/*
 * RS - Structure to hold function pointers to the functions responsible
 * for stream scheduling.  One vtable per pluggable scheduler.
 */
struct sctp_ss_functions {
    void (*sctp_ss_init)(struct sctp_tcb *stcb, struct sctp_association *asoc,
                         int holds_lock);
    void (*sctp_ss_clear)(struct sctp_tcb *stcb, struct sctp_association *asoc,
                          int clear_values, int holds_lock);
    void (*sctp_ss_init_stream)(struct sctp_stream_out *strq, struct sctp_stream_out *with_strq);
    void (*sctp_ss_add_to_stream)(struct sctp_tcb *stcb, struct sctp_association *asoc,
                                  struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp, int holds_lock);
    int (*sctp_ss_is_empty)(struct sctp_tcb *stcb, struct sctp_association *asoc);
    void (*sctp_ss_remove_from_stream)(struct sctp_tcb *stcb, struct sctp_association *asoc,
                                       struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp, int holds_lock);
    struct sctp_stream_out* (*sctp_ss_select_stream)(struct sctp_tcb *stcb,
                                                     struct sctp_nets *net, struct sctp_association *asoc);
    void (*sctp_ss_scheduled)(struct sctp_tcb *stcb, struct sctp_nets *net,
                              struct sctp_association *asoc, struct sctp_stream_out *strq, int moved_how_much);
    void (*sctp_ss_packet_done)(struct sctp_tcb *stcb, struct sctp_nets *net,
                                struct sctp_association *asoc);
    int (*sctp_ss_get_value)(struct sctp_tcb *stcb, struct sctp_association *asoc,
                             struct sctp_stream_out *strq, uint16_t *value);
    int (*sctp_ss_set_value)(struct sctp_tcb *stcb, struct sctp_association *asoc,
                             struct sctp_stream_out *strq, uint16_t value);
};

/* used to save ASCONF chunks for retransmission */
TAILQ_HEAD(sctp_asconf_head, sctp_asconf);
struct sctp_asconf {
    TAILQ_ENTRY(sctp_asconf) next;
    uint32_t serial_number;
    uint16_t snd_count;
    struct mbuf *data;
    uint16_t len;
};

/* used to save ASCONF-ACK chunks for retransmission */
TAILQ_HEAD(sctp_asconf_ackhead, sctp_asconf_ack);
struct sctp_asconf_ack {
    TAILQ_ENTRY(sctp_asconf_ack) next;
    uint32_t serial_number;
    struct sctp_nets *last_sent_to;
    struct mbuf *data;
    uint16_t len;
};

/*
 * Here we have information about each individual association that we track.
 * We probably in production would be more dynamic. But for ease of
 * implementation we will have a fixed array that we hunt for in a linear
 * fashion.
michael@0: */ michael@0: struct sctp_association { michael@0: /* association state */ michael@0: int state; michael@0: michael@0: /* queue of pending addrs to add/delete */ michael@0: struct sctp_asconf_addrhead asconf_queue; michael@0: michael@0: struct timeval time_entered; /* time we entered state */ michael@0: struct timeval time_last_rcvd; michael@0: struct timeval time_last_sent; michael@0: struct timeval time_last_sat_advance; michael@0: struct sctp_nonpad_sndrcvinfo def_send; michael@0: michael@0: /* timers and such */ michael@0: struct sctp_timer dack_timer; /* Delayed ack timer */ michael@0: struct sctp_timer asconf_timer; /* asconf */ michael@0: struct sctp_timer strreset_timer; /* stream reset */ michael@0: struct sctp_timer shut_guard_timer; /* shutdown guard */ michael@0: struct sctp_timer autoclose_timer; /* automatic close timer */ michael@0: struct sctp_timer delayed_event_timer; /* timer for delayed events */ michael@0: struct sctp_timer delete_prim_timer; /* deleting primary dst */ michael@0: michael@0: /* list of restricted local addresses */ michael@0: struct sctpladdr sctp_restricted_addrs; michael@0: michael@0: /* last local address pending deletion (waiting for an address add) */ michael@0: struct sctp_ifa *asconf_addr_del_pending; michael@0: /* Deleted primary destination (used to stop timer) */ michael@0: struct sctp_nets *deleted_primary; michael@0: michael@0: struct sctpnetlisthead nets; /* remote address list */ michael@0: michael@0: /* Free chunk list */ michael@0: struct sctpchunk_listhead free_chunks; michael@0: michael@0: /* Control chunk queue */ michael@0: struct sctpchunk_listhead control_send_queue; michael@0: michael@0: /* ASCONF chunk queue */ michael@0: struct sctpchunk_listhead asconf_send_queue; michael@0: michael@0: /* michael@0: * Once a TSN hits the wire it is moved to the sent_queue. We michael@0: * maintain two counts here (don't know if any but retran_cnt is michael@0: * needed). 
The idea is that the sent_queue_retran_cnt reflects how michael@0: * many chunks have been marked for retranmission by either T3-rxt michael@0: * or FR. michael@0: */ michael@0: struct sctpchunk_listhead sent_queue; michael@0: struct sctpchunk_listhead send_queue; michael@0: michael@0: /* re-assembly queue for fragmented chunks on the inbound path */ michael@0: struct sctpchunk_listhead reasmqueue; michael@0: michael@0: /* Scheduling queues */ michael@0: union scheduling_data ss_data; michael@0: michael@0: /* This pointer will be set to NULL michael@0: * most of the time. But when we have michael@0: * a fragmented message, where we could michael@0: * not get out all of the message at michael@0: * the last send then this will point michael@0: * to the stream to go get data from. michael@0: */ michael@0: struct sctp_stream_out *locked_on_sending; michael@0: michael@0: /* If an iterator is looking at me, this is it */ michael@0: struct sctp_iterator *stcb_starting_point_for_iterator; michael@0: michael@0: /* ASCONF save the last ASCONF-ACK so we can resend it if necessary */ michael@0: struct sctp_asconf_ackhead asconf_ack_sent; michael@0: michael@0: /* michael@0: * pointer to last stream reset queued to control queue by us with michael@0: * requests. michael@0: */ michael@0: struct sctp_tmit_chunk *str_reset; michael@0: /* michael@0: * if Source Address Selection happening, this will rotate through michael@0: * the link list. 
michael@0: */ michael@0: struct sctp_laddr *last_used_address; michael@0: michael@0: /* stream arrays */ michael@0: struct sctp_stream_in *strmin; michael@0: struct sctp_stream_out *strmout; michael@0: uint8_t *mapping_array; michael@0: /* primary destination to use */ michael@0: struct sctp_nets *primary_destination; michael@0: struct sctp_nets *alternate; /* If primary is down or PF */ michael@0: /* For CMT */ michael@0: struct sctp_nets *last_net_cmt_send_started; michael@0: /* last place I got a data chunk from */ michael@0: struct sctp_nets *last_data_chunk_from; michael@0: /* last place I got a control from */ michael@0: struct sctp_nets *last_control_chunk_from; michael@0: michael@0: /* circular looking for output selection */ michael@0: struct sctp_stream_out *last_out_stream; michael@0: michael@0: /* michael@0: * wait to the point the cum-ack passes req->send_reset_at_tsn for michael@0: * any req on the list. michael@0: */ michael@0: struct sctp_resethead resetHead; michael@0: michael@0: /* queue of chunks waiting to be sent into the local stack */ michael@0: struct sctp_readhead pending_reply_queue; michael@0: michael@0: /* JRS - the congestion control functions are in this struct */ michael@0: struct sctp_cc_functions cc_functions; michael@0: /* JRS - value to store the currently loaded congestion control module */ michael@0: uint32_t congestion_control_module; michael@0: /* RS - the stream scheduling functions are in this struct */ michael@0: struct sctp_ss_functions ss_functions; michael@0: /* RS - value to store the currently loaded stream scheduling module */ michael@0: uint32_t stream_scheduling_module; michael@0: michael@0: uint32_t vrf_id; michael@0: michael@0: uint32_t cookie_preserve_req; michael@0: /* ASCONF next seq I am sending out, inits at init-tsn */ michael@0: uint32_t asconf_seq_out; michael@0: uint32_t asconf_seq_out_acked; michael@0: /* ASCONF last received ASCONF from peer, starts at peer's TSN-1 */ michael@0: uint32_t asconf_seq_in; 
michael@0: michael@0: /* next seq I am sending in str reset messages */ michael@0: uint32_t str_reset_seq_out; michael@0: /* next seq I am expecting in str reset messages */ michael@0: uint32_t str_reset_seq_in; michael@0: michael@0: /* various verification tag information */ michael@0: uint32_t my_vtag; /* The tag to be used. if assoc is re-initited michael@0: * by remote end, and I have unlocked this michael@0: * will be regenerated to a new random value. */ michael@0: uint32_t peer_vtag; /* The peers last tag */ michael@0: michael@0: uint32_t my_vtag_nonce; michael@0: uint32_t peer_vtag_nonce; michael@0: michael@0: uint32_t assoc_id; michael@0: michael@0: /* This is the SCTP fragmentation threshold */ michael@0: uint32_t smallest_mtu; michael@0: michael@0: /* michael@0: * Special hook for Fast retransmit, allows us to track the highest michael@0: * TSN that is NEW in this SACK if gap ack blocks are present. michael@0: */ michael@0: uint32_t this_sack_highest_gap; michael@0: michael@0: /* michael@0: * The highest consecutive TSN that has been acked by peer on my michael@0: * sends michael@0: */ michael@0: uint32_t last_acked_seq; michael@0: michael@0: /* The next TSN that I will use in sending. */ michael@0: uint32_t sending_seq; michael@0: michael@0: /* Original seq number I used ??questionable to keep?? */ michael@0: uint32_t init_seq_number; michael@0: michael@0: michael@0: /* The Advanced Peer Ack Point, as required by the PR-SCTP */ michael@0: /* (A1 in Section 4.2) */ michael@0: uint32_t advanced_peer_ack_point; michael@0: michael@0: /* michael@0: * The highest consequetive TSN at the bottom of the mapping array michael@0: * (for his sends). michael@0: */ michael@0: uint32_t cumulative_tsn; michael@0: /* michael@0: * Used to track the mapping array and its offset bits. This MAY be michael@0: * lower then cumulative_tsn. 
michael@0: */ michael@0: uint32_t mapping_array_base_tsn; michael@0: /* michael@0: * used to track highest TSN we have received and is listed in the michael@0: * mapping array. michael@0: */ michael@0: uint32_t highest_tsn_inside_map; michael@0: michael@0: /* EY - new NR variables used for nr_sack based on mapping_array*/ michael@0: uint8_t *nr_mapping_array; michael@0: uint32_t highest_tsn_inside_nr_map; michael@0: michael@0: uint32_t fast_recovery_tsn; michael@0: uint32_t sat_t3_recovery_tsn; michael@0: uint32_t tsn_last_delivered; michael@0: /* michael@0: * For the pd-api we should re-write this a bit more efficent. We michael@0: * could have multiple sctp_queued_to_read's that we are building at michael@0: * once. Now we only do this when we get ready to deliver to the michael@0: * socket buffer. Note that we depend on the fact that the struct is michael@0: * "stuck" on the read queue until we finish all the pd-api. michael@0: */ michael@0: struct sctp_queued_to_read *control_pdapi; michael@0: michael@0: uint32_t tsn_of_pdapi_last_delivered; michael@0: uint32_t pdapi_ppid; michael@0: uint32_t context; michael@0: uint32_t last_reset_action[SCTP_MAX_RESET_PARAMS]; michael@0: uint32_t last_sending_seq[SCTP_MAX_RESET_PARAMS]; michael@0: uint32_t last_base_tsnsent[SCTP_MAX_RESET_PARAMS]; michael@0: #ifdef SCTP_ASOCLOG_OF_TSNS michael@0: /* michael@0: * special log - This adds considerable size michael@0: * to the asoc, but provides a log that you michael@0: * can use to detect problems via kgdb. 
michael@0: */ michael@0: struct sctp_tsn_log in_tsnlog[SCTP_TSN_LOG_SIZE]; michael@0: struct sctp_tsn_log out_tsnlog[SCTP_TSN_LOG_SIZE]; michael@0: uint32_t cumack_log[SCTP_TSN_LOG_SIZE]; michael@0: uint32_t cumack_logsnt[SCTP_TSN_LOG_SIZE]; michael@0: uint16_t tsn_in_at; michael@0: uint16_t tsn_out_at; michael@0: uint16_t tsn_in_wrapped; michael@0: uint16_t tsn_out_wrapped; michael@0: uint16_t cumack_log_at; michael@0: uint16_t cumack_log_atsnt; michael@0: #endif /* SCTP_ASOCLOG_OF_TSNS */ michael@0: #ifdef SCTP_FS_SPEC_LOG michael@0: struct sctp_fs_spec_log fslog[SCTP_FS_SPEC_LOG_SIZE]; michael@0: uint16_t fs_index; michael@0: #endif michael@0: michael@0: /* michael@0: * window state information and smallest MTU that I use to bound michael@0: * segmentation michael@0: */ michael@0: uint32_t peers_rwnd; michael@0: uint32_t my_rwnd; michael@0: uint32_t my_last_reported_rwnd; michael@0: uint32_t sctp_frag_point; michael@0: michael@0: uint32_t total_output_queue_size; michael@0: michael@0: uint32_t sb_cc; /* shadow of sb_cc */ michael@0: uint32_t sb_send_resv; /* amount reserved on a send */ michael@0: uint32_t my_rwnd_control_len; /* shadow of sb_mbcnt used for rwnd control */ michael@0: #ifdef INET6 michael@0: uint32_t default_flowlabel; michael@0: #endif michael@0: uint32_t pr_sctp_cnt; michael@0: int ctrl_queue_cnt; /* could be removed REM - NO IT CAN'T!! RRS */ michael@0: /* michael@0: * All outbound datagrams queue into this list from the individual michael@0: * stream queue. Here they get assigned a TSN and then await michael@0: * sending. The stream seq comes when it is first put in the michael@0: * individual str queue michael@0: */ michael@0: unsigned int stream_queue_cnt; michael@0: unsigned int send_queue_cnt; michael@0: unsigned int sent_queue_cnt; michael@0: unsigned int sent_queue_cnt_removeable; michael@0: /* michael@0: * Number on sent queue that are marked for retran until this value michael@0: * is 0 we only send one packet of retran'ed data. 
michael@0: */ michael@0: unsigned int sent_queue_retran_cnt; michael@0: michael@0: unsigned int size_on_reasm_queue; michael@0: unsigned int cnt_on_reasm_queue; michael@0: unsigned int fwd_tsn_cnt; michael@0: /* amount of data (bytes) currently in flight (on all destinations) */ michael@0: unsigned int total_flight; michael@0: /* Total book size in flight */ michael@0: unsigned int total_flight_count; /* count of chunks used with michael@0: * book total */ michael@0: /* count of destinaton nets and list of destination nets */ michael@0: unsigned int numnets; michael@0: michael@0: /* Total error count on this association */ michael@0: unsigned int overall_error_count; michael@0: michael@0: unsigned int cnt_msg_on_sb; michael@0: michael@0: /* All stream count of chunks for delivery */ michael@0: unsigned int size_on_all_streams; michael@0: unsigned int cnt_on_all_streams; michael@0: michael@0: /* Heart Beat delay in ms */ michael@0: uint32_t heart_beat_delay; michael@0: michael@0: /* autoclose */ michael@0: unsigned int sctp_autoclose_ticks; michael@0: michael@0: /* how many preopen streams we have */ michael@0: unsigned int pre_open_streams; michael@0: michael@0: /* How many streams I support coming into me */ michael@0: unsigned int max_inbound_streams; michael@0: michael@0: /* the cookie life I award for any cookie, in seconds */ michael@0: unsigned int cookie_life; michael@0: /* time to delay acks for */ michael@0: unsigned int delayed_ack; michael@0: unsigned int old_delayed_ack; michael@0: unsigned int sack_freq; michael@0: unsigned int data_pkts_seen; michael@0: michael@0: unsigned int numduptsns; michael@0: int dup_tsns[SCTP_MAX_DUP_TSNS]; michael@0: unsigned int initial_init_rto_max; /* initial RTO for INIT's */ michael@0: unsigned int initial_rto; /* initial send RTO */ michael@0: unsigned int minrto; /* per assoc RTO-MIN */ michael@0: unsigned int maxrto; /* per assoc RTO-MAX */ michael@0: michael@0: /* authentication fields */ michael@0: 
sctp_auth_chklist_t *local_auth_chunks; michael@0: sctp_auth_chklist_t *peer_auth_chunks; michael@0: sctp_hmaclist_t *local_hmacs; /* local HMACs supported */ michael@0: sctp_hmaclist_t *peer_hmacs; /* peer HMACs supported */ michael@0: struct sctp_keyhead shared_keys; /* assoc's shared keys */ michael@0: sctp_authinfo_t authinfo; /* randoms, cached keys */ michael@0: /* michael@0: * refcnt to block freeing when a sender or receiver is off coping michael@0: * user data in. michael@0: */ michael@0: uint32_t refcnt; michael@0: uint32_t chunks_on_out_queue; /* total chunks floating around, michael@0: * locked by send socket buffer */ michael@0: uint32_t peers_adaptation; michael@0: uint16_t peer_hmac_id; /* peer HMAC id to send */ michael@0: michael@0: /* michael@0: * Being that we have no bag to collect stale cookies, and that we michael@0: * really would not want to anyway.. we will count them in this michael@0: * counter. We of course feed them to the pigeons right away (I have michael@0: * always thought of pigeons as flying rats). michael@0: */ michael@0: uint16_t stale_cookie_count; michael@0: michael@0: /* michael@0: * For the partial delivery API, if up, invoked this is what last michael@0: * TSN I delivered michael@0: */ michael@0: uint16_t str_of_pdapi; michael@0: uint16_t ssn_of_pdapi; michael@0: michael@0: /* counts of actual built streams. Allocation may be more however */ michael@0: /* could re-arrange to optimize space here. 
*/ michael@0: uint16_t streamincnt; michael@0: uint16_t streamoutcnt; michael@0: uint16_t strm_realoutsize; michael@0: uint16_t strm_pending_add_size; michael@0: /* my maximum number of retrans of INIT and SEND */ michael@0: /* copied from SCTP but should be individually setable */ michael@0: uint16_t max_init_times; michael@0: uint16_t max_send_times; michael@0: michael@0: uint16_t def_net_failure; michael@0: michael@0: uint16_t def_net_pf_threshold; michael@0: michael@0: /* michael@0: * lock flag: 0 is ok to send, 1+ (duals as a retran count) is michael@0: * awaiting ACK michael@0: */ michael@0: uint16_t mapping_array_size; michael@0: michael@0: uint16_t last_strm_seq_delivered; michael@0: uint16_t last_strm_no_delivered; michael@0: michael@0: uint16_t last_revoke_count; michael@0: int16_t num_send_timers_up; michael@0: michael@0: uint16_t stream_locked_on; michael@0: uint16_t ecn_echo_cnt_onq; michael@0: michael@0: uint16_t free_chunk_cnt; michael@0: uint8_t stream_locked; michael@0: uint8_t authenticated; /* packet authenticated ok */ michael@0: /* michael@0: * This flag indicates that a SACK need to be sent. michael@0: * Initially this is 1 to send the first sACK immediately. michael@0: */ michael@0: uint8_t send_sack; michael@0: michael@0: /* max burst of new packets into the network */ michael@0: uint32_t max_burst; michael@0: /* max burst of fast retransmit packets */ michael@0: uint32_t fr_max_burst; michael@0: michael@0: uint8_t sat_network; /* RTT is in range of sat net or greater */ michael@0: uint8_t sat_network_lockout; /* lockout code */ michael@0: uint8_t burst_limit_applied; /* Burst limit in effect at last send? 
*/ michael@0: /* flag goes on when we are doing a partial delivery api */ michael@0: uint8_t hb_random_values[4]; michael@0: uint8_t fragmented_delivery_inprogress; michael@0: uint8_t fragment_flags; michael@0: uint8_t last_flags_delivered; michael@0: uint8_t hb_ect_randombit; michael@0: uint8_t hb_random_idx; michael@0: uint8_t default_dscp; michael@0: uint8_t asconf_del_pending; /* asconf delete last addr pending */ michael@0: michael@0: /* michael@0: * This value, plus all other ack'd but above cum-ack is added michael@0: * together to cross check against the bit that we have yet to michael@0: * define (probably in the SACK). When the cum-ack is updated, this michael@0: * sum is updated as well. michael@0: */ michael@0: michael@0: /* Flag to tell if ECN is allowed */ michael@0: uint8_t ecn_allowed; michael@0: michael@0: /* Did the peer make the stream config (add out) request */ michael@0: uint8_t peer_req_out; michael@0: michael@0: /* flag to indicate if peer can do asconf */ michael@0: uint8_t peer_supports_asconf; michael@0: /* EY - flag to indicate if peer can do nr_sack*/ michael@0: uint8_t peer_supports_nr_sack; michael@0: /* pr-sctp support flag */ michael@0: uint8_t peer_supports_prsctp; michael@0: /* peer authentication support flag */ michael@0: uint8_t peer_supports_auth; michael@0: /* stream resets are supported by the peer */ michael@0: uint8_t peer_supports_strreset; michael@0: uint8_t local_strreset_support; michael@0: michael@0: uint8_t peer_supports_nat; michael@0: /* michael@0: * packet drop's are supported by the peer, we don't really care michael@0: * about this but we bookkeep it anyway. 
michael@0: */ michael@0: uint8_t peer_supports_pktdrop; michael@0: michael@0: struct sctp_scoping scope; michael@0: /* flags to handle send alternate net tracking */ michael@0: uint8_t used_alt_onsack; michael@0: uint8_t used_alt_asconfack; michael@0: uint8_t fast_retran_loss_recovery; michael@0: uint8_t sat_t3_loss_recovery; michael@0: uint8_t dropped_special_cnt; michael@0: uint8_t seen_a_sack_this_pkt; michael@0: uint8_t stream_reset_outstanding; michael@0: uint8_t stream_reset_out_is_outstanding; michael@0: uint8_t delayed_connection; michael@0: uint8_t ifp_had_enobuf; michael@0: uint8_t saw_sack_with_frags; michael@0: uint8_t saw_sack_with_nr_frags; michael@0: uint8_t in_asocid_hash; michael@0: uint8_t assoc_up_sent; michael@0: uint8_t adaptation_needed; michael@0: uint8_t adaptation_sent; michael@0: /* CMT variables */ michael@0: uint8_t cmt_dac_pkts_rcvd; michael@0: uint8_t sctp_cmt_on_off; michael@0: uint8_t iam_blocking; michael@0: uint8_t cookie_how[8]; michael@0: /* EY 05/05/08 - NR_SACK variable*/ michael@0: uint8_t sctp_nr_sack_on_off; michael@0: /* JRS 5/21/07 - CMT PF variable */ michael@0: uint8_t sctp_cmt_pf; michael@0: uint8_t use_precise_time; michael@0: uint64_t sctp_features; michael@0: uint16_t port; /* remote UDP encapsulation port */ michael@0: /* michael@0: * The mapping array is used to track out of order sequences above michael@0: * last_acked_seq. 0 indicates packet missing 1 indicates packet michael@0: * rec'd. We slide it up every time we raise last_acked_seq and 0 michael@0: * trailing locactions out. If I get a TSN above the array michael@0: * mappingArraySz, I discard the datagram and let retransmit happen. 
michael@0: */ michael@0: uint32_t marked_retrans; michael@0: uint32_t timoinit; michael@0: uint32_t timodata; michael@0: uint32_t timosack; michael@0: uint32_t timoshutdown; michael@0: uint32_t timoheartbeat; michael@0: uint32_t timocookie; michael@0: uint32_t timoshutdownack; michael@0: struct timeval start_time; michael@0: struct timeval discontinuity_time; michael@0: }; michael@0: michael@0: #endif