netwerk/sctp/src/netinet/sctputil.c

author:      Michael Schloh von Bennewitz <michael@schloh.com>
date:        Tue, 06 Jan 2015 21:39:09 +0100
branch:      TOR_BUG_9701
changeset:   8:97036ab72558
permissions: -rwxr-xr-x

Conditionally force memory storage according to privacy.thirdparty.isolate;
this solves Tor bug #9701, complying with the disk avoidance requirement documented at
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
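The change itself is not visible in the excerpt below. As a rough illustration of the kind of guard the commit message describes, a caller deciding between disk-backed and memory-only storage could consult the preference first; the helper pref_get_bool() and its signature are assumptions made for this sketch, not code from the changeset.

/*
 * Illustrative sketch only (not the actual patch): when third-party
 * isolation is enabled, refuse file-backed storage and keep all state
 * in memory, per the disk avoidance requirement referenced above.
 */
extern int pref_get_bool(const char *name, int defval); /* assumed helper */

static int
use_memory_only_storage(void)
{
	/* Treat any non-zero value of the pref as "isolate". */
	return (pref_get_bool("privacy.thirdparty.isolate", 0) != 0);
}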

     1 /*-
     2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
     3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
     4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
     5  *
     6  * Redistribution and use in source and binary forms, with or without
     7  * modification, are permitted provided that the following conditions are met:
     8  *
     9  * a) Redistributions of source code must retain the above copyright notice,
    10  *    this list of conditions and the following disclaimer.
    11  *
    12  * b) Redistributions in binary form must reproduce the above copyright
    13  *    notice, this list of conditions and the following disclaimer in
    14  *    the documentation and/or other materials provided with the distribution.
    15  *
    16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
    17  *    contributors may be used to endorse or promote products derived
    18  *    from this software without specific prior written permission.
    19  *
    20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
    22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
    24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
    25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
    26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
    27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
    28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
    29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
    30  * THE POSSIBILITY OF SUCH DAMAGE.
    31  */
    33 #ifdef __FreeBSD__
    34 #include <sys/cdefs.h>
    35 __FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 259943 2013-12-27 13:07:00Z tuexen $");
    36 #endif
    38 #include <netinet/sctp_os.h>
    39 #include <netinet/sctp_pcb.h>
    40 #include <netinet/sctputil.h>
    41 #include <netinet/sctp_var.h>
    42 #include <netinet/sctp_sysctl.h>
    43 #ifdef INET6
    44 #if defined(__Userspace__) || defined(__FreeBSD__)
    45 #include <netinet6/sctp6_var.h>
    46 #endif
    47 #endif
    48 #include <netinet/sctp_header.h>
    49 #include <netinet/sctp_output.h>
    50 #include <netinet/sctp_uio.h>
    51 #include <netinet/sctp_timer.h>
    52 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
    53 #include <netinet/sctp_auth.h>
    54 #include <netinet/sctp_asconf.h>
    55 #include <netinet/sctp_bsd_addr.h>
    56 #if defined(__Userspace__)
    57 #include <netinet/sctp_constants.h>
    58 #endif
    59 #if defined(__FreeBSD__)
    60 #include <netinet/udp.h>
    61 #include <netinet/udp_var.h>
    62 #include <sys/proc.h>
    63 #endif
    65 #if defined(__APPLE__)
    66 #define APPLE_FILE_NO 8
    67 #endif
    69 #if defined(__Windows__)
    70 #if !defined(SCTP_LOCAL_TRACE_BUF)
    71 #include "eventrace_netinet.h"
    72 #include "sctputil.tmh" /* this is the file that will be auto generated */
    73 #endif
    74 #else
    75 #ifndef KTR_SCTP
    76 #define KTR_SCTP KTR_SUBSYS
    77 #endif
    78 #endif
    80 extern struct sctp_cc_functions sctp_cc_functions[];
    81 extern struct sctp_ss_functions sctp_ss_functions[];
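       /*
        * The sctp_sblog()/sctp_log_*() helpers below snapshot their arguments
        * into a struct sctp_cwnd_log and emit it through SCTP_CTR6() when
        * FreeBSD KTR tracing or SCTP_LOCAL_TRACE_BUF is available; otherwise
        * they compile to empty stubs.
        */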
    83 void
    84 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
    85 {
    86 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
    87 	struct sctp_cwnd_log sctp_clog;
    89 	sctp_clog.x.sb.stcb = stcb;
    90 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
    91 	if (stcb)
    92 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
    93 	else
    94 		sctp_clog.x.sb.stcb_sbcc = 0;
    95 	sctp_clog.x.sb.incr = incr;
    96 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
    97 	     SCTP_LOG_EVENT_SB,
    98 	     from,
    99 	     sctp_clog.x.misc.log1,
   100 	     sctp_clog.x.misc.log2,
   101 	     sctp_clog.x.misc.log3,
   102 	     sctp_clog.x.misc.log4);
   103 #endif
   104 }
   106 void
   107 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
   108 {
   109 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
   110 	struct sctp_cwnd_log sctp_clog;
   112 	sctp_clog.x.close.inp = (void *)inp;
   113 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
   114 	if (stcb) {
   115 		sctp_clog.x.close.stcb = (void *)stcb;
   116 		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
   117 	} else {
   118 		sctp_clog.x.close.stcb = 0;
   119 		sctp_clog.x.close.state = 0;
   120 	}
   121 	sctp_clog.x.close.loc = loc;
   122 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
   123 	     SCTP_LOG_EVENT_CLOSE,
   124 	     0,
   125 	     sctp_clog.x.misc.log1,
   126 	     sctp_clog.x.misc.log2,
   127 	     sctp_clog.x.misc.log3,
   128 	     sctp_clog.x.misc.log4);
   129 #endif
   130 }
   132 void
   133 rto_logging(struct sctp_nets *net, int from)
   134 {
   135 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
   136 	struct sctp_cwnd_log sctp_clog;
   138 	memset(&sctp_clog, 0, sizeof(sctp_clog));
   139 	sctp_clog.x.rto.net = (void *) net;
   140 	sctp_clog.x.rto.rtt = net->rtt / 1000;
   141 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
   142 	     SCTP_LOG_EVENT_RTT,
   143 	     from,
   144 	     sctp_clog.x.misc.log1,
   145 	     sctp_clog.x.misc.log2,
   146 	     sctp_clog.x.misc.log3,
   147 	     sctp_clog.x.misc.log4);
   148 #endif
   149 }
   151 void
   152 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
   153 {
   154 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
   155 	struct sctp_cwnd_log sctp_clog;
   157 	sctp_clog.x.strlog.stcb = stcb;
   158 	sctp_clog.x.strlog.n_tsn = tsn;
   159 	sctp_clog.x.strlog.n_sseq = sseq;
   160 	sctp_clog.x.strlog.e_tsn = 0;
   161 	sctp_clog.x.strlog.e_sseq = 0;
   162 	sctp_clog.x.strlog.strm = stream;
   163 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
   164 	     SCTP_LOG_EVENT_STRM,
   165 	     from,
   166 	     sctp_clog.x.misc.log1,
   167 	     sctp_clog.x.misc.log2,
   168 	     sctp_clog.x.misc.log3,
   169 	     sctp_clog.x.misc.log4);
   170 #endif
   171 }
   173 void
   174 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
   175 {
   176 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
   177 	struct sctp_cwnd_log sctp_clog;
   179 	sctp_clog.x.nagle.stcb = (void *)stcb;
   180 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
   181 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
   182 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
   183 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
   184 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
   185 	     SCTP_LOG_EVENT_NAGLE,
   186 	     action,
   187 	     sctp_clog.x.misc.log1,
   188 	     sctp_clog.x.misc.log2,
   189 	     sctp_clog.x.misc.log3,
   190 	     sctp_clog.x.misc.log4);
   191 #endif
   192 }
   194 void
   195 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
   196 {
   197 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
   198 	struct sctp_cwnd_log sctp_clog;
   200 	sctp_clog.x.sack.cumack = cumack;
   201 	sctp_clog.x.sack.oldcumack = old_cumack;
   202 	sctp_clog.x.sack.tsn = tsn;
   203 	sctp_clog.x.sack.numGaps = gaps;
   204 	sctp_clog.x.sack.numDups = dups;
   205 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
   206 	     SCTP_LOG_EVENT_SACK,
   207 	     from,
   208 	     sctp_clog.x.misc.log1,
   209 	     sctp_clog.x.misc.log2,
   210 	     sctp_clog.x.misc.log3,
   211 	     sctp_clog.x.misc.log4);
   212 #endif
   213 }
   215 void
   216 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
   217 {
   218 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
   219 	struct sctp_cwnd_log sctp_clog;
   221 	memset(&sctp_clog, 0, sizeof(sctp_clog));
   222 	sctp_clog.x.map.base = map;
   223 	sctp_clog.x.map.cum = cum;
   224 	sctp_clog.x.map.high = high;
   225 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
   226 	     SCTP_LOG_EVENT_MAP,
   227 	     from,
   228 	     sctp_clog.x.misc.log1,
   229 	     sctp_clog.x.misc.log2,
   230 	     sctp_clog.x.misc.log3,
   231 	     sctp_clog.x.misc.log4);
   232 #endif
   233 }
   235 void
   236 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
   237 {
   238 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
   239 	struct sctp_cwnd_log sctp_clog;
   241 	memset(&sctp_clog, 0, sizeof(sctp_clog));
   242 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
   243 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
   244 	sctp_clog.x.fr.tsn = tsn;
   245 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
   246 	     SCTP_LOG_EVENT_FR,
   247 	     from,
   248 	     sctp_clog.x.misc.log1,
   249 	     sctp_clog.x.misc.log2,
   250 	     sctp_clog.x.misc.log3,
   251 	     sctp_clog.x.misc.log4);
   252 #endif
   253 }
   255 void
   256 sctp_log_mb(struct mbuf *m, int from)
   257 {
   258 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
   259 	struct sctp_cwnd_log sctp_clog;
   261 	sctp_clog.x.mb.mp = m;
   262 	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
   263 	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
   264 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
   265 	if (SCTP_BUF_IS_EXTENDED(m)) {
   266 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
   267 #if defined(__APPLE__)
   268 		/* APPLE does not use a ref_cnt, but a forward/backward ref queue */
   269 #else
   270 		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
   271 #endif
   272 	} else {
   273 		sctp_clog.x.mb.ext = 0;
   274 		sctp_clog.x.mb.refcnt = 0;
   275 	}
   276 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
   277 	     SCTP_LOG_EVENT_MBUF,
   278 	     from,
   279 	     sctp_clog.x.misc.log1,
   280 	     sctp_clog.x.misc.log2,
   281 	     sctp_clog.x.misc.log3,
   282 	     sctp_clog.x.misc.log4);
   283 #endif
   284 }
   286 void
   287 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
   288 {
   289 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
   290 	struct sctp_cwnd_log sctp_clog;
   292 	if (control == NULL) {
   293 		SCTP_PRINTF("Gak log of NULL?\n");
   294 		return;
   295 	}
   296 	sctp_clog.x.strlog.stcb = control->stcb;
   297 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
   298 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
   299 	sctp_clog.x.strlog.strm = control->sinfo_stream;
   300 	if (poschk != NULL) {
   301 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
   302 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
   303 	} else {
   304 		sctp_clog.x.strlog.e_tsn = 0;
   305 		sctp_clog.x.strlog.e_sseq = 0;
   306 	}
   307 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
   308 	     SCTP_LOG_EVENT_STRM,
   309 	     from,
   310 	     sctp_clog.x.misc.log1,
   311 	     sctp_clog.x.misc.log2,
   312 	     sctp_clog.x.misc.log3,
   313 	     sctp_clog.x.misc.log4);
   314 #endif
   315 }
   317 void
   318 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
   319 {
   320 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
   321 	struct sctp_cwnd_log sctp_clog;
   323 	sctp_clog.x.cwnd.net = net;
   324 	if (stcb->asoc.send_queue_cnt > 255)
   325 		sctp_clog.x.cwnd.cnt_in_send = 255;
   326 	else
   327 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
   328 	if (stcb->asoc.stream_queue_cnt > 255)
   329 		sctp_clog.x.cwnd.cnt_in_str = 255;
   330 	else
   331 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
   333 	if (net) {
   334 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
   335 		sctp_clog.x.cwnd.inflight = net->flight_size;
   336 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
   337 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
   338 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
   339 	}
   340 	if (SCTP_CWNDLOG_PRESEND == from) {
   341 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
   342 	}
   343 	sctp_clog.x.cwnd.cwnd_augment = augment;
   344 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
   345 	     SCTP_LOG_EVENT_CWND,
   346 	     from,
   347 	     sctp_clog.x.misc.log1,
   348 	     sctp_clog.x.misc.log2,
   349 	     sctp_clog.x.misc.log3,
   350 	     sctp_clog.x.misc.log4);
   351 #endif
   352 }
   354 #ifndef __APPLE__
   355 void
   356 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
   357 {
   358 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
   359 	struct sctp_cwnd_log sctp_clog;
   361 	memset(&sctp_clog, 0, sizeof(sctp_clog));
   362 	if (inp) {
   363 		sctp_clog.x.lock.sock = (void *) inp->sctp_socket;
   365 	} else {
   366 		sctp_clog.x.lock.sock = (void *) NULL;
   367 	}
   368 	sctp_clog.x.lock.inp = (void *) inp;
   369 #if (defined(__FreeBSD__) && __FreeBSD_version >= 503000) || (defined(__APPLE__))
   370 	if (stcb) {
   371 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
   372 	} else {
   373 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
   374 	}
   375 	if (inp) {
   376 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
   377 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
   378 	} else {
   379 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
   380 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
   381 	}
   382 #if (defined(__FreeBSD__) && __FreeBSD_version <= 602000)
   383 	sctp_clog.x.lock.info_lock = mtx_owned(&SCTP_BASE_INFO(ipi_ep_mtx));
   384 #else
   385 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
   386 #endif
   387 	if (inp && (inp->sctp_socket)) {
   388 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
   389 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
   390 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
   391 	} else {
   392 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
   393 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
   394 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
   395 	}
   396 #endif
   397 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
   398 	     SCTP_LOG_LOCK_EVENT,
   399 	     from,
   400 	     sctp_clog.x.misc.log1,
   401 	     sctp_clog.x.misc.log2,
   402 	     sctp_clog.x.misc.log3,
   403 	     sctp_clog.x.misc.log4);
   404 #endif
   405 }
   406 #endif
   408 void
   409 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
   410 {
   411 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
   412 	struct sctp_cwnd_log sctp_clog;
   414 	memset(&sctp_clog, 0, sizeof(sctp_clog));
   415 	sctp_clog.x.cwnd.net = net;
   416 	sctp_clog.x.cwnd.cwnd_new_value = error;
   417 	sctp_clog.x.cwnd.inflight = net->flight_size;
   418 	sctp_clog.x.cwnd.cwnd_augment = burst;
   419 	if (stcb->asoc.send_queue_cnt > 255)
   420 		sctp_clog.x.cwnd.cnt_in_send = 255;
   421 	else
   422 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
   423 	if (stcb->asoc.stream_queue_cnt > 255)
   424 		sctp_clog.x.cwnd.cnt_in_str = 255;
   425 	else
   426 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
   427 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
   428 	     SCTP_LOG_EVENT_MAXBURST,
   429 	     from,
   430 	     sctp_clog.x.misc.log1,
   431 	     sctp_clog.x.misc.log2,
   432 	     sctp_clog.x.misc.log3,
   433 	     sctp_clog.x.misc.log4);
   434 #endif
   435 }
   437 void
   438 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
   439 {
   440 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
   441 	struct sctp_cwnd_log sctp_clog;
   443 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
   444 	sctp_clog.x.rwnd.send_size = snd_size;
   445 	sctp_clog.x.rwnd.overhead = overhead;
   446 	sctp_clog.x.rwnd.new_rwnd = 0;
   447 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
   448 	     SCTP_LOG_EVENT_RWND,
   449 	     from,
   450 	     sctp_clog.x.misc.log1,
   451 	     sctp_clog.x.misc.log2,
   452 	     sctp_clog.x.misc.log3,
   453 	     sctp_clog.x.misc.log4);
   454 #endif
   455 }
   457 void
   458 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
   459 {
   460 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
   461 	struct sctp_cwnd_log sctp_clog;
   463 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
   464 	sctp_clog.x.rwnd.send_size = flight_size;
   465 	sctp_clog.x.rwnd.overhead = overhead;
   466 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
   467 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
   468 	     SCTP_LOG_EVENT_RWND,
   469 	     from,
   470 	     sctp_clog.x.misc.log1,
   471 	     sctp_clog.x.misc.log2,
   472 	     sctp_clog.x.misc.log3,
   473 	     sctp_clog.x.misc.log4);
   474 #endif
   475 }
   477 void
   478 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
   479 {
   480 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
   481 	struct sctp_cwnd_log sctp_clog;
   483 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
   484 	sctp_clog.x.mbcnt.size_change = book;
   485 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
   486 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
   487 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
   488 	     SCTP_LOG_EVENT_MBCNT,
   489 	     from,
   490 	     sctp_clog.x.misc.log1,
   491 	     sctp_clog.x.misc.log2,
   492 	     sctp_clog.x.misc.log3,
   493 	     sctp_clog.x.misc.log4);
   494 #endif
   495 }
   497 void
   498 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
   499 {
   500 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
   501 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
   502 	     SCTP_LOG_MISC_EVENT,
   503 	     from,
   504 	     a, b, c, d);
   505 #endif
   506 }
   508 void
   509 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
   510 {
   511 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
   512 	struct sctp_cwnd_log sctp_clog;
   514 	sctp_clog.x.wake.stcb = (void *)stcb;
   515 	sctp_clog.x.wake.wake_cnt = wake_cnt;
   516 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
   517 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
   518 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
   520 	if (stcb->asoc.stream_queue_cnt < 0xff)
   521 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
   522 	else
   523 		sctp_clog.x.wake.stream_qcnt = 0xff;
   525 	if (stcb->asoc.chunks_on_out_queue < 0xff)
   526 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
   527 	else
   528 		sctp_clog.x.wake.chunks_on_oque = 0xff;
   530 	sctp_clog.x.wake.sctpflags = 0;
   531 	/* set in the deferred mode stuff */
   532 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
   533 		sctp_clog.x.wake.sctpflags |= 1;
   534 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
   535 		sctp_clog.x.wake.sctpflags |= 2;
   536 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
   537 		sctp_clog.x.wake.sctpflags |= 4;
   538 	/* what about the sb */
   539 	if (stcb->sctp_socket) {
   540 		struct socket *so = stcb->sctp_socket;
   542 		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
   543 	} else {
   544 		sctp_clog.x.wake.sbflags = 0xff;
   545 	}
   546 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
   547 	     SCTP_LOG_EVENT_WAKE,
   548 	     from,
   549 	     sctp_clog.x.misc.log1,
   550 	     sctp_clog.x.misc.log2,
   551 	     sctp_clog.x.misc.log3,
   552 	     sctp_clog.x.misc.log4);
   553 #endif
   554 }
   556 void
   557 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
   558 {
   559 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
   560 	struct sctp_cwnd_log sctp_clog;
   562 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
   563 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
   564 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
   565 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
   566 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
   567 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight/1024);
   568 	sctp_clog.x.blk.sndlen = sendlen;
   569 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
   570 	     SCTP_LOG_EVENT_BLOCK,
   571 	     from,
   572 	     sctp_clog.x.misc.log1,
   573 	     sctp_clog.x.misc.log2,
   574 	     sctp_clog.x.misc.log3,
   575 	     sctp_clog.x.misc.log4);
   576 #endif
   577 }
   579 int
   580 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
   581 {
   582 	/* May need to fix this if ktrdump does not work */
   583 	return (0);
   584 }
   586 #ifdef SCTP_AUDITING_ENABLED
   587 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
   588 static int sctp_audit_indx = 0;
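       /*
        * sctp_audit_data is a fixed-size ring of two-byte trace records:
        * writers store an event/detail pair at sctp_audit_indx and wrap the
        * index at SCTP_AUDIT_SIZE, and sctp_print_audit_report() dumps the
        * ring starting from the current index.
        */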
   590 static
   591 void
   592 sctp_print_audit_report(void)
   593 {
   594 	int i;
   595 	int cnt;
   597 	cnt = 0;
   598 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
   599 		if ((sctp_audit_data[i][0] == 0xe0) &&
   600 		    (sctp_audit_data[i][1] == 0x01)) {
   601 			cnt = 0;
   602 			SCTP_PRINTF("\n");
   603 		} else if (sctp_audit_data[i][0] == 0xf0) {
   604 			cnt = 0;
   605 			SCTP_PRINTF("\n");
   606 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
   607 		    (sctp_audit_data[i][1] == 0x01)) {
   608 			SCTP_PRINTF("\n");
   609 			cnt = 0;
   610 		}
   611 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
   612 			    (uint32_t) sctp_audit_data[i][1]);
   613 		cnt++;
   614 		if ((cnt % 14) == 0)
   615 			SCTP_PRINTF("\n");
   616 	}
   617 	for (i = 0; i < sctp_audit_indx; i++) {
   618 		if ((sctp_audit_data[i][0] == 0xe0) &&
   619 		    (sctp_audit_data[i][1] == 0x01)) {
   620 			cnt = 0;
   621 			SCTP_PRINTF("\n");
   622 		} else if (sctp_audit_data[i][0] == 0xf0) {
   623 			cnt = 0;
   624 			SCTP_PRINTF("\n");
   625 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
   626 		    (sctp_audit_data[i][1] == 0x01)) {
   627 			SCTP_PRINTF("\n");
   628 			cnt = 0;
   629 		}
   630 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
   631 			    (uint32_t) sctp_audit_data[i][1]);
   632 		cnt++;
   633 		if ((cnt % 14) == 0)
   634 			SCTP_PRINTF("\n");
   635 	}
   636 	SCTP_PRINTF("\n");
   637 }
   639 void
   640 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
   641     struct sctp_nets *net)
   642 {
   643 	int resend_cnt, tot_out, rep, tot_book_cnt;
   644 	struct sctp_nets *lnet;
   645 	struct sctp_tmit_chunk *chk;
   647 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
   648 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
   649 	sctp_audit_indx++;
   650 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
   651 		sctp_audit_indx = 0;
   652 	}
   653 	if (inp == NULL) {
   654 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
   655 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
   656 		sctp_audit_indx++;
   657 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
   658 			sctp_audit_indx = 0;
   659 		}
   660 		return;
   661 	}
   662 	if (stcb == NULL) {
   663 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
   664 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
   665 		sctp_audit_indx++;
   666 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
   667 			sctp_audit_indx = 0;
   668 		}
   669 		return;
   670 	}
   671 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
   672 	sctp_audit_data[sctp_audit_indx][1] =
   673 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
   674 	sctp_audit_indx++;
   675 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
   676 		sctp_audit_indx = 0;
   677 	}
   678 	rep = 0;
   679 	tot_book_cnt = 0;
   680 	resend_cnt = tot_out = 0;
   681 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
   682 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
   683 			resend_cnt++;
   684 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
   685 			tot_out += chk->book_size;
   686 			tot_book_cnt++;
   687 		}
   688 	}
   689 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
   690 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
   691 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
   692 		sctp_audit_indx++;
   693 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
   694 			sctp_audit_indx = 0;
   695 		}
   696 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
   697 			    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
   698 		rep = 1;
   699 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
   700 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
   701 		sctp_audit_data[sctp_audit_indx][1] =
   702 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
   703 		sctp_audit_indx++;
   704 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
   705 			sctp_audit_indx = 0;
   706 		}
   707 	}
   708 	if (tot_out != stcb->asoc.total_flight) {
   709 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
   710 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
   711 		sctp_audit_indx++;
   712 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
   713 			sctp_audit_indx = 0;
   714 		}
   715 		rep = 1;
   716 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
   717 			    (int)stcb->asoc.total_flight);
   718 		stcb->asoc.total_flight = tot_out;
   719 	}
   720 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
   721 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
   722 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
   723 		sctp_audit_indx++;
   724 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
   725 			sctp_audit_indx = 0;
   726 		}
   727 		rep = 1;
   728 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
   730 		stcb->asoc.total_flight_count = tot_book_cnt;
   731 	}
   732 	tot_out = 0;
   733 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
   734 		tot_out += lnet->flight_size;
   735 	}
   736 	if (tot_out != stcb->asoc.total_flight) {
   737 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
   738 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
   739 		sctp_audit_indx++;
   740 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
   741 			sctp_audit_indx = 0;
   742 		}
   743 		rep = 1;
   744 		SCTP_PRINTF("real flight:%d net total was %d\n",
   745 			    stcb->asoc.total_flight, tot_out);
   746 		/* now corrective action */
   747 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
   749 			tot_out = 0;
   750 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
   751 				if ((chk->whoTo == lnet) &&
   752 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
   753 					tot_out += chk->book_size;
   754 				}
   755 			}
   756 			if (lnet->flight_size != tot_out) {
   757 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
   758 					    (void *)lnet, lnet->flight_size,
   759 					    tot_out);
   760 				lnet->flight_size = tot_out;
   761 			}
   762 		}
   763 	}
   764 	if (rep) {
   765 		sctp_print_audit_report();
   766 	}
   767 }
   769 void
   770 sctp_audit_log(uint8_t ev, uint8_t fd)
   771 {
   773 	sctp_audit_data[sctp_audit_indx][0] = ev;
   774 	sctp_audit_data[sctp_audit_indx][1] = fd;
   775 	sctp_audit_indx++;
   776 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
   777 		sctp_audit_indx = 0;
   778 	}
   779 }
   781 #endif
   783 /*
   784  * sctp_stop_timers_for_shutdown() should be called
   785  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
   786  * state to make sure that all timers are stopped.
   787  */
   788 void
   789 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
   790 {
   791 	struct sctp_association *asoc;
   792 	struct sctp_nets *net;
   794 	asoc = &stcb->asoc;
   796 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
   797 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
   798 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
   799 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
   800 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
   801 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
   802 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
   803 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
   804 	}
   805 }
   807 /*
   808  * a list of sizes based on typical mtu's, used only if next hop size not
   809  * returned.
   810  */
   811 static uint32_t sctp_mtu_sizes[] = {
   812 	68,
   813 	296,
   814 	508,
   815 	512,
   816 	544,
   817 	576,
   818 	1006,
   819 	1492,
   820 	1500,
   821 	1536,
   822 	2002,
   823 	2048,
   824 	4352,
   825 	4464,
   826 	8166,
   827 	17914,
   828 	32000,
   829 	65535
   830 };
   832 /*
   833  * Return the largest MTU smaller than val. If there is no
   834  * entry, just return val.
   835  */
   836 uint32_t
   837 sctp_get_prev_mtu(uint32_t val)
   838 {
   839 	uint32_t i;
   841 	if (val <= sctp_mtu_sizes[0]) {
   842 		return (val);
   843 	}
   844 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
   845 		if (val <= sctp_mtu_sizes[i]) {
   846 			break;
   847 		}
   848 	}
   849 	return (sctp_mtu_sizes[i - 1]);
   850 }
   852 /*
   853  * Return the smallest MTU larger than val. If there is no
   854  * entry, just return val.
   855  */
   856 uint32_t
   857 sctp_get_next_mtu(uint32_t val)
   858 {
   859 	/* select another MTU that is just bigger than this one */
   860 	uint32_t i;
   862 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
   863 		if (val < sctp_mtu_sizes[i]) {
   864 			return (sctp_mtu_sizes[i]);
   865 		}
   866 	}
   867 	return (val);
   868 }
   870 void
   871 sctp_fill_random_store(struct sctp_pcb *m)
   872 {
   873 	/*
   874 	 * Here we use MD5/SHA-1 to hash our good random numbers together with
   875 	 * our counter. The result becomes our good random numbers, and we
   876 	 * then set up to give these out. Note that we do no locking to
   877 	 * protect this. That is ok, since competing callers merely add more
   878 	 * gobbledygook to the random store, which is what we want. There is a
   879 	 * danger that two callers will use the same random numbers, but that
   880 	 * is ok too, since that is random as well :->
   881 	 */
   882 	m->store_at = 0;
   883 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
   884 	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
   885 	    sizeof(m->random_counter), (uint8_t *)m->random_store);
   886 	m->random_counter++;
   887 }
   889 uint32_t
   890 sctp_select_initial_TSN(struct sctp_pcb *inp)
   891 {
   892 	/*
   893 	 * A true implementation should use a random selection process to get
   894 	 * the initial stream sequence number, using RFC 1750 as a good
   895 	 * guideline.
   896 	 */
   897 	uint32_t x, *xp;
   898 	uint8_t *p;
   899 	int store_at, new_store;
   901 	if (inp->initial_sequence_debug != 0) {
   902 		uint32_t ret;
   904 		ret = inp->initial_sequence_debug;
   905 		inp->initial_sequence_debug++;
   906 		return (ret);
   907 	}
   908  retry:
   909 	store_at = inp->store_at;
   910 	new_store = store_at + sizeof(uint32_t);
   911 	if (new_store >= (SCTP_SIGNATURE_SIZE-3)) {
   912 		new_store = 0;
   913 	}
   914 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
   915 		goto retry;
   916 	}
   917 	if (new_store == 0) {
   918 		/* Refill the random store */
   919 		sctp_fill_random_store(inp);
   920 	}
   921 	p = &inp->random_store[store_at];
   922 	xp = (uint32_t *)p;
   923 	x = *xp;
   924 	return (x);
   925 }
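       /*
        * Draw 32-bit candidates from the random store until one is non-zero
        * and, when 'check' is set, passes sctp_is_vtag_good() for this
        * lport/rport pair.
        */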
   927 uint32_t
   928 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
   929 {
   930 	uint32_t x;
   931 	struct timeval now;
   933 	if (check) {
   934 		(void)SCTP_GETTIME_TIMEVAL(&now);
   935 	}
   936 	for (;;) {
   937 		x = sctp_select_initial_TSN(&inp->sctp_ep);
   938 		if (x == 0) {
   939 			/* we never use 0 */
   940 			continue;
   941 		}
   942 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
   943 			break;
   944 		}
   945 	}
   946 	return (x);
   947 }
   949 int
   950 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
   951                uint32_t override_tag, uint32_t vrf_id)
   952 {
   953 	struct sctp_association *asoc;
   954 	/*
   955 	 * Anything set to zero is taken care of by the allocation routine's
   956 	 * bzero
   957 	 */
   959 	/*
   960 	 * Up front select what scoping to apply on addresses I tell my peer.
   961 	 * Not sure what to do with these right now; we will need to come up
   962 	 * with a way to set them. We may need to pass them through from the
   963 	 * caller in the sctp_aloc_assoc() function.
   964 	 */
   965 	int i;
   967 	asoc = &stcb->asoc;
   968 	/* init all variables to a known value. */
   969 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
   970 	asoc->max_burst = inp->sctp_ep.max_burst;
   971 	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
   972 	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
   973 	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
   974 	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
   975 	asoc->ecn_allowed = inp->sctp_ecn_enable;
   976 	asoc->sctp_nr_sack_on_off = (uint8_t)SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
   977 	asoc->sctp_cmt_pf = (uint8_t)0;
   978 	asoc->sctp_frag_point = inp->sctp_frag_point;
   979 	asoc->sctp_features = inp->sctp_features;
   980 	asoc->default_dscp = inp->sctp_ep.default_dscp;
   981 #ifdef INET6
   982 	if (inp->sctp_ep.default_flowlabel) {
   983 		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
   984 	} else {
   985 		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
   986 			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
   987 			asoc->default_flowlabel &= 0x000fffff;
   988 			asoc->default_flowlabel |= 0x80000000;
   989 		} else {
   990 			asoc->default_flowlabel = 0;
   991 		}
   992 	}
   993 #endif
   994 	asoc->sb_send_resv = 0;
   995 	if (override_tag) {
   996 		asoc->my_vtag = override_tag;
   997 	} else {
   998 		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport,  1);
   999 	}
  1000 	/* Get the nonce tags */
  1001 	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
  1002 	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
  1003 	asoc->vrf_id = vrf_id;
  1005 #ifdef SCTP_ASOCLOG_OF_TSNS
  1006 	asoc->tsn_in_at = 0;
  1007  	asoc->tsn_out_at = 0;
  1008 	asoc->tsn_in_wrapped = 0;
  1009 	asoc->tsn_out_wrapped = 0;
  1010 	asoc->cumack_log_at = 0;
  1011 	asoc->cumack_log_atsnt = 0;
  1012 #endif
  1013 #ifdef SCTP_FS_SPEC_LOG
  1014 	asoc->fs_index = 0;
  1015 #endif
  1016 	asoc->refcnt = 0;
  1017 	asoc->assoc_up_sent = 0;
  1018 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
  1019 	    sctp_select_initial_TSN(&inp->sctp_ep);
  1020 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
  1021 	/* we are optimistic here */
  1022 	asoc->peer_supports_pktdrop = 1;
  1023 	asoc->peer_supports_nat = 0;
  1024 	asoc->sent_queue_retran_cnt = 0;
  1026 	/* for CMT */
  1027         asoc->last_net_cmt_send_started = NULL;
  1029 	/* This will need to be adjusted */
  1030 	asoc->last_acked_seq = asoc->init_seq_number - 1;
  1031 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
  1032 	asoc->asconf_seq_in = asoc->last_acked_seq;
  1034 	/* here we are different, we hold the next one we expect */
  1035 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
  1037 	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
  1038 	asoc->initial_rto = inp->sctp_ep.initial_rto;
  1040 	asoc->max_init_times = inp->sctp_ep.max_init_times;
  1041 	asoc->max_send_times = inp->sctp_ep.max_send_times;
  1042 	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
  1043 	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
  1044 	asoc->free_chunk_cnt = 0;
  1046 	asoc->iam_blocking = 0;
  1047 	asoc->context = inp->sctp_context;
  1048 	asoc->local_strreset_support = inp->local_strreset_support;
  1049 	asoc->def_send = inp->def_send;
  1050 	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
  1051 	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
  1052 	asoc->pr_sctp_cnt = 0;
  1053 	asoc->total_output_queue_size = 0;
  1055 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
  1056 		asoc->scope.ipv6_addr_legal = 1;
  1057 		if (SCTP_IPV6_V6ONLY(inp) == 0) {
  1058 			asoc->scope.ipv4_addr_legal = 1;
  1059 		} else {
  1060 			asoc->scope.ipv4_addr_legal = 0;
  1061 		}
  1062 #if defined(__Userspace__)
  1063 			asoc->scope.conn_addr_legal = 0;
  1064 #endif
  1065 	} else {
  1066 		asoc->scope.ipv6_addr_legal = 0;
  1067 #if defined(__Userspace__)
  1068 		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
  1069 			asoc->scope.conn_addr_legal = 1;
  1070 			asoc->scope.ipv4_addr_legal = 0;
  1071 		} else {
  1072 			asoc->scope.conn_addr_legal = 0;
  1073 			asoc->scope.ipv4_addr_legal = 1;
  1074 		}
  1075 #else
  1076 		asoc->scope.ipv4_addr_legal = 1;
  1077 #endif
  1078 	}
  1080 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
  1081 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
  1083 	asoc->smallest_mtu = inp->sctp_frag_point;
  1084 	asoc->minrto = inp->sctp_ep.sctp_minrto;
  1085 	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
  1087 	asoc->locked_on_sending = NULL;
  1088 	asoc->stream_locked_on = 0;
  1089 	asoc->ecn_echo_cnt_onq = 0;
  1090 	asoc->stream_locked = 0;
  1092 	asoc->send_sack = 1;
  1094 	LIST_INIT(&asoc->sctp_restricted_addrs);
  1096 	TAILQ_INIT(&asoc->nets);
  1097 	TAILQ_INIT(&asoc->pending_reply_queue);
  1098 	TAILQ_INIT(&asoc->asconf_ack_sent);
  1099 	/* Setup to fill the hb random cache at first HB */
  1100 	asoc->hb_random_idx = 4;
  1102 	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
  1104 	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
  1105 	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
  1107 	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
  1108 	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
  1110 	/*
  1111 	 * Now the stream parameters, here we allocate space for all streams
  1112 	 * that we request by default.
  1113 	 */
  1114 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
  1115 	    inp->sctp_ep.pre_open_stream_count;
  1116 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
  1117 		    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
  1118 		    SCTP_M_STRMO);
  1119 	if (asoc->strmout == NULL) {
  1120 		/* big trouble no memory */
  1121 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
  1122 		return (ENOMEM);
  1123 	}
  1124 	for (i = 0; i < asoc->streamoutcnt; i++) {
  1125 		/*
  1126 		 * inbound side must be set to 0xffff, also NOTE when we get
  1127 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
  1128 		 * count (streamoutcnt) but first check if we sent to any of
  1129 		 * the upper streams that were dropped (if some were). Those
  1130 		 * that were dropped must be notified to the upper layer as
  1131 		 * failed to send.
  1132 		 */
  1133 		asoc->strmout[i].next_sequence_send = 0x0;
  1134 		TAILQ_INIT(&asoc->strmout[i].outqueue);
  1135 		asoc->strmout[i].chunks_on_queues = 0;
  1136 		asoc->strmout[i].stream_no = i;
  1137 		asoc->strmout[i].last_msg_incomplete = 0;
  1138 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
  1139 	}
  1140 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
  1142 	/* Now the mapping array */
  1143 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
  1144 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
  1145 		    SCTP_M_MAP);
  1146 	if (asoc->mapping_array == NULL) {
  1147 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
  1148 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
  1149 		return (ENOMEM);
  1150 	}
  1151 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
  1152 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
  1153 	    SCTP_M_MAP);
  1154 	if (asoc->nr_mapping_array == NULL) {
  1155 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
  1156 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
  1157 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
  1158 		return (ENOMEM);
  1159 	}
  1160 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
  1162 	/* Now the init of the other outqueues */
  1163 	TAILQ_INIT(&asoc->free_chunks);
  1164 	TAILQ_INIT(&asoc->control_send_queue);
  1165 	TAILQ_INIT(&asoc->asconf_send_queue);
  1166 	TAILQ_INIT(&asoc->send_queue);
  1167 	TAILQ_INIT(&asoc->sent_queue);
  1168 	TAILQ_INIT(&asoc->reasmqueue);
  1169 	TAILQ_INIT(&asoc->resetHead);
  1170 	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
  1171 	TAILQ_INIT(&asoc->asconf_queue);
  1172 	/* authentication fields */
  1173 	asoc->authinfo.random = NULL;
  1174 	asoc->authinfo.active_keyid = 0;
  1175 	asoc->authinfo.assoc_key = NULL;
  1176 	asoc->authinfo.assoc_keyid = 0;
  1177 	asoc->authinfo.recv_key = NULL;
  1178 	asoc->authinfo.recv_keyid = 0;
  1179 	LIST_INIT(&asoc->shared_keys);
  1180 	asoc->marked_retrans = 0;
  1181 	asoc->port = inp->sctp_ep.port;
  1182 	asoc->timoinit = 0;
  1183 	asoc->timodata = 0;
  1184 	asoc->timosack = 0;
  1185 	asoc->timoshutdown = 0;
  1186 	asoc->timoheartbeat = 0;
  1187 	asoc->timocookie = 0;
  1188 	asoc->timoshutdownack = 0;
  1189 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
  1190 	asoc->discontinuity_time = asoc->start_time;
  1191 	/* sa_ignore MEMLEAK {memory is put in the assoc mapping array and freed later when
  1192 	 * the association is freed.
  1193 	 */
  1194 	return (0);
  1195 }
  1197 void
  1198 sctp_print_mapping_array(struct sctp_association *asoc)
  1199 {
  1200 	unsigned int i, limit;
  1202 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
  1203 	            asoc->mapping_array_size,
  1204 	            asoc->mapping_array_base_tsn,
  1205 	            asoc->cumulative_tsn,
  1206 	            asoc->highest_tsn_inside_map,
  1207 	            asoc->highest_tsn_inside_nr_map);
  1208 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
  1209 		if (asoc->mapping_array[limit - 1] != 0) {
  1210 			break;
  1211 		}
  1212 	}
  1213 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
  1214 	for (i = 0; i < limit; i++) {
  1215 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
  1216 	}
  1217 	if (limit % 16)
  1218 		SCTP_PRINTF("\n");
  1219 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
  1220 		if (asoc->nr_mapping_array[limit - 1]) {
  1221 			break;
  1222 		}
  1223 	}
  1224 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
  1225 	for (i = 0; i < limit; i++) {
  1226 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ': '\n');
  1227 	}
  1228 	if (limit % 16)
  1229 		SCTP_PRINTF("\n");
  1230 }
  1232 int
  1233 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
  1234 {
  1235 	/* mapping array needs to grow */
  1236 	uint8_t *new_array1, *new_array2;
  1237 	uint32_t new_size;
  1239 	new_size = asoc->mapping_array_size + ((needed+7)/8 + SCTP_MAPPING_ARRAY_INCR);
  1240 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
  1241 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
  1242 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
  1243 		/* can't get more, forget it */
  1244 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
  1245 		if (new_array1) {
  1246 			SCTP_FREE(new_array1, SCTP_M_MAP);
  1247 		}
  1248 		if (new_array2) {
  1249 			SCTP_FREE(new_array2, SCTP_M_MAP);
  1250 		}
  1251 		return (-1);
  1252 	}
  1253 	memset(new_array1, 0, new_size);
  1254 	memset(new_array2, 0, new_size);
  1255 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
  1256 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
  1257 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
  1258 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
  1259 	asoc->mapping_array = new_array1;
  1260 	asoc->nr_mapping_array = new_array2;
  1261 	asoc->mapping_array_size = new_size;
  1262 	return (0);
  1263 }
  1266 static void
  1267 sctp_iterator_work(struct sctp_iterator *it)
  1268 {
  1269 	int iteration_count = 0;
  1270 	int inp_skip = 0;
  1271 	int first_in = 1;
  1272 	struct sctp_inpcb *tinp;
  1274 	SCTP_INP_INFO_RLOCK();
  1275 	SCTP_ITERATOR_LOCK();
  1276  	if (it->inp) {
  1277 		SCTP_INP_RLOCK(it->inp);
  1278 		SCTP_INP_DECR_REF(it->inp);
  1279 	}
  1280 	if (it->inp == NULL) {
  1281 		/* iterator is complete */
  1282 done_with_iterator:
  1283 		SCTP_ITERATOR_UNLOCK();
  1284 		SCTP_INP_INFO_RUNLOCK();
  1285 		if (it->function_atend != NULL) {
  1286 			(*it->function_atend) (it->pointer, it->val);
  1287 		}
  1288 		SCTP_FREE(it, SCTP_M_ITER);
  1289 		return;
  1290 	}
  1291 select_a_new_ep:
  1292 	if (first_in) {
  1293 		first_in = 0;
  1294 	} else {
  1295 		SCTP_INP_RLOCK(it->inp);
  1296 	}
  1297 	while (((it->pcb_flags) &&
  1298 		((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
  1299 	       ((it->pcb_features) &&
  1300 		((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
  1301 		/* endpoint flags or features don't match, so keep looking */
  1302 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
  1303 			SCTP_INP_RUNLOCK(it->inp);
  1304 			goto done_with_iterator;
  1305 		}
  1306 		tinp = it->inp;
  1307 		it->inp = LIST_NEXT(it->inp, sctp_list);
  1308 		SCTP_INP_RUNLOCK(tinp);
  1309 		if (it->inp == NULL) {
  1310 			goto done_with_iterator;
  1311 		}
  1312 		SCTP_INP_RLOCK(it->inp);
  1313 	}
  1314 	/* now go through each assoc which is in the desired state */
  1315 	if (it->done_current_ep == 0) {
  1316 		if (it->function_inp != NULL)
  1317 			inp_skip = (*it->function_inp)(it->inp, it->pointer, it->val);
  1318 		it->done_current_ep = 1;
  1319 	}
  1320 	if (it->stcb == NULL) {
  1321 		/* run the per instance function */
  1322 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
  1323 	}
  1324 	if ((inp_skip) || it->stcb == NULL) {
  1325 		if (it->function_inp_end != NULL) {
  1326 			inp_skip = (*it->function_inp_end)(it->inp,
  1327 							   it->pointer,
  1328 							   it->val);
  1329 		}
  1330 		SCTP_INP_RUNLOCK(it->inp);
  1331 		goto no_stcb;
  1332 	}
  1333 	while (it->stcb) {
  1334 		SCTP_TCB_LOCK(it->stcb);
  1335 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
  1336 			/* not in the right state... keep looking */
  1337 			SCTP_TCB_UNLOCK(it->stcb);
  1338 			goto next_assoc;
  1339 		}
  1340 		/* see if we have limited out the iterator loop */
  1341 		iteration_count++;
  1342 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
  1343 			/* Pause to let others grab the lock */
  1344 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
  1345 			SCTP_TCB_UNLOCK(it->stcb);
  1346 			SCTP_INP_INCR_REF(it->inp);
  1347 			SCTP_INP_RUNLOCK(it->inp);
  1348 			SCTP_ITERATOR_UNLOCK();
  1349 			SCTP_INP_INFO_RUNLOCK();
  1350 			SCTP_INP_INFO_RLOCK();
  1351 			SCTP_ITERATOR_LOCK();
  1352 			if (sctp_it_ctl.iterator_flags) {
  1353 				/* We won't be staying here */
  1354 				SCTP_INP_DECR_REF(it->inp);
  1355 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
  1356 #if !defined(__FreeBSD__)
  1357 				if (sctp_it_ctl.iterator_flags &
  1358 				   SCTP_ITERATOR_MUST_EXIT) {
  1359 					goto done_with_iterator;
  1360 				}
  1361 #endif
  1362 				if (sctp_it_ctl.iterator_flags &
  1363 				   SCTP_ITERATOR_STOP_CUR_IT) {
  1364 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
  1365 					goto done_with_iterator;
  1366 				}
  1367 				if (sctp_it_ctl.iterator_flags &
  1368 				   SCTP_ITERATOR_STOP_CUR_INP) {
  1369 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
  1370 					goto no_stcb;
  1371 				}
  1372 				/* If we reach here huh? */
  1373 				SCTP_PRINTF("Unknown it ctl flag %x\n",
  1374 					    sctp_it_ctl.iterator_flags);
  1375 				sctp_it_ctl.iterator_flags = 0;
  1376 			}
  1377 			SCTP_INP_RLOCK(it->inp);
  1378 			SCTP_INP_DECR_REF(it->inp);
  1379 			SCTP_TCB_LOCK(it->stcb);
  1380 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
  1381 			iteration_count = 0;
  1382 		}
  1384 		/* run function on this one */
  1385 		(*it->function_assoc)(it->inp, it->stcb, it->pointer, it->val);
  1387 		/*
  1388 		 * we lie here, it really needs to have its own type but
  1389 		 * first I must verify that this won't affect things :-0
  1390 		 */
  1391 		if (it->no_chunk_output == 0)
  1392 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
  1394 		SCTP_TCB_UNLOCK(it->stcb);
  1395 	next_assoc:
  1396 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
  1397 		if (it->stcb == NULL) {
  1398 			/* Run last function */
  1399 			if (it->function_inp_end != NULL) {
  1400 				inp_skip = (*it->function_inp_end)(it->inp,
  1401 								   it->pointer,
  1402 								   it->val);
  1403 			}
  1404 		}
  1405 	}
  1406 	SCTP_INP_RUNLOCK(it->inp);
  1407  no_stcb:
  1408 	/* done with all assocs on this endpoint, move on to next endpoint */
  1409 	it->done_current_ep = 0;
  1410 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
  1411 		it->inp = NULL;
  1412 	} else {
  1413 		it->inp = LIST_NEXT(it->inp, sctp_list);
  1414 	}
  1415 	if (it->inp == NULL) {
  1416 		goto done_with_iterator;
  1417 	}
  1418 	goto select_a_new_ep;
  1419 }
  1421 void
  1422 sctp_iterator_worker(void)
  1423 {
  1424 	struct sctp_iterator *it, *nit;
  1426 	/* This function is called with the WQ lock in place */
  1428 	sctp_it_ctl.iterator_running = 1;
  1429 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
  1430 		sctp_it_ctl.cur_it = it;
  1431 		/* now lets work on this one */
  1432 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
  1433 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
  1434 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1435 		CURVNET_SET(it->vn);
  1436 #endif
  1437 		sctp_iterator_work(it);
  1438 		sctp_it_ctl.cur_it = NULL;
  1439 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1440 		CURVNET_RESTORE();
  1441 #endif
  1442 		SCTP_IPI_ITERATOR_WQ_LOCK();
  1443 #if !defined(__FreeBSD__)
  1444 		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
  1445 			break;
  1446 		}
  1447 #endif
  1448 	        /*sa_ignore FREED_MEMORY*/
  1449 	}
  1450 	sctp_it_ctl.iterator_running = 0;
  1451 	return;
  1452 }
  1455 static void
  1456 sctp_handle_addr_wq(void)
  1457 {
  1458 	/* deal with the ADDR wq from the rtsock calls */
  1459 	struct sctp_laddr *wi, *nwi;
  1460 	struct sctp_asconf_iterator *asc;
  1462 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
  1463 		    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
  1464 	if (asc == NULL) {
  1465 		/* Try later, no memory */
  1466 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
  1467 				 (struct sctp_inpcb *)NULL,
  1468 				 (struct sctp_tcb *)NULL,
  1469 				 (struct sctp_nets *)NULL);
  1470 		return;
  1471 	}
  1472 	LIST_INIT(&asc->list_of_work);
  1473 	asc->cnt = 0;
  1475 	SCTP_WQ_ADDR_LOCK();
  1476 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
  1477 		LIST_REMOVE(wi, sctp_nxt_addr);
  1478 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
  1479 		asc->cnt++;
  1480 	}
  1481 	SCTP_WQ_ADDR_UNLOCK();
  1483 	if (asc->cnt == 0) {
  1484 		SCTP_FREE(asc, SCTP_M_ASC_IT);
  1485 	} else {
  1486 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
  1487 					     sctp_asconf_iterator_stcb,
  1488 					     NULL, /* No ep end for boundall */
  1489 					     SCTP_PCB_FLAGS_BOUNDALL,
  1490 					     SCTP_PCB_ANY_FEATURES,
  1491 					     SCTP_ASOC_ANY_STATE,
  1492 					     (void *)asc, 0,
  1493 					     sctp_asconf_iterator_end, NULL, 0);
  1494 	}
  1495 }
  1497 void
  1498 sctp_timeout_handler(void *t)
  1499 {
  1500 	struct sctp_inpcb *inp;
  1501 	struct sctp_tcb *stcb;
  1502 	struct sctp_nets *net;
  1503 	struct sctp_timer *tmr;
  1504 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  1505 	struct socket *so;
  1506 #endif
  1507 	int did_output, type;
  1509 	tmr = (struct sctp_timer *)t;
  1510 	inp = (struct sctp_inpcb *)tmr->ep;
  1511 	stcb = (struct sctp_tcb *)tmr->tcb;
  1512 	net = (struct sctp_nets *)tmr->net;
  1513 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1514 	CURVNET_SET((struct vnet *)tmr->vnet);
  1515 #endif
  1516 	did_output = 1;
  1518 #ifdef SCTP_AUDITING_ENABLED
  1519 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
  1520 	sctp_auditing(3, inp, stcb, net);
  1521 #endif
  1523 	/* sanity checks... */
  1524 	if (tmr->self != (void *)tmr) {
  1525 		/*
  1526 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
  1527 		 *             (void *)tmr);
  1528 		 */
  1529 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1530 		CURVNET_RESTORE();
  1531 #endif
  1532 		return;
  1533 	}
  1534 	tmr->stopped_from = 0xa001;
  1535 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
  1536 		/*
  1537 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
  1538 		 * tmr->type);
  1539 		 */
  1540 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1541 		CURVNET_RESTORE();
  1542 #endif
  1543 		return;
  1544 	}
  1545 	tmr->stopped_from = 0xa002;
  1546 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
  1547 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1548 		CURVNET_RESTORE();
  1549 #endif
  1550 		return;
  1551 	}
  1552 	/* if this is an iterator timeout, get the struct and clear inp */
  1553 	tmr->stopped_from = 0xa003;
  1554 	type = tmr->type;
  1555 	if (inp) {
  1556 		SCTP_INP_INCR_REF(inp);
  1557 		if ((inp->sctp_socket == NULL) &&
  1558 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
  1559 		     (tmr->type != SCTP_TIMER_TYPE_INIT) &&
  1560 		     (tmr->type != SCTP_TIMER_TYPE_SEND) &&
  1561 		     (tmr->type != SCTP_TIMER_TYPE_RECV) &&
  1562 		     (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
  1563 		     (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
  1564 		     (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
  1565 		     (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
  1566 		     (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
  1567 			) {
  1568 			SCTP_INP_DECR_REF(inp);
  1569 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1570 			CURVNET_RESTORE();
  1571 #endif
  1572 			return;
  1573 		}
  1574 	}
  1575 	tmr->stopped_from = 0xa004;
  1576 	if (stcb) {
  1577 		atomic_add_int(&stcb->asoc.refcnt, 1);
  1578 		if (stcb->asoc.state == 0) {
  1579 			atomic_add_int(&stcb->asoc.refcnt, -1);
  1580 			if (inp) {
  1581 				SCTP_INP_DECR_REF(inp);
  1582 			}
  1583 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1584 			CURVNET_RESTORE();
  1585 #endif
  1586 			return;
  1587 		}
  1588 	}
  1589 	tmr->stopped_from = 0xa005;
  1590 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
  1591 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
  1592 		if (inp) {
  1593 			SCTP_INP_DECR_REF(inp);
  1594 		}
  1595 		if (stcb) {
  1596 			atomic_add_int(&stcb->asoc.refcnt, -1);
  1597 		}
  1598 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1599 		CURVNET_RESTORE();
  1600 #endif
  1601 		return;
  1602 	}
  1603 	tmr->stopped_from = 0xa006;
  1605 	if (stcb) {
  1606 		SCTP_TCB_LOCK(stcb);
  1607 		atomic_add_int(&stcb->asoc.refcnt, -1);
  1608 		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
  1609 		    ((stcb->asoc.state == 0) ||
  1610 		     (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
  1611 			SCTP_TCB_UNLOCK(stcb);
  1612 			if (inp) {
  1613 				SCTP_INP_DECR_REF(inp);
  1614 			}
  1615 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1616 			CURVNET_RESTORE();
  1617 #endif
  1618 			return;
  1619 		}
  1620 	}
  1621 	/* record in stopped_from which timeout occurred */
  1622 	tmr->stopped_from = tmr->type;
  1624 	/* mark as being serviced now */
  1625 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
  1626 		/*
  1627 		 * Callout has been rescheduled.
  1628 		 */
  1629 		goto get_out;
  1631 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
  1632 		/*
  1633 		 * Not active, so no action.
  1634 		 */
  1635 		goto get_out;
  1637 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
  1639 	/* call the handler for the appropriate timer type */
  1640 	switch (tmr->type) {
  1641 	case SCTP_TIMER_TYPE_ZERO_COPY:
  1642 		if (inp == NULL) {
  1643 			break;
  1645 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
  1646 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
  1648 		break;
  1649 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
  1650 		if (inp == NULL) {
  1651 			break;
  1653 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
  1654 		    SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
  1656                 break;
  1657 	case SCTP_TIMER_TYPE_ADDR_WQ:
  1658 		sctp_handle_addr_wq();
  1659 		break;
  1660 	case SCTP_TIMER_TYPE_SEND:
  1661 		if ((stcb == NULL) || (inp == NULL)) {
  1662 			break;
  1664 		SCTP_STAT_INCR(sctps_timodata);
  1665 		stcb->asoc.timodata++;
  1666 		stcb->asoc.num_send_timers_up--;
  1667 		if (stcb->asoc.num_send_timers_up < 0) {
  1668 			stcb->asoc.num_send_timers_up = 0;
  1670 		SCTP_TCB_LOCK_ASSERT(stcb);
  1671 		if (sctp_t3rxt_timer(inp, stcb, net)) {
1672 			/* no need to unlock the tcb, it's gone */
  1674 			goto out_decr;
  1676 		SCTP_TCB_LOCK_ASSERT(stcb);
  1677 #ifdef SCTP_AUDITING_ENABLED
  1678 		sctp_auditing(4, inp, stcb, net);
  1679 #endif
  1680 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
  1681 		if ((stcb->asoc.num_send_timers_up == 0) &&
  1682 		    (stcb->asoc.sent_queue_cnt > 0)) {
  1683 			struct sctp_tmit_chunk *chk;
  1685 			/*
1686 			 * Safeguard: if there is something on the sent queue
1687 			 * but no timers are running, something is wrong, so
1688 			 * we start a timer on the first chunk of the sent
1689 			 * queue on whatever net it is being sent to.
  1690 			 */
  1691 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
  1692 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
  1693 			    chk->whoTo);
  1695 		break;
  1696 	case SCTP_TIMER_TYPE_INIT:
  1697 		if ((stcb == NULL) || (inp == NULL)) {
  1698 			break;
  1700 		SCTP_STAT_INCR(sctps_timoinit);
  1701 		stcb->asoc.timoinit++;
  1702 		if (sctp_t1init_timer(inp, stcb, net)) {
1703 			/* no need to unlock the tcb, it's gone */
  1704 			goto out_decr;
  1706 		/* We do output but not here */
  1707 		did_output = 0;
  1708 		break;
  1709 	case SCTP_TIMER_TYPE_RECV:
  1710 		if ((stcb == NULL) || (inp == NULL)) {
  1711 			break;
  1713 		SCTP_STAT_INCR(sctps_timosack);
  1714 		stcb->asoc.timosack++;
  1715 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
  1716 #ifdef SCTP_AUDITING_ENABLED
  1717 		sctp_auditing(4, inp, stcb, net);
  1718 #endif
  1719 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
  1720 		break;
  1721 	case SCTP_TIMER_TYPE_SHUTDOWN:
  1722 		if ((stcb == NULL) || (inp == NULL)) {
  1723 			break;
  1725 		if (sctp_shutdown_timer(inp, stcb, net)) {
1726 			/* no need to unlock the tcb, it's gone */
  1727 			goto out_decr;
  1729 		SCTP_STAT_INCR(sctps_timoshutdown);
  1730 		stcb->asoc.timoshutdown++;
  1731 #ifdef SCTP_AUDITING_ENABLED
  1732 		sctp_auditing(4, inp, stcb, net);
  1733 #endif
  1734 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
  1735 		break;
  1736 	case SCTP_TIMER_TYPE_HEARTBEAT:
  1737 		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
  1738 			break;
  1740 		SCTP_STAT_INCR(sctps_timoheartbeat);
  1741 		stcb->asoc.timoheartbeat++;
  1742 		if (sctp_heartbeat_timer(inp, stcb, net)) {
1743 			/* no need to unlock the tcb, it's gone */
  1744 			goto out_decr;
  1746 #ifdef SCTP_AUDITING_ENABLED
  1747 		sctp_auditing(4, inp, stcb, net);
  1748 #endif
  1749 		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
  1750 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
  1751 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
  1753 		break;
  1754 	case SCTP_TIMER_TYPE_COOKIE:
  1755 		if ((stcb == NULL) || (inp == NULL)) {
  1756 			break;
  1759 		if (sctp_cookie_timer(inp, stcb, net)) {
1760 			/* no need to unlock the tcb, it's gone */
  1761 			goto out_decr;
  1763 		SCTP_STAT_INCR(sctps_timocookie);
  1764 		stcb->asoc.timocookie++;
  1765 #ifdef SCTP_AUDITING_ENABLED
  1766 		sctp_auditing(4, inp, stcb, net);
  1767 #endif
  1768 		/*
1769 		 * We consider the T3 and Cookie timers pretty much the same
1770 		 * with respect to the "from" reason passed to chunk_output.
  1771 		 */
  1772 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
  1773 		break;
  1774 	case SCTP_TIMER_TYPE_NEWCOOKIE:
  1776 			struct timeval tv;
  1777 			int i, secret;
  1778 			if (inp == NULL) {
  1779 				break;
  1781 			SCTP_STAT_INCR(sctps_timosecret);
  1782 			(void)SCTP_GETTIME_TIMEVAL(&tv);
  1783 			SCTP_INP_WLOCK(inp);
  1784 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
  1785 			inp->sctp_ep.last_secret_number =
  1786 			    inp->sctp_ep.current_secret_number;
  1787 			inp->sctp_ep.current_secret_number++;
  1788 			if (inp->sctp_ep.current_secret_number >=
  1789 			    SCTP_HOW_MANY_SECRETS) {
  1790 				inp->sctp_ep.current_secret_number = 0;
  1792 			secret = (int)inp->sctp_ep.current_secret_number;
  1793 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
  1794 				inp->sctp_ep.secret_key[secret][i] =
  1795 				    sctp_select_initial_TSN(&inp->sctp_ep);
  1797 			SCTP_INP_WUNLOCK(inp);
  1798 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
  1800 		did_output = 0;
  1801 		break;
  1802 	case SCTP_TIMER_TYPE_PATHMTURAISE:
  1803 		if ((stcb == NULL) || (inp == NULL)) {
  1804 			break;
  1806 		SCTP_STAT_INCR(sctps_timopathmtu);
  1807 		sctp_pathmtu_timer(inp, stcb, net);
  1808 		did_output = 0;
  1809 		break;
  1810 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
  1811 		if ((stcb == NULL) || (inp == NULL)) {
  1812 			break;
  1814 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1815 			/* no need to unlock the tcb, it's gone */
  1816 			goto out_decr;
  1818 		SCTP_STAT_INCR(sctps_timoshutdownack);
  1819  		stcb->asoc.timoshutdownack++;
  1820 #ifdef SCTP_AUDITING_ENABLED
  1821 		sctp_auditing(4, inp, stcb, net);
  1822 #endif
  1823 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
  1824 		break;
  1825 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
  1826 		if ((stcb == NULL) || (inp == NULL)) {
  1827 			break;
  1829 		SCTP_STAT_INCR(sctps_timoshutdownguard);
  1830 		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
1831 		/* no need to unlock the tcb, it's gone */
  1832 		goto out_decr;
  1834 	case SCTP_TIMER_TYPE_STRRESET:
  1835 		if ((stcb == NULL) || (inp == NULL)) {
  1836 			break;
  1838 		if (sctp_strreset_timer(inp, stcb, net)) {
1839 			/* no need to unlock the tcb, it's gone */
  1840 			goto out_decr;
  1842 		SCTP_STAT_INCR(sctps_timostrmrst);
  1843 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
  1844 		break;
  1845 	case SCTP_TIMER_TYPE_ASCONF:
  1846 		if ((stcb == NULL) || (inp == NULL)) {
  1847 			break;
  1849 		if (sctp_asconf_timer(inp, stcb, net)) {
1850 			/* no need to unlock the tcb, it's gone */
  1851 			goto out_decr;
  1853 		SCTP_STAT_INCR(sctps_timoasconf);
  1854 #ifdef SCTP_AUDITING_ENABLED
  1855 		sctp_auditing(4, inp, stcb, net);
  1856 #endif
  1857 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
  1858 		break;
  1859 	case SCTP_TIMER_TYPE_PRIM_DELETED:
  1860 		if ((stcb == NULL) || (inp == NULL)) {
  1861 			break;
  1863 		sctp_delete_prim_timer(inp, stcb, net);
  1864 		SCTP_STAT_INCR(sctps_timodelprim);
  1865 		break;
  1867 	case SCTP_TIMER_TYPE_AUTOCLOSE:
  1868 		if ((stcb == NULL) || (inp == NULL)) {
  1869 			break;
  1871 		SCTP_STAT_INCR(sctps_timoautoclose);
  1872 		sctp_autoclose_timer(inp, stcb, net);
  1873 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
  1874 		did_output = 0;
  1875 		break;
  1876 	case SCTP_TIMER_TYPE_ASOCKILL:
  1877 		if ((stcb == NULL) || (inp == NULL)) {
  1878 			break;
  1880 		SCTP_STAT_INCR(sctps_timoassockill);
  1881 		/* Can we free it yet? */
  1882 		SCTP_INP_DECR_REF(inp);
  1883 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL+SCTP_LOC_1);
  1884 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  1885 		so = SCTP_INP_SO(inp);
  1886 		atomic_add_int(&stcb->asoc.refcnt, 1);
  1887 		SCTP_TCB_UNLOCK(stcb);
  1888 		SCTP_SOCKET_LOCK(so, 1);
  1889 		SCTP_TCB_LOCK(stcb);
  1890 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
  1891 #endif
  1892 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL+SCTP_LOC_2);
  1893 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  1894 		SCTP_SOCKET_UNLOCK(so, 1);
  1895 #endif
  1896 		/*
1897 		 * sctp_free_assoc() always unlocks (or destroys) the lock, so
1898 		 * prevent a duplicate unlock or an unlock of a freed mtx :-0
  1899 		 */
  1900 		stcb = NULL;
  1901 		goto out_no_decr;
  1902 	case SCTP_TIMER_TYPE_INPKILL:
  1903 		SCTP_STAT_INCR(sctps_timoinpkill);
  1904 		if (inp == NULL) {
  1905 			break;
  1907 		/*
  1908 		 * special case, take away our increment since WE are the
  1909 		 * killer
  1910 		 */
  1911 		SCTP_INP_DECR_REF(inp);
  1912 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL+SCTP_LOC_3);
  1913 #if defined(__APPLE__)
  1914 		SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
  1915 #endif
  1916 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
  1917 				SCTP_CALLED_FROM_INPKILL_TIMER);
  1918 #if defined(__APPLE__)
  1919 		SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
  1920 #endif
  1921 		inp = NULL;
  1922 		goto out_no_decr;
  1923 	default:
  1924 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
  1925 			tmr->type);
  1926 		break;
  1928 #ifdef SCTP_AUDITING_ENABLED
  1929 	sctp_audit_log(0xF1, (uint8_t) tmr->type);
  1930 	if (inp)
  1931 		sctp_auditing(5, inp, stcb, net);
  1932 #endif
  1933 	if ((did_output) && stcb) {
  1934 		/*
1935 		 * Now we need to clean up the control chunk chain if an
1936 		 * ECNE is on it. It must be marked as UNSENT again so the
1937 		 * next call will continue to send it until we receive a CWR
1938 		 * that removes it. It is, however, unlikely that we will
1939 		 * actually find an ECN echo on the chain.
  1940 		 */
  1941 		sctp_fix_ecn_echo(&stcb->asoc);
  1943 get_out:
  1944 	if (stcb) {
  1945 		SCTP_TCB_UNLOCK(stcb);
  1948 out_decr:
  1949 	if (inp) {
  1950 		SCTP_INP_DECR_REF(inp);
  1953 out_no_decr:
  1954 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
  1955 			  type);
  1956 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
  1957 	CURVNET_RESTORE();
  1958 #endif
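
/*
 * Summary of the cleanup ladder used by sctp_timeout_handler() above:
 *
 *   get_out:     unlock the TCB if one is held, then fall through
 *   out_decr:    drop the inp reference taken at the top of the handler
 *   out_no_decr: log completion and (on FreeBSD) restore the vnet context
 *
 * The ASOCKILL and INPKILL cases jump straight to out_no_decr because they
 * have already dropped their inp reference themselves.
 */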
  1961 void
  1962 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
  1963     struct sctp_nets *net)
  1965 	uint32_t to_ticks;
  1966 	struct sctp_timer *tmr;
  1968 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
  1969 		return;
  1971 	tmr = NULL;
  1972 	if (stcb) {
  1973 		SCTP_TCB_LOCK_ASSERT(stcb);
  1975 	switch (t_type) {
  1976 	case SCTP_TIMER_TYPE_ZERO_COPY:
  1977 		tmr = &inp->sctp_ep.zero_copy_timer;
  1978 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
  1979 		break;
  1980 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
  1981 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
  1982 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
  1983 		break;
  1984 	case SCTP_TIMER_TYPE_ADDR_WQ:
  1985 		/* Only 1 tick away :-) */
  1986 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
  1987 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
  1988 		break;
  1989 	case SCTP_TIMER_TYPE_SEND:
  1990 		/* Here we use the RTO timer */
  1992 			int rto_val;
  1994 			if ((stcb == NULL) || (net == NULL)) {
  1995 				return;
  1997 			tmr = &net->rxt_timer;
  1998 			if (net->RTO == 0) {
  1999 				rto_val = stcb->asoc.initial_rto;
  2000 			} else {
  2001 				rto_val = net->RTO;
  2003 			to_ticks = MSEC_TO_TICKS(rto_val);
  2005 		break;
  2006 	case SCTP_TIMER_TYPE_INIT:
  2007 		/*
2008 		 * Here we use the INIT timer default, usually about 1
2009 		 * minute.
  2010 		 */
  2011 		if ((stcb == NULL) || (net == NULL)) {
  2012 			return;
  2014 		tmr = &net->rxt_timer;
  2015 		if (net->RTO == 0) {
  2016 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
  2017 		} else {
  2018 			to_ticks = MSEC_TO_TICKS(net->RTO);
  2020 		break;
  2021 	case SCTP_TIMER_TYPE_RECV:
  2022 		/*
2023 		 * Here we use the Delayed-Ack timer value from the inp,
2024 		 * usually about 200 ms.
  2025 		 */
  2026 		if (stcb == NULL) {
  2027 			return;
  2029 		tmr = &stcb->asoc.dack_timer;
  2030 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
  2031 		break;
  2032 	case SCTP_TIMER_TYPE_SHUTDOWN:
  2033 		/* Here we use the RTO of the destination. */
  2034 		if ((stcb == NULL) || (net == NULL)) {
  2035 			return;
  2037 		if (net->RTO == 0) {
  2038 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
  2039 		} else {
  2040 			to_ticks = MSEC_TO_TICKS(net->RTO);
  2042 		tmr = &net->rxt_timer;
  2043 		break;
  2044 	case SCTP_TIMER_TYPE_HEARTBEAT:
  2045 		/*
2046 		 * The net is used here so that we can add in the RTO, even
2047 		 * though we use a different timer. We also add the HB delay
2048 		 * PLUS a random jitter.
  2049 		 */
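		/*
		 * Worked example (illustrative numbers only): with an RTO of
		 * 1000 ms, jitter = rndval % 1000.  A jitter of 700 (>= 500)
		 * yields 1000 + (700 - 500) = 1200 ms, while a jitter of 300
		 * yields 1000 - 300 = 700 ms; the delay therefore lands
		 * roughly uniformly in (RTO/2, 3*RTO/2) before
		 * heart_beat_delay is added and the sum is converted from
		 * ms to ticks.
		 */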
  2050 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
  2051 			return;
  2052 		} else {
  2053 			uint32_t rndval;
  2054 			uint32_t jitter;
  2056 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
  2057 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
  2058 				return;
  2060 			if (net->RTO == 0) {
  2061 				to_ticks = stcb->asoc.initial_rto;
  2062 			} else {
  2063 				to_ticks = net->RTO;
  2065 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
  2066 			jitter = rndval % to_ticks;
  2067 			if (jitter >= (to_ticks >> 1)) {
  2068 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
  2069 			} else {
  2070 				to_ticks = to_ticks - jitter;
  2072 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
  2073 			    !(net->dest_state & SCTP_ADDR_PF)) {
  2074 				to_ticks += net->heart_beat_delay;
  2076 			/*
2077 			 * Now we must convert to_ticks, which is currently
2078 			 * in ms, into ticks.
  2079 			 */
  2080 			to_ticks = MSEC_TO_TICKS(to_ticks);
  2081 			tmr = &net->hb_timer;
  2083 		break;
  2084 	case SCTP_TIMER_TYPE_COOKIE:
  2085 		/*
2086 		 * Here we can use the RTO timer from the network since one
2087 		 * RTT was complete. If a retransmission happened then we will
2088 		 * be using the initial RTO value.
  2089 		 */
  2090 		if ((stcb == NULL) || (net == NULL)) {
  2091 			return;
  2093 		if (net->RTO == 0) {
  2094 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
  2095 		} else {
  2096 			to_ticks = MSEC_TO_TICKS(net->RTO);
  2098 		tmr = &net->rxt_timer;
  2099 		break;
  2100 	case SCTP_TIMER_TYPE_NEWCOOKIE:
  2101 		/*
2102 		 * Nothing needed but the endpoint here, usually about 60
2103 		 * minutes.
  2104 		 */
  2105 		if (inp == NULL) {
  2106 			return;
  2108 		tmr = &inp->sctp_ep.signature_change;
  2109 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
  2110 		break;
  2111 	case SCTP_TIMER_TYPE_ASOCKILL:
  2112 		if (stcb == NULL) {
  2113 			return;
  2115 		tmr = &stcb->asoc.strreset_timer;
  2116 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
  2117 		break;
  2118 	case SCTP_TIMER_TYPE_INPKILL:
  2119 		/*
2120 		 * The inp is set up to die. We re-use the signature_change
2121 		 * timer since that has stopped and we are in the GONE
  2122 		 * state.
  2123 		 */
  2124 		if (inp == NULL) {
  2125 			return;
  2127 		tmr = &inp->sctp_ep.signature_change;
  2128 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
  2129 		break;
  2130 	case SCTP_TIMER_TYPE_PATHMTURAISE:
  2131 		/*
2132 		 * Here we use the value found in the EP for PMTU, usually
2133 		 * about 10 minutes.
  2134 		 */
  2135 		if ((stcb == NULL) || (inp == NULL)) {
  2136 			return;
  2138 		if (net == NULL) {
  2139 			return;
  2141 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
  2142 			return;
  2144 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
  2145 		tmr = &net->pmtu_timer;
  2146 		break;
  2147 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
  2148 		/* Here we use the RTO of the destination */
  2149 		if ((stcb == NULL) || (net == NULL)) {
  2150 			return;
  2152 		if (net->RTO == 0) {
  2153 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
  2154 		} else {
  2155 			to_ticks = MSEC_TO_TICKS(net->RTO);
  2157 		tmr = &net->rxt_timer;
  2158 		break;
  2159 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
  2160 		/*
2161 		 * Here we use the endpoint's shutdown guard timer, usually
  2162 		 * about 3 minutes.
  2163 		 */
  2164 		if ((inp == NULL) || (stcb == NULL)) {
  2165 			return;
  2167 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
  2168 		tmr = &stcb->asoc.shut_guard_timer;
  2169 		break;
  2170 	case SCTP_TIMER_TYPE_STRRESET:
  2171 		/*
  2172 		 * Here the timer comes from the stcb but its value is from
  2173 		 * the net's RTO.
  2174 		 */
  2175 		if ((stcb == NULL) || (net == NULL)) {
  2176 			return;
  2178 		if (net->RTO == 0) {
  2179 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
  2180 		} else {
  2181 			to_ticks = MSEC_TO_TICKS(net->RTO);
  2183 		tmr = &stcb->asoc.strreset_timer;
  2184 		break;
  2185 	case SCTP_TIMER_TYPE_ASCONF:
  2186 		/*
  2187 		 * Here the timer comes from the stcb but its value is from
  2188 		 * the net's RTO.
  2189 		 */
  2190 		if ((stcb == NULL) || (net == NULL)) {
  2191 			return;
  2193 		if (net->RTO == 0) {
  2194 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
  2195 		} else {
  2196 			to_ticks = MSEC_TO_TICKS(net->RTO);
  2198 		tmr = &stcb->asoc.asconf_timer;
  2199 		break;
  2200 	case SCTP_TIMER_TYPE_PRIM_DELETED:
  2201 		if ((stcb == NULL) || (net != NULL)) {
  2202 			return;
  2204 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
  2205 		tmr = &stcb->asoc.delete_prim_timer;
  2206 		break;
  2207 	case SCTP_TIMER_TYPE_AUTOCLOSE:
  2208 		if (stcb == NULL) {
  2209 			return;
  2211 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
  2212 			/*
  2213 			 * Really an error since stcb is NOT set to
  2214 			 * autoclose
  2215 			 */
  2216 			return;
  2218 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
  2219 		tmr = &stcb->asoc.autoclose_timer;
  2220 		break;
  2221 	default:
  2222 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
  2223 			__FUNCTION__, t_type);
  2224 		return;
  2225 		break;
  2227 	if ((to_ticks <= 0) || (tmr == NULL)) {
  2228 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
  2229 			__FUNCTION__, t_type, to_ticks, (void *)tmr);
  2230 		return;
  2232 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
  2233 		/*
2234 		 * We do NOT allow you to have it already running. If it is,
2235 		 * we leave the current one up unchanged.
  2236 		 */
  2237 		return;
  2239 	/* At this point we can proceed */
  2240 	if (t_type == SCTP_TIMER_TYPE_SEND) {
  2241 		stcb->asoc.num_send_timers_up++;
  2243 	tmr->stopped_from = 0;
  2244 	tmr->type = t_type;
  2245 	tmr->ep = (void *)inp;
  2246 	tmr->tcb = (void *)stcb;
  2247 	tmr->net = (void *)net;
  2248 	tmr->self = (void *)tmr;
  2249 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
  2250 	tmr->vnet = (void *)curvnet;
  2251 #endif
  2252 #ifndef __Panda__
  2253 	tmr->ticks = sctp_get_tick_count();
  2254 #endif
  2255 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
  2256 	return;
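
/*
 * Illustrative pairing (a sketch, not code from this file): a T3-rxt timer
 * is typically armed per destination and torn down with a matching stop,
 * e.g.:
 *
 *	sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
 *	...
 *	sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
 *	                SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
 *
 * The final 'from' argument is only a debugging breadcrumb recorded in
 * tmr->stopped_from by sctp_timer_stop() below.
 */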
  2259 void
  2260 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
  2261     struct sctp_nets *net, uint32_t from)
  2263 	struct sctp_timer *tmr;
  2265 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
  2266 	    (inp == NULL))
  2267 		return;
  2269 	tmr = NULL;
  2270 	if (stcb) {
  2271 		SCTP_TCB_LOCK_ASSERT(stcb);
  2273 	switch (t_type) {
  2274 	case SCTP_TIMER_TYPE_ZERO_COPY:
  2275 		tmr = &inp->sctp_ep.zero_copy_timer;
  2276 		break;
  2277 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
  2278 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
  2279 		break;
  2280 	case SCTP_TIMER_TYPE_ADDR_WQ:
  2281 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
  2282 		break;
  2283 	case SCTP_TIMER_TYPE_SEND:
  2284 		if ((stcb == NULL) || (net == NULL)) {
  2285 			return;
  2287 		tmr = &net->rxt_timer;
  2288 		break;
  2289 	case SCTP_TIMER_TYPE_INIT:
  2290 		if ((stcb == NULL) || (net == NULL)) {
  2291 			return;
  2293 		tmr = &net->rxt_timer;
  2294 		break;
  2295 	case SCTP_TIMER_TYPE_RECV:
  2296 		if (stcb == NULL) {
  2297 			return;
  2299 		tmr = &stcb->asoc.dack_timer;
  2300 		break;
  2301 	case SCTP_TIMER_TYPE_SHUTDOWN:
  2302 		if ((stcb == NULL) || (net == NULL)) {
  2303 			return;
  2305 		tmr = &net->rxt_timer;
  2306 		break;
  2307 	case SCTP_TIMER_TYPE_HEARTBEAT:
  2308 		if ((stcb == NULL) || (net == NULL)) {
  2309 			return;
  2311 		tmr = &net->hb_timer;
  2312 		break;
  2313 	case SCTP_TIMER_TYPE_COOKIE:
  2314 		if ((stcb == NULL) || (net == NULL)) {
  2315 			return;
  2317 		tmr = &net->rxt_timer;
  2318 		break;
  2319 	case SCTP_TIMER_TYPE_NEWCOOKIE:
  2320 		/* nothing needed but the endpoint here */
  2321 		tmr = &inp->sctp_ep.signature_change;
  2322 		/*
  2323 		 * We re-use the newcookie timer for the INP kill timer. We
2324 		 * must ensure that we do not kill it by accident.
  2325 		 */
  2326 		break;
  2327 	case SCTP_TIMER_TYPE_ASOCKILL:
  2328 		/*
  2329 		 * Stop the asoc kill timer.
  2330 		 */
  2331 		if (stcb == NULL) {
  2332 			return;
  2334 		tmr = &stcb->asoc.strreset_timer;
  2335 		break;
  2337 	case SCTP_TIMER_TYPE_INPKILL:
  2338 		/*
2339 		 * The inp is set up to die. We re-use the signature_change
  2340 		 * timer since that has stopped and we are in the GONE
  2341 		 * state.
  2342 		 */
  2343 		tmr = &inp->sctp_ep.signature_change;
  2344 		break;
  2345 	case SCTP_TIMER_TYPE_PATHMTURAISE:
  2346 		if ((stcb == NULL) || (net == NULL)) {
  2347 			return;
  2349 		tmr = &net->pmtu_timer;
  2350 		break;
  2351 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
  2352 		if ((stcb == NULL) || (net == NULL)) {
  2353 			return;
  2355 		tmr = &net->rxt_timer;
  2356 		break;
  2357 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
  2358 		if (stcb == NULL) {
  2359 			return;
  2361 		tmr = &stcb->asoc.shut_guard_timer;
  2362 		break;
  2363 	case SCTP_TIMER_TYPE_STRRESET:
  2364 		if (stcb == NULL) {
  2365 			return;
  2367 		tmr = &stcb->asoc.strreset_timer;
  2368 		break;
  2369 	case SCTP_TIMER_TYPE_ASCONF:
  2370 		if (stcb == NULL) {
  2371 			return;
  2373 		tmr = &stcb->asoc.asconf_timer;
  2374 		break;
  2375 	case SCTP_TIMER_TYPE_PRIM_DELETED:
  2376 		if (stcb == NULL) {
  2377 			return;
  2379 		tmr = &stcb->asoc.delete_prim_timer;
  2380 		break;
  2381 	case SCTP_TIMER_TYPE_AUTOCLOSE:
  2382 		if (stcb == NULL) {
  2383 			return;
  2385 		tmr = &stcb->asoc.autoclose_timer;
  2386 		break;
  2387 	default:
  2388 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
  2389 			__FUNCTION__, t_type);
  2390 		break;
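	/*
	 * As the cases above show, several timer types share a single
	 * struct sctp_timer slot: net->rxt_timer serves SEND, INIT, SHUTDOWN,
	 * COOKIE and SHUTDOWNACK; stcb->asoc.strreset_timer serves STRRESET
	 * and ASOCKILL; and inp->sctp_ep.signature_change serves NEWCOOKIE
	 * and INPKILL.  That sharing is why the type recorded in the timer is
	 * compared against t_type below before stopping anything.
	 */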
  2392 	if (tmr == NULL) {
  2393 		return;
  2395 	if ((tmr->type != t_type) && tmr->type) {
  2396 		/*
2397 		 * OK, we have a timer that is under joint use, e.g. the
2398 		 * Cookie timer sharing a slot with the SEND timer. We are
2399 		 * therefore NOT running the timer that the caller wants
2400 		 * stopped, so just return.
  2401 		 */
  2402 		return;
  2404 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
  2405 		stcb->asoc.num_send_timers_up--;
  2406 		if (stcb->asoc.num_send_timers_up < 0) {
  2407 			stcb->asoc.num_send_timers_up = 0;
  2410 	tmr->self = NULL;
  2411 	tmr->stopped_from = from;
  2412 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
  2413 	return;
  2416 uint32_t
  2417 sctp_calculate_len(struct mbuf *m)
  2419 	uint32_t tlen = 0;
  2420 	struct mbuf *at;
  2422 	at = m;
  2423 	while (at) {
  2424 		tlen += SCTP_BUF_LEN(at);
  2425 		at = SCTP_BUF_NEXT(at);
  2427 	return (tlen);
  2430 void
  2431 sctp_mtu_size_reset(struct sctp_inpcb *inp,
  2432     struct sctp_association *asoc, uint32_t mtu)
  2434 	/*
2435 	 * Reset the P-MTU size on this association. This involves changing
2436 	 * the asoc MTU and going through ANY chunk+overhead larger than mtu
2437 	 * to allow the DF flag to be cleared.
  2438 	 */
  2439 	struct sctp_tmit_chunk *chk;
  2440 	unsigned int eff_mtu, ovh;
  2442 	asoc->smallest_mtu = mtu;
  2443 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
  2444 		ovh = SCTP_MIN_OVERHEAD;
  2445 	} else {
  2446 		ovh = SCTP_MIN_V4_OVERHEAD;
  2448 	eff_mtu = mtu - ovh;
  2449 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
  2450 		if (chk->send_size > eff_mtu) {
  2451 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
  2454 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
  2455 		if (chk->send_size > eff_mtu) {
  2456 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
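
/*
 * Effect of sctp_mtu_size_reset() in numbers (a sketch with assumed values):
 * if the new path MTU is 1500 and the per-family overhead 'ovh' is, say,
 * 48 bytes, then eff_mtu is 1452 and every queued chunk with
 * send_size > 1452 gets CHUNK_FLAGS_FRAGMENT_OK set, i.e. IP fragmentation
 * (DF cleared) is permitted for that chunk.
 */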
  2462 /*
2463  * Given an association and the starting time of the current RTT period,
2464  * return the RTO in msecs. net should point to the current network.
  2465  */
  2467 uint32_t
  2468 sctp_calculate_rto(struct sctp_tcb *stcb,
  2469 		   struct sctp_association *asoc,
  2470 		   struct sctp_nets *net,
  2471 		   struct timeval *told,
  2472 		   int safe, int rtt_from_sack)
  2474 	/*-
2475 	 * Given an association and the starting time of the current RTT
2476 	 * period (in *told), return the RTO in msecs.
  2477 	 */
  2478 	int32_t rtt; /* RTT in ms */
  2479 	uint32_t new_rto;
  2480 	int first_measure = 0;
  2481 	struct timeval now, then, *old;
  2483 	/* Copy it out for sparc64 */
  2484 	if (safe == sctp_align_unsafe_makecopy) {
  2485 		old = &then;
  2486 		memcpy(&then, told, sizeof(struct timeval));
  2487 	} else if (safe == sctp_align_safe_nocopy) {
  2488 		old = told;
  2489 	} else {
  2490 		/* error */
  2491 		SCTP_PRINTF("Huh, bad rto calc call\n");
  2492 		return (0);
  2494 	/************************/
  2495 	/* 1. calculate new RTT */
  2496 	/************************/
  2497 	/* get the current time */
  2498 	if (stcb->asoc.use_precise_time) {
  2499 		(void)SCTP_GETPTIME_TIMEVAL(&now);
  2500 	} else {
  2501 		(void)SCTP_GETTIME_TIMEVAL(&now);
  2503 	timevalsub(&now, old);
  2504 	/* store the current RTT in us */
  2505 	net->rtt = (uint64_t)1000000 * (uint64_t)now.tv_sec +
  2506 	           (uint64_t)now.tv_usec;
2507 	/* compute rtt in ms */
  2508 	rtt = net->rtt / 1000;
  2509 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
  2510 		/* Tell the CC module that a new update has just occurred from a sack */
  2511 		(*asoc->cc_functions.sctp_rtt_calculated)(stcb, net, &now);
2513 	/* Do we need to determine the LAN type? We do this only
2514 	 * on SACKs, i.e. when the RTT is determined from data rather
2515 	 * than from non-data (HB/INIT->INITACK).
  2516 	 */
  2517 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
  2518 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
  2519 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
  2520 			net->lan_type = SCTP_LAN_INTERNET;
  2521 		} else {
  2522 			net->lan_type = SCTP_LAN_LOCAL;
  2526 	/***************************/
  2527 	/* 2. update RTTVAR & SRTT */
  2528 	/***************************/
  2529 	/*-
  2530 	 * Compute the scaled average lastsa and the
2531 	 * scaled variance lastsv as described in Van Jacobson's
2532 	 * paper "Congestion Avoidance and Control", Annex A.
2534 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2535 	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
  2536 	 */
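	/*
	 * Worked example (a sketch assuming the usual shift values,
	 * SCTP_RTT_SHIFT == 3 and SCTP_RTT_VAR_SHIFT == 2):
	 *
	 *   first measurement, rtt = 100 ms:
	 *     lastsa = 100 << 3 = 800, lastsv = (100 / 2) << 2 = 200
	 *     new_rto = (800 >> 3) + 200 = 300 ms
	 *
	 *   next measurement, rtt = 140 ms:
	 *     delta   = 140 - (800 >> 3) = 40    -> lastsa = 840
	 *     |delta| - (200 >> 2)       = -10   -> lastsv = 190
	 *     new_rto = (840 >> 3) + 190 = 295 ms
	 *
	 * new_rto is then clamped to [minrto, maxrto] below.
	 */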
  2537 	if (net->RTO_measured) {
  2538 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
  2539 		net->lastsa += rtt;
  2540 		if (rtt < 0) {
  2541 			rtt = -rtt;
  2543 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
  2544 		net->lastsv += rtt;
  2545 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
  2546 			rto_logging(net, SCTP_LOG_RTTVAR);
  2548 	} else {
2549 		/* First RTO measurement */
  2550 		net->RTO_measured = 1;
  2551 		first_measure = 1;
  2552 		net->lastsa = rtt << SCTP_RTT_SHIFT;
  2553 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
  2554 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
  2555 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
  2558 	if (net->lastsv == 0) {
  2559 		net->lastsv = SCTP_CLOCK_GRANULARITY;
  2561 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
  2562 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
  2563 	    (stcb->asoc.sat_network_lockout == 0)) {
  2564 		stcb->asoc.sat_network = 1;
  2565 	} else if ((!first_measure) && stcb->asoc.sat_network) {
  2566 		stcb->asoc.sat_network = 0;
  2567 		stcb->asoc.sat_network_lockout = 1;
  2569  	/* bound it, per C6/C7 in Section 5.3.1 */
  2570  	if (new_rto < stcb->asoc.minrto) {
  2571 		new_rto = stcb->asoc.minrto;
  2573 	if (new_rto > stcb->asoc.maxrto) {
  2574 		new_rto = stcb->asoc.maxrto;
  2576 	/* we are now returning the RTO */
  2577  	return (new_rto);
  2580 /*
2581  * Return a pointer to a contiguous piece of data from the given mbuf chain
2582  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2583  * one mbuf, a copy is made into 'in_ptr'. The caller must ensure that the
2584  * buffer size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
  2585  */
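/*
 * Typical usage (a sketch; sctp_get_next_param() below is the real caller in
 * this file): pull a parameter header out of a chain, falling back to a
 * stack copy when the header straddles mbufs:
 *
 *	struct sctp_paramhdr buf, *ph;
 *
 *	ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
 *	        sizeof(struct sctp_paramhdr), (uint8_t *)&buf);
 *	if (ph == NULL) {
 *		return;		(chain shorter than offset + len)
 *	}
 */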
  2586 caddr_t
  2587 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
  2589 	uint32_t count;
  2590 	uint8_t *ptr;
  2592 	ptr = in_ptr;
  2593 	if ((off < 0) || (len <= 0))
  2594 		return (NULL);
  2596 	/* find the desired start location */
  2597 	while ((m != NULL) && (off > 0)) {
  2598 		if (off < SCTP_BUF_LEN(m))
  2599 			break;
  2600 		off -= SCTP_BUF_LEN(m);
  2601 		m = SCTP_BUF_NEXT(m);
  2603 	if (m == NULL)
  2604 		return (NULL);
2606 	/* is the current mbuf large enough (e.g. contiguous)? */
  2607 	if ((SCTP_BUF_LEN(m) - off) >= len) {
  2608 		return (mtod(m, caddr_t) + off);
  2609 	} else {
  2610 		/* else, it spans more than one mbuf, so save a temp copy... */
  2611 		while ((m != NULL) && (len > 0)) {
  2612 			count = min(SCTP_BUF_LEN(m) - off, len);
  2613 			bcopy(mtod(m, caddr_t) + off, ptr, count);
  2614 			len -= count;
  2615 			ptr += count;
  2616 			off = 0;
  2617 			m = SCTP_BUF_NEXT(m);
  2619 		if ((m == NULL) && (len > 0))
  2620 			return (NULL);
  2621 		else
  2622 			return ((caddr_t)in_ptr);
  2628 struct sctp_paramhdr *
  2629 sctp_get_next_param(struct mbuf *m,
  2630     int offset,
  2631     struct sctp_paramhdr *pull,
  2632     int pull_limit)
  2634 	/* This just provides a typed signature to Peter's Pull routine */
  2635 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
  2636 	    (uint8_t *) pull));
  2640 int
  2641 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
  2643 	/*
2644 	 * Add padlen bytes of zero-filled padding to the end of the mbuf.
2645 	 * If padlen is > 3 this routine will fail.
  2646 	 */
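	/*
	 * Worked example (sketch): SCTP chunks are padded to a 4-byte
	 * boundary, so a 37-byte chunk needs padlen = 4 - (37 % 4) = 3
	 * trailing zero bytes, which is exactly the <= 3 range handled
	 * here.  See sctp_pad_lastmbuf() below for a caller that first
	 * locates the last mbuf in the chain.
	 */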
  2647 	uint8_t *dp;
  2648 	int i;
  2650 	if (padlen > 3) {
  2651 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
  2652 		return (ENOBUFS);
  2654 	if (padlen <= M_TRAILINGSPACE(m)) {
  2655 		/*
  2656 		 * The easy way. We hope the majority of the time we hit
  2657 		 * here :)
  2658 		 */
  2659 		dp = (uint8_t *) (mtod(m, caddr_t) + SCTP_BUF_LEN(m));
  2660 		SCTP_BUF_LEN(m) += padlen;
  2661 	} else {
2662 		/* Hard way: we must grow the mbuf */
  2663 		struct mbuf *tmp;
  2665 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
  2666 		if (tmp == NULL) {
  2667 			/* Out of space GAK! we are in big trouble. */
  2668 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
  2669 			return (ENOBUFS);
  2671 		/* setup and insert in middle */
  2672 		SCTP_BUF_LEN(tmp) = padlen;
  2673 		SCTP_BUF_NEXT(tmp) = NULL;
  2674 		SCTP_BUF_NEXT(m) = tmp;
  2675 		dp = mtod(tmp, uint8_t *);
  2677 	/* zero out the pad */
  2678 	for (i = 0; i < padlen; i++) {
  2679 		*dp = 0;
  2680 		dp++;
  2682 	return (0);
  2685 int
  2686 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
  2688 	/* find the last mbuf in chain and pad it */
  2689 	struct mbuf *m_at;
  2691 	if (last_mbuf) {
  2692 		return (sctp_add_pad_tombuf(last_mbuf, padval));
  2693 	} else {
  2694 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
  2695 			if (SCTP_BUF_NEXT(m_at) == NULL) {
  2696 				return (sctp_add_pad_tombuf(m_at, padval));
  2700 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
  2701 	return (EFAULT);
  2704 static void
  2705 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
  2706     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
  2707 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
  2708     SCTP_UNUSED
  2709 #endif
  2712 	struct mbuf *m_notify;
  2713 	struct sctp_assoc_change *sac;
  2714 	struct sctp_queued_to_read *control;
  2715 	size_t notif_len, abort_len;
  2716 	unsigned int i;
  2717 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  2718 	struct socket *so;
  2719 #endif
  2721 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
  2722 		notif_len = sizeof(struct sctp_assoc_change);
  2723 		if (abort != NULL) {
  2724 			abort_len = ntohs(abort->ch.chunk_length);
  2725 		} else {
  2726 			abort_len = 0;
  2728 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
  2729 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
  2730 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
  2731 			notif_len += abort_len;
  2733 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
  2734 		if (m_notify == NULL) {
  2735 			/* Retry with smaller value. */
  2736 			notif_len = sizeof(struct sctp_assoc_change);
  2737 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
  2738 			if (m_notify == NULL) {
  2739 				goto set_error;
  2742 		SCTP_BUF_NEXT(m_notify) = NULL;
  2743 		sac = mtod(m_notify, struct sctp_assoc_change *);
  2744 		sac->sac_type = SCTP_ASSOC_CHANGE;
  2745 		sac->sac_flags = 0;
  2746 		sac->sac_length = sizeof(struct sctp_assoc_change);
  2747 		sac->sac_state = state;
  2748 		sac->sac_error = error;
  2749 		/* XXX verify these stream counts */
  2750 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
  2751 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
  2752 		sac->sac_assoc_id = sctp_get_associd(stcb);
  2753 		if (notif_len > sizeof(struct sctp_assoc_change)) {
  2754 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
  2755 				i = 0;
  2756 				if (stcb->asoc.peer_supports_prsctp) {
  2757 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
  2759 				if (stcb->asoc.peer_supports_auth) {
  2760 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
  2762 				if (stcb->asoc.peer_supports_asconf) {
  2763 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
  2765 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
  2766 				if (stcb->asoc.peer_supports_strreset) {
  2767 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
  2769 				sac->sac_length += i;
  2770 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
  2771 				memcpy(sac->sac_info, abort, abort_len);
  2772 				sac->sac_length += abort_len;
  2775 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
  2776 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  2777 		                                 0, 0, stcb->asoc.context, 0, 0, 0,
  2778 		                                 m_notify);
  2779 		if (control != NULL) {
  2780 			control->length = SCTP_BUF_LEN(m_notify);
  2781 			/* not that we need this */
  2782 			control->tail_mbuf = m_notify;
  2783 			control->spec_flags = M_NOTIFICATION;
  2784 			sctp_add_to_readq(stcb->sctp_ep, stcb,
  2785 			                  control,
  2786 			                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
  2787 			                  so_locked);
  2788 		} else {
  2789 			sctp_m_freem(m_notify);
  2792 	/*
2793 	 * For 1-to-1 style sockets, we send up an error when an ABORT
  2794 	 * comes in.
  2795 	 */
  2796 set_error:
  2797 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
  2798 	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
  2799 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
  2800 		SOCK_LOCK(stcb->sctp_socket);
  2801 		if (from_peer) {
  2802 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
  2803 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
  2804 				stcb->sctp_socket->so_error = ECONNREFUSED;
  2805 			} else {
  2806 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
  2807 				stcb->sctp_socket->so_error = ECONNRESET;
  2809 		} else {
  2810 			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
  2811 			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
  2812 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
  2813 				stcb->sctp_socket->so_error = ETIMEDOUT;
  2814 			} else {
  2815 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
  2816 				stcb->sctp_socket->so_error = ECONNABORTED;
  2820 	/* Wake ANY sleepers */
  2821 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  2822 	so = SCTP_INP_SO(stcb->sctp_ep);
  2823 	if (!so_locked) {
  2824 		atomic_add_int(&stcb->asoc.refcnt, 1);
  2825 		SCTP_TCB_UNLOCK(stcb);
  2826 		SCTP_SOCKET_LOCK(so, 1);
  2827 		SCTP_TCB_LOCK(stcb);
  2828 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
  2829 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
  2830 			SCTP_SOCKET_UNLOCK(so, 1);
  2831 			return;
  2834 #endif
  2835 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
  2836 	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
  2837 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
  2838 #if defined(__APPLE__)
  2839 		socantrcvmore(stcb->sctp_socket);
  2840 #else
  2841 		socantrcvmore_locked(stcb->sctp_socket);
  2842 #endif
  2844 	sorwakeup(stcb->sctp_socket);
  2845 	sowwakeup(stcb->sctp_socket);
  2846 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  2847 	if (!so_locked) {
  2848 		SCTP_SOCKET_UNLOCK(so, 1);
  2850 #endif
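
/*
 * Receiver-side sketch (assumes the RFC 6458 socket API; this is not code
 * from this file): once association-change events are enabled, the
 * notification built above is read from the socket like ordinary data but
 * with MSG_NOTIFICATION set in the returned msg_flags:
 *
 *	union sctp_notification *snp = (union sctp_notification *)buf;
 *
 *	if ((msg_flags & MSG_NOTIFICATION) &&
 *	    (snp->sn_header.sn_type == SCTP_ASSOC_CHANGE)) {
 *		struct sctp_assoc_change *sac = &snp->sn_assoc_change;
 *
 *		switch (sac->sac_state) {
 *		case SCTP_COMM_UP:
 *		case SCTP_RESTART:
 *			...	(association usable; sac_info[] lists features)
 *			break;
 *		case SCTP_COMM_LOST:
 *		case SCTP_CANT_STR_ASSOC:
 *			...	(sac_error / sac_info[] describe the failure)
 *			break;
 *		}
 *	}
 */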
  2853 static void
  2854 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
  2855     struct sockaddr *sa, uint32_t error)
  2857 	struct mbuf *m_notify;
  2858 	struct sctp_paddr_change *spc;
  2859 	struct sctp_queued_to_read *control;
  2861 	if ((stcb == NULL) ||
  2862 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
  2863 		/* event not enabled */
  2864 		return;
  2866 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
  2867 	if (m_notify == NULL)
  2868 		return;
  2869 	SCTP_BUF_LEN(m_notify) = 0;
  2870 	spc = mtod(m_notify, struct sctp_paddr_change *);
  2871 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
  2872 	spc->spc_flags = 0;
  2873 	spc->spc_length = sizeof(struct sctp_paddr_change);
  2874 	switch (sa->sa_family) {
  2875 #ifdef INET
  2876 	case AF_INET:
  2877 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
  2878 		break;
  2879 #endif
  2880 #ifdef INET6
  2881 	case AF_INET6:
  2883 #ifdef SCTP_EMBEDDED_V6_SCOPE
  2884 		struct sockaddr_in6 *sin6;
  2885 #endif /* SCTP_EMBEDDED_V6_SCOPE */
  2886 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
  2888 #ifdef SCTP_EMBEDDED_V6_SCOPE
  2889 		sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
  2890 		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
  2891 			if (sin6->sin6_scope_id == 0) {
  2892 				/* recover scope_id for user */
  2893 #ifdef SCTP_KAME
  2894 		 		(void)sa6_recoverscope(sin6);
  2895 #else
  2896 				(void)in6_recoverscope(sin6, &sin6->sin6_addr,
  2897 						       NULL);
  2898 #endif
  2899 			} else {
  2900 				/* clear embedded scope_id for user */
  2901 				in6_clearscope(&sin6->sin6_addr);
  2904 #endif /* SCTP_EMBEDDED_V6_SCOPE */
  2905 		break;
  2907 #endif
  2908 #if defined(__Userspace__)
  2909 	case AF_CONN:
  2910 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_conn));
  2911 		break;
  2912 #endif
  2913 	default:
  2914 		/* TSNH */
  2915 		break;
  2917 	spc->spc_state = state;
  2918 	spc->spc_error = error;
  2919 	spc->spc_assoc_id = sctp_get_associd(stcb);
  2921 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
  2922 	SCTP_BUF_NEXT(m_notify) = NULL;
  2924 	/* append to socket */
  2925 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  2926 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
  2927 	                                 m_notify);
  2928 	if (control == NULL) {
  2929 		/* no memory */
  2930 		sctp_m_freem(m_notify);
  2931 		return;
  2933 	control->length = SCTP_BUF_LEN(m_notify);
  2934 	control->spec_flags = M_NOTIFICATION;
  2935 	/* not that we need this */
  2936 	control->tail_mbuf = m_notify;
  2937 	sctp_add_to_readq(stcb->sctp_ep, stcb,
  2938 	                  control,
  2939 	                  &stcb->sctp_socket->so_rcv, 1,
  2940 	                  SCTP_READ_LOCK_NOT_HELD,
  2941 	                  SCTP_SO_NOT_LOCKED);
  2945 static void
  2946 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
  2947     struct sctp_tmit_chunk *chk, int so_locked
  2948 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
  2949     SCTP_UNUSED
  2950 #endif
  2953 	struct mbuf *m_notify;
  2954 	struct sctp_send_failed *ssf;
  2955 	struct sctp_send_failed_event *ssfe;
  2956 	struct sctp_queued_to_read *control;
  2957 	int length;
  2959 	if ((stcb == NULL) ||
  2960 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
  2961 	     sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
  2962 		/* event not enabled */
  2963 		return;
  2966 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
  2967 		length = sizeof(struct sctp_send_failed_event);
  2968 	} else {
  2969 		length = sizeof(struct sctp_send_failed);
  2971 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
  2972 	if (m_notify == NULL)
  2973 		/* no space left */
  2974 		return;
  2975 	length += chk->send_size;
  2976 	length -= sizeof(struct sctp_data_chunk);
  2977 	SCTP_BUF_LEN(m_notify) = 0;
  2978 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
  2979 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
  2980 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
  2981 		if (sent) {
  2982 			ssfe->ssfe_flags = SCTP_DATA_SENT;
  2983 		} else {
  2984 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
  2986 		ssfe->ssfe_length = length;
  2987 		ssfe->ssfe_error = error;
  2988 		/* not exactly what the user sent in, but should be close :) */
  2989 		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
  2990 		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
  2991 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
  2992 		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
  2993 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
  2994 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
  2995 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
  2996 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
  2997 	} else {
  2998 		ssf = mtod(m_notify, struct sctp_send_failed *);
  2999 		ssf->ssf_type = SCTP_SEND_FAILED;
  3000 		if (sent) {
  3001 			ssf->ssf_flags = SCTP_DATA_SENT;
  3002 		} else {
  3003 			ssf->ssf_flags = SCTP_DATA_UNSENT;
  3005 		ssf->ssf_length = length;
  3006 		ssf->ssf_error = error;
  3007 		/* not exactly what the user sent in, but should be close :) */
  3008 		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
  3009 		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
  3010 		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
  3011 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
  3012 		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
  3013 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
  3014 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
  3015 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
  3016 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
  3018 	if (chk->data) {
  3019 		/*
3020 		 * trim off the sctp chunk header (it should
  3021 		 * be there)
  3022 		 */
  3023 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
  3024 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
  3025 			sctp_mbuf_crush(chk->data);
  3026 			chk->send_size -= sizeof(struct sctp_data_chunk);
  3029 	SCTP_BUF_NEXT(m_notify) = chk->data;
  3030 	/* Steal off the mbuf */
  3031 	chk->data = NULL;
  3032 	/*
3033 	 * For this case we check the actual socket buffer: since the assoc
3034 	 * is going away, we don't want to overfill the socket buffer for a
3035 	 * non-reader.
  3036 	 */
  3037 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
  3038 		sctp_m_freem(m_notify);
  3039 		return;
  3041 	/* append to socket */
  3042 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  3043 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
  3044 	                                 m_notify);
  3045 	if (control == NULL) {
  3046 		/* no memory */
  3047 		sctp_m_freem(m_notify);
  3048 		return;
  3050 	control->spec_flags = M_NOTIFICATION;
  3051 	sctp_add_to_readq(stcb->sctp_ep, stcb,
  3052 	                  control,
  3053 	                  &stcb->sctp_socket->so_rcv, 1,
  3054 	                  SCTP_READ_LOCK_NOT_HELD,
  3055 	                  so_locked);
  3059 static void
  3060 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
  3061 			 struct sctp_stream_queue_pending *sp, int so_locked
  3062 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
  3063                          SCTP_UNUSED
  3064 #endif
  3067 	struct mbuf *m_notify;
  3068 	struct sctp_send_failed *ssf;
  3069 	struct sctp_send_failed_event *ssfe;
  3070 	struct sctp_queued_to_read *control;
  3071 	int length;
  3073 	if ((stcb == NULL) ||
  3074 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
  3075 	     sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
  3076 		/* event not enabled */
  3077 		return;
  3079 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
  3080 		length = sizeof(struct sctp_send_failed_event);
  3081 	} else {
  3082 		length = sizeof(struct sctp_send_failed);
  3084 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
  3085 	if (m_notify == NULL) {
  3086 		/* no space left */
  3087 		return;
  3089 	length += sp->length;
  3090 	SCTP_BUF_LEN(m_notify) = 0;
  3091 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
  3092 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
  3093 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
  3094 		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
  3095 		ssfe->ssfe_length = length;
  3096 		ssfe->ssfe_error = error;
  3097 		/* not exactly what the user sent in, but should be close :) */
  3098 		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
  3099 		ssfe->ssfe_info.snd_sid = sp->stream;
  3100 		if (sp->some_taken) {
  3101 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
  3102 		} else {
  3103 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
  3105 		ssfe->ssfe_info.snd_ppid = sp->ppid;
  3106 		ssfe->ssfe_info.snd_context = sp->context;
  3107 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
  3108 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
  3109 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
  3110 	} else {
  3111 		ssf = mtod(m_notify, struct sctp_send_failed *);
  3112 		ssf->ssf_type = SCTP_SEND_FAILED;
  3113 		ssf->ssf_flags = SCTP_DATA_UNSENT;
  3114 		ssf->ssf_length = length;
  3115 		ssf->ssf_error = error;
  3116 		/* not exactly what the user sent in, but should be close :) */
  3117 		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
  3118 		ssf->ssf_info.sinfo_stream = sp->stream;
  3119 		ssf->ssf_info.sinfo_ssn = 0;
  3120 		if (sp->some_taken) {
  3121 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
  3122 		} else {
  3123 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
  3125 		ssf->ssf_info.sinfo_ppid = sp->ppid;
  3126 		ssf->ssf_info.sinfo_context = sp->context;
  3127 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
  3128 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
  3129 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
  3131 	SCTP_BUF_NEXT(m_notify) = sp->data;
  3133 	/* Steal off the mbuf */
  3134 	sp->data = NULL;
  3135 	/*
3136 	 * For this case we check the actual socket buffer: since the assoc
3137 	 * is going away, we don't want to overfill the socket buffer for a
3138 	 * non-reader.
  3139 	 */
  3140 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
  3141 		sctp_m_freem(m_notify);
  3142 		return;
  3144 	/* append to socket */
  3145 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  3146 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
  3147 	                                 m_notify);
  3148 	if (control == NULL) {
  3149 		/* no memory */
  3150 		sctp_m_freem(m_notify);
  3151 		return;
  3153 	control->spec_flags = M_NOTIFICATION;
  3154 	sctp_add_to_readq(stcb->sctp_ep, stcb,
  3155 	    control,
  3156 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
  3161 static void
  3162 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
  3164 	struct mbuf *m_notify;
  3165 	struct sctp_adaptation_event *sai;
  3166 	struct sctp_queued_to_read *control;
  3168 	if ((stcb == NULL) ||
  3169 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
  3170 		/* event not enabled */
  3171 		return;
  3174 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
  3175 	if (m_notify == NULL)
  3176 		/* no space left */
  3177 		return;
  3178 	SCTP_BUF_LEN(m_notify) = 0;
  3179 	sai = mtod(m_notify, struct sctp_adaptation_event *);
  3180 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
  3181 	sai->sai_flags = 0;
  3182 	sai->sai_length = sizeof(struct sctp_adaptation_event);
  3183 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
  3184 	sai->sai_assoc_id = sctp_get_associd(stcb);
  3186 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
  3187 	SCTP_BUF_NEXT(m_notify) = NULL;
  3189 	/* append to socket */
  3190 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  3191 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
  3192 	                                 m_notify);
  3193 	if (control == NULL) {
  3194 		/* no memory */
  3195 		sctp_m_freem(m_notify);
  3196 		return;
  3198 	control->length = SCTP_BUF_LEN(m_notify);
  3199 	control->spec_flags = M_NOTIFICATION;
  3200 	/* not that we need this */
  3201 	control->tail_mbuf = m_notify;
  3202 	sctp_add_to_readq(stcb->sctp_ep, stcb,
  3203 	    control,
  3204 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3207 /* This must always be called with the INP's read-queue LOCKED */
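/*
 * The 'val' argument packs the stream id into the upper 16 bits and the
 * sequence number into the lower 16 bits, as the unpacking into
 * pdapi_stream / pdapi_seq below shows; e.g. stream 3, ssn 7 would be passed
 * as val == 0x00030007 (illustrative values).
 */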
  3208 static void
  3209 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
  3210 					uint32_t val, int so_locked
  3211 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
  3212                              SCTP_UNUSED
  3213 #endif
  3216 	struct mbuf *m_notify;
  3217 	struct sctp_pdapi_event *pdapi;
  3218 	struct sctp_queued_to_read *control;
  3219 	struct sockbuf *sb;
  3221 	if ((stcb == NULL) ||
  3222 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
  3223 		/* event not enabled */
  3224 		return;
  3226 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
  3227 		return;
  3230 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
  3231 	if (m_notify == NULL)
  3232 		/* no space left */
  3233 		return;
  3234 	SCTP_BUF_LEN(m_notify) = 0;
  3235 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
  3236 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
  3237 	pdapi->pdapi_flags = 0;
  3238 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
  3239 	pdapi->pdapi_indication = error;
  3240 	pdapi->pdapi_stream = (val >> 16);
  3241 	pdapi->pdapi_seq = (val & 0x0000ffff);
  3242 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
  3244 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
  3245 	SCTP_BUF_NEXT(m_notify) = NULL;
  3246 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  3247 					 0, 0, stcb->asoc.context, 0, 0, 0,
  3248 					 m_notify);
  3249 	if (control == NULL) {
  3250 		/* no memory */
  3251 		sctp_m_freem(m_notify);
  3252 		return;
  3254 	control->spec_flags = M_NOTIFICATION;
  3255 	control->length = SCTP_BUF_LEN(m_notify);
  3256 	/* not that we need this */
  3257 	control->tail_mbuf = m_notify;
  3258 	control->held_length = 0;
  3259 	control->length = 0;
  3260 	sb = &stcb->sctp_socket->so_rcv;
  3261 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  3262 		sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
  3264 	sctp_sballoc(stcb, sb, m_notify);
  3265 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  3266 		sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
  3268 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
  3269 	control->end_added = 1;
  3270 	if (stcb->asoc.control_pdapi)
  3271 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi,  control, next);
  3272 	else {
  3273 		/* we really should not see this case */
  3274 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
  3276 	if (stcb->sctp_ep && stcb->sctp_socket) {
  3277 		/* This should always be the case */
  3278 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  3279 		struct socket *so;
  3281 		so = SCTP_INP_SO(stcb->sctp_ep);
  3282 		if (!so_locked) {
  3283 			atomic_add_int(&stcb->asoc.refcnt, 1);
  3284 			SCTP_TCB_UNLOCK(stcb);
  3285 			SCTP_SOCKET_LOCK(so, 1);
  3286 			SCTP_TCB_LOCK(stcb);
  3287 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
  3288 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
  3289 				SCTP_SOCKET_UNLOCK(so, 1);
  3290 				return;
  3293 #endif
  3294 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
  3295 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  3296 		if (!so_locked) {
  3297 			SCTP_SOCKET_UNLOCK(so, 1);
  3299 #endif
  3303 static void
  3304 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
  3306 	struct mbuf *m_notify;
  3307 	struct sctp_shutdown_event *sse;
  3308 	struct sctp_queued_to_read *control;
  3310 	/*
  3311 	 * For TCP model AND UDP connected sockets we will send an error up
3312 	 * when a SHUTDOWN completes.
  3313 	 */
  3314 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
  3315 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
  3316 		/* mark socket closed for read/write and wakeup! */
  3317 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  3318 		struct socket *so;
  3320 		so = SCTP_INP_SO(stcb->sctp_ep);
  3321 		atomic_add_int(&stcb->asoc.refcnt, 1);
  3322 		SCTP_TCB_UNLOCK(stcb);
  3323 		SCTP_SOCKET_LOCK(so, 1);
  3324 		SCTP_TCB_LOCK(stcb);
  3325 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
  3326 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
  3327 			SCTP_SOCKET_UNLOCK(so, 1);
  3328 			return;
  3330 #endif
  3331 		socantsendmore(stcb->sctp_socket);
  3332 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  3333 		SCTP_SOCKET_UNLOCK(so, 1);
  3334 #endif
  3336 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
  3337 		/* event not enabled */
  3338 		return;
  3341 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
  3342 	if (m_notify == NULL)
  3343 		/* no space left */
  3344 		return;
  3345 	sse = mtod(m_notify, struct sctp_shutdown_event *);
  3346 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
  3347 	sse->sse_flags = 0;
  3348 	sse->sse_length = sizeof(struct sctp_shutdown_event);
  3349 	sse->sse_assoc_id = sctp_get_associd(stcb);
  3351 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
  3352 	SCTP_BUF_NEXT(m_notify) = NULL;
  3354 	/* append to socket */
  3355 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  3356 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
  3357 	                                 m_notify);
  3358 	if (control == NULL) {
  3359 		/* no memory */
  3360 		sctp_m_freem(m_notify);
  3361 		return;
  3363 	control->spec_flags = M_NOTIFICATION;
  3364 	control->length = SCTP_BUF_LEN(m_notify);
  3365 	/* not that we need this */
  3366 	control->tail_mbuf = m_notify;
  3367 	sctp_add_to_readq(stcb->sctp_ep, stcb,
  3368 	    control,
  3369 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
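/*
 * Build an SCTP_SENDER_DRY_EVENT notification and append it to the socket's
 * receive queue.  Nothing is done unless the association has
 * SCTP_PCB_FLAGS_DRYEVNT enabled.
 */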
  3372 static void
  3373 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
  3374                              int so_locked
  3375 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
  3376                              SCTP_UNUSED
  3377 #endif
  3380 	struct mbuf *m_notify;
  3381 	struct sctp_sender_dry_event *event;
  3382 	struct sctp_queued_to_read *control;
  3384 	if ((stcb == NULL) ||
  3385 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
  3386 		/* event not enabled */
  3387 		return;
  3390 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
  3391 	if (m_notify == NULL) {
  3392 		/* no space left */
  3393 		return;
  3395 	SCTP_BUF_LEN(m_notify) = 0;
  3396 	event = mtod(m_notify, struct sctp_sender_dry_event *);
  3397 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
  3398 	event->sender_dry_flags = 0;
  3399 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
  3400 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
  3402 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
  3403 	SCTP_BUF_NEXT(m_notify) = NULL;
  3405 	/* append to socket */
  3406 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  3407 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
  3408 	                                 m_notify);
  3409 	if (control == NULL) {
  3410 		/* no memory */
  3411 		sctp_m_freem(m_notify);
  3412 		return;
  3414 	control->length = SCTP_BUF_LEN(m_notify);
  3415 	control->spec_flags = M_NOTIFICATION;
  3416 	/* not that we need this */
  3417 	control->tail_mbuf = m_notify;
  3418 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
  3419 	                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
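/*
 * Deliver an SCTP_STREAM_CHANGE_EVENT carrying the new inbound/outbound
 * stream counts.  If the peer initiated the request (peer_req_out is set and
 * the flag says so), the notification is suppressed for the local user.
 */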
  3423 void
  3424 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
  3426 	struct mbuf *m_notify;
  3427 	struct sctp_queued_to_read *control;
  3428 	struct sctp_stream_change_event *stradd;
  3429 	int len;
  3431 	if ((stcb == NULL) ||
  3432 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
  3433 		/* event not enabled */
  3434 		return;
  3436 	if ((stcb->asoc.peer_req_out) && flag) {
  3437 		/* Peer made the request, don't tell the local user */
  3438 		stcb->asoc.peer_req_out = 0;
  3439 		return;
  3441 	stcb->asoc.peer_req_out = 0;
  3442 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
  3443 	if (m_notify == NULL)
  3444 		/* no space left */
  3445 		return;
  3446 	SCTP_BUF_LEN(m_notify) = 0;
  3447 	len = sizeof(struct sctp_stream_change_event);
  3448 	if (len > M_TRAILINGSPACE(m_notify)) {
  3449 		/* never enough room */
  3450 		sctp_m_freem(m_notify);
  3451 		return;
  3453 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
  3454 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
  3455 	stradd->strchange_flags = flag;
  3456 	stradd->strchange_length = len;
  3457 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
  3458 	stradd->strchange_instrms = numberin;
  3459 	stradd->strchange_outstrms = numberout;
  3460 	SCTP_BUF_LEN(m_notify) = len;
  3461 	SCTP_BUF_NEXT(m_notify) = NULL;
  3462 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
  3463 		/* no space */
  3464 		sctp_m_freem(m_notify);
  3465 		return;
  3467 	/* append to socket */
  3468 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  3469 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
  3470 	                                 m_notify);
  3471 	if (control == NULL) {
  3472 		/* no memory */
  3473 		sctp_m_freem(m_notify);
  3474 		return;
  3476 	control->spec_flags = M_NOTIFICATION;
  3477 	control->length = SCTP_BUF_LEN(m_notify);
  3478 	/* not that we need this */
  3479 	control->tail_mbuf = m_notify;
  3480 	sctp_add_to_readq(stcb->sctp_ep, stcb,
  3481 	    control,
  3482 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
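/*
 * Deliver an SCTP_ASSOC_RESET_EVENT carrying the local and remote TSNs that
 * apply after an association-level (TSN) reset.
 */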
  3485 void
  3486 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
  3488 	struct mbuf *m_notify;
  3489 	struct sctp_queued_to_read *control;
  3490 	struct sctp_assoc_reset_event *strasoc;
  3491 	int len;
  3493 	if ((stcb == NULL) ||
  3494 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
  3495 		/* event not enabled */
  3496 		return;
  3498 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
  3499 	if (m_notify == NULL)
  3500 		/* no space left */
  3501 		return;
  3502 	SCTP_BUF_LEN(m_notify) = 0;
  3503 	len = sizeof(struct sctp_assoc_reset_event);
  3504 	if (len > M_TRAILINGSPACE(m_notify)) {
  3505 		/* never enough room */
  3506 		sctp_m_freem(m_notify);
  3507 		return;
  3509 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event  *);
  3510 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
  3511 	strasoc->assocreset_flags = flag;
  3512 	strasoc->assocreset_length = len;
  3513 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
  3514 	strasoc->assocreset_local_tsn = sending_tsn;
  3515 	strasoc->assocreset_remote_tsn = recv_tsn;
  3516 	SCTP_BUF_LEN(m_notify) = len;
  3517 	SCTP_BUF_NEXT(m_notify) = NULL;
  3518 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
  3519 		/* no space */
  3520 		sctp_m_freem(m_notify);
  3521 		return;
  3523 	/* append to socket */
  3524 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  3525 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
  3526 	                                 m_notify);
  3527 	if (control == NULL) {
  3528 		/* no memory */
  3529 		sctp_m_freem(m_notify);
  3530 		return;
  3532 	control->spec_flags = M_NOTIFICATION;
  3533 	control->length = SCTP_BUF_LEN(m_notify);
  3534 	/* not that we need this */
  3535 	control->tail_mbuf = m_notify;
  3536 	sctp_add_to_readq(stcb->sctp_ep, stcb,
  3537 	    control,
  3538 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
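/*
 * Deliver an SCTP_STREAM_RESET_EVENT for the given list of streams; the
 * affected stream numbers are copied (converted to host byte order) into the
 * notification after the fixed header.
 */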
  3543 static void
  3544 sctp_notify_stream_reset(struct sctp_tcb *stcb,
  3545     int number_entries, uint16_t * list, int flag)
  3547 	struct mbuf *m_notify;
  3548 	struct sctp_queued_to_read *control;
  3549 	struct sctp_stream_reset_event *strreset;
  3550 	int len;
  3552 	if ((stcb == NULL) ||
  3553 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
  3554 		/* event not enabled */
  3555 		return;
  3558 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
  3559 	if (m_notify == NULL)
  3560 		/* no space left */
  3561 		return;
  3562 	SCTP_BUF_LEN(m_notify) = 0;
  3563 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
  3564 	if (len > M_TRAILINGSPACE(m_notify)) {
  3565 		/* never enough room */
  3566 		sctp_m_freem(m_notify);
  3567 		return;
  3569 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
  3570 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
  3571 	strreset->strreset_flags = flag;
  3572 	strreset->strreset_length = len;
  3573 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
  3574 	if (number_entries) {
  3575 		int i;
  3577 		for (i = 0; i < number_entries; i++) {
  3578 			strreset->strreset_stream_list[i] = ntohs(list[i]);
  3581 	SCTP_BUF_LEN(m_notify) = len;
  3582 	SCTP_BUF_NEXT(m_notify) = NULL;
  3583 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
  3584 		/* no space */
  3585 		sctp_m_freem(m_notify);
  3586 		return;
  3588 	/* append to socket */
  3589 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  3590 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
  3591 	                                 m_notify);
  3592 	if (control == NULL) {
  3593 		/* no memory */
  3594 		sctp_m_freem(m_notify);
  3595 		return;
  3597 	control->spec_flags = M_NOTIFICATION;
  3598 	control->length = SCTP_BUF_LEN(m_notify);
  3599 	/* not that we need this */
  3600 	control->tail_mbuf = m_notify;
  3601 	sctp_add_to_readq(stcb->sctp_ep, stcb,
  3602 	                  control,
  3603 	                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
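/*
 * Deliver an SCTP_REMOTE_ERROR notification for an ERROR chunk received from
 * the peer.  When room allows, the offending chunk is copied in after the
 * fixed header so the application can inspect it.
 */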
  3607 static void
  3608 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
  3610 	struct mbuf *m_notify;
  3611 	struct sctp_remote_error *sre;
  3612 	struct sctp_queued_to_read *control;
  3613 	size_t notif_len, chunk_len;
  3615 	if ((stcb == NULL) ||
  3616 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
  3617 		return;
  3619 	if (chunk != NULL) {
  3620 		chunk_len = ntohs(chunk->ch.chunk_length);
  3621 	} else {
  3622 		chunk_len = 0;
  3624 	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
  3625 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
  3626 	if (m_notify == NULL) {
  3627 		/* Retry with smaller value. */
  3628 		notif_len = sizeof(struct sctp_remote_error);
  3629 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
  3630 		if (m_notify == NULL) {
  3631 			return;
  3634 	SCTP_BUF_NEXT(m_notify) = NULL;
  3635 	sre = mtod(m_notify, struct sctp_remote_error *);
  3636 	sre->sre_type = SCTP_REMOTE_ERROR;
  3637 	sre->sre_flags = 0;
  3638 	sre->sre_length = sizeof(struct sctp_remote_error);
  3639 	sre->sre_error = error;
  3640 	sre->sre_assoc_id = sctp_get_associd(stcb);
  3641 	if (notif_len > sizeof(struct sctp_remote_error)) {
  3642 		memcpy(sre->sre_data, chunk, chunk_len);
  3643 		sre->sre_length += chunk_len;
  3645 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
  3646 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
  3647 	                                 0, 0, stcb->asoc.context, 0, 0, 0,
  3648 	                                 m_notify);
  3649 	if (control != NULL) {
  3650 		control->length = SCTP_BUF_LEN(m_notify);
  3651 		/* not that we need this */
  3652 		control->tail_mbuf = m_notify;
  3653 		control->spec_flags = M_NOTIFICATION;
  3654 		sctp_add_to_readq(stcb->sctp_ep, stcb,
  3655 		                  control,
  3656 		                  &stcb->sctp_socket->so_rcv, 1,
  3657 				  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
  3658 	} else {
  3659 		sctp_m_freem(m_notify);
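/*
 * Central dispatcher for upper-layer notifications.  Maps a SCTP_NOTIFY_*
 * code onto the specific builder above, and suppresses notifications when
 * the socket is gone or closed for reading, or (for interface
 * up/down/confirmed events) while the association is still in the
 * COOKIE_WAIT/COOKIE_ECHOED front states.
 */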
  3664 void
  3665 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
  3666     uint32_t error, void *data, int so_locked
  3667 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
  3668     SCTP_UNUSED
  3669 #endif
  3672 	if ((stcb == NULL) ||
  3673 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
  3674 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
  3675 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
  3676 		/* If the socket is gone we are out of here */
  3677 		return;
  3679 #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
  3680 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
  3681 #else
  3682 	if (stcb->sctp_socket->so_state & SS_CANTRCVMORE) {
  3683 #endif
  3684 		return;
  3686 #if defined(__APPLE__)
  3687 	if (so_locked) {
  3688 		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
  3689 	} else {
  3690 		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
  3692 #endif
  3693 	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
  3694 	    (stcb->asoc.state &  SCTP_STATE_COOKIE_ECHOED)) {
  3695 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
  3696 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
  3697 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
  3698 			/* Don't report these in front states */
  3699 			return;
  3702 	switch (notification) {
  3703 	case SCTP_NOTIFY_ASSOC_UP:
  3704 		if (stcb->asoc.assoc_up_sent == 0) {
  3705 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
  3706 			stcb->asoc.assoc_up_sent = 1;
  3708 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
  3709 			sctp_notify_adaptation_layer(stcb);
  3711 		if (stcb->asoc.peer_supports_auth == 0) {
  3712 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
  3713 			                NULL, so_locked);
  3715 		break;
  3716 	case SCTP_NOTIFY_ASSOC_DOWN:
  3717 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
  3718 #if defined(__Userspace__)
  3719 		if (stcb->sctp_ep->recv_callback) {
  3720 			if (stcb->sctp_socket) {
  3721 				union sctp_sockstore addr;
  3722 				struct sctp_rcvinfo rcv;
  3724 				memset(&addr, 0, sizeof(union sctp_sockstore));
  3725 				memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
  3726 				atomic_add_int(&stcb->asoc.refcnt, 1);
  3727 				SCTP_TCB_UNLOCK(stcb);
  3728 				stcb->sctp_ep->recv_callback(stcb->sctp_socket, addr, NULL, 0, rcv, 0, stcb->sctp_ep->ulp_info);
  3729 				SCTP_TCB_LOCK(stcb);
  3730 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
  3733 #endif
  3734 		break;
  3735 	case SCTP_NOTIFY_INTERFACE_DOWN:
  3737 			struct sctp_nets *net;
  3739 			net = (struct sctp_nets *)data;
  3740 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
  3741 			    (struct sockaddr *)&net->ro._l_addr, error);
  3742 			break;
  3744 	case SCTP_NOTIFY_INTERFACE_UP:
  3746 			struct sctp_nets *net;
  3748 			net = (struct sctp_nets *)data;
  3749 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
  3750 			    (struct sockaddr *)&net->ro._l_addr, error);
  3751 			break;
  3753 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
  3755 			struct sctp_nets *net;
  3757 			net = (struct sctp_nets *)data;
  3758 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
  3759 			    (struct sockaddr *)&net->ro._l_addr, error);
  3760 			break;
  3762 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
  3763 		sctp_notify_send_failed2(stcb, error,
  3764 		                         (struct sctp_stream_queue_pending *)data, so_locked);
  3765 		break;
  3766 	case SCTP_NOTIFY_SENT_DG_FAIL:
  3767 		sctp_notify_send_failed(stcb, 1, error,
  3768 		    (struct sctp_tmit_chunk *)data, so_locked);
  3769 		break;
  3770 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
  3771 		sctp_notify_send_failed(stcb, 0, error,
  3772 		                        (struct sctp_tmit_chunk *)data, so_locked);
  3773 		break;
  3774 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
  3776 			uint32_t val;
  3777 			val = *((uint32_t *)data);
  3779 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
  3780 		break;
  3782 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
  3783 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
  3784 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
  3785 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
  3786 		} else {
  3787 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
  3789 		break;
  3790 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
  3791 		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
  3792 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
  3793 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
  3794 		} else {
  3795 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
  3797 		break;
  3798 	case SCTP_NOTIFY_ASSOC_RESTART:
  3799 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
  3800 		if (stcb->asoc.peer_supports_auth == 0) {
  3801 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
  3802 			                NULL, so_locked);
  3804 		break;
  3805 	case SCTP_NOTIFY_STR_RESET_SEND:
  3806 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
  3807 		break;
  3808 	case SCTP_NOTIFY_STR_RESET_RECV:
  3809 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
  3810 		break;
  3811 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
  3812 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
  3813 		                         (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_FAILED));
  3814 		break;
  3815 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
  3816 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
  3817 		                         (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_DENIED));
  3818 		break;
  3819 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
  3820 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
  3821 		                         (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_FAILED));
  3822 		break;
  3823 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
  3824 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
  3825 		                         (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_DENIED));
  3826 		break;
  3827 	case SCTP_NOTIFY_ASCONF_ADD_IP:
  3828 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
  3829 		    error);
  3830 		break;
  3831 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
  3832 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
  3833 		                             error);
  3834 		break;
  3835 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
  3836 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
  3837 		                             error);
  3838 		break;
  3839 	case SCTP_NOTIFY_PEER_SHUTDOWN:
  3840 		sctp_notify_shutdown_event(stcb);
  3841 		break;
  3842 	case SCTP_NOTIFY_AUTH_NEW_KEY:
  3843 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
  3844 		                           (uint16_t)(uintptr_t)data,
  3845 		                           so_locked);
  3846 		break;
  3847 	case SCTP_NOTIFY_AUTH_FREE_KEY:
  3848 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
  3849 		                           (uint16_t)(uintptr_t)data,
  3850 		                           so_locked);
  3851 		break;
  3852 	case SCTP_NOTIFY_NO_PEER_AUTH:
  3853 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
  3854 		                           (uint16_t)(uintptr_t)data,
  3855 		                           so_locked);
  3856 		break;
  3857 	case SCTP_NOTIFY_SENDER_DRY:
  3858 		sctp_notify_sender_dry_event(stcb, so_locked);
  3859 		break;
  3860 	case SCTP_NOTIFY_REMOTE_ERROR:
  3861 		sctp_notify_remote_error(stcb, error, data);
  3862 		break;
  3863 	default:
  3864 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
  3865 			__FUNCTION__, notification, notification);
  3866 		break;
  3867 	}			/* end switch */
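/*
 * Walk the sent queue, the pending send queue and every stream's output
 * queue, telling the ULP about each chunk that will never make it
 * (SENT/UNSENT_DG_FAIL, SPECIAL_SP_FAIL) and freeing the associated mbufs
 * and chunk structures.
 */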
  3870 void
  3871 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
  3872 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
  3873     SCTP_UNUSED
  3874 #endif
  3877 	struct sctp_association *asoc;
  3878 	struct sctp_stream_out *outs;
  3879 	struct sctp_tmit_chunk *chk, *nchk;
  3880 	struct sctp_stream_queue_pending *sp, *nsp;
  3881 	int i;
  3883 	if (stcb == NULL) {
  3884 		return;
  3886 	asoc = &stcb->asoc;
  3887 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
  3888 		/* already being freed */
  3889 		return;
  3891 #if defined(__APPLE__)
  3892 	if (so_locked) {
  3893 		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
  3894 	} else {
  3895 		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
  3897 #endif
  3898 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
  3899 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
  3900 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
  3901 		return;
  3903 	/* now go through all the gunk, freeing chunks */
  3904 	if (holds_lock == 0) {
  3905 		SCTP_TCB_SEND_LOCK(stcb);
  3907 	/* sent queue SHOULD be empty */
  3908 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
  3909 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
  3910 		asoc->sent_queue_cnt--;
  3911 		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
  3912 			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
  3913 				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
  3914 #ifdef INVARIANTS
  3915 			} else {
  3916 				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
  3917 #endif
  3920 		if (chk->data != NULL) {
  3921 			sctp_free_bufspace(stcb, asoc, chk, 1);
  3922 			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
  3923 			                error, chk, so_locked);
  3924 			if (chk->data) {
  3925 				sctp_m_freem(chk->data);
  3926 				chk->data = NULL;
  3929 		sctp_free_a_chunk(stcb, chk, so_locked);
  3930 		/*sa_ignore FREED_MEMORY*/
  3932 	/* pending send queue SHOULD be empty */
  3933 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
  3934 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
  3935 		asoc->send_queue_cnt--;
  3936 		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
  3937 			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
  3938 #ifdef INVARIANTS
  3939 		} else {
  3940 			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
  3941 #endif
  3943 		if (chk->data != NULL) {
  3944 			sctp_free_bufspace(stcb, asoc, chk, 1);
  3945 			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
  3946 			                error, chk, so_locked);
  3947 			if (chk->data) {
  3948 				sctp_m_freem(chk->data);
  3949 				chk->data = NULL;
  3952 		sctp_free_a_chunk(stcb, chk, so_locked);
  3953 		/*sa_ignore FREED_MEMORY*/
  3955 	for (i = 0; i < asoc->streamoutcnt; i++) {
  3956 		/* For each stream */
  3957 		outs = &asoc->strmout[i];
  3958 		/* clean up any sends there */
  3959 		asoc->locked_on_sending = NULL;
  3960 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
  3961 			asoc->stream_queue_cnt--;
  3962 			TAILQ_REMOVE(&outs->outqueue, sp, next);
  3963 			sctp_free_spbufspace(stcb, asoc, sp);
  3964 			if (sp->data) {
  3965 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
  3966 						error, (void *)sp, so_locked);
  3967 				if (sp->data) {
  3968 					sctp_m_freem(sp->data);
  3969 					sp->data = NULL;
  3970 					sp->tail_mbuf = NULL;
  3971 					sp->length = 0;
  3974 			if (sp->net) {
  3975 				sctp_free_remote_addr(sp->net);
  3976 				sp->net = NULL;
  3978 			/* Free the chunk */
  3979 			sctp_free_a_strmoq(stcb, sp, so_locked);
  3980 			/*sa_ignore FREED_MEMORY*/
  3984 	if (holds_lock == 0) {
  3985 		SCTP_TCB_SEND_UNLOCK(stcb);
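/*
 * Notify the ULP that the association was aborted: report all queued
 * outbound data as failed, then raise either a remote- or local-abort
 * association change depending on who tore it down.
 */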
  3989 void
  3990 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
  3991 			struct sctp_abort_chunk *abort, int so_locked
  3992 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
  3993     SCTP_UNUSED
  3994 #endif
  3997 	if (stcb == NULL) {
  3998 		return;
  4000 #if defined(__APPLE__)
  4001 	if (so_locked) {
  4002 		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
  4003 	} else {
  4004 		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
  4006 #endif
  4007 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
  4008 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
  4009 	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
  4010 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
  4012 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
  4013 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
  4014 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
  4015 		return;
  4017 	/* Tell them we lost the asoc */
  4018 	sctp_report_all_outbound(stcb, error, 1, so_locked);
  4019 	if (from_peer) {
  4020 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
  4021 	} else {
  4022 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
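/*
 * Abort in response to an inbound packet: send an ABORT back to the sender
 * and, if we actually have a TCB, notify the ULP and free the association.
 */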
  4026 void
  4027 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
  4028                        struct mbuf *m, int iphlen,
  4029                        struct sockaddr *src, struct sockaddr *dst,
  4030                        struct sctphdr *sh, struct mbuf *op_err,
  4031 #if defined(__FreeBSD__)
  4032                        uint8_t use_mflowid, uint32_t mflowid,
  4033 #endif
  4034                        uint32_t vrf_id, uint16_t port)
  4036 	uint32_t vtag;
  4037 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  4038 	struct socket *so;
  4039 #endif
  4041 	vtag = 0;
  4042 	if (stcb != NULL) {
  4043 		/* We have a TCB to abort, send notification too */
  4044 		vtag = stcb->asoc.peer_vtag;
  4045 		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
  4046 		/* get the assoc vrf id and table id */
  4047 		vrf_id = stcb->asoc.vrf_id;
  4048 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
  4050 	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
  4051 #if defined(__FreeBSD__)
  4052 	                use_mflowid, mflowid,
  4053 #endif
  4054 	                vrf_id, port);
  4055 	if (stcb != NULL) {
  4056 		/* Ok, now let's free it */
  4057 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  4058 		so = SCTP_INP_SO(inp);
  4059 		atomic_add_int(&stcb->asoc.refcnt, 1);
  4060 		SCTP_TCB_UNLOCK(stcb);
  4061 		SCTP_SOCKET_LOCK(so, 1);
  4062 		SCTP_TCB_LOCK(stcb);
  4063 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
  4064 #endif
  4065 		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
  4066 		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
  4067 		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
  4068 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
  4070 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL+SCTP_LOC_4);
  4071 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  4072 		SCTP_SOCKET_UNLOCK(so, 1);
  4073 #endif
  4076 #ifdef SCTP_ASOCLOG_OF_TSNS
  4077 void
  4078 sctp_print_out_track_log(struct sctp_tcb *stcb)
  4080 #ifdef NOSIY_PRINTS
  4081 	int i;
  4082 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
  4083 	SCTP_PRINTF("IN bound TSN log-aaa\n");
  4084 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
  4085 		SCTP_PRINTF("None rcvd\n");
  4086 		goto none_in;
  4088 	if (stcb->asoc.tsn_in_wrapped) {
  4089 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
  4090 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
  4091 				    stcb->asoc.in_tsnlog[i].tsn,
  4092 				    stcb->asoc.in_tsnlog[i].strm,
  4093 				    stcb->asoc.in_tsnlog[i].seq,
  4094 				    stcb->asoc.in_tsnlog[i].flgs,
  4095 				    stcb->asoc.in_tsnlog[i].sz);
  4098 	if (stcb->asoc.tsn_in_at) {
  4099 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
  4100 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
  4101 				    stcb->asoc.in_tsnlog[i].tsn,
  4102 				    stcb->asoc.in_tsnlog[i].strm,
  4103 				    stcb->asoc.in_tsnlog[i].seq,
  4104 				    stcb->asoc.in_tsnlog[i].flgs,
  4105 				    stcb->asoc.in_tsnlog[i].sz);
  4108  none_in:
  4109 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
  4110 	if ((stcb->asoc.tsn_out_at == 0) &&
  4111 	    (stcb->asoc.tsn_out_wrapped == 0)) {
  4112 		SCTP_PRINTF("None sent\n");
  4114 	if (stcb->asoc.tsn_out_wrapped) {
  4115 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
  4116 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
  4117 				    stcb->asoc.out_tsnlog[i].tsn,
  4118 				    stcb->asoc.out_tsnlog[i].strm,
  4119 				    stcb->asoc.out_tsnlog[i].seq,
  4120 				    stcb->asoc.out_tsnlog[i].flgs,
  4121 				    stcb->asoc.out_tsnlog[i].sz);
  4124 	if (stcb->asoc.tsn_out_at) {
  4125 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
  4126 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
  4127 				    stcb->asoc.out_tsnlog[i].tsn,
  4128 				    stcb->asoc.out_tsnlog[i].strm,
  4129 				    stcb->asoc.out_tsnlog[i].seq,
  4130 				    stcb->asoc.out_tsnlog[i].flgs,
  4131 				    stcb->asoc.out_tsnlog[i].sz);
  4134 #endif
  4136 #endif
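/*
 * Abort an existing association: notify the ULP (unless the socket is
 * already gone), send an ABORT chunk to the peer, update the statistics and
 * free the TCB.  With no TCB, the endpoint itself is freed if it is already
 * marked gone and has no remaining associations.
 */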
  4138 void
  4139 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
  4140                           struct mbuf *op_err,
  4141                           int so_locked
  4142 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
  4143                           SCTP_UNUSED
  4144 #endif
  4147 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  4148 	struct socket *so;
  4149 #endif
  4151 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  4152 	so = SCTP_INP_SO(inp);
  4153 #endif
  4154 #if defined(__APPLE__)
  4155 	if (so_locked) {
  4156 		sctp_lock_assert(SCTP_INP_SO(inp));
  4157 	} else {
  4158 		sctp_unlock_assert(SCTP_INP_SO(inp));
  4160 #endif
  4161 	if (stcb == NULL) {
  4162 		/* Got to have a TCB */
  4163 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
  4164 			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
  4165 #if defined(__APPLE__)
  4166 				if (!so_locked) {
  4167 					SCTP_SOCKET_LOCK(so, 1);
  4169 #endif
  4170 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
  4171 						SCTP_CALLED_DIRECTLY_NOCMPSET);
  4172 #if defined(__APPLE__)
  4173 				if (!so_locked) {
  4174 					SCTP_SOCKET_UNLOCK(so, 1);
  4176 #endif
  4179 		return;
  4180 	} else {
  4181 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
  4183 	/* notify the ulp */
  4184 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
  4185 		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
  4187 	/* notify the peer */
  4188 	sctp_send_abort_tcb(stcb, op_err, so_locked);
  4189 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
  4190 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
  4191 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
  4192 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
  4194 	/* now free the asoc */
  4195 #ifdef SCTP_ASOCLOG_OF_TSNS
  4196 	sctp_print_out_track_log(stcb);
  4197 #endif
  4198 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  4199 	if (!so_locked) {
  4200 		atomic_add_int(&stcb->asoc.refcnt, 1);
  4201 		SCTP_TCB_UNLOCK(stcb);
  4202 		SCTP_SOCKET_LOCK(so, 1);
  4203 		SCTP_TCB_LOCK(stcb);
  4204 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
  4206 #endif
  4207 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL+SCTP_LOC_5);
  4208 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  4209 	if (!so_locked) {
  4210 		SCTP_SOCKET_UNLOCK(so, 1);
  4212 #endif
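/*
 * Handle an "out of the blue" packet, i.e. one that matches no existing
 * association.  Chunk types that must not be answered (COOKIE-ECHO,
 * PACKET-DROPPED, ABORT, SHUTDOWN-COMPLETE) are silently dropped, a
 * SHUTDOWN-ACK gets a SHUTDOWN-COMPLETE, and everything else is answered
 * with an ABORT subject to the sctp_blackhole sysctl.
 */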
  4215 void
  4216 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
  4217                  struct sockaddr *src, struct sockaddr *dst,
  4218                  struct sctphdr *sh, struct sctp_inpcb *inp,
  4219 #if defined(__FreeBSD__)
  4220                  uint8_t use_mflowid, uint32_t mflowid,
  4221 #endif
  4222                  uint32_t vrf_id, uint16_t port)
  4224 	struct sctp_chunkhdr *ch, chunk_buf;
  4225 	unsigned int chk_length;
  4226 	int contains_init_chunk;
  4228 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
  4229 	/* Generate a TO address for future reference */
  4230 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
  4231 		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
  4232 #if defined(__APPLE__)
  4233 			SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
  4234 #endif
  4235 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
  4236 					SCTP_CALLED_DIRECTLY_NOCMPSET);
  4237 #if defined(__APPLE__)
  4238 			SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
  4239 #endif
  4242 	contains_init_chunk = 0;
  4243 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
  4244 	    sizeof(*ch), (uint8_t *) & chunk_buf);
  4245 	while (ch != NULL) {
  4246 		chk_length = ntohs(ch->chunk_length);
  4247 		if (chk_length < sizeof(*ch)) {
  4248 			/* break to abort land */
  4249 			break;
  4251 		switch (ch->chunk_type) {
  4252 		case SCTP_INIT:
  4253 			contains_init_chunk = 1;
  4254 			break;
  4255 		case SCTP_COOKIE_ECHO:
  4256 			/* We hit here only if the assoc is being freed */
  4257 			return;
  4258 		case SCTP_PACKET_DROPPED:
  4259 			/* we don't respond to pkt-dropped */
  4260 			return;
  4261 		case SCTP_ABORT_ASSOCIATION:
  4262 			/* we don't respond with an ABORT to an ABORT */
  4263 			return;
  4264 		case SCTP_SHUTDOWN_COMPLETE:
  4265 			/*
  4266 			 * we ignore it since we are not waiting for it and
  4267 			 * peer is gone
  4268 			 */
  4269 			return;
  4270 		case SCTP_SHUTDOWN_ACK:
  4271 			sctp_send_shutdown_complete2(src, dst, sh,
  4272 #if defined(__FreeBSD__)
  4273 			                             use_mflowid, mflowid,
  4274 #endif
  4275 			                             vrf_id, port);
  4276 			return;
  4277 		default:
  4278 			break;
  4280 		offset += SCTP_SIZE32(chk_length);
  4281 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
  4282 		    sizeof(*ch), (uint8_t *) & chunk_buf);
  4284 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
  4285 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
  4286 	     (contains_init_chunk == 0))) {
  4287 		sctp_send_abort(m, iphlen, src, dst, sh, 0, NULL,
  4288 #if defined(__FreeBSD__)
  4289 		                use_mflowid, mflowid,
  4290 #endif
  4291 		                vrf_id, port);
  4295 /*
  4296  * check the inbound datagram to make sure there is not an abort inside it,
  4297  * if there is return 1, else return 0.
  4298  */
  4299 int
  4300 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
  4302 	struct sctp_chunkhdr *ch;
  4303 	struct sctp_init_chunk *init_chk, chunk_buf;
  4304 	int offset;
  4305 	unsigned int chk_length;
  4307 	offset = iphlen + sizeof(struct sctphdr);
  4308 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
  4309 	    (uint8_t *) & chunk_buf);
  4310 	while (ch != NULL) {
  4311 		chk_length = ntohs(ch->chunk_length);
  4312 		if (chk_length < sizeof(*ch)) {
  4313 			/* packet is probably corrupt */
  4314 			break;
  4316 		/* we seem to be ok, is it an abort? */
  4317 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
  4318 			/* yep, tell them */
  4319 			return (1);
  4321 		if (ch->chunk_type == SCTP_INITIATION) {
  4322 			/* need to update the Vtag */
  4323 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
  4324 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
  4325 			if (init_chk != NULL) {
  4326 				*vtagfill = ntohl(init_chk->init.initiate_tag);
  4329 		/* Nope, move to the next chunk */
  4330 		offset += SCTP_SIZE32(chk_length);
  4331 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
  4332 		    sizeof(*ch), (uint8_t *) & chunk_buf);
  4334 	return (0);
  4337 /*
  4338  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
  4339  * set (i.e. it's 0) so, create this function to compare link local scopes
  4340  */
  4341 #ifdef INET6
  4342 uint32_t
  4343 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
  4345 #if defined(__Userspace__)
  4346     /*__Userspace__ Returning 1 here always */
  4347 #endif
  4348 #if defined(SCTP_EMBEDDED_V6_SCOPE)
  4349 	struct sockaddr_in6 a, b;
  4351 	/* save copies */
  4352 	a = *addr1;
  4353 	b = *addr2;
  4355 	if (a.sin6_scope_id == 0)
  4356 #ifdef SCTP_KAME
  4357 		if (sa6_recoverscope(&a)) {
  4358 #else
  4359 		if (in6_recoverscope(&a, &a.sin6_addr, NULL)) {
  4360 #endif				/* SCTP_KAME */
  4361 			/* can't get scope, so can't match */
  4362 			return (0);
  4364 	if (b.sin6_scope_id == 0)
  4365 #ifdef SCTP_KAME
  4366 		if (sa6_recoverscope(&b)) {
  4367 #else
  4368 		if (in6_recoverscope(&b, &b.sin6_addr, NULL)) {
  4369 #endif				/* SCTP_KAME */
  4370 			/* can't get scope, so can't match */
  4371 			return (0);
  4373 	if (a.sin6_scope_id != b.sin6_scope_id)
  4374 		return (0);
  4375 #else
  4376 	if (addr1->sin6_scope_id != addr2->sin6_scope_id)
  4377 		return (0);
  4378 #endif /* SCTP_EMBEDDED_V6_SCOPE */
  4380 	return (1);
  4383 #if defined(SCTP_EMBEDDED_V6_SCOPE)
  4384 /*
  4385  * returns a sockaddr_in6 with embedded scope recovered and removed
  4386  */
  4387 struct sockaddr_in6 *
  4388 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
  4390 	/* check and strip embedded scope junk */
  4391 	if (addr->sin6_family == AF_INET6) {
  4392 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
  4393 			if (addr->sin6_scope_id == 0) {
  4394 				*store = *addr;
  4395 #ifdef SCTP_KAME
  4396 				if (!sa6_recoverscope(store)) {
  4397 #else
  4398 				if (!in6_recoverscope(store, &store->sin6_addr,
  4399 				    NULL)) {
  4400 #endif /* SCTP_KAME */
  4401 					/* use the recovered scope */
  4402 					addr = store;
  4404 			} else {
  4405 				/* else, return the original "to" addr */
  4406 				in6_clearscope(&addr->sin6_addr);
  4410 	return (addr);
  4412 #endif /* SCTP_EMBEDDED_V6_SCOPE */
  4413 #endif
  4415 /*
  4416  * are the two addresses the same?  currently a "scopeless" check returns: 1
  4417  * if same, 0 if not
  4418  */
  4419 int
  4420 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
  4423 	/* must be valid */
  4424 	if (sa1 == NULL || sa2 == NULL)
  4425 		return (0);
  4427 	/* must be the same family */
  4428 	if (sa1->sa_family != sa2->sa_family)
  4429 		return (0);
  4431 	switch (sa1->sa_family) {
  4432 #ifdef INET6
  4433 	case AF_INET6:
  4435 		/* IPv6 addresses */
  4436 		struct sockaddr_in6 *sin6_1, *sin6_2;
  4438 		sin6_1 = (struct sockaddr_in6 *)sa1;
  4439 		sin6_2 = (struct sockaddr_in6 *)sa2;
  4440 		return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
  4441 		    sin6_2));
  4443 #endif
  4444 #ifdef INET
  4445 	case AF_INET:
  4447 		/* IPv4 addresses */
  4448 		struct sockaddr_in *sin_1, *sin_2;
  4450 		sin_1 = (struct sockaddr_in *)sa1;
  4451 		sin_2 = (struct sockaddr_in *)sa2;
  4452 		return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
  4454 #endif
  4455 #if defined(__Userspace__)
  4456 	case AF_CONN:
  4458 		struct sockaddr_conn *sconn_1, *sconn_2;
  4460 		sconn_1 = (struct sockaddr_conn *)sa1;
  4461 		sconn_2 = (struct sockaddr_conn *)sa2;
  4462 		return (sconn_1->sconn_addr == sconn_2->sconn_addr);
  4464 #endif
  4465 	default:
  4466 		/* we don't do these... */
  4467 		return (0);
  4471 void
  4472 sctp_print_address(struct sockaddr *sa)
  4474 #ifdef INET6
  4475 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
  4476 	char ip6buf[INET6_ADDRSTRLEN];
  4477 #endif
  4478 #endif
  4480 	switch (sa->sa_family) {
  4481 #ifdef INET6
  4482 	case AF_INET6:
  4484 		struct sockaddr_in6 *sin6;
  4486 		sin6 = (struct sockaddr_in6 *)sa;
  4487 #if defined(__Userspace__)
  4488 		SCTP_PRINTF("IPv6 address: %x:%x:%x:%x:%x:%x:%x:%x:port:%d scope:%u\n",
  4489 			    ntohs(sin6->sin6_addr.s6_addr16[0]),
  4490 			    ntohs(sin6->sin6_addr.s6_addr16[1]),
  4491 			    ntohs(sin6->sin6_addr.s6_addr16[2]),
  4492 			    ntohs(sin6->sin6_addr.s6_addr16[3]),
  4493 			    ntohs(sin6->sin6_addr.s6_addr16[4]),
  4494 			    ntohs(sin6->sin6_addr.s6_addr16[5]),
  4495 			    ntohs(sin6->sin6_addr.s6_addr16[6]),
  4496 			    ntohs(sin6->sin6_addr.s6_addr16[7]),
  4497 			    ntohs(sin6->sin6_port),
  4498 			    sin6->sin6_scope_id);
  4499 #else
  4500 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
  4501 		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
  4502 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
  4503 			    ntohs(sin6->sin6_port),
  4504 			    sin6->sin6_scope_id);
  4505 #else
  4506 		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
  4507 			    ip6_sprintf(&sin6->sin6_addr),
  4508 			    ntohs(sin6->sin6_port),
  4509 			    sin6->sin6_scope_id);
  4510 #endif
  4511 #endif
  4512 		break;
  4514 #endif
  4515 #ifdef INET
  4516 	case AF_INET:
  4518 		struct sockaddr_in *sin;
  4519 		unsigned char *p;
  4521 		sin = (struct sockaddr_in *)sa;
  4522 		p = (unsigned char *)&sin->sin_addr;
  4523 		SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
  4524 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
  4525 		break;
  4527 #endif
  4528 #if defined(__Userspace__)
  4529 	case AF_CONN:
  4531 		struct sockaddr_conn *sconn;
  4533 		sconn = (struct sockaddr_conn *)sa;
  4534 		SCTP_PRINTF("AF_CONN address: %p\n", sconn->sconn_addr);
  4535 		break;
  4537 #endif
  4538 	default:
  4539 		SCTP_PRINTF("?\n");
  4540 		break;
  4544 void
  4545 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
  4546     struct sctp_inpcb *new_inp,
  4547     struct sctp_tcb *stcb,
  4548     int waitflags)
  4550 	/*
  4551 	 * go through our old INP and pull off any control structures that
  4552 	 * belong to stcb and move them to the new inp.
  4553 	 */
  4554 	struct socket *old_so, *new_so;
  4555 	struct sctp_queued_to_read *control, *nctl;
  4556 	struct sctp_readhead tmp_queue;
  4557 	struct mbuf *m;
  4558 	int error = 0;
  4560 	old_so = old_inp->sctp_socket;
  4561 	new_so = new_inp->sctp_socket;
  4562 	TAILQ_INIT(&tmp_queue);
  4563 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
  4564 	SOCKBUF_LOCK(&(old_so->so_rcv));
  4565 #endif
  4566 #if defined(__FreeBSD__) || defined(__APPLE__)
  4567 	error = sblock(&old_so->so_rcv, waitflags);
  4568 #endif
  4569 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
  4570 	SOCKBUF_UNLOCK(&(old_so->so_rcv));
  4571 #endif
  4572 	if (error) {
  4573 		/* Gak, can't get sblock, we have a problem.
  4574 		 * data will be left stranded.. and we
  4575 		 * don't dare look at it since the
  4576 		 * other thread may be reading something.
  4577 		 * Oh well, it's a screwed-up app that does
  4578 		 * a peeloff OR an accept while reading
  4579 		 * from the main socket... actually it's
  4580 		 * only the peeloff() case, since I think
  4581 		 * read will fail on a listening socket..
  4582 		 */
  4583 		return;
  4585 	/* lock the socket buffers */
  4586 	SCTP_INP_READ_LOCK(old_inp);
  4587 	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
  4588 		/* Pull off all for our target stcb */
  4589 		if (control->stcb == stcb) {
  4590 			/* remove it we want it */
  4591 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
  4592 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
  4593 			m = control->data;
  4594 			while (m) {
  4595 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  4596 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE,SCTP_BUF_LEN(m));
  4598 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
  4599 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  4600 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
  4602 				m = SCTP_BUF_NEXT(m);
  4606 	SCTP_INP_READ_UNLOCK(old_inp);
  4607 	/* Remove the sb-lock on the old socket */
  4608 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
  4609 	SOCKBUF_LOCK(&(old_so->so_rcv));
  4610 #endif
  4611 #if defined(__APPLE__)
  4612 	sbunlock(&old_so->so_rcv, 1);
  4613 #endif
  4615 #if defined(__FreeBSD__)
  4616 	sbunlock(&old_so->so_rcv);
  4617 #endif
  4618 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
  4619 	SOCKBUF_UNLOCK(&(old_so->so_rcv));
  4620 #endif
  4621 	/* Now we move them over to the new socket buffer */
  4622 	SCTP_INP_READ_LOCK(new_inp);
  4623 	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
  4624 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
  4625 		m = control->data;
  4626 		while (m) {
  4627 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  4628 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
  4630 			sctp_sballoc(stcb, &new_so->so_rcv, m);
  4631 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  4632 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
  4634 			m = SCTP_BUF_NEXT(m);
  4637 	SCTP_INP_READ_UNLOCK(new_inp);
  4640 void
  4641 sctp_add_to_readq(struct sctp_inpcb *inp,
  4642     struct sctp_tcb *stcb,
  4643     struct sctp_queued_to_read *control,
  4644     struct sockbuf *sb,
  4645     int end,
  4646     int inp_read_lock_held,
  4647     int so_locked
  4648 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
  4649     SCTP_UNUSED
  4650 #endif
  4653 	/*
  4654 	 * Here we must place the control on the end of the socket read
  4655 	 * queue AND increment sb_cc so that select will work properly on
  4656 	 * read.
  4657 	 */
  4658 	struct mbuf *m, *prev = NULL;
  4660 	if (inp == NULL) {
  4661 		/* Gak, TSNH!! */
  4662 #ifdef INVARIANTS
  4663 		panic("Gak, inp NULL on add_to_readq");
  4664 #endif
  4665 		return;
  4667 #if defined(__APPLE__)
  4668 	if (so_locked) {
  4669 		sctp_lock_assert(SCTP_INP_SO(inp));
  4670 	} else {
  4671 		sctp_unlock_assert(SCTP_INP_SO(inp));
  4673 #endif
  4674 	if (inp_read_lock_held == 0)
  4675 		SCTP_INP_READ_LOCK(inp);
  4676 	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
  4677 		sctp_free_remote_addr(control->whoFrom);
  4678 		if (control->data) {
  4679 			sctp_m_freem(control->data);
  4680 			control->data = NULL;
  4682 		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
  4683 		if (inp_read_lock_held == 0)
  4684 			SCTP_INP_READ_UNLOCK(inp);
  4685 		return;
  4687 	if (!(control->spec_flags & M_NOTIFICATION)) {
  4688 		atomic_add_int(&inp->total_recvs, 1);
  4689 		if (!control->do_not_ref_stcb) {
  4690 			atomic_add_int(&stcb->total_recvs, 1);
  4693 	m = control->data;
  4694 	control->held_length = 0;
  4695 	control->length = 0;
  4696 	while (m) {
  4697 		if (SCTP_BUF_LEN(m) == 0) {
  4698 			/* Skip mbufs with NO length */
  4699 			if (prev == NULL) {
  4700 				/* First one */
  4701 				control->data = sctp_m_free(m);
  4702 				m = control->data;
  4703 			} else {
  4704 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
  4705 				m = SCTP_BUF_NEXT(prev);
  4707 			if (m == NULL) {
  4708 				control->tail_mbuf = prev;
  4710 			continue;
  4712 		prev = m;
  4713 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  4714 			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
  4716 		sctp_sballoc(stcb, sb, m);
  4717 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  4718 			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
  4720 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
  4721 		m = SCTP_BUF_NEXT(m);
  4723 	if (prev != NULL) {
  4724 		control->tail_mbuf = prev;
  4725 	} else {
  4726 		/* Everything got collapsed out?? */
  4727 		sctp_free_remote_addr(control->whoFrom);
  4728 		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
  4729 		if (inp_read_lock_held == 0)
  4730 			SCTP_INP_READ_UNLOCK(inp);
  4731 		return;
  4733 	if (end) {
  4734 		control->end_added = 1;
  4736 #if defined(__Userspace__)
  4737 	if (inp->recv_callback) {
  4738 		if (inp_read_lock_held == 0)
  4739 			SCTP_INP_READ_UNLOCK(inp);
  4740 		if (control->end_added == 1) {
  4741 			struct socket *so;
  4742 			struct mbuf *m;
  4743 			char *buffer;
  4744 			struct sctp_rcvinfo rcv;
  4745 			union sctp_sockstore addr;
  4746 			int flags;
  4748 			if ((buffer = malloc(control->length)) == NULL) {
  4749 				return;
  4751 			so = stcb->sctp_socket;
  4752 			for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
  4753 				sctp_sbfree(control, control->stcb, &so->so_rcv, m);
  4755 			atomic_add_int(&stcb->asoc.refcnt, 1);
  4756 			SCTP_TCB_UNLOCK(stcb);
  4757 			m_copydata(control->data, 0, control->length, buffer);
  4758 			memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
  4759 			rcv.rcv_sid = control->sinfo_stream;
  4760 			rcv.rcv_ssn = control->sinfo_ssn;
  4761 			rcv.rcv_flags = control->sinfo_flags;
  4762 			rcv.rcv_ppid = control->sinfo_ppid;
  4763 			rcv.rcv_tsn = control->sinfo_tsn;
  4764 			rcv.rcv_cumtsn = control->sinfo_cumtsn;
  4765 			rcv.rcv_context = control->sinfo_context;
  4766 			rcv.rcv_assoc_id = control->sinfo_assoc_id;
  4767 			memset(&addr, 0, sizeof(union sctp_sockstore));
  4768 			switch (control->whoFrom->ro._l_addr.sa.sa_family) {
  4769 #ifdef INET
  4770 			case AF_INET:
  4771 				addr.sin = control->whoFrom->ro._l_addr.sin;
  4772 				break;
  4773 #endif
  4774 #ifdef INET6
  4775 			case AF_INET6:
  4776 				addr.sin6 = control->whoFrom->ro._l_addr.sin6;
  4777 				break;
  4778 #endif
  4779 			case AF_CONN:
  4780 				addr.sconn = control->whoFrom->ro._l_addr.sconn;
  4781 				break;
  4782 			default:
  4783 				addr.sa = control->whoFrom->ro._l_addr.sa;
  4784 				break;
  4786 			flags = MSG_EOR;
  4787 			if (control->spec_flags & M_NOTIFICATION) {
  4788 				flags |= MSG_NOTIFICATION;
  4790 			inp->recv_callback(so, addr, buffer, control->length, rcv, flags, inp->ulp_info);
  4791 			SCTP_TCB_LOCK(stcb);
  4792 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
  4793 			sctp_free_remote_addr(control->whoFrom);
  4794 			control->whoFrom = NULL;
  4795 			sctp_m_freem(control->data);
  4796 			control->data = NULL;
  4797 			control->length = 0;
  4798 			sctp_free_a_readq(stcb, control);
  4800 		return;
  4802 #endif
  4803 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
  4804 	if (inp_read_lock_held == 0)
  4805 		SCTP_INP_READ_UNLOCK(inp);
  4806 	if (inp && inp->sctp_socket) {
  4807 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
  4808 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
  4809 		} else {
  4810 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  4811 			struct socket *so;
  4813 			so = SCTP_INP_SO(inp);
  4814 			if (!so_locked) {
  4815 				if (stcb) {
  4816 					atomic_add_int(&stcb->asoc.refcnt, 1);
  4817 					SCTP_TCB_UNLOCK(stcb);
  4819 				SCTP_SOCKET_LOCK(so, 1);
  4820 				if (stcb) {
  4821 					SCTP_TCB_LOCK(stcb);
  4822 					atomic_subtract_int(&stcb->asoc.refcnt, 1);
  4824 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
  4825 					SCTP_SOCKET_UNLOCK(so, 1);
  4826 					return;
  4829 #endif
  4830 			sctp_sorwakeup(inp, inp->sctp_socket);
  4831 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  4832 			if (!so_locked) {
  4833 				SCTP_SOCKET_UNLOCK(so, 1);
  4835 #endif
  4841 int
  4842 sctp_append_to_readq(struct sctp_inpcb *inp,
  4843     struct sctp_tcb *stcb,
  4844     struct sctp_queued_to_read *control,
  4845     struct mbuf *m,
  4846     int end,
  4847     int ctls_cumack,
  4848     struct sockbuf *sb)
  4850 	/*
  4851 	 * A partial delivery API event is underway. OR we are appending on
  4852 	 * the reassembly queue.
  4854 	 * If PDAPI this means we need to add m to the end of the data.
  4855 	 * Increase the length in the control AND increment the sb_cc.
  4856 	 * Otherwise sb is NULL and all we need to do is put it at the end
  4857 	 * of the mbuf chain.
  4858 	 */
  4859 	int len = 0;
  4860 	struct mbuf *mm, *tail = NULL, *prev = NULL;
  4862 	if (inp) {
  4863 		SCTP_INP_READ_LOCK(inp);
  4865 	if (control == NULL) {
  4866 	get_out:
  4867 		if (inp) {
  4868 			SCTP_INP_READ_UNLOCK(inp);
  4870 		return (-1);
  4872 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
  4873 		SCTP_INP_READ_UNLOCK(inp);
  4874 		return (0);
  4876 	if (control->end_added) {
  4877 		/* huh this one is complete? */
  4878 		goto get_out;
  4880 	mm = m;
  4881 	if (mm == NULL) {
  4882 		goto get_out;
  4885 	while (mm) {
  4886 		if (SCTP_BUF_LEN(mm) == 0) {
  4887 			/* Skip mbufs with NO length */
  4888 			if (prev == NULL) {
  4889 				/* First one */
  4890 				m = sctp_m_free(mm);
  4891 				mm = m;
  4892 			} else {
  4893 				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
  4894 				mm = SCTP_BUF_NEXT(prev);
  4896 			continue;
  4898 		prev = mm;
  4899 		len += SCTP_BUF_LEN(mm);
  4900 		if (sb) {
  4901 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  4902 				sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
  4904 			sctp_sballoc(stcb, sb, mm);
  4905 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  4906 				sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
  4909 		mm = SCTP_BUF_NEXT(mm);
  4911 	if (prev) {
  4912 		tail = prev;
  4913 	} else {
  4914 		/* Really there should always be a prev */
  4915 		if (m == NULL) {
  4916 			/* Huh nothing left? */
  4917 #ifdef INVARIANTS
  4918 			panic("Nothing left to add?");
  4919 #else
  4920 			goto get_out;
  4921 #endif
  4923 		tail = m;
  4925 	if (control->tail_mbuf) {
  4926 		/* append */
  4927 		SCTP_BUF_NEXT(control->tail_mbuf) = m;
  4928 		control->tail_mbuf = tail;
  4929 	} else {
  4930 		/* nothing there */
  4931 #ifdef INVARIANTS
  4932 		if (control->data != NULL) {
  4933 			panic("This should NOT happen");
  4935 #endif
  4936 		control->data = m;
  4937 		control->tail_mbuf = tail;
  4939 	atomic_add_int(&control->length, len);
  4940 	if (end) {
  4941 		/* message is complete */
  4942 		if (stcb && (control == stcb->asoc.control_pdapi)) {
  4943 			stcb->asoc.control_pdapi = NULL;
  4945 		control->held_length = 0;
  4946 		control->end_added = 1;
  4948 	if (stcb == NULL) {
  4949 		control->do_not_ref_stcb = 1;
  4951 	/*
  4952 	 * When we are appending in partial delivery, the cum-ack is used
  4953 	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
  4954 	 * is populated in the outbound sinfo structure from the true cumack
  4955 	 * if the association exists...
  4956 	 */
  4957 	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
  4958 #if defined(__Userspace__)
  4959 	if (inp->recv_callback) {
  4960 		uint32_t pd_point, length;
  4962 		length = control->length;
  4963 		if (stcb != NULL && stcb->sctp_socket != NULL) {
  4964 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
  4965 			               stcb->sctp_ep->partial_delivery_point);
  4966 		} else {
  4967 			pd_point = inp->partial_delivery_point;
  4969 		if ((control->end_added == 1) || (length >= pd_point)) {
  4970 			struct socket *so;
  4971 			char *buffer;
  4972 			struct sctp_rcvinfo rcv;
  4973 			union sctp_sockstore addr;
  4974 			int flags;
  4976 			if ((buffer = malloc(control->length)) == NULL) {
  4977 				return (-1);
  4979 			so = stcb->sctp_socket;
  4980 			for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
  4981 				sctp_sbfree(control, control->stcb, &so->so_rcv, m);
  4983 			m_copydata(control->data, 0, control->length, buffer);
  4984 			memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
  4985 			rcv.rcv_sid = control->sinfo_stream;
  4986 			rcv.rcv_ssn = control->sinfo_ssn;
  4987 			rcv.rcv_flags = control->sinfo_flags;
  4988 			rcv.rcv_ppid = control->sinfo_ppid;
  4989 			rcv.rcv_tsn = control->sinfo_tsn;
  4990 			rcv.rcv_cumtsn = control->sinfo_cumtsn;
  4991 			rcv.rcv_context = control->sinfo_context;
  4992 			rcv.rcv_assoc_id = control->sinfo_assoc_id;
  4993 			memset(&addr, 0, sizeof(union sctp_sockstore));
  4994 			switch (control->whoFrom->ro._l_addr.sa.sa_family) {
  4995 #ifdef INET
  4996 			case AF_INET:
  4997 				addr.sin = control->whoFrom->ro._l_addr.sin;
  4998 				break;
  4999 #endif
  5000 #ifdef INET6
  5001 			case AF_INET6:
  5002 				addr.sin6 = control->whoFrom->ro._l_addr.sin6;
  5003 				break;
  5004 #endif
  5005 			case AF_CONN:
  5006 				addr.sconn = control->whoFrom->ro._l_addr.sconn;
  5007 				break;
  5008 			default:
  5009 				addr.sa = control->whoFrom->ro._l_addr.sa;
  5010 				break;
  5012 			flags = 0;
  5013 			if (control->end_added == 1) {
  5014 				flags |= MSG_EOR;
  5016 			if (control->spec_flags & M_NOTIFICATION) {
  5017 				flags |= MSG_NOTIFICATION;
  5019 			sctp_m_freem(control->data);
  5020 			control->data = NULL;
  5021 			control->tail_mbuf = NULL;
  5022 			control->length = 0;
  5023 			if (control->end_added) {
  5024 				sctp_free_remote_addr(control->whoFrom);
  5025 				control->whoFrom = NULL;
  5026 				sctp_free_a_readq(stcb, control);
  5027 			} else {
  5028 				control->some_taken = 1;
  5030 			atomic_add_int(&stcb->asoc.refcnt, 1);
  5031 			SCTP_TCB_UNLOCK(stcb);
  5032 			inp->recv_callback(so, addr, buffer, length, rcv, flags, inp->ulp_info);
  5033 			SCTP_TCB_LOCK(stcb);
  5034 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
  5036 		if (inp)
  5037 			SCTP_INP_READ_UNLOCK(inp);
  5038 		return (0);
  5040 #endif
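	/*
	 * For the userspace build, recv_callback above is whatever handler the
	 * application registered when it created the socket.  A minimal handler
	 * matching the call made above could look like the sketch below; the
	 * names example_recv_cb() and consume() are illustrative only.  Note
	 * that the flat buffer is malloc()'d above and not freed on this path,
	 * so the handler is expected to release it:
	 *
	 *	static int
	 *	example_recv_cb(struct socket *so, union sctp_sockstore addr,
	 *	                void *data, size_t datalen, struct sctp_rcvinfo rcv,
	 *	                int flags, void *ulp_info)
	 *	{
	 *		if (data != NULL) {
	 *			consume(data, datalen, rcv.rcv_sid, flags & MSG_EOR);
	 *			free(data);
	 *		}
	 *		return (1);
	 *	}
	 */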
  5041 	if (inp) {
  5042 		SCTP_INP_READ_UNLOCK(inp);
  5044 	if (inp && inp->sctp_socket) {
  5045 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
  5046 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
  5047 		} else {
  5048 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  5049 			struct socket *so;
  5051 			so = SCTP_INP_SO(inp);
  5052 			if (stcb) {
  5053 				atomic_add_int(&stcb->asoc.refcnt, 1);
  5054 				SCTP_TCB_UNLOCK(stcb);
  5056 			SCTP_SOCKET_LOCK(so, 1);
  5057 			if (stcb) {
  5058 				SCTP_TCB_LOCK(stcb);
  5059 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
  5061 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
  5062 				SCTP_SOCKET_UNLOCK(so, 1);
  5063 				return (0);
  5065 #endif
  5066 			sctp_sorwakeup(inp, inp->sctp_socket);
  5067 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  5068 			SCTP_SOCKET_UNLOCK(so, 1);
  5069 #endif
  5072 	return (0);
  5077 /*************HOLD THIS COMMENT FOR PATCH FILE OF
  5078  *************ALTERNATE ROUTING CODE
  5079  */
  5081 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
  5082  *************ALTERNATE ROUTING CODE
  5083  */
  5085 struct mbuf *
  5086 sctp_generate_invmanparam(int err)
  5087 {
  5088 	/* Return an mbuf with an invalid mandatory parameter */
  5089 	struct mbuf *m;
  5090 
  5091 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA);
  5092 	if (m) {
  5093 		struct sctp_paramhdr *ph;
  5094 
  5095 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
  5096 		ph = mtod(m, struct sctp_paramhdr *);
  5097 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
  5098 		ph->param_type = htons(err);
  5099 	}
  5100 	return (m);
  5101 }
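/*
 * The mbuf built above carries a bare parameter TLV: a two-byte type set to
 * the caller-supplied err code and a two-byte length of 4 (header only, no
 * value), both in network byte order.  Callers typically attach it to an
 * outgoing error cause when a mandatory parameter cannot be handled.
 */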
  5103 #ifdef SCTP_MBCNT_LOGGING
  5104 void
  5105 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
  5106     struct sctp_tmit_chunk *tp1, int chk_cnt)
  5108 	if (tp1->data == NULL) {
  5109 		return;
  5111 	asoc->chunks_on_out_queue -= chk_cnt;
  5112 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
  5113 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
  5114 			       asoc->total_output_queue_size,
  5115 			       tp1->book_size,
  5116 			       0,
  5117 			       tp1->mbcnt);
  5119 	if (asoc->total_output_queue_size >= tp1->book_size) {
  5120 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
  5121 	} else {
  5122 		asoc->total_output_queue_size = 0;
  5125 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
  5126 				  ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
  5127 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
  5128 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
  5129 		} else {
  5130 			stcb->sctp_socket->so_snd.sb_cc = 0;
  5136 #endif
  5138 int
  5139 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
  5140 			   uint8_t sent, int so_locked
  5141 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
  5142 			   SCTP_UNUSED
  5143 #endif
  5146 	struct sctp_stream_out *strq;
  5147 	struct sctp_tmit_chunk *chk = NULL, *tp2;
  5148 	struct sctp_stream_queue_pending *sp;
  5149 	uint16_t stream = 0, seq = 0;
  5150 	uint8_t foundeom = 0;
  5151 	int ret_sz = 0;
  5152 	int notdone;
  5153 	int do_wakeup_routine = 0;
  5154 #if defined(__APPLE__)
  5155 	if (so_locked) {
  5156 		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
  5157 	} else {
  5158 		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
  5160 #endif
  5161 	stream = tp1->rec.data.stream_number;
  5162 	seq = tp1->rec.data.stream_seq;
  5163 	do {
  5164 		ret_sz += tp1->book_size;
  5165 		if (tp1->data != NULL) {
  5166 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
  5167 				sctp_flight_size_decrease(tp1);
  5168 				sctp_total_flight_decrease(stcb, tp1);
  5170 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
  5171 			stcb->asoc.peers_rwnd += tp1->send_size;
  5172 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
  5173 			if (sent) {
  5174 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
  5175 			} else {
  5176 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
  5178 			if (tp1->data) {
  5179 				sctp_m_freem(tp1->data);
  5180 				tp1->data = NULL;
  5182 			do_wakeup_routine = 1;
  5183 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
  5184 				stcb->asoc.sent_queue_cnt_removeable--;
  5187 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
  5188 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
  5189 		    SCTP_DATA_NOT_FRAG) {
  5190 			/* not frag'ed we are done */
  5191 			notdone = 0;
  5192 			foundeom = 1;
  5193 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
  5194 			/* end of frag, we are done */
  5195 			notdone = 0;
  5196 			foundeom = 1;
  5197 		} else {
  5198 			/*
  5199 			 * It's a begin or middle piece; we must mark all
  5200 			 * of it
  5201 			 */
  5202 			notdone = 1;
  5203 			tp1 = TAILQ_NEXT(tp1, sctp_next);
  5205 	} while (tp1 && notdone);
  5206 	if (foundeom == 0) {
  5207 		/*
  5208 		 * The multi-part message was scattered across the send and
  5209 		 * sent queue.
  5210 		 */
  5211 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
  5212 			if ((tp1->rec.data.stream_number != stream) ||
  5213 		   	    (tp1->rec.data.stream_seq != seq)) {
  5214 				break;
  5216 			/* save to chk in case we have some on stream out
  5217 			 * queue. If so and we have an un-transmitted one
  5218 			 * we don't have to fudge the TSN.
  5219 			 */
  5220 			chk = tp1;
  5221 			ret_sz += tp1->book_size;
  5222 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
  5223 			if (sent) {
  5224 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
  5225 			} else {
  5226 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
  5228 			if (tp1->data) {
  5229 				sctp_m_freem(tp1->data);
  5230 				tp1->data = NULL;
  5232 			/* No flight involved here; book the size to 0 */
  5233 			tp1->book_size = 0;
  5234 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
  5235 				foundeom = 1;
  5237 			do_wakeup_routine = 1;
  5238 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
  5239 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
  5240 			/* on to the sent queue so we can wait for it to be passed by. */
  5241 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
  5242 					  sctp_next);
  5243 			stcb->asoc.send_queue_cnt--;
  5244 			stcb->asoc.sent_queue_cnt++;
  5247 	if (foundeom == 0) {
  5248 		/*
  5249 		 * Still no eom found. That means there
  5250 		 * is stuff left on the stream out queue.. yuck.
  5251 		 */
  5252 		SCTP_TCB_SEND_LOCK(stcb);
  5253 		strq = &stcb->asoc.strmout[stream];
  5254 		sp = TAILQ_FIRST(&strq->outqueue);
  5255 		if (sp != NULL) {
  5256 			sp->discard_rest = 1;
  5257 			/*
  5258 			 * We may need to put a chunk on the
  5259 			 * queue that holds the TSN that
  5260 			 * would have been sent with the LAST
  5261 			 * bit.
  5262 			 */
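			/*
			 * Rationale: earlier fragments of this message may already be on
			 * the wire, so a TSN carrying the LAST_FRAG flag has to sit on the
			 * sent queue (marked SCTP_FORWARD_TSN_SKIP) for the FORWARD-TSN
			 * machinery to tell the peer to skip the whole message and drop
			 * any partial reassembly it holds.
			 */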
  5263 			if (chk == NULL) {
  5264 				/* Yep, we have to */
  5265 				sctp_alloc_a_chunk(stcb, chk);
  5266 				if (chk == NULL) {
  5267 					/* we are hosed. All we can
  5268 					 * do is nothing.. which will
  5269 					 * cause an abort if the peer is
  5270 					 * paying attention.
  5271 					 */
  5272 					goto oh_well;
  5274 				memset(chk, 0, sizeof(*chk));
  5275 				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
  5276 				chk->sent = SCTP_FORWARD_TSN_SKIP;
  5277 				chk->asoc = &stcb->asoc;
  5278 				chk->rec.data.stream_seq = strq->next_sequence_send;
  5279 				chk->rec.data.stream_number = sp->stream;
  5280 				chk->rec.data.payloadtype = sp->ppid;
  5281 				chk->rec.data.context = sp->context;
  5282 				chk->flags = sp->act_flags;
  5283 				if (sp->net)
  5284 					chk->whoTo = sp->net;
  5285 				else
  5286 					chk->whoTo = stcb->asoc.primary_destination;
  5287 				atomic_add_int(&chk->whoTo->ref_count, 1);
  5288 #if defined(__FreeBSD__) || defined(__Panda__)
  5289 				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
  5290 #else
  5291 				chk->rec.data.TSN_seq = stcb->asoc.sending_seq++;
  5292 #endif
  5293 				stcb->asoc.pr_sctp_cnt++;
  5294 				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
  5295 				stcb->asoc.sent_queue_cnt++;
  5296 				stcb->asoc.pr_sctp_cnt++;
  5297 			} else {
  5298 				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
  5300 			strq->next_sequence_send++;
  5301 		oh_well:
  5302 			if (sp->data) {
  5303 				/* Pull any data to free up the SB and
  5304 				 * allow sender to "add more" while we
  5305 				 * will throw away :-)
  5306 				 */
  5307 				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
  5308 				ret_sz += sp->length;
  5309 				do_wakeup_routine = 1;
  5310 				sp->some_taken = 1;
  5311 				sctp_m_freem(sp->data);
  5312 				sp->data = NULL;
  5313 				sp->tail_mbuf = NULL;
  5314 				sp->length = 0;
  5317 		SCTP_TCB_SEND_UNLOCK(stcb);
  5319 	if (do_wakeup_routine) {
  5320 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  5321 		struct socket *so;
  5323 		so = SCTP_INP_SO(stcb->sctp_ep);
  5324 		if (!so_locked) {
  5325 			atomic_add_int(&stcb->asoc.refcnt, 1);
  5326 			SCTP_TCB_UNLOCK(stcb);
  5327 			SCTP_SOCKET_LOCK(so, 1);
  5328 			SCTP_TCB_LOCK(stcb);
  5329 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
  5330 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
  5331 				/* assoc was freed while we were unlocked */
  5332 				SCTP_SOCKET_UNLOCK(so, 1);
  5333 				return (ret_sz);
  5336 #endif
  5337 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
  5338 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
  5339 		if (!so_locked) {
  5340 			SCTP_SOCKET_UNLOCK(so, 1);
  5342 #endif
  5344 	return (ret_sz);
  5347 /*
  5348  * checks to see if the given address, sa, is one that is currently known by
  5349  * the kernel.  Note: can't distinguish the same address on multiple interfaces
  5350  * and doesn't handle multiple addresses with different zone/scope id's.  Note:
  5351  * ifa_ifwithaddr() compares the entire sockaddr struct
  5352  */
  5353 struct sctp_ifa *
  5354 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
  5355 		    int holds_lock)
  5357 	struct sctp_laddr *laddr;
  5359 	if (holds_lock == 0) {
  5360 		SCTP_INP_RLOCK(inp);
  5363 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
  5364 		if (laddr->ifa == NULL)
  5365 			continue;
  5366 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
  5367 			continue;
  5368 #ifdef INET
  5369 		if (addr->sa_family == AF_INET) {
  5370 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
  5371 			    laddr->ifa->address.sin.sin_addr.s_addr) {
  5372 				/* found him. */
  5373 				if (holds_lock == 0) {
  5374 					SCTP_INP_RUNLOCK(inp);
  5376 				return (laddr->ifa);
  5377 				break;
  5380 #endif
  5381 #ifdef INET6
  5382 		if (addr->sa_family == AF_INET6) {
  5383 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
  5384 						 &laddr->ifa->address.sin6)) {
  5385 				/* found him. */
  5386 				if (holds_lock == 0) {
  5387 					SCTP_INP_RUNLOCK(inp);
  5389 				return (laddr->ifa);
  5390 				break;
  5393 #endif
  5394 #if defined(__Userspace__)
  5395 		if (addr->sa_family == AF_CONN) {
  5396 			if (((struct sockaddr_conn *)addr)->sconn_addr == laddr->ifa->address.sconn.sconn_addr) {
  5397 				/* found him. */
  5398 				if (holds_lock == 0) {
  5399 					SCTP_INP_RUNLOCK(inp);
  5401 				return (laddr->ifa);
  5402 				break;
  5405 #endif
  5407 	if (holds_lock == 0) {
  5408 		SCTP_INP_RUNLOCK(inp);
  5410 	return (NULL);
  5413 uint32_t
  5414 sctp_get_ifa_hash_val(struct sockaddr *addr)
  5415 {
  5416 	switch (addr->sa_family) {
  5417 #ifdef INET
  5418 	case AF_INET:
  5419 	{
  5420 		struct sockaddr_in *sin;
  5421 
  5422 		sin = (struct sockaddr_in *)addr;
  5423 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
  5424 	}
  5425 #endif
  5426 #ifdef INET6
  5427 	case AF_INET6:
  5428 	{
  5429 		struct sockaddr_in6 *sin6;
  5430 		uint32_t hash_of_addr;
  5431 
  5432 		sin6 = (struct sockaddr_in6 *)addr;
  5433 #if !defined(__Windows__) && !defined(__Userspace_os_FreeBSD) && !defined(__Userspace_os_Darwin) && !defined(__Userspace_os_Windows)
  5434 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
  5435 				sin6->sin6_addr.s6_addr32[1] +
  5436 				sin6->sin6_addr.s6_addr32[2] +
  5437 				sin6->sin6_addr.s6_addr32[3]);
  5438 #else
  5439 		hash_of_addr = (((uint32_t *)&sin6->sin6_addr)[0] +
  5440 				((uint32_t *)&sin6->sin6_addr)[1] +
  5441 				((uint32_t *)&sin6->sin6_addr)[2] +
  5442 				((uint32_t *)&sin6->sin6_addr)[3]);
  5443 #endif
  5444 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
  5445 		return (hash_of_addr);
  5446 	}
  5447 #endif
  5448 #if defined(__Userspace__)
  5449 	case AF_CONN:
  5450 	{
  5451 		struct sockaddr_conn *sconn;
  5452 		uintptr_t temp;
  5453 
  5454 		sconn = (struct sockaddr_conn *)addr;
  5455 		temp = (uintptr_t)sconn->sconn_addr;
  5456 		return ((uint32_t)(temp ^ (temp >> 16)));
  5457 	}
  5458 #endif
  5459 	default:
  5460 		break;
  5461 	}
  5462 	return (0);
  5463 }
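/*
 * Each case above folds the high and low halves of the 32-bit value(s)
 * together (x ^ (x >> 16)), so addresses that differ only in their low-order
 * bits still spread across the vrf_addr_hash buckets used below.
 */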
  5465 struct sctp_ifa *
  5466 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
  5468 	struct sctp_ifa *sctp_ifap;
  5469 	struct sctp_vrf *vrf;
  5470 	struct sctp_ifalist *hash_head;
  5471 	uint32_t hash_of_addr;
  5473 	if (holds_lock == 0)
  5474 		SCTP_IPI_ADDR_RLOCK();
  5476 	vrf = sctp_find_vrf(vrf_id);
  5477 	if (vrf == NULL) {
  5478 	stage_right:
  5479 		if (holds_lock == 0)
  5480 			SCTP_IPI_ADDR_RUNLOCK();
  5481 		return (NULL);
  5484 	hash_of_addr = sctp_get_ifa_hash_val(addr);
  5486 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
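	/*
	 * vrf_addr_hashmark is the bucket-count-minus-one mask (the tables are
	 * sized to a power of two), so masking the hash with it selects a
	 * bucket index without a modulo.
	 */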
  5487 	if (hash_head == NULL) {
  5488 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
  5489 			    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
  5490 			    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
  5491 		sctp_print_address(addr);
  5492 		SCTP_PRINTF("No such bucket for address\n");
  5493 		if (holds_lock == 0)
  5494 			SCTP_IPI_ADDR_RUNLOCK();
  5496 		return (NULL);
  5498 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
  5499 		if (sctp_ifap == NULL) {
  5500 #ifdef INVARIANTS
  5501 			panic("Huh LIST_FOREACH corrupt");
  5502 		        goto stage_right;
  5503 #else
  5504 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
  5505 			goto stage_right;
  5506 #endif
  5508 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
  5509 			continue;
  5510 #ifdef INET
  5511 		if (addr->sa_family == AF_INET) {
  5512 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
  5513 			    sctp_ifap->address.sin.sin_addr.s_addr) {
  5514 				/* found him. */
  5515 				if (holds_lock == 0)
  5516 					SCTP_IPI_ADDR_RUNLOCK();
  5517 				return (sctp_ifap);
  5518 				break;
  5521 #endif
  5522 #ifdef INET6
  5523 		if (addr->sa_family == AF_INET6) {
  5524 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
  5525 						 &sctp_ifap->address.sin6)) {
  5526 				/* found him. */
  5527 				if (holds_lock == 0)
  5528 					SCTP_IPI_ADDR_RUNLOCK();
  5529 				return (sctp_ifap);
  5530 				break;
  5533 #endif
  5534 #if defined(__Userspace__)
  5535 		if (addr->sa_family == AF_CONN) {
  5536 			if (((struct sockaddr_conn *)addr)->sconn_addr == sctp_ifap->address.sconn.sconn_addr) {
  5537 				/* found him. */
  5538 				if (holds_lock == 0)
  5539 					SCTP_IPI_ADDR_RUNLOCK();
  5540 				return (sctp_ifap);
  5541 				break;
  5544 #endif
  5546 	if (holds_lock == 0)
  5547 		SCTP_IPI_ADDR_RUNLOCK();
  5548 	return (NULL);
  5551 static void
  5552 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
  5553 	       uint32_t rwnd_req)
  5555 	/* User pulled some data, do we need a rwnd update? */
  5556 	int r_unlocked = 0;
  5557 	uint32_t dif, rwnd;
  5558 	struct socket *so = NULL;
  5560 	if (stcb == NULL)
  5561 		return;
  5563 	atomic_add_int(&stcb->asoc.refcnt, 1);
  5565 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
  5566 				SCTP_STATE_SHUTDOWN_RECEIVED |
  5567 				SCTP_STATE_SHUTDOWN_ACK_SENT)) {
  5568 		/* Pre-check If we are freeing no update */
  5569 		goto no_lock;
  5571 	SCTP_INP_INCR_REF(stcb->sctp_ep);
  5572 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
  5573 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
  5574 		goto out;
  5576 	so = stcb->sctp_socket;
  5577 	if (so == NULL) {
  5578 		goto out;
  5580 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
  5581 	/* Have you freed enough to look? */
  5582 	*freed_so_far = 0;
  5583 	/* Yep, it's worth a look and the lock overhead */
  5585 	/* Figure out what the rwnd would be */
  5586 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
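	/*
	 * Only bother the peer if the window has re-opened by at least rwnd_req
	 * bytes since the last report: in that case send an immediate
	 * window-update SACK and kick chunk output; otherwise just remember how
	 * much has been freed for the next pass through here.
	 */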
  5587 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
  5588 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
  5589 	} else {
  5590 		dif = 0;
  5592 	if (dif >= rwnd_req) {
  5593 		if (hold_rlock) {
  5594 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
  5595 			r_unlocked = 1;
  5597 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
  5598 			/*
  5599 			 * One last check before we allow the guy possibly
  5600 			 * to get in. There is a race, where the guy has not
  5601 			 * reached the gate. In that case
  5602 			 */
  5603 			goto out;
  5605 		SCTP_TCB_LOCK(stcb);
  5606 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
  5607 			/* No reports here */
  5608 			SCTP_TCB_UNLOCK(stcb);
  5609 			goto out;
  5611 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
  5612 		sctp_send_sack(stcb, SCTP_SO_LOCKED);
  5614 		sctp_chunk_output(stcb->sctp_ep, stcb,
  5615 				  SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
  5616 		/* make sure no timer is running */
  5617 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL+SCTP_LOC_6);
  5618 		SCTP_TCB_UNLOCK(stcb);
  5619 	} else {
  5620 		/* Update how much we have pending */
  5621 		stcb->freed_by_sorcv_sincelast = dif;
  5623  out:
  5624 	if (so && r_unlocked && hold_rlock) {
  5625 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
  5628 	SCTP_INP_DECR_REF(stcb->sctp_ep);
  5629  no_lock:
  5630 	atomic_add_int(&stcb->asoc.refcnt, -1);
  5631 	return;
  5634 int
  5635 sctp_sorecvmsg(struct socket *so,
  5636     struct uio *uio,
  5637     struct mbuf **mp,
  5638     struct sockaddr *from,
  5639     int fromlen,
  5640     int *msg_flags,
  5641     struct sctp_sndrcvinfo *sinfo,
  5642     int filling_sinfo)
  5644 	/*
  5645 	 * MSG flags we will look at:
  5646 	 * MSG_DONTWAIT - non-blocking IO.
  5647 	 * MSG_PEEK - Look, don't touch :-D (only valid with mp == NULL, so
  5648 	 *            uio is the copy method to userland).  MSG_WAITALL - ??
  5649 	 * On the way out we may set any combination of:
  5650 	 * MSG_NOTIFICATION MSG_EOR
  5651 	 */
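	/*
	 * Rough shape of this routine: pick the first sctp_queued_to_read
	 * control off the endpoint's read_queue (sleeping in sbwait() when
	 * blocking is allowed and nothing is ready), fill in sinfo and the
	 * source address for the caller, then either uiomove() the data out
	 * mbuf by mbuf (mp == NULL) or hand back the whole mbuf chain
	 * (mp != NULL).  As data is consumed, sctp_user_rcvd() lets the peer
	 * hear about the re-opened receive window.
	 */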
  5652 	struct sctp_inpcb *inp = NULL;
  5653 	int my_len = 0;
  5654 	int cp_len = 0, error = 0;
  5655 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
  5656 	struct mbuf *m = NULL;
  5657 	struct sctp_tcb *stcb = NULL;
  5658 	int wakeup_read_socket = 0;
  5659 	int freecnt_applied = 0;
  5660 	int out_flags = 0, in_flags = 0;
  5661 	int block_allowed = 1;
  5662 	uint32_t freed_so_far = 0;
  5663 	uint32_t copied_so_far = 0;
  5664 	int in_eeor_mode = 0;
  5665 	int no_rcv_needed = 0;
  5666 	uint32_t rwnd_req = 0;
  5667 	int hold_sblock = 0;
  5668 	int hold_rlock = 0;
  5669 	int slen = 0;
  5670 	uint32_t held_length = 0;
  5671 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
  5672 	int sockbuf_lock = 0;
  5673 #endif
  5675 	if (uio == NULL) {
  5676 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  5677 		return (EINVAL);
  5680 	if (msg_flags) {
  5681 		in_flags = *msg_flags;
  5682 		if (in_flags & MSG_PEEK)
  5683 			SCTP_STAT_INCR(sctps_read_peeks);
  5684 	} else {
  5685 		in_flags = 0;
  5687 #if defined(__APPLE__)
  5688 #if defined(APPLE_LEOPARD)
  5689 	slen = uio->uio_resid;
  5690 #else
  5691 	slen = uio_resid(uio);
  5692 #endif
  5693 #else
  5694 	slen = uio->uio_resid;
  5695 #endif
  5697 	/* Pull in and set up our int flags */
  5698 	if (in_flags & MSG_OOB) {
  5699 		/* Out of band's NOT supported */
  5700 		return (EOPNOTSUPP);
  5702 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
  5703 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  5704 		return (EINVAL);
  5706 	if ((in_flags & (MSG_DONTWAIT
  5707 #if defined(__FreeBSD__) && __FreeBSD_version > 500000
  5708 			 | MSG_NBIO
  5709 #endif
  5710 		     )) ||
  5711 	    SCTP_SO_IS_NBIO(so)) {
  5712 		block_allowed = 0;
  5714 	/* setup the endpoint */
  5715 	inp = (struct sctp_inpcb *)so->so_pcb;
  5716 	if (inp == NULL) {
  5717 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
  5718 		return (EFAULT);
  5720 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
  5721 	/* Must be at least a MTU's worth */
  5722 	if (rwnd_req < SCTP_MIN_RWND)
  5723 		rwnd_req = SCTP_MIN_RWND;
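	/*
	 * rwnd_req is how much the user must consume before the freed space is
	 * worth reporting: a fraction of the receive buffer (shifted down by
	 * SCTP_RWND_HIWAT_SHIFT) but never less than SCTP_MIN_RWND (about one
	 * MTU).  sctp_user_rcvd() compares freed byte counts against it below.
	 */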
  5724 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
  5725 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
  5726 #if defined(__APPLE__)
  5727 #if defined(APPLE_LEOPARD)
  5728 		sctp_misc_ints(SCTP_SORECV_ENTER,
  5729 			       rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
  5730 #else
  5731 		sctp_misc_ints(SCTP_SORECV_ENTER,
  5732 			       rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio_resid(uio));
  5733 #endif
  5734 #else
  5735 		sctp_misc_ints(SCTP_SORECV_ENTER,
  5736 			       rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
  5737 #endif
  5739 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
  5740 	SOCKBUF_LOCK(&so->so_rcv);
  5741 	hold_sblock = 1;
  5742 #endif
  5743 	if (SCTP_BASE_SYSCTL(sctp_logging_level) &SCTP_RECV_RWND_LOGGING_ENABLE) {
  5744 #if defined(__APPLE__)
  5745 #if defined(APPLE_LEOPARD)
  5746 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
  5747 			       rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
  5748 #else
  5749 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
  5750 			       rwnd_req, block_allowed, so->so_rcv.sb_cc, uio_resid(uio));
  5751 #endif
  5752 #else
  5753 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
  5754 			       rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
  5755 #endif
  5758 #if defined(__APPLE__)
  5759 	error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
  5760 #endif
  5762 #if defined(__FreeBSD__)
  5763 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
  5764 #endif
  5765 	if (error) {
  5766 		goto release_unlocked;
  5768 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
  5769         sockbuf_lock = 1;
  5770 #endif
  5771  restart:
  5772 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
  5773 	if (hold_sblock == 0) {
  5774 		SOCKBUF_LOCK(&so->so_rcv);
  5775 		hold_sblock = 1;
  5777 #endif
  5778 #if defined(__APPLE__)
  5779 	sbunlock(&so->so_rcv, 1);
  5780 #endif
  5782 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
  5783 	sbunlock(&so->so_rcv);
  5784 #endif
  5786  restart_nosblocks:
  5787 	if (hold_sblock == 0) {
  5788 		SOCKBUF_LOCK(&so->so_rcv);
  5789 		hold_sblock = 1;
  5791 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
  5792 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
  5793 		goto out;
  5795 #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
  5796 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
  5797 #else
  5798 	if ((so->so_state & SS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
  5799 #endif
  5800 		if (so->so_error) {
  5801 			error = so->so_error;
  5802 			if ((in_flags & MSG_PEEK) == 0)
  5803 				so->so_error = 0;
  5804 			goto out;
  5805 		} else {
  5806 			if (so->so_rcv.sb_cc == 0) {
  5807 				/* indicate EOF */
  5808 				error = 0;
  5809 				goto out;
  5813 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
  5814 		/* we need to wait for data */
  5815 		if ((so->so_rcv.sb_cc == 0) &&
  5816 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
  5817 		     (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
  5818 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
  5819 				/* For the active open side, clear flags for re-use;
  5820 				 * passive open is blocked by connect.
  5821 				 */
  5822 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
  5823 					/* You were aborted, passive side always hits here */
  5824 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
  5825 					error = ECONNRESET;
  5827 				so->so_state &= ~(SS_ISCONNECTING |
  5828 						  SS_ISDISCONNECTING |
  5829 						  SS_ISCONFIRMING |
  5830 						  SS_ISCONNECTED);
  5831 				if (error == 0) {
  5832 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
  5833 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
  5834 						error = ENOTCONN;
  5837 				goto out;
  5840 		error = sbwait(&so->so_rcv);
  5841 		if (error) {
  5842 			goto out;
  5844 		held_length = 0;
  5845 		goto restart_nosblocks;
  5846 	} else if (so->so_rcv.sb_cc == 0) {
  5847 		if (so->so_error) {
  5848 			error = so->so_error;
  5849 			if ((in_flags & MSG_PEEK) == 0)
  5850 				so->so_error = 0;
  5851 		} else {
  5852 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
  5853 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
  5854 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
  5855 					/* For the active open side, clear flags for re-use;
  5856 					 * passive open is blocked by connect.
  5857 					 */
  5858 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
  5859 						/* You were aborted, passive side always hits here */
  5860 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
  5861 						error = ECONNRESET;
  5863 					so->so_state &= ~(SS_ISCONNECTING |
  5864 							  SS_ISDISCONNECTING |
  5865 							  SS_ISCONFIRMING |
  5866 							  SS_ISCONNECTED);
  5867 					if (error == 0) {
  5868 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
  5869 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
  5870 							error = ENOTCONN;
  5873 					goto out;
  5876 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
  5877 			error = EWOULDBLOCK;
  5879 		goto out;
  5881 	if (hold_sblock == 1) {
  5882 		SOCKBUF_UNLOCK(&so->so_rcv);
  5883 		hold_sblock = 0;
  5885 #if defined(__APPLE__)
  5886 	error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
  5887 #endif
  5888 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
  5889 	error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
  5890 #endif
  5891 	/* we possibly have data we can read */
  5892 	/*sa_ignore FREED_MEMORY*/
  5893 	control = TAILQ_FIRST(&inp->read_queue);
  5894 	if (control == NULL) {
  5895 		/* This could be happening since
  5896 		 * the appender did the increment but has not
  5897 		 * yet done the tailq insert onto the read_queue
  5898 		 */
  5899 		if (hold_rlock == 0) {
  5900 			SCTP_INP_READ_LOCK(inp);
  5902 		control = TAILQ_FIRST(&inp->read_queue);
  5903 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
  5904 #ifdef INVARIANTS
  5905 			panic("Huh, its non zero and nothing on control?");
  5906 #endif
  5907 			so->so_rcv.sb_cc = 0;
  5909 		SCTP_INP_READ_UNLOCK(inp);
  5910 		hold_rlock = 0;
  5911 		goto restart;
  5914 	if ((control->length == 0) &&
  5915 	    (control->do_not_ref_stcb)) {
  5916 		/* Clean up code for freeing assoc that left behind a pdapi..
  5917 		 * maybe a peer in EEOR that just closed after sending and
  5918 		 * never indicated a EOR.
  5919 		 */
  5920 		if (hold_rlock == 0) {
  5921 			hold_rlock = 1;
  5922 			SCTP_INP_READ_LOCK(inp);
  5924 		control->held_length = 0;
  5925 		if (control->data) {
  5926 			/* Hmm there is data here .. fix */
  5927 			struct mbuf *m_tmp;
  5928 			int cnt = 0;
  5929 			m_tmp = control->data;
  5930 			while (m_tmp) {
  5931 				cnt += SCTP_BUF_LEN(m_tmp);
  5932 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
  5933 					control->tail_mbuf = m_tmp;
  5934 					control->end_added = 1;
  5936 				m_tmp = SCTP_BUF_NEXT(m_tmp);
  5938 			control->length = cnt;
  5939 		} else {
  5940 			/* remove it */
  5941 			TAILQ_REMOVE(&inp->read_queue, control, next);
  5942 			/* Add back any hidden data */
  5943 			sctp_free_remote_addr(control->whoFrom);
  5944 			sctp_free_a_readq(stcb, control);
  5946 		if (hold_rlock) {
  5947 			hold_rlock = 0;
  5948 			SCTP_INP_READ_UNLOCK(inp);
  5950 		goto restart;
  5952 	if ((control->length == 0) &&
  5953 	    (control->end_added == 1)) {
  5954 		/* Do we also need to check for (control->pdapi_aborted == 1)? */
  5955 		if (hold_rlock == 0) {
  5956 			hold_rlock = 1;
  5957 			SCTP_INP_READ_LOCK(inp);
  5959 		TAILQ_REMOVE(&inp->read_queue, control, next);
  5960 		if (control->data) {
  5961 #ifdef INVARIANTS
  5962 			panic("control->data not null but control->length == 0");
  5963 #else
  5964 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
  5965 			sctp_m_freem(control->data);
  5966 			control->data = NULL;
  5967 #endif
  5969 		if (control->aux_data) {
  5970 			sctp_m_free (control->aux_data);
  5971 			control->aux_data = NULL;
  5973 		sctp_free_remote_addr(control->whoFrom);
  5974 		sctp_free_a_readq(stcb, control);
  5975 		if (hold_rlock) {
  5976 			hold_rlock = 0;
  5977 			SCTP_INP_READ_UNLOCK(inp);
  5979 		goto restart;
  5981 	if (control->length == 0) {
  5982 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
  5983 		    (filling_sinfo)) {
  5984 			/* find a more suitable one then this */
  5985 			ctl = TAILQ_NEXT(control, next);
  5986 			while (ctl) {
  5987 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
  5988 				    (ctl->some_taken ||
  5989 				     (ctl->spec_flags & M_NOTIFICATION) ||
  5990 				     ((ctl->do_not_ref_stcb == 0) &&
  5991 				      (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
  5992 					) {
  5993 					/*-
  5994 					 * If we have a different TCB next, and there is data
  5995 					 * present. If we have already taken some (pdapi), OR we can
  5996 					 * ref the tcb and no delivery has started on this stream, we
  5997 					 * take it. Note we allow a notification on a different
  5998 					 * assoc to be delivered..
  5999 					 */
  6000 					control = ctl;
  6001 					goto found_one;
  6002 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
  6003 					   (ctl->length) &&
  6004 					   ((ctl->some_taken) ||
  6005 					    ((ctl->do_not_ref_stcb == 0) &&
  6006 					     ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
  6007 					     (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
  6008 					/*-
  6009 					 * If we have the same tcb, and there is data present, and we
  6010 					 * have the strm interleave feature present. Then if we have
  6011 					 * taken some (pdapi) or we can refer to that tcb AND we have
  6012 					 * not started a delivery for this stream, we can take it.
  6013 					 * Note we do NOT allow a notification on the same assoc to
  6014 					 * be delivered.
  6015 					 */
  6016 					control = ctl;
  6017 					goto found_one;
  6019 				ctl = TAILQ_NEXT(ctl, next);
  6022 		/*
  6023 		 * if we reach here, no suitable replacement is available
  6024 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
  6025 		 * into our held count, and it's time to sleep again.
  6026 		 */
  6027 		held_length = so->so_rcv.sb_cc;
  6028 		control->held_length = so->so_rcv.sb_cc;
  6029 		goto restart;
  6031 	/* Clear the held length since there is something to read */
  6032 	control->held_length = 0;
  6033 	if (hold_rlock) {
  6034 		SCTP_INP_READ_UNLOCK(inp);
  6035 		hold_rlock = 0;
  6037  found_one:
  6038 	/*
  6039 	 * If we reach here, control has some data for us to read off.
  6040 	 * Note that stcb COULD be NULL.
  6041 	 */
  6042 	control->some_taken++;
  6043 	if (hold_sblock) {
  6044 		SOCKBUF_UNLOCK(&so->so_rcv);
  6045 		hold_sblock = 0;
  6047 	stcb = control->stcb;
  6048 	if (stcb) {
  6049 		if ((control->do_not_ref_stcb == 0) &&
  6050 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
  6051 			if (freecnt_applied == 0)
  6052 				stcb = NULL;
  6053 		} else if (control->do_not_ref_stcb == 0) {
  6054 			/* you can't free it on me please */
  6055 			/*
  6056 			 * The lock on the socket buffer protects us so the
  6057 			 * free code will stop. But since we used the socketbuf
  6058 			 * lock and the sender uses the tcb_lock to increment,
  6059 			 * we need to use the atomic add to the refcnt
  6060 			 */
  6061 			if (freecnt_applied) {
  6062 #ifdef INVARIANTS
  6063 				panic("refcnt already incremented");
  6064 #else
  6065 				SCTP_PRINTF("refcnt already incremented?\n");
  6066 #endif
  6067 			} else {
  6068 				atomic_add_int(&stcb->asoc.refcnt, 1);
  6069 				freecnt_applied = 1;
  6071 			/*
  6072 			 * Setup to remember how much we have not yet told
  6073 			 * the peer our rwnd has opened up. Note we grab
  6074 			 * the value from the tcb from last time.
  6075 			 * Note too that sack sending clears this when a sack
  6076 			 * is sent, which is fine. Once we hit the rwnd_req,
  6077 			 * we then will go to the sctp_user_rcvd() that will
  6078 			 * not lock until it KNOWs it MUST send a WUP-SACK.
  6079 			 */
  6080 			freed_so_far = stcb->freed_by_sorcv_sincelast;
  6081 			stcb->freed_by_sorcv_sincelast = 0;
  6084 	if (stcb &&
  6085 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
  6086 	    control->do_not_ref_stcb == 0) {
  6087 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
  6090 	/* First lets get off the sinfo and sockaddr info */
  6091 	if ((sinfo) && filling_sinfo) {
  6092 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
  6093 		nxt = TAILQ_NEXT(control, next);
  6094 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
  6095 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
  6096 			struct sctp_extrcvinfo *s_extra;
  6097 			s_extra = (struct sctp_extrcvinfo *)sinfo;
  6098 			if ((nxt) &&
  6099 			    (nxt->length)) {
  6100 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
  6101 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
  6102 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
  6104 				if (nxt->spec_flags & M_NOTIFICATION) {
  6105 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
  6107 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
  6108 				s_extra->sreinfo_next_length = nxt->length;
  6109 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
  6110 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
  6111 				if (nxt->tail_mbuf != NULL) {
  6112 					if (nxt->end_added) {
  6113 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
  6116 			} else {
  6117 				/* we explicitly 0 this, since the memcpy got
  6118 				 * some other things beyond the older sinfo_
  6119 				 * that is on the control's structure :-D
  6120 				 */
  6121 				nxt = NULL;
  6122 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
  6123 				s_extra->sreinfo_next_aid = 0;
  6124 				s_extra->sreinfo_next_length = 0;
  6125 				s_extra->sreinfo_next_ppid = 0;
  6126 				s_extra->sreinfo_next_stream = 0;
  6129 		/*
  6130 		 * update off the real current cum-ack, if we have an stcb.
  6131 		 */
  6132 		if ((control->do_not_ref_stcb == 0) && stcb)
  6133 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
  6134 		/*
  6135 		 * mask off the high bits, we keep the actual chunk bits in
  6136 		 * there.
  6137 		 */
  6138 		sinfo->sinfo_flags &= 0x00ff;
  6139 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
  6140 			sinfo->sinfo_flags |= SCTP_UNORDERED;
  6143 #ifdef SCTP_ASOCLOG_OF_TSNS
  6145 		int index, newindex;
  6146 		struct sctp_pcbtsn_rlog *entry;
  6147 		do {
  6148 			index = inp->readlog_index;
  6149 			newindex = index + 1;
  6150 			if (newindex >= SCTP_READ_LOG_SIZE) {
  6151 				newindex = 0;
  6153 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
  6154 		entry = &inp->readlog[index];
  6155 		entry->vtag = control->sinfo_assoc_id;
  6156 		entry->strm = control->sinfo_stream;
  6157 		entry->seq = control->sinfo_ssn;
  6158 		entry->sz = control->length;
  6159 		entry->flgs = control->sinfo_flags;
  6161 #endif
  6162 	if (fromlen && from) {
  6163 #ifdef HAVE_SA_LEN
  6164 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len);
  6165 #endif
  6166 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
  6167 #ifdef INET6
  6168 			case AF_INET6:
  6169 #ifndef HAVE_SA_LEN
  6170 				cp_len = min((size_t)fromlen, sizeof(struct sockaddr_in6));
  6171 #endif
  6172 				((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
  6173 				break;
  6174 #endif
  6175 #ifdef INET
  6176 			case AF_INET:
  6177 #ifndef HAVE_SA_LEN
  6178 				cp_len = min((size_t)fromlen, sizeof(struct sockaddr_in));
  6179 #endif
  6180 				((struct sockaddr_in *)from)->sin_port = control->port_from;
  6181 				break;
  6182 #endif
  6183 #if defined(__Userspace__)
  6184 			case AF_CONN:
  6185 #ifndef HAVE_SA_LEN
  6186 				cp_len = min((size_t)fromlen, sizeof(struct sockaddr_conn));
  6187 #endif
  6188 				((struct sockaddr_conn *)from)->sconn_port = control->port_from;
  6189 				break;
  6190 #endif
  6191 			default:
  6192 #ifndef HAVE_SA_LEN
  6193 				cp_len = min((size_t)fromlen, sizeof(struct sockaddr));
  6194 #endif
  6195 				break;
  6197 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
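		/*
		 * If the socket asked for IPv4-mapped addresses
		 * (SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) and the peer address just copied
		 * out is AF_INET, rewrite it below as an AF_INET6 ::ffff:a.b.c.d
		 * sockaddr_in6, provided the caller's buffer can hold one.
		 */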
  6199 #if defined(INET) && defined(INET6)
  6200 		if ((sctp_is_feature_on(inp,SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
  6201 		    (from->sa_family == AF_INET) &&
  6202 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
  6203 			struct sockaddr_in *sin;
  6204 			struct sockaddr_in6 sin6;
  6206 			sin = (struct sockaddr_in *)from;
  6207 			bzero(&sin6, sizeof(sin6));
  6208 			sin6.sin6_family = AF_INET6;
  6209 #ifdef HAVE_SIN6_LEN
  6210 			sin6.sin6_len = sizeof(struct sockaddr_in6);
  6211 #endif
  6212 #if defined(__Userspace_os_FreeBSD) || defined(__Userspace_os_Darwin) || defined(__Userspace_os_Windows)
  6213 			((uint32_t *)&sin6.sin6_addr)[2] = htonl(0xffff);
  6214 			bcopy(&sin->sin_addr,
  6215 			      &(((uint32_t *)&sin6.sin6_addr)[3]),
  6216 			      sizeof(uint32_t));
  6217 #elif defined(__Windows__)
  6218 			((uint32_t *)&sin6.sin6_addr)[2] = htonl(0xffff);
  6219 			bcopy(&sin->sin_addr,
  6220 			      &((uint32_t *)&sin6.sin6_addr)[3],
  6221 			      sizeof(uint32_t));
  6222 #else
  6223 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
  6224 			bcopy(&sin->sin_addr,
  6225 			      &sin6.sin6_addr.s6_addr32[3],
  6226 			      sizeof(sin6.sin6_addr.s6_addr32[3]));
  6227 #endif
  6228 			sin6.sin6_port = sin->sin_port;
  6229 			memcpy(from, &sin6, sizeof(struct sockaddr_in6));
  6231 #endif
  6232 #if defined(SCTP_EMBEDDED_V6_SCOPE)
  6233 #ifdef INET6
  6235 			struct sockaddr_in6 lsa6, *from6;
  6237 			from6 = (struct sockaddr_in6 *)from;
  6238 			sctp_recover_scope_mac(from6, (&lsa6));
  6240 #endif
  6241 #endif
  6243 	/* now copy out what data we can */
  6244 	if (mp == NULL) {
  6245 		/* copy out each mbuf in the chain up to length */
  6246 	get_more_data:
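		/*
		 * Walk the mbuf chain of this control: for each mbuf, uiomove() up
		 * to uio_resid bytes to the caller with the locks dropped, then
		 * (unless MSG_PEEK) release the consumed bytes from the socket
		 * buffer and keep running totals for the receive-window accounting.
		 * MSG_EOR is set once the last mbuf of a complete message is taken.
		 */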
  6247 		m = control->data;
  6248 		while (m) {
  6249 			/* Move out all we can */
  6250 #if defined(__APPLE__)
  6251 #if defined(APPLE_LEOPARD)
  6252 			cp_len = (int)uio->uio_resid;
  6253 #else
  6254 			cp_len = (int)uio_resid(uio);
  6255 #endif
  6256 #else
  6257 			cp_len = (int)uio->uio_resid;
  6258 #endif
  6259 			my_len = (int)SCTP_BUF_LEN(m);
  6260 			if (cp_len > my_len) {
  6261 				/* not enough in this buf */
  6262 				cp_len = my_len;
  6264 			if (hold_rlock) {
  6265 				SCTP_INP_READ_UNLOCK(inp);
  6266 				hold_rlock = 0;
  6268 #if defined(__APPLE__)
  6269 			SCTP_SOCKET_UNLOCK(so, 0);
  6270 #endif
  6271 			if (cp_len > 0)
  6272 				error = uiomove(mtod(m, char *), cp_len, uio);
  6273 #if defined(__APPLE__)
  6274 			SCTP_SOCKET_LOCK(so, 0);
  6275 #endif
  6276 			/* re-read */
  6277 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
  6278 				goto release;
  6281 			if ((control->do_not_ref_stcb == 0) && stcb &&
  6282 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
  6283 				no_rcv_needed = 1;
  6285 			if (error) {
  6286 				/* error we are out of here */
  6287 				goto release;
  6289 			if ((SCTP_BUF_NEXT(m) == NULL) &&
  6290 			    (cp_len >= SCTP_BUF_LEN(m)) &&
  6291 			    ((control->end_added == 0) ||
  6292 			     (control->end_added &&
  6293 			      (TAILQ_NEXT(control, next) == NULL)))
  6294 				) {
  6295 				SCTP_INP_READ_LOCK(inp);
  6296 				hold_rlock = 1;
  6298 			if (cp_len == SCTP_BUF_LEN(m)) {
  6299 				if ((SCTP_BUF_NEXT(m)== NULL) &&
  6300 				    (control->end_added)) {
  6301 					out_flags |= MSG_EOR;
  6302 					if ((control->do_not_ref_stcb == 0)  &&
  6303 					    (control->stcb != NULL) &&
  6304 					    ((control->spec_flags & M_NOTIFICATION) == 0))
  6305 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
  6307 				if (control->spec_flags & M_NOTIFICATION) {
  6308 					out_flags |= MSG_NOTIFICATION;
  6310 				/* we ate up the mbuf */
  6311 				if (in_flags & MSG_PEEK) {
  6312 					/* just looking */
  6313 					m = SCTP_BUF_NEXT(m);
  6314 					copied_so_far += cp_len;
  6315 				} else {
  6316 					/* dispose of the mbuf */
  6317 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  6318 						sctp_sblog(&so->so_rcv,
  6319 						   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
  6321 					sctp_sbfree(control, stcb, &so->so_rcv, m);
  6322 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  6323 						sctp_sblog(&so->so_rcv,
  6324 						   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
  6326 					copied_so_far += cp_len;
  6327 					freed_so_far += cp_len;
  6328 					freed_so_far += MSIZE;
  6329 					atomic_subtract_int(&control->length, cp_len);
  6330 					control->data = sctp_m_free(m);
  6331 					m = control->data;
  6332 					/* been through it all, must hold sb lock ok to null tail */
  6333 					if (control->data == NULL) {
  6334 #ifdef INVARIANTS
  6335 #if !defined(__APPLE__)
  6336 						if ((control->end_added == 0) ||
  6337 						    (TAILQ_NEXT(control, next) == NULL)) {
  6338 							/* If the end is not added, OR the
  6339 							 * next is NOT null we MUST have the lock.
  6340 							 */
  6341 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
  6342 								panic("Hmm we don't own the lock?");
  6345 #endif
  6346 #endif
  6347 						control->tail_mbuf = NULL;
  6348 #ifdef INVARIANTS
  6349 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
  6350 							panic("end_added, nothing left and no MSG_EOR");
  6352 #endif
  6355 			} else {
  6356 				/* Do we need to trim the mbuf? */
  6357 				if (control->spec_flags & M_NOTIFICATION) {
  6358 					out_flags |= MSG_NOTIFICATION;
  6360 				if ((in_flags & MSG_PEEK) == 0) {
  6361 					SCTP_BUF_RESV_UF(m, cp_len);
  6362 					SCTP_BUF_LEN(m) -= cp_len;
  6363 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  6364 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, cp_len);
  6366 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
  6367 					if ((control->do_not_ref_stcb == 0) &&
  6368 					    stcb) {
  6369 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
  6371 					copied_so_far += cp_len;
  6372 					freed_so_far += cp_len;
  6373 					freed_so_far += MSIZE;
  6374 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  6375 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb,
  6376 							   SCTP_LOG_SBRESULT, 0);
  6378 					atomic_subtract_int(&control->length, cp_len);
  6379 				} else {
  6380 					copied_so_far += cp_len;
  6383 #if defined(__APPLE__)
  6384 #if defined(APPLE_LEOPARD)
  6385 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
  6386 #else
  6387 			if ((out_flags & MSG_EOR) || (uio_resid(uio) == 0)) {
  6388 #endif
  6389 #else
  6390 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
  6391 #endif
  6392 				break;
  6394 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
  6395 			    (control->do_not_ref_stcb == 0) &&
  6396 			    (freed_so_far >= rwnd_req)) {
  6397 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
  6399 		} /* end while(m) */
  6400 		/*
  6401 		 * At this point we have looked at it all and we either have
  6402 		 * a MSG_EOR/or read all the user wants... <OR>
  6403 		 * control->length == 0.
  6404 		 */
  6405 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
  6406 			/* we are done with this control */
  6407 			if (control->length == 0) {
  6408 				if (control->data) {
  6409 #ifdef INVARIANTS
  6410 					panic("control->data not null at read eor?");
  6411 #else
  6412 					SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
  6413 					sctp_m_freem(control->data);
  6414 					control->data = NULL;
  6415 #endif
  6417 			done_with_control:
  6418 				if (TAILQ_NEXT(control, next) == NULL) {
  6419 					/* If we don't have a next we need a
  6420 					 * lock; if there is a next, the interrupt
  6421 					 * is filling ahead of us and we don't
  6422 					 * need a lock to remove this guy
  6423 					 * (which is the head of the queue).
  6424 					 */
  6425 					if (hold_rlock == 0) {
  6426 						SCTP_INP_READ_LOCK(inp);
  6427 						hold_rlock = 1;
  6430 				TAILQ_REMOVE(&inp->read_queue, control, next);
  6431 				/* Add back any hidden data */
  6432 				if (control->held_length) {
  6433 					held_length = 0;
  6434 					control->held_length = 0;
  6435 					wakeup_read_socket = 1;
  6437 				if (control->aux_data) {
  6438 					sctp_m_free (control->aux_data);
  6439 					control->aux_data = NULL;
  6441 				no_rcv_needed = control->do_not_ref_stcb;
  6442 				sctp_free_remote_addr(control->whoFrom);
  6443 				control->data = NULL;
  6444 				sctp_free_a_readq(stcb, control);
  6445 				control = NULL;
  6446 				if ((freed_so_far >= rwnd_req) &&
  6447 				    (no_rcv_needed == 0))
  6448 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
  6450 			} else {
  6451 				/*
  6452 				 * The user did not read all of this
  6453 				 * message, turn off the returned MSG_EOR
  6454 				 * since we are leaving more behind on the
  6455 				 * control to read.
  6456 				 */
  6457 #ifdef INVARIANTS
  6458 				if (control->end_added &&
  6459 				    (control->data == NULL) &&
  6460 				    (control->tail_mbuf == NULL)) {
  6461 					panic("Gak, control->length is corrupt?");
  6463 #endif
  6464 				no_rcv_needed = control->do_not_ref_stcb;
  6465 				out_flags &= ~MSG_EOR;
  6468 		if (out_flags & MSG_EOR) {
  6469 			goto release;
  6471 #if defined(__APPLE__)
  6472 #if defined(APPLE_LEOPARD)
  6473 		if ((uio->uio_resid == 0) ||
  6474 #else
  6475 		if ((uio_resid(uio) == 0) ||
  6476 #endif
  6477 #else
  6478 		if ((uio->uio_resid == 0) ||
  6479 #endif
  6480 		    ((in_eeor_mode) &&
  6481 		     (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
  6482 			goto release;
  6484 		/*
  6485 		 * If I hit here the receiver wants more and this message is
  6486 		 * NOT done (pd-api). So two questions. Can we block? if not
  6487 		 * we are done. Did the user NOT set MSG_WAITALL?
  6488 		 */
  6489 		if (block_allowed == 0) {
  6490 			goto release;
  6492 		/*
  6493 		 * We need to wait for more data.  A few things: - We don't
  6494 		 * sbunlock() so we don't get someone else reading. - We
  6495 		 * must be sure to account for the case where what is added
  6496 		 * is NOT to our control when we wakeup.
  6497 		 */
  6499 		/* Do we need to tell the transport a rwnd update might be
  6500 		 * needed before we go to sleep?
  6501 		 */
  6502 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
  6503 		    ((freed_so_far >= rwnd_req) &&
  6504 		     (control->do_not_ref_stcb == 0) &&
  6505 		     (no_rcv_needed == 0))) {
  6506 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
  6508 	wait_some_more:
  6509 #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
  6510 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
  6511 			goto release;
  6513 #else
  6514 		if (so->so_state & SS_CANTRCVMORE) {
  6515 			goto release;
  6517 #endif
  6519 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
  6520 			goto release;
  6522 		if (hold_rlock == 1) {
  6523 			SCTP_INP_READ_UNLOCK(inp);
  6524 			hold_rlock = 0;
  6526 		if (hold_sblock == 0) {
  6527 			SOCKBUF_LOCK(&so->so_rcv);
  6528 			hold_sblock = 1;
  6530 #if defined(__APPLE__)
  6531 		sbunlock(&so->so_rcv, 1);
  6532 #endif
  6533 		if ((copied_so_far) && (control->length == 0) &&
  6534 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
  6535 			goto release;
  6537 		if (so->so_rcv.sb_cc <= control->held_length) {
  6538 			error = sbwait(&so->so_rcv);
  6539 			if (error) {
  6540 #if defined(__FreeBSD__)
  6541 				goto release;
  6542 #else
  6543 				goto release_unlocked;
  6544 #endif
  6546 			control->held_length = 0;
  6548 #if defined(__APPLE__)
  6549 		error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
  6550 #endif
  6551 		if (hold_sblock) {
  6552 			SOCKBUF_UNLOCK(&so->so_rcv);
  6553 			hold_sblock = 0;
  6555 		if (control->length == 0) {
  6556 			/* still nothing here */
  6557 			if (control->end_added == 1) {
  6558 				/* he aborted, or is done, i.e. did a shutdown */
  6559 				out_flags |= MSG_EOR;
  6560 				if (control->pdapi_aborted) {
  6561 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
  6562 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
  6564 					out_flags |= MSG_TRUNC;
  6565 				} else {
  6566 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
  6567 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
  6569 				goto done_with_control;
  6571 			if (so->so_rcv.sb_cc > held_length) {
  6572 				control->held_length = so->so_rcv.sb_cc;
  6573 				held_length = 0;
  6575 			goto wait_some_more;
  6576 		} else if (control->data == NULL) {
  6577 			/* we must re-sync since data
  6578 			 * is probably being added
  6579 			 */
  6580 			SCTP_INP_READ_LOCK(inp);
  6581 			if ((control->length > 0) && (control->data == NULL)) {
  6582 				/* big trouble.. we have the lock and it's corrupt? */
  6583 #ifdef INVARIANTS
  6584 				panic ("Impossible data==NULL length !=0");
  6585 #endif
  6586 				out_flags |= MSG_EOR;
  6587 				out_flags |= MSG_TRUNC;
  6588 				control->length = 0;
  6589 				SCTP_INP_READ_UNLOCK(inp);
  6590 				goto done_with_control;
  6592 			SCTP_INP_READ_UNLOCK(inp);
  6593 			/* We will fall around to get more data */
  6595 		goto get_more_data;
  6596 	} else {
  6597 		/*-
  6598 		 * Give caller back the mbuf chain,
  6599 		 * store in uio_resid the length
  6600 		 */
  6601 		wakeup_read_socket = 0;
  6602 		if ((control->end_added == 0) ||
  6603 		    (TAILQ_NEXT(control, next) == NULL)) {
  6604 			/* Need to get rlock */
  6605 			if (hold_rlock == 0) {
  6606 				SCTP_INP_READ_LOCK(inp);
  6607 				hold_rlock = 1;
  6610 		if (control->end_added) {
  6611 			out_flags |= MSG_EOR;
  6612 			if ((control->do_not_ref_stcb == 0) &&
  6613 			    (control->stcb != NULL) &&
  6614 			    ((control->spec_flags & M_NOTIFICATION) == 0))
  6615 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
  6617 		if (control->spec_flags & M_NOTIFICATION) {
  6618 			out_flags |= MSG_NOTIFICATION;
  6620 #if defined(__APPLE__)
  6621 #if defined(APPLE_LEOPARD)
  6622 		uio->uio_resid = control->length;
  6623 #else
  6624 		uio_setresid(uio, control->length);
  6625 #endif
  6626 #else
  6627 		uio->uio_resid = control->length;
  6628 #endif
  6629 		*mp = control->data;
  6630 		m = control->data;
  6631 		while (m) {
  6632 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  6633 				sctp_sblog(&so->so_rcv,
  6634 				   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
  6636 			sctp_sbfree(control, stcb, &so->so_rcv, m);
  6637 			freed_so_far += SCTP_BUF_LEN(m);
  6638 			freed_so_far += MSIZE;
  6639 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
  6640 				sctp_sblog(&so->so_rcv,
  6641 				   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
  6643 			m = SCTP_BUF_NEXT(m);
  6645 		control->data = control->tail_mbuf = NULL;
  6646 		control->length = 0;
  6647 		if (out_flags & MSG_EOR) {
  6648 			/* Done with this control */
  6649 			goto done_with_control;
  6652  release:
  6653 	if (hold_rlock == 1) {
  6654 		SCTP_INP_READ_UNLOCK(inp);
  6655 		hold_rlock = 0;
  6657 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
  6658 	if (hold_sblock == 0) {
  6659 		SOCKBUF_LOCK(&so->so_rcv);
  6660 		hold_sblock = 1;
  6662 #else
  6663 	if (hold_sblock == 1) {
  6664 		SOCKBUF_UNLOCK(&so->so_rcv);
  6665 		hold_sblock = 0;
  6667 #endif
  6668 #if defined(__APPLE__)
  6669 	sbunlock(&so->so_rcv, 1);
  6670 #endif
  6672 #if defined(__FreeBSD__)
  6673 	sbunlock(&so->so_rcv);
  6674 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
  6675 	sockbuf_lock = 0;
  6676 #endif
  6677 #endif
  6679  release_unlocked:
  6680 	if (hold_sblock) {
  6681 		SOCKBUF_UNLOCK(&so->so_rcv);
  6682 		hold_sblock = 0;
  6684 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
  6685 		if ((freed_so_far >= rwnd_req) &&
  6686 		    (control && (control->do_not_ref_stcb == 0)) &&
  6687 		    (no_rcv_needed == 0))
  6688 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
  6690  out:
  6691 	if (msg_flags) {
  6692 		*msg_flags = out_flags;
  6694 	if (((out_flags & MSG_EOR) == 0) &&
  6695 	    ((in_flags & MSG_PEEK) == 0) &&
  6696 	    (sinfo) &&
  6697 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
  6698 	     sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
  6699 		struct sctp_extrcvinfo *s_extra;
  6700 		s_extra = (struct sctp_extrcvinfo *)sinfo;
  6701 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
  6703 	if (hold_rlock == 1) {
  6704 		SCTP_INP_READ_UNLOCK(inp);
  6706 	if (hold_sblock) {
  6707 		SOCKBUF_UNLOCK(&so->so_rcv);
  6709 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
  6710 	if (sockbuf_lock) {
  6711 		sbunlock(&so->so_rcv);
  6713 #endif
  6715 	if (freecnt_applied) {
  6716 		/*
  6717 		 * The lock on the socket buffer protects us so the free
  6718 		 * code will stop. But since we used the socketbuf lock and
  6719 		 * the sender uses the tcb_lock to increment, we need to use
  6720 		 * the atomic add to the refcnt.
  6721 		 */
  6722 		if (stcb == NULL) {
  6723 #ifdef INVARIANTS
  6724 			panic("stcb for refcnt has gone NULL?");
  6725 			goto stage_left;
  6726 #else
  6727 			goto stage_left;
  6728 #endif
  6730 		atomic_add_int(&stcb->asoc.refcnt, -1);
  6731 		/* Save the value back for next time */
  6732 		stcb->freed_by_sorcv_sincelast = freed_so_far;
  6734 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
  6735 		if (stcb) {
  6736 			sctp_misc_ints(SCTP_SORECV_DONE,
  6737 				       freed_so_far,
  6738 #if defined(__APPLE__)
  6739 #if defined(APPLE_LEOPARD)
  6740 				       ((uio) ? (slen - uio->uio_resid) : slen),
  6741 #else
  6742 				       ((uio) ? (slen - uio_resid(uio)) : slen),
  6743 #endif
  6744 #else
  6745 				       ((uio) ? (slen - uio->uio_resid) : slen),
  6746 #endif
  6747 				       stcb->asoc.my_rwnd,
  6748 				       so->so_rcv.sb_cc);
  6749 		} else {
  6750 			sctp_misc_ints(SCTP_SORECV_DONE,
  6751 				       freed_so_far,
  6752 #if defined(__APPLE__)
  6753 #if defined(APPLE_LEOPARD)
  6754 				       ((uio) ? (slen - uio->uio_resid) : slen),
  6755 #else
  6756 				       ((uio) ? (slen - uio_resid(uio)) : slen),
  6757 #endif
  6758 #else
  6759 				       ((uio) ? (slen - uio->uio_resid) : slen),
  6760 #endif
  6761 				       0,
  6762 				       so->so_rcv.sb_cc);
  6765  stage_left:
  6766 	if (wakeup_read_socket) {
  6767 		sctp_sorwakeup(inp, so);
  6769 	return (error);
  6773 #ifdef SCTP_MBUF_LOGGING
  6774 struct mbuf *
  6775 sctp_m_free(struct mbuf *m)
  6777 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
  6778 		if (SCTP_BUF_IS_EXTENDED(m)) {
  6779 			sctp_log_mb(m, SCTP_MBUF_IFREE);
  6782 	return (m_free(m));
  6785 void sctp_m_freem(struct mbuf *mb)
  6787 	while (mb != NULL)
  6788 		mb = sctp_m_free(mb);
  6791 #endif
  6793 int
  6794 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
  6796 	/* Given a local address, request a peer-set-primary
  6797 	 * for every association that holds the address.
  6798 	 */
  6799 	struct sctp_ifa *ifa;
  6800 	struct sctp_laddr *wi;
  6802 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
  6803 	if (ifa == NULL) {
  6804 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
  6805 		return (EADDRNOTAVAIL);
  6807 	/* Now that we have the ifa we must awaken the
  6808 	 * iterator with this message.
  6809 	 */
  6810 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
  6811 	if (wi == NULL) {
  6812 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
  6813 		return (ENOMEM);
  6815 	/* Now incr the count and init the wi structure */
  6816 	SCTP_INCR_LADDR_COUNT();
  6817 	bzero(wi, sizeof(*wi));
  6818 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
  6819 	wi->ifa = ifa;
  6820 	wi->action = SCTP_SET_PRIM_ADDR;
  6821 	atomic_add_int(&ifa->refcount, 1);
  6823 	/* Now add it to the work queue */
  6824 	SCTP_WQ_ADDR_LOCK();
  6825 	/*
  6826 	 * Should this really be a tailq? As it is we will process the
  6827 	 * newest first :-0
  6828 	 */
  6829 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
  6830 	SCTP_WQ_ADDR_UNLOCK();
  6831 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
  6832 			 (struct sctp_inpcb *)NULL,
  6833 			 (struct sctp_tcb *)NULL,
  6834 			 (struct sctp_nets *)NULL);
  6835 	return (0);
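/*
 * Note on the work queue used by sctp_dynamic_set_primary() above
 * (descriptive only): the request is pushed with LIST_INSERT_HEAD(), so the
 * ADDR_WQ timer handles items newest-first -- requests queued in the order
 * A, B, C are processed C, B, A.  Switching the head to a TAILQ and using
 * TAILQ_INSERT_TAIL() would preserve submission order instead, which is what
 * the "Should this really be a tailq?" comment above is wondering about.
 */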
  6838 #if defined(__Userspace__)
  6839 /* no sctp_soreceive for __Userspace__ now */
  6840 #endif
  6842 #if !defined(__Userspace__)
  6843 int
  6844 sctp_soreceive(	struct socket *so,
  6845 		struct sockaddr **psa,
  6846 		struct uio *uio,
  6847 		struct mbuf **mp0,
  6848 		struct mbuf **controlp,
  6849 		int *flagsp)
  6851 	int error, fromlen;
  6852 	uint8_t sockbuf[256];
  6853 	struct sockaddr *from;
  6854 	struct sctp_extrcvinfo sinfo;
  6855 	int filling_sinfo = 1;
  6856 	struct sctp_inpcb *inp;
  6858 	inp = (struct sctp_inpcb *)so->so_pcb;
  6859 	/* pickup the assoc we are reading from */
  6860 	if (inp == NULL) {
  6861 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  6862 		return (EINVAL);
  6864 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
  6865 	     sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
  6866 	     sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
  6867 	    (controlp == NULL)) {
  6868 		/* user does not want the sndrcv ctl */
  6869 		filling_sinfo = 0;
  6871 	if (psa) {
  6872 		from = (struct sockaddr *)sockbuf;
  6873 		fromlen = sizeof(sockbuf);
  6874 #ifdef HAVE_SA_LEN
  6875 		from->sa_len = 0;
  6876 #endif
  6877 	} else {
  6878 		from = NULL;
  6879 		fromlen = 0;
  6882 #if defined(__APPLE__)
  6883 	SCTP_SOCKET_LOCK(so, 1);
  6884 #endif
  6885 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
  6886 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
  6887 	if ((controlp) && (filling_sinfo)) {
  6888 		/* copy back the sinfo in a CMSG format */
  6889 		if (filling_sinfo)
  6890 			*controlp = sctp_build_ctl_nchunk(inp,
  6891 			                                  (struct sctp_sndrcvinfo *)&sinfo);
  6892 		else
  6893 			*controlp = NULL;
  6895 	if (psa) {
  6896 		/* copy back the address info */
  6897 #ifdef HAVE_SA_LEN
  6898 		if (from && from->sa_len) {
  6899 #else
  6900 		if (from) {
  6901 #endif
  6902 #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
  6903 			*psa = sodupsockaddr(from, M_NOWAIT);
  6904 #else
  6905 			*psa = dup_sockaddr(from, mp0 == 0);
  6906 #endif
  6907 		} else {
  6908 			*psa = NULL;
  6911 #if defined(__APPLE__)
  6912 	SCTP_SOCKET_UNLOCK(so, 1);
  6913 #endif
  6914 	return (error);
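/*
 * Illustrative sketch (kept out of compilation): filling_sinfo above is only
 * set when the application asked for ancillary receive data and supplied a
 * control buffer.  From userland that request would typically be made with
 * the standard socket API as below; the descriptor "fd" is hypothetical and
 * nothing in this sketch is defined in this file.
 */
#if 0
	{
		int fd = -1;	/* hypothetical: an already opened SCTP socket */
		int on = 1;

		(void)setsockopt(fd, IPPROTO_SCTP, SCTP_RECVRCVINFO,
		                 &on, (socklen_t)sizeof(on));
		/* recvmsg() on fd now carries an SCTP_RCVINFO cmsg, so this
		 * routine ends up building *controlp via sctp_build_ctl_nchunk(). */
	}
#endif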
  6918 #if (defined(__FreeBSD__) && __FreeBSD_version < 603000) || defined(__Windows__)
  6919 /*
  6920  * General routine to allocate a hash table with control of memory flags.
  6921  * The stock hashinit_flags() is in 7.0 and beyond for sure :-)
  6922  */
  6923 void *
  6924 sctp_hashinit_flags(int elements, struct malloc_type *type,
  6925                     u_long *hashmask, int flags)
  6927 	long hashsize;
  6928 	LIST_HEAD(generic, generic) *hashtbl;
  6929 	int i;
  6932 	if (elements <= 0) {
  6933 #ifdef INVARIANTS
  6934 		panic("hashinit: bad elements");
  6935 #else
  6936 		SCTP_PRINTF("hashinit: bad elements?\n");
  6937 		elements = 1;
  6938 #endif
  6940 	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
  6941 		continue;
  6942 	hashsize >>= 1;
  6943 	if (flags & HASH_WAITOK)
  6944 		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
  6945 	else if (flags & HASH_NOWAIT)
  6946 		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_NOWAIT);
  6947 	else {
  6948 #ifdef INVARIANTS
  6949 		panic("flag incorrect in hashinit_flags");
  6950 #else
  6951 		return (NULL);
  6952 #endif
  6955 	/* no memory? */
  6956 	if (hashtbl == NULL)
  6957 		return (NULL);
  6959 	for (i = 0; i < hashsize; i++)
  6960 		LIST_INIT(&hashtbl[i]);
  6961 	*hashmask = hashsize - 1;
  6962 	return (hashtbl);
  6964 #endif
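/*
 * Worked example for the sizing loop in sctp_hashinit_flags() above
 * (illustrative sketch, kept out of compilation): the loop doubles hashsize
 * until it exceeds "elements" and the final right shift leaves the largest
 * power of two not exceeding it, so a bucket can be picked with a mask
 * instead of a modulo.
 */
#if 0
	{
		long elements = 100, hashsize;

		for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
			continue;
		hashsize >>= 1;
		/* hashsize is now 64 and *hashmask would be 63 (0x3f),
		 * so a bucket index is simply (hashvalue & 63). */
	}
#endif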
  6966 #else /*  __Userspace__ ifdef above sctp_soreceive */
  6967 /*
  6968  * __Userspace__: define sctp_hashinit_flags() and sctp_hashdestroy() for userland.
  6969  * NOTE: We don't want multiple definitions here, so the sctp_hashinit_flags() above
  6970  * for __FreeBSD__ must be excluded.
  6972  */
  6974 void *
  6975 sctp_hashinit_flags(int elements, struct malloc_type *type,
  6976                     u_long *hashmask, int flags)
  6978 	long hashsize;
  6979 	LIST_HEAD(generic, generic) *hashtbl;
  6980 	int i;
  6982 	if (elements <= 0) {
  6983 		SCTP_PRINTF("hashinit: bad elements?\n");
  6984 #ifdef INVARIANTS
  6985 		return (NULL);
  6986 #else
  6987 		elements = 1;
  6988 #endif
  6990 	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
  6991 		continue;
  6992 	hashsize >>= 1;
  6993 	/* Cannot use MALLOC here because it has to be declared or defined
  6994 	   using MALLOC_DECLARE or MALLOC_DEFINE first. */
  6995 	if (flags & HASH_WAITOK)
  6996 		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
  6997 	else if (flags & HASH_NOWAIT)
  6998 		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
  6999 	else {
  7000 #ifdef INVARIANTS
  7001 		SCTP_PRINTF("flag incorrect in hashinit_flags.\n");
  7002 #endif
  7003 		return (NULL);
  7006 	/* no memory? */
  7007 	if (hashtbl == NULL)
  7008 		return (NULL);
  7010 	for (i = 0; i < hashsize; i++)
  7011 		LIST_INIT(&hashtbl[i]);
  7012 	*hashmask = hashsize - 1;
  7013 	return (hashtbl);
  7017 void
  7018 sctp_hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
  7020 	LIST_HEAD(generic, generic) *hashtbl, *hp;
  7022 	hashtbl = vhashtbl;
  7023 	for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
  7024 		if (!LIST_EMPTY(hp)) {
  7025 			SCTP_PRINTF("hashdestroy: hash not empty.\n");
  7026 			return;
  7028 	FREE(hashtbl, type);
  7032 void
  7033 sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
  7035 	LIST_HEAD(generic, generic) *hashtbl/*, *hp*/;
  7036 	/*
  7037 	LIST_ENTRY(type) *start, *temp;
  7038 	 */
  7039 	hashtbl = vhashtbl;
  7040 	/* Apparently temp is not dynamically allocated, so attempts to
  7041 	   free it result in an error.
  7042 	for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
  7043 		if (!LIST_EMPTY(hp)) {
  7044 			start = LIST_FIRST(hp);
  7045 			while (start != NULL) {
  7046 				temp = start;
  7047 				start = start->le_next;
  7048 				SCTP_PRINTF("%s: %p \n", __func__, (void *)temp);
  7049 				FREE(temp, type);
  7052 	 */
  7053 	FREE(hashtbl, type);
  7057 #endif
  7060 int
  7061 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
  7062 			 int totaddr, int *error)
  7064 	int added = 0;
  7065 	int i;
  7066 	struct sctp_inpcb *inp;
  7067 	struct sockaddr *sa;
  7068 	size_t incr = 0;
  7069 #ifdef INET
  7070 	struct sockaddr_in *sin;
  7071 #endif
  7072 #ifdef INET6
  7073 	struct sockaddr_in6 *sin6;
  7074 #endif
  7076 	sa = addr;
  7077 	inp = stcb->sctp_ep;
  7078 	*error = 0;
  7079 	for (i = 0; i < totaddr; i++) {
  7080 		switch (sa->sa_family) {
  7081 #ifdef INET
  7082 		case AF_INET:
  7083 			incr = sizeof(struct sockaddr_in);
  7084 			sin = (struct sockaddr_in *)sa;
  7085 			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
  7086 			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
  7087 			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
  7088 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7089 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_7);
  7090 				*error = EINVAL;
  7091 				goto out_now;
  7093 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
  7094 				/* assoc gone no un-lock */
  7095 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
  7096 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_7);
  7097 				*error = ENOBUFS;
  7098 				goto out_now;
  7100 			added++;
  7101 			break;
  7102 #endif
  7103 #ifdef INET6
  7104 		case AF_INET6:
  7105 			incr = sizeof(struct sockaddr_in6);
  7106 			sin6 = (struct sockaddr_in6 *)sa;
  7107 			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
  7108 			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
  7109 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7110 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_8);
  7111 				*error = EINVAL;
  7112 				goto out_now;
  7114 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
  7115 				/* assoc gone no un-lock */
  7116 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
  7117 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_8);
  7118 				*error = ENOBUFS;
  7119 				goto out_now;
  7121 			added++;
  7122 			break;
  7123 #endif
  7124 #if defined(__Userspace__)
  7125 		case AF_CONN:
  7126 			incr = sizeof(struct sockaddr_in6);
  7127 			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
  7128 				/* assoc gone no un-lock */
  7129 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
  7130 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_8);
  7131 				*error = ENOBUFS;
  7132 				goto out_now;
  7134 			added++;
  7135 			break;
  7136 #endif
  7137 		default:
  7138 			break;
  7140 		sa = (struct sockaddr *)((caddr_t)sa + incr);
  7142  out_now:
  7143 	return (added);
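/*
 * Illustrative sketch (kept out of compilation) of the address layout that
 * sctp_connectx_helper_add() above and sctp_connectx_helper_find() below
 * expect: the sockaddrs are packed back to back in a single buffer and the
 * walk advances by the size of each entry's own family.  The local names
 * here are hypothetical and only show how a caller might build that buffer.
 */
#if 0
	{
		char buf[sizeof(struct sockaddr_in) + sizeof(struct sockaddr_in6)];
		struct sockaddr_in *v4 = (struct sockaddr_in *)buf;
		struct sockaddr_in6 *v6 =
		    (struct sockaddr_in6 *)(buf + sizeof(struct sockaddr_in));

		memset(buf, 0, sizeof(buf));
		v4->sin_family = AF_INET;
		v6->sin6_family = AF_INET6;
#ifdef HAVE_SA_LEN
		v4->sin_len = sizeof(struct sockaddr_in);
		v6->sin6_len = sizeof(struct sockaddr_in6);
#endif
		/* fill in ports and addresses, then hand (struct sockaddr *)buf
		 * to the helpers with totaddr = 2. */
	}
#endif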
  7146 struct sctp_tcb *
  7147 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
  7148 			  int *totaddr, int *num_v4, int *num_v6, int *error,
  7149 			  int limit, int *bad_addr)
  7151 	struct sockaddr *sa;
  7152 	struct sctp_tcb *stcb = NULL;
  7153 	size_t incr, at, i;
  7154 	at = incr = 0;
  7155 	sa = addr;
  7157 	*error = *num_v6 = *num_v4 = 0;
  7158 	/* account and validate addresses */
  7159 	for (i = 0; i < (size_t)*totaddr; i++) {
  7160 		switch (sa->sa_family) {
  7161 #ifdef INET
  7162 		case AF_INET:
  7163 			(*num_v4) += 1;
  7164 			incr = sizeof(struct sockaddr_in);
  7165 #ifdef HAVE_SA_LEN
  7166 			if (sa->sa_len != incr) {
  7167 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7168 				*error = EINVAL;
  7169 				*bad_addr = 1;
  7170 				return (NULL);
  7172 #endif
  7173 			break;
  7174 #endif
  7175 #ifdef INET6
  7176 		case AF_INET6:
  7178 			struct sockaddr_in6 *sin6;
  7180 			sin6 = (struct sockaddr_in6 *)sa;
  7181 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
  7182 				/* Must be non-mapped for connectx */
  7183 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7184 				*error = EINVAL;
  7185 				*bad_addr = 1;
  7186 				return (NULL);
  7188 			(*num_v6) += 1;
  7189 			incr = sizeof(struct sockaddr_in6);
  7190 #ifdef HAVE_SA_LEN
  7191 			if (sa->sa_len != incr) {
  7192 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7193 				*error = EINVAL;
  7194 				*bad_addr = 1;
  7195 				return (NULL);
  7197 #endif
  7198 			break;
  7200 #endif
  7201 		default:
  7202 			*totaddr = i;
  7203 			/* we are done */
  7204 			break;
  7206 		if (i == (size_t)*totaddr) {
  7207 			break;
  7209 		SCTP_INP_INCR_REF(inp);
  7210 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
  7211 		if (stcb != NULL) {
  7212 			/* Already have or am bringing up an association */
  7213 			return (stcb);
  7214 		} else {
  7215 			SCTP_INP_DECR_REF(inp);
  7217 		if ((at + incr) > (size_t)limit) {
  7218 			*totaddr = i;
  7219 			break;
  7221 		sa = (struct sockaddr *)((caddr_t)sa + incr);
  7223 	return ((struct sctp_tcb *)NULL);
  7226 /*
  7227  * sctp_bindx(ADD) for one address.
  7228  * assumes all arguments are valid/checked by caller.
  7229  */
  7230 void
  7231 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
  7232 		       struct sockaddr *sa, sctp_assoc_t assoc_id,
  7233 		       uint32_t vrf_id, int *error, void *p)
  7235 	struct sockaddr *addr_touse;
  7236 #ifdef INET6
  7237 	struct sockaddr_in sin;
  7238 #endif
  7239 #ifdef SCTP_MVRF
  7240 	int i, fnd = 0;
  7241 #endif
  7243 	/* see if we're bound all already! */
  7244 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
  7245 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7246 		*error = EINVAL;
  7247 		return;
  7249 #ifdef SCTP_MVRF
  7250 	/* Is the VRF one we have */
  7251 	for (i = 0; i < inp->num_vrfs; i++) {
  7252 		if (vrf_id == inp->m_vrf_ids[i]) {
  7253 			fnd = 1;
  7254 			break;
  7257 	if (!fnd) {
  7258 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7259 		*error = EINVAL;
  7260 		return;
  7262 #endif
  7263 	addr_touse = sa;
  7264 #ifdef INET6
  7265 	if (sa->sa_family == AF_INET6) {
  7266 		struct sockaddr_in6 *sin6;
  7267 #ifdef HAVE_SA_LEN
  7268 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
  7269 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7270 			*error = EINVAL;
  7271 			return;
  7273 #endif
  7274 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
  7275 			/* can only bind v6 on PF_INET6 sockets */
  7276 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7277 			*error = EINVAL;
  7278 			return;
  7280 		sin6 = (struct sockaddr_in6 *)addr_touse;
  7281 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
  7282 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
  7283 			    SCTP_IPV6_V6ONLY(inp)) {
  7284 				/* can't bind v4-mapped on PF_INET sockets */
  7285 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7286 				*error = EINVAL;
  7287 				return;
  7289 			in6_sin6_2_sin(&sin, sin6);
  7290 			addr_touse = (struct sockaddr *)&sin;
  7293 #endif
  7294 #ifdef INET
  7295 	if (sa->sa_family == AF_INET) {
  7296 #ifdef HAVE_SA_LEN
  7297 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
  7298 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7299 			*error = EINVAL;
  7300 			return;
  7302 #endif
  7303 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
  7304 		    SCTP_IPV6_V6ONLY(inp)) {
  7305 			/* can't bind v4 on PF_INET sockets */
  7306 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7307 			*error = EINVAL;
  7308 			return;
  7311 #endif
  7312 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
  7313 #if !(defined(__Panda__) || defined(__Windows__))
  7314 		if (p == NULL) {
  7315 			/* Can't get proc for Net/Open BSD */
  7316 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7317 			*error = EINVAL;
  7318 			return;
  7320 #endif
  7321 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
  7322 		return;
  7324 	/*
  7325 	 * No locks are required here, since bind and mgmt_ep_sa
  7326 	 * each do their own locking. If we ever do something for
  7327 	 * the FIX: below, we may need to lock in that case.
  7328 	 */
  7329 	if (assoc_id == 0) {
  7330 		/* add the address */
  7331 		struct sctp_inpcb *lep;
  7332 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
  7334 		/* validate the incoming port */
  7335 		if ((lsin->sin_port != 0) &&
  7336 		    (lsin->sin_port != inp->sctp_lport)) {
  7337 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7338 			*error = EINVAL;
  7339 			return;
  7340 		} else {
  7341 			/* user specified 0 port, set it to existing port */
  7342 			lsin->sin_port = inp->sctp_lport;
  7345 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
  7346 		if (lep != NULL) {
  7347 			/*
  7348 			 * We must decrement the refcount
  7349 			 * since we have the ep already and
  7350 			 * are binding. No remove going on
  7351 			 * here.
  7352 			 */
  7353 			SCTP_INP_DECR_REF(lep);
  7355 		if (lep == inp) {
  7356 			/* already bound to it.. ok */
  7357 			return;
  7358 		} else if (lep == NULL) {
  7359 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
  7360 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
  7361 						      SCTP_ADD_IP_ADDRESS,
  7362 						      vrf_id, NULL);
  7363 		} else {
  7364 			*error = EADDRINUSE;
  7366 		if (*error)
  7367 			return;
  7368 	} else {
  7369 		/*
  7370 		 * FIX: decide whether we allow assoc based
  7371 		 * bindx
  7372 		 */
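/*
 * Note on the v4-mapped handling in sctp_bindx_add_address() above
 * (descriptive only): on a PF_INET6 socket that is not v6-only, a mapped
 * address such as ::ffff:192.0.2.1 is converted by in6_sin6_2_sin() into the
 * plain IPv4 sockaddr 192.0.2.1 with the same port before the bind/ASCONF
 * machinery sees it, so the endpoint ends up managing the real IPv4 address
 * rather than the mapped form.
 */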
  7376 /*
  7377  * sctp_bindx(DELETE) for one address.
  7378  * assumes all arguments are valid/checked by caller.
  7379  */
  7380 void
  7381 sctp_bindx_delete_address(struct sctp_inpcb *inp,
  7382 			  struct sockaddr *sa, sctp_assoc_t assoc_id,
  7383 			  uint32_t vrf_id, int *error)
  7385 	struct sockaddr *addr_touse;
  7386 #ifdef INET6
  7387 	struct sockaddr_in sin;
  7388 #endif
  7389 #ifdef SCTP_MVRF
  7390 	int i, fnd = 0;
  7391 #endif
  7393 	/* see if we're bound all already! */
  7394 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
  7395 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7396 		*error = EINVAL;
  7397 		return;
  7399 #ifdef SCTP_MVRF
  7400 	/* Is the VRF one we have */
  7401 	for (i = 0; i < inp->num_vrfs; i++) {
  7402 		if (vrf_id == inp->m_vrf_ids[i]) {
  7403 			fnd = 1;
  7404 			break;
  7407 	if (!fnd) {
  7408 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7409 		*error = EINVAL;
  7410 		return;
  7412 #endif
  7413 	addr_touse = sa;
  7414 #ifdef INET6
  7415 	if (sa->sa_family == AF_INET6) {
  7416 		struct sockaddr_in6 *sin6;
  7417 #ifdef HAVE_SA_LEN
  7418 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
  7419 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7420 			*error = EINVAL;
  7421 			return;
  7423 #endif
  7424 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
  7425 			/* can only bind v6 on PF_INET6 sockets */
  7426 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7427 			*error = EINVAL;
  7428 			return;
  7430 		sin6 = (struct sockaddr_in6 *)addr_touse;
  7431 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
  7432 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
  7433 			    SCTP_IPV6_V6ONLY(inp)) {
  7434 				/* can't bind mapped-v4 on PF_INET sockets */
  7435 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7436 				*error = EINVAL;
  7437 				return;
  7439 			in6_sin6_2_sin(&sin, sin6);
  7440 			addr_touse = (struct sockaddr *)&sin;
  7443 #endif
  7444 #ifdef INET
  7445 	if (sa->sa_family == AF_INET) {
  7446 #ifdef HAVE_SA_LEN
  7447 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
  7448 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7449 			*error = EINVAL;
  7450 			return;
  7452 #endif
  7453 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
  7454 		    SCTP_IPV6_V6ONLY(inp)) {
  7455 			/* can't bind v4 on PF_INET sockets */
  7456 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
  7457 			*error = EINVAL;
  7458 			return;
  7461 #endif
  7462 	/*
  7463 	 * No lock required; mgmt_ep_sa does its own locking.
  7464 	 * If the FIX: below is ever changed we may need to
  7465 	 * lock before calling association level binding.
  7466 	 */
  7467 	if (assoc_id == 0) {
  7468 		/* delete the address */
  7469 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
  7470 					      SCTP_DEL_IP_ADDRESS,
  7471 					      vrf_id, NULL);
  7472 	} else {
  7473 		/*
  7474 		 * FIX: decide whether we allow assoc based
  7475 		 * bindx
  7476 		 */
  7480 /*
  7481  * returns the valid local address count for an assoc, taking into account
  7482  * all scoping rules
  7483  */
  7484 int
  7485 sctp_local_addr_count(struct sctp_tcb *stcb)
  7487 	int loopback_scope;
  7488 #if defined(INET)
  7489 	int ipv4_local_scope, ipv4_addr_legal;
  7490 #endif
  7491 #if defined (INET6)
  7492 	int local_scope, site_scope, ipv6_addr_legal;
  7493 #endif
  7494 #if defined(__Userspace__)
  7495 	int conn_addr_legal;
  7496 #endif
  7497 	struct sctp_vrf *vrf;
  7498 	struct sctp_ifn *sctp_ifn;
  7499 	struct sctp_ifa *sctp_ifa;
  7500 	int count = 0;
  7502 	/* Turn on all the appropriate scopes */
  7503 	loopback_scope = stcb->asoc.scope.loopback_scope;
  7504 #if defined(INET)
  7505 	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
  7506 	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
  7507 #endif
  7508 #if defined(INET6)
  7509 	local_scope = stcb->asoc.scope.local_scope;
  7510 	site_scope = stcb->asoc.scope.site_scope;
  7511 	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
  7512 #endif
  7513 #if defined(__Userspace__)
  7514 	conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
  7515 #endif
  7516 	SCTP_IPI_ADDR_RLOCK();
  7517 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
  7518 	if (vrf == NULL) {
  7519 		/* no vrf, no addresses */
  7520 		SCTP_IPI_ADDR_RUNLOCK();
  7521 		return (0);
  7524 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
  7525 		/*
  7526 		 * bound all case: go through all ifns on the vrf
  7527 		 */
  7528 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
  7529 			if ((loopback_scope == 0) &&
  7530 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
  7531 				continue;
  7533 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
  7534 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
  7535 					continue;
  7536 				switch (sctp_ifa->address.sa.sa_family) {
  7537 #ifdef INET
  7538 				case AF_INET:
  7539 					if (ipv4_addr_legal) {
  7540 						struct sockaddr_in *sin;
  7542 						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
  7543 						if (sin->sin_addr.s_addr == 0) {
  7544 							/* skip unspecified addrs */
  7545 							continue;
  7547 						if ((ipv4_local_scope == 0) &&
  7548 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
  7549 							continue;
  7551 						/* count this one */
  7552 						count++;
  7553 					} else {
  7554 						continue;
  7556 					break;
  7557 #endif
  7558 #ifdef INET6
  7559 				case AF_INET6:
  7560 					if (ipv6_addr_legal) {
  7561 						struct sockaddr_in6 *sin6;
  7563 #if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME)
  7564 						struct sockaddr_in6 lsa6;
  7565 #endif
  7566 						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
  7567 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
  7568 							continue;
  7570 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
  7571 							if (local_scope == 0)
  7572 								continue;
  7573 #if defined(SCTP_EMBEDDED_V6_SCOPE)
  7574 							if (sin6->sin6_scope_id == 0) {
  7575 #ifdef SCTP_KAME
  7576 								if (sa6_recoverscope(sin6) != 0)
  7577 									/*
  7578 									 * bad link
  7579 									 * local
  7580 									 * address
  7581 									 */
  7582 									continue;
  7583 #else
  7584 								lsa6 = *sin6;
  7585 								if (in6_recoverscope(&lsa6,
  7586 								                     &lsa6.sin6_addr,
  7587 								                     NULL))
  7588 									/*
  7589 									 * bad link
  7590 									 * local
  7591 									 * address
  7592 									 */
  7593 									continue;
  7594 								sin6 = &lsa6;
  7595 #endif /* SCTP_KAME */
  7597 #endif /* SCTP_EMBEDDED_V6_SCOPE */
  7599 						if ((site_scope == 0) &&
  7600 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
  7601 							continue;
  7603 						/* count this one */
  7604 						count++;
  7606 					break;
  7607 #endif
  7608 #if defined(__Userspace__)
  7609 				case AF_CONN:
  7610 					if (conn_addr_legal) {
  7611 						count++;
  7613 					break;
  7614 #endif
  7615 				default:
  7616 				/* TSNH (this should not happen) */
  7617 					break;
  7621 	} else {
  7622 		/*
  7623 		 * subset bound case
  7624 		 */
  7625 		struct sctp_laddr *laddr;
  7626 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
  7627 			     sctp_nxt_addr) {
  7628 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
  7629 				continue;
  7631 			/* count this one */
  7632 			count++;
  7635 	SCTP_IPI_ADDR_RUNLOCK();
  7636 	return (count);
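/*
 * Worked example for sctp_local_addr_count() above (illustrative only):
 * assuming IPv4 addresses are legal for the association but
 * loopback_scope == 0 and ipv4_local_scope == 0, a bound-all endpoint whose
 * interfaces carry 127.0.0.1, 10.0.0.5 and 203.0.113.7 reports a count of 1:
 * the loopback interface is skipped entirely, the RFC 1918 address fails the
 * IN4_ISPRIVATE_ADDRESS() check, and only the globally scoped 203.0.113.7
 * survives the scoping rules.
 */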
  7639 #if defined(SCTP_LOCAL_TRACE_BUF)
  7641 void
  7642 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
  7644 	uint32_t saveindex, newindex;
  7646 #if defined(__Windows__)
  7647 	if (SCTP_BASE_SYSCTL(sctp_log) == NULL) {
  7648 		return;
  7650 	do {
  7651 		saveindex = SCTP_BASE_SYSCTL(sctp_log)->index;
  7652 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
  7653 			newindex = 1;
  7654 		} else {
  7655 			newindex = saveindex + 1;
  7657 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log)->index, saveindex, newindex) == 0);
  7658 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
  7659 		saveindex = 0;
  7661 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
  7662 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].subsys = subsys;
  7663 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[0] = a;
  7664 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[1] = b;
  7665 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[2] = c;
  7666 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[3] = d;
  7667 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[4] = e;
  7668 	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[5] = f;
  7669 #else
  7670 	do {
  7671 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
  7672 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
  7673 			newindex = 1;
  7674 		} else {
  7675 			newindex = saveindex + 1;
  7677 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
  7678 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
  7679 		saveindex = 0;
  7681 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
  7682 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
  7683 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
  7684 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
  7685 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
  7686 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
  7687 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
  7688 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
  7689 #endif
  7692 #endif
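/*
 * Illustrative sketch (kept out of compilation): sctp_log_trace() above
 * claims ring-buffer slots without a lock by retrying atomic_cmpset_int() on
 * the shared index, which wraps from SCTP_MAX_LOGGING_SIZE back to 1.  A
 * debugger-style reader could find the most recently completed entry as
 * below (non-Windows form shown); no memory-ordering guarantees are implied,
 * this only illustrates the index arithmetic.
 */
#if 0
	{
		uint32_t idx;

		idx = SCTP_BASE_SYSCTL(sctp_log).index;
		if (idx > 0 && idx <= SCTP_MAX_LOGGING_SIZE) {
			/* slot idx - 1 holds the newest record */
			uint32_t subsys =
			    SCTP_BASE_SYSCTL(sctp_log).entry[idx - 1].subsys;
			(void)subsys;
		}
	}
#endif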
  7693 #if defined(__FreeBSD__)
  7694 #if __FreeBSD_version >= 800044
  7695 static void
  7696 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
  7698 	struct ip *iph;
  7699 #ifdef INET6
  7700 	struct ip6_hdr *ip6;
  7701 #endif
  7702 	struct mbuf *sp, *last;
  7703 	struct udphdr *uhdr;
  7704 	uint16_t port;
  7706 	if ((m->m_flags & M_PKTHDR) == 0) {
  7707 		/* Can't handle one that is not a pkt hdr */
  7708 		goto out;
  7710 	/* Pull the src port */
  7711 	iph = mtod(m, struct ip *);
  7712 	uhdr = (struct udphdr *)((caddr_t)iph + off);
  7713 	port = uhdr->uh_sport;
  7714 	/* Split out the mbuf chain. Leave the
  7715 	 * IP header in m, place the
  7716 	 * rest in sp.
  7717 	 */
  7718 	sp = m_split(m, off, M_NOWAIT);
  7719 	if (sp == NULL) {
  7720 		/* Gak, drop packet, we can't do a split */
  7721 		goto out;
  7723 	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
  7724 		/* Gak, packet can't have an SCTP header in it - too small */
  7725 		m_freem(sp);
  7726 		goto out;
  7728 	/* Now pull up the UDP header and SCTP header together */
  7729 	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
  7730 	if (sp == NULL) {
  7731 		/* Gak pullup failed */
  7732 		goto out;
  7734 	/* Trim out the UDP header */
  7735 	m_adj(sp, sizeof(struct udphdr));
  7737 	/* Now reconstruct the mbuf chain */
  7738 	for (last = m; last->m_next; last = last->m_next);
  7739 	last->m_next = sp;
  7740 	m->m_pkthdr.len += sp->m_pkthdr.len;
  7741 	iph = mtod(m, struct ip *);
  7742 	switch (iph->ip_v) {
  7743 #ifdef INET
  7744 	case IPVERSION:
  7745 #if __FreeBSD_version >= 1000000
  7746 		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
  7747 #else
  7748 		iph->ip_len -= sizeof(struct udphdr);
  7749 #endif
  7750 		sctp_input_with_port(m, off, port);
  7751 		break;
  7752 #endif
  7753 #ifdef INET6
  7754 	case IPV6_VERSION >> 4:
  7755 		ip6 = mtod(m, struct ip6_hdr *);
  7756 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
  7757 		sctp6_input_with_port(&m, &off, port);
  7758 		break;
  7759 #endif
  7760 	default:
  7761 		goto out;
  7762 		break;
  7764 	return;
  7765  out:
  7766 	m_freem(m);
  7768 #endif
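/*
 * Packet layout handled by sctp_recv_udp_tunneled_packet() above
 * (descriptive only):
 *
 *   on entry:    [ IP header | UDP header | SCTP common header + chunks ]
 *   m_split():   m = [ IP header ]         sp = [ UDP | SCTP ... ]
 *   m_adj():                               sp = [ SCTP ... ]
 *   rejoined:    [ IP header | SCTP common header + chunks ]
 *
 * with ip_len/ip6_plen reduced by sizeof(struct udphdr) and the UDP source
 * port remembered so the SCTP input path can reply over the same
 * encapsulation.
 */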
  7770 void
  7771 sctp_over_udp_stop(void)
  7773 	/*
  7774 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock() for writing!
  7775 	 */
  7776 #ifdef INET
  7777 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
  7778 		soclose(SCTP_BASE_INFO(udp4_tun_socket));
  7779 		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
  7781 #endif
  7782 #ifdef INET6
  7783 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
  7784 		soclose(SCTP_BASE_INFO(udp6_tun_socket));
  7785 		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
  7787 #endif
  7790 int
  7791 sctp_over_udp_start(void)
  7793 #if __FreeBSD_version >= 800044
  7794 	uint16_t port;
  7795 	int ret;
  7796 #ifdef INET
  7797 	struct sockaddr_in sin;
  7798 #endif
  7799 #ifdef INET6
  7800 	struct sockaddr_in6 sin6;
  7801 #endif
  7802 	/*
  7803 	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock() for writing!
  7804 	 */
  7805 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
  7806 	if (ntohs(port) == 0) {
  7807 		/* Must have a port set */
  7808 		return (EINVAL);
  7810 #ifdef INET
  7811 	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
  7812 		/* Already running -- must stop first */
  7813 		return (EALREADY);
  7815 #endif
  7816 #ifdef INET6
  7817 	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
  7818 		/* Already running -- must stop first */
  7819 		return (EALREADY);
  7821 #endif
  7822 #ifdef INET
  7823 	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
  7824 	                    SOCK_DGRAM, IPPROTO_UDP,
  7825 	                    curthread->td_ucred, curthread))) {
  7826 		sctp_over_udp_stop();
  7827 		return (ret);
  7829 	/* Call the special UDP hook. */
  7830 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
  7831 	                                    sctp_recv_udp_tunneled_packet))) {
  7832 		sctp_over_udp_stop();
  7833 		return (ret);
  7835 	/* Ok, we have a socket, bind it to the port. */
  7836 	memset(&sin, 0, sizeof(struct sockaddr_in));
  7837 	sin.sin_len = sizeof(struct sockaddr_in);
  7838 	sin.sin_family = AF_INET;
  7839 	sin.sin_port = htons(port);
  7840 	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
  7841 	                  (struct sockaddr *)&sin, curthread))) {
  7842 		sctp_over_udp_stop();
  7843 		return (ret);
  7845 #endif
  7846 #ifdef INET6
  7847 	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
  7848 	                    SOCK_DGRAM, IPPROTO_UDP,
  7849 	                    curthread->td_ucred, curthread))) {
  7850 		sctp_over_udp_stop();
  7851 		return (ret);
  7853 	/* Call the special UDP hook. */
  7854 	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
  7855 	                                    sctp_recv_udp_tunneled_packet))) {
  7856 		sctp_over_udp_stop();
  7857 		return (ret);
  7859 	/* Ok, we have a socket, bind it to the port. */
  7860 	memset(&sin6, 0, sizeof(struct sockaddr_in6));
  7861 	sin6.sin6_len = sizeof(struct sockaddr_in6);
  7862 	sin6.sin6_family = AF_INET6;
  7863 	sin6.sin6_port = htons(port);
  7864 	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
  7865 	                  (struct sockaddr *)&sin6, curthread))) {
  7866 		sctp_over_udp_stop();
  7867 		return (ret);
  7869 #endif
  7870 	return (0);
  7871 #else
  7872 	return (ENOTSUP);
  7873 #endif
  7875 #endif
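/*
 * Illustrative sketch (kept out of compilation): re-pointing the UDP
 * encapsulation at runtime is expected to follow a stop/start pattern built
 * from the two functions above, with the caller holding the sysctl info lock
 * for writing as both require.  The real handler presumably lives with the
 * sysctl code in sctp_sysctl.c; 9899 is merely the IANA-registered
 * SCTP-over-UDP port, used here as an example value.
 */
#if 0
	{
		int err;

		sctp_over_udp_stop();
		SCTP_BASE_SYSCTL(sctp_udp_tunneling_port) = 9899;
		err = sctp_over_udp_start();
		(void)err;
	}
#endif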