netwerk/sctp/src/netinet/sctputil.c

author:      Michael Schloh von Bennewitz <michael@schloh.com>
date:        Tue, 06 Jan 2015 21:39:09 +0100
branch:      TOR_BUG_9701
changeset:   8:97036ab72558
permissions: -rwxr-xr-x

Conditionally force memory storage according to privacy.thirdparty.isolate;
This solves Tor bug #9701, complying with disk avoidance documented in
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
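
In outline, the change gates any disk-backed storage on the privacy.thirdparty.isolate preference and keeps the data in memory when isolation is enabled. A minimal sketch of that pattern follows; the modified lines themselves are not part of this base-revision listing, and the helper names used here (privacy_thirdparty_isolate_enabled(), sctp_store_in_memory(), sctp_store_on_disk()) are hypothetical.

/*
 * Sketch only: illustrates the memory-vs-disk gating described in the
 * commit message above. All names below are hypothetical.
 */
static void
sctp_store_record(const void *buf, size_t len)
{
	if (privacy_thirdparty_isolate_enabled()) {
		/* Disk avoidance: keep the record in memory only. */
		sctp_store_in_memory(buf, len);
	} else {
		sctp_store_on_disk(buf, len);
	}
}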

michael@0 1 /*-
michael@0 2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
michael@0 3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
michael@0 4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
michael@0 5 *
michael@0 6 * Redistribution and use in source and binary forms, with or without
michael@0 7 * modification, are permitted provided that the following conditions are met:
michael@0 8 *
michael@0 9 * a) Redistributions of source code must retain the above copyright notice,
michael@0 10 * this list of conditions and the following disclaimer.
michael@0 11 *
michael@0 12 * b) Redistributions in binary form must reproduce the above copyright
michael@0 13 * notice, this list of conditions and the following disclaimer in
michael@0 14 * the documentation and/or other materials provided with the distribution.
michael@0 15 *
michael@0 16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
michael@0 17 * contributors may be used to endorse or promote products derived
michael@0 18 * from this software without specific prior written permission.
michael@0 19 *
michael@0 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
michael@0 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
michael@0 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
michael@0 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
michael@0 24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
michael@0 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
michael@0 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
michael@0 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
michael@0 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
michael@0 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
michael@0 30 * THE POSSIBILITY OF SUCH DAMAGE.
michael@0 31 */
michael@0 32
michael@0 33 #ifdef __FreeBSD__
michael@0 34 #include <sys/cdefs.h>
michael@0 35 __FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 259943 2013-12-27 13:07:00Z tuexen $");
michael@0 36 #endif
michael@0 37
michael@0 38 #include <netinet/sctp_os.h>
michael@0 39 #include <netinet/sctp_pcb.h>
michael@0 40 #include <netinet/sctputil.h>
michael@0 41 #include <netinet/sctp_var.h>
michael@0 42 #include <netinet/sctp_sysctl.h>
michael@0 43 #ifdef INET6
michael@0 44 #if defined(__Userspace__) || defined(__FreeBSD__)
michael@0 45 #include <netinet6/sctp6_var.h>
michael@0 46 #endif
michael@0 47 #endif
michael@0 48 #include <netinet/sctp_header.h>
michael@0 49 #include <netinet/sctp_output.h>
michael@0 50 #include <netinet/sctp_uio.h>
michael@0 51 #include <netinet/sctp_timer.h>
michael@0 52 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
michael@0 53 #include <netinet/sctp_auth.h>
michael@0 54 #include <netinet/sctp_asconf.h>
michael@0 55 #include <netinet/sctp_bsd_addr.h>
michael@0 56 #if defined(__Userspace__)
michael@0 57 #include <netinet/sctp_constants.h>
michael@0 58 #endif
michael@0 59 #if defined(__FreeBSD__)
michael@0 60 #include <netinet/udp.h>
michael@0 61 #include <netinet/udp_var.h>
michael@0 62 #include <sys/proc.h>
michael@0 63 #endif
michael@0 64
michael@0 65 #if defined(__APPLE__)
michael@0 66 #define APPLE_FILE_NO 8
michael@0 67 #endif
michael@0 68
michael@0 69 #if defined(__Windows__)
michael@0 70 #if !defined(SCTP_LOCAL_TRACE_BUF)
michael@0 71 #include "eventrace_netinet.h"
michael@0 72 #include "sctputil.tmh" /* this is the file that will be auto generated */
michael@0 73 #endif
michael@0 74 #else
michael@0 75 #ifndef KTR_SCTP
michael@0 76 #define KTR_SCTP KTR_SUBSYS
michael@0 77 #endif
michael@0 78 #endif
michael@0 79
michael@0 80 extern struct sctp_cc_functions sctp_cc_functions[];
michael@0 81 extern struct sctp_ss_functions sctp_ss_functions[];
michael@0 82
michael@0 83 void
michael@0 84 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
michael@0 85 {
michael@0 86 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 87 struct sctp_cwnd_log sctp_clog;
michael@0 88
michael@0 89 sctp_clog.x.sb.stcb = stcb;
michael@0 90 sctp_clog.x.sb.so_sbcc = sb->sb_cc;
michael@0 91 if (stcb)
michael@0 92 sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
michael@0 93 else
michael@0 94 sctp_clog.x.sb.stcb_sbcc = 0;
michael@0 95 sctp_clog.x.sb.incr = incr;
michael@0 96 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 97 SCTP_LOG_EVENT_SB,
michael@0 98 from,
michael@0 99 sctp_clog.x.misc.log1,
michael@0 100 sctp_clog.x.misc.log2,
michael@0 101 sctp_clog.x.misc.log3,
michael@0 102 sctp_clog.x.misc.log4);
michael@0 103 #endif
michael@0 104 }
michael@0 105
michael@0 106 void
michael@0 107 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
michael@0 108 {
michael@0 109 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 110 struct sctp_cwnd_log sctp_clog;
michael@0 111
michael@0 112 sctp_clog.x.close.inp = (void *)inp;
michael@0 113 sctp_clog.x.close.sctp_flags = inp->sctp_flags;
michael@0 114 if (stcb) {
michael@0 115 sctp_clog.x.close.stcb = (void *)stcb;
michael@0 116 sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
michael@0 117 } else {
michael@0 118 sctp_clog.x.close.stcb = 0;
michael@0 119 sctp_clog.x.close.state = 0;
michael@0 120 }
michael@0 121 sctp_clog.x.close.loc = loc;
michael@0 122 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 123 SCTP_LOG_EVENT_CLOSE,
michael@0 124 0,
michael@0 125 sctp_clog.x.misc.log1,
michael@0 126 sctp_clog.x.misc.log2,
michael@0 127 sctp_clog.x.misc.log3,
michael@0 128 sctp_clog.x.misc.log4);
michael@0 129 #endif
michael@0 130 }
michael@0 131
michael@0 132 void
michael@0 133 rto_logging(struct sctp_nets *net, int from)
michael@0 134 {
michael@0 135 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 136 struct sctp_cwnd_log sctp_clog;
michael@0 137
michael@0 138 memset(&sctp_clog, 0, sizeof(sctp_clog));
michael@0 139 sctp_clog.x.rto.net = (void *) net;
michael@0 140 sctp_clog.x.rto.rtt = net->rtt / 1000;
michael@0 141 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 142 SCTP_LOG_EVENT_RTT,
michael@0 143 from,
michael@0 144 sctp_clog.x.misc.log1,
michael@0 145 sctp_clog.x.misc.log2,
michael@0 146 sctp_clog.x.misc.log3,
michael@0 147 sctp_clog.x.misc.log4);
michael@0 148 #endif
michael@0 149 }
michael@0 150
michael@0 151 void
michael@0 152 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
michael@0 153 {
michael@0 154 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 155 struct sctp_cwnd_log sctp_clog;
michael@0 156
michael@0 157 sctp_clog.x.strlog.stcb = stcb;
michael@0 158 sctp_clog.x.strlog.n_tsn = tsn;
michael@0 159 sctp_clog.x.strlog.n_sseq = sseq;
michael@0 160 sctp_clog.x.strlog.e_tsn = 0;
michael@0 161 sctp_clog.x.strlog.e_sseq = 0;
michael@0 162 sctp_clog.x.strlog.strm = stream;
michael@0 163 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 164 SCTP_LOG_EVENT_STRM,
michael@0 165 from,
michael@0 166 sctp_clog.x.misc.log1,
michael@0 167 sctp_clog.x.misc.log2,
michael@0 168 sctp_clog.x.misc.log3,
michael@0 169 sctp_clog.x.misc.log4);
michael@0 170 #endif
michael@0 171 }
michael@0 172
michael@0 173 void
michael@0 174 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
michael@0 175 {
michael@0 176 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 177 struct sctp_cwnd_log sctp_clog;
michael@0 178
michael@0 179 sctp_clog.x.nagle.stcb = (void *)stcb;
michael@0 180 sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
michael@0 181 sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
michael@0 182 sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
michael@0 183 sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
michael@0 184 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 185 SCTP_LOG_EVENT_NAGLE,
michael@0 186 action,
michael@0 187 sctp_clog.x.misc.log1,
michael@0 188 sctp_clog.x.misc.log2,
michael@0 189 sctp_clog.x.misc.log3,
michael@0 190 sctp_clog.x.misc.log4);
michael@0 191 #endif
michael@0 192 }
michael@0 193
michael@0 194 void
michael@0 195 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
michael@0 196 {
michael@0 197 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 198 struct sctp_cwnd_log sctp_clog;
michael@0 199
michael@0 200 sctp_clog.x.sack.cumack = cumack;
michael@0 201 sctp_clog.x.sack.oldcumack = old_cumack;
michael@0 202 sctp_clog.x.sack.tsn = tsn;
michael@0 203 sctp_clog.x.sack.numGaps = gaps;
michael@0 204 sctp_clog.x.sack.numDups = dups;
michael@0 205 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 206 SCTP_LOG_EVENT_SACK,
michael@0 207 from,
michael@0 208 sctp_clog.x.misc.log1,
michael@0 209 sctp_clog.x.misc.log2,
michael@0 210 sctp_clog.x.misc.log3,
michael@0 211 sctp_clog.x.misc.log4);
michael@0 212 #endif
michael@0 213 }
michael@0 214
michael@0 215 void
michael@0 216 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
michael@0 217 {
michael@0 218 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 219 struct sctp_cwnd_log sctp_clog;
michael@0 220
michael@0 221 memset(&sctp_clog, 0, sizeof(sctp_clog));
michael@0 222 sctp_clog.x.map.base = map;
michael@0 223 sctp_clog.x.map.cum = cum;
michael@0 224 sctp_clog.x.map.high = high;
michael@0 225 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 226 SCTP_LOG_EVENT_MAP,
michael@0 227 from,
michael@0 228 sctp_clog.x.misc.log1,
michael@0 229 sctp_clog.x.misc.log2,
michael@0 230 sctp_clog.x.misc.log3,
michael@0 231 sctp_clog.x.misc.log4);
michael@0 232 #endif
michael@0 233 }
michael@0 234
michael@0 235 void
michael@0 236 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
michael@0 237 {
michael@0 238 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 239 struct sctp_cwnd_log sctp_clog;
michael@0 240
michael@0 241 memset(&sctp_clog, 0, sizeof(sctp_clog));
michael@0 242 sctp_clog.x.fr.largest_tsn = biggest_tsn;
michael@0 243 sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
michael@0 244 sctp_clog.x.fr.tsn = tsn;
michael@0 245 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 246 SCTP_LOG_EVENT_FR,
michael@0 247 from,
michael@0 248 sctp_clog.x.misc.log1,
michael@0 249 sctp_clog.x.misc.log2,
michael@0 250 sctp_clog.x.misc.log3,
michael@0 251 sctp_clog.x.misc.log4);
michael@0 252 #endif
michael@0 253 }
michael@0 254
michael@0 255 void
michael@0 256 sctp_log_mb(struct mbuf *m, int from)
michael@0 257 {
michael@0 258 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 259 struct sctp_cwnd_log sctp_clog;
michael@0 260
michael@0 261 sctp_clog.x.mb.mp = m;
michael@0 262 sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
michael@0 263 sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
michael@0 264 sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
michael@0 265 if (SCTP_BUF_IS_EXTENDED(m)) {
michael@0 266 sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
michael@0 267 #if defined(__APPLE__)
michael@0 268 /* APPLE does not use a ref_cnt, but a forward/backward ref queue */
michael@0 269 #else
michael@0 270 sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
michael@0 271 #endif
michael@0 272 } else {
michael@0 273 sctp_clog.x.mb.ext = 0;
michael@0 274 sctp_clog.x.mb.refcnt = 0;
michael@0 275 }
michael@0 276 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 277 SCTP_LOG_EVENT_MBUF,
michael@0 278 from,
michael@0 279 sctp_clog.x.misc.log1,
michael@0 280 sctp_clog.x.misc.log2,
michael@0 281 sctp_clog.x.misc.log3,
michael@0 282 sctp_clog.x.misc.log4);
michael@0 283 #endif
michael@0 284 }
michael@0 285
michael@0 286 void
michael@0 287 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
michael@0 288 {
michael@0 289 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 290 struct sctp_cwnd_log sctp_clog;
michael@0 291
michael@0 292 if (control == NULL) {
michael@0 293 SCTP_PRINTF("Gak log of NULL?\n");
michael@0 294 return;
michael@0 295 }
michael@0 296 sctp_clog.x.strlog.stcb = control->stcb;
michael@0 297 sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
michael@0 298 sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
michael@0 299 sctp_clog.x.strlog.strm = control->sinfo_stream;
michael@0 300 if (poschk != NULL) {
michael@0 301 sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
michael@0 302 sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
michael@0 303 } else {
michael@0 304 sctp_clog.x.strlog.e_tsn = 0;
michael@0 305 sctp_clog.x.strlog.e_sseq = 0;
michael@0 306 }
michael@0 307 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 308 SCTP_LOG_EVENT_STRM,
michael@0 309 from,
michael@0 310 sctp_clog.x.misc.log1,
michael@0 311 sctp_clog.x.misc.log2,
michael@0 312 sctp_clog.x.misc.log3,
michael@0 313 sctp_clog.x.misc.log4);
michael@0 314 #endif
michael@0 315 }
michael@0 316
michael@0 317 void
michael@0 318 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
michael@0 319 {
michael@0 320 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 321 struct sctp_cwnd_log sctp_clog;
michael@0 322
michael@0 323 sctp_clog.x.cwnd.net = net;
michael@0 324 if (stcb->asoc.send_queue_cnt > 255)
michael@0 325 sctp_clog.x.cwnd.cnt_in_send = 255;
michael@0 326 else
michael@0 327 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
michael@0 328 if (stcb->asoc.stream_queue_cnt > 255)
michael@0 329 sctp_clog.x.cwnd.cnt_in_str = 255;
michael@0 330 else
michael@0 331 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
michael@0 332
michael@0 333 if (net) {
michael@0 334 sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
michael@0 335 sctp_clog.x.cwnd.inflight = net->flight_size;
michael@0 336 sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
michael@0 337 sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
michael@0 338 sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
michael@0 339 }
michael@0 340 if (SCTP_CWNDLOG_PRESEND == from) {
michael@0 341 sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
michael@0 342 }
michael@0 343 sctp_clog.x.cwnd.cwnd_augment = augment;
michael@0 344 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 345 SCTP_LOG_EVENT_CWND,
michael@0 346 from,
michael@0 347 sctp_clog.x.misc.log1,
michael@0 348 sctp_clog.x.misc.log2,
michael@0 349 sctp_clog.x.misc.log3,
michael@0 350 sctp_clog.x.misc.log4);
michael@0 351 #endif
michael@0 352 }
michael@0 353
michael@0 354 #ifndef __APPLE__
michael@0 355 void
michael@0 356 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
michael@0 357 {
michael@0 358 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 359 struct sctp_cwnd_log sctp_clog;
michael@0 360
michael@0 361 memset(&sctp_clog, 0, sizeof(sctp_clog));
michael@0 362 if (inp) {
michael@0 363 sctp_clog.x.lock.sock = (void *) inp->sctp_socket;
michael@0 364
michael@0 365 } else {
michael@0 366 sctp_clog.x.lock.sock = (void *) NULL;
michael@0 367 }
michael@0 368 sctp_clog.x.lock.inp = (void *) inp;
michael@0 369 #if (defined(__FreeBSD__) && __FreeBSD_version >= 503000) || (defined(__APPLE__))
michael@0 370 if (stcb) {
michael@0 371 sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
michael@0 372 } else {
michael@0 373 sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
michael@0 374 }
michael@0 375 if (inp) {
michael@0 376 sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
michael@0 377 sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
michael@0 378 } else {
michael@0 379 sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
michael@0 380 sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
michael@0 381 }
michael@0 382 #if (defined(__FreeBSD__) && __FreeBSD_version <= 602000)
michael@0 383 sctp_clog.x.lock.info_lock = mtx_owned(&SCTP_BASE_INFO(ipi_ep_mtx));
michael@0 384 #else
michael@0 385 sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
michael@0 386 #endif
michael@0 387 if (inp && (inp->sctp_socket)) {
michael@0 388 sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
michael@0 389 sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
michael@0 390 sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
michael@0 391 } else {
michael@0 392 sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
michael@0 393 sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
michael@0 394 sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
michael@0 395 }
michael@0 396 #endif
michael@0 397 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 398 SCTP_LOG_LOCK_EVENT,
michael@0 399 from,
michael@0 400 sctp_clog.x.misc.log1,
michael@0 401 sctp_clog.x.misc.log2,
michael@0 402 sctp_clog.x.misc.log3,
michael@0 403 sctp_clog.x.misc.log4);
michael@0 404 #endif
michael@0 405 }
michael@0 406 #endif
michael@0 407
michael@0 408 void
michael@0 409 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
michael@0 410 {
michael@0 411 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 412 struct sctp_cwnd_log sctp_clog;
michael@0 413
michael@0 414 memset(&sctp_clog, 0, sizeof(sctp_clog));
michael@0 415 sctp_clog.x.cwnd.net = net;
michael@0 416 sctp_clog.x.cwnd.cwnd_new_value = error;
michael@0 417 sctp_clog.x.cwnd.inflight = net->flight_size;
michael@0 418 sctp_clog.x.cwnd.cwnd_augment = burst;
michael@0 419 if (stcb->asoc.send_queue_cnt > 255)
michael@0 420 sctp_clog.x.cwnd.cnt_in_send = 255;
michael@0 421 else
michael@0 422 sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
michael@0 423 if (stcb->asoc.stream_queue_cnt > 255)
michael@0 424 sctp_clog.x.cwnd.cnt_in_str = 255;
michael@0 425 else
michael@0 426 sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
michael@0 427 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 428 SCTP_LOG_EVENT_MAXBURST,
michael@0 429 from,
michael@0 430 sctp_clog.x.misc.log1,
michael@0 431 sctp_clog.x.misc.log2,
michael@0 432 sctp_clog.x.misc.log3,
michael@0 433 sctp_clog.x.misc.log4);
michael@0 434 #endif
michael@0 435 }
michael@0 436
michael@0 437 void
michael@0 438 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
michael@0 439 {
michael@0 440 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 441 struct sctp_cwnd_log sctp_clog;
michael@0 442
michael@0 443 sctp_clog.x.rwnd.rwnd = peers_rwnd;
michael@0 444 sctp_clog.x.rwnd.send_size = snd_size;
michael@0 445 sctp_clog.x.rwnd.overhead = overhead;
michael@0 446 sctp_clog.x.rwnd.new_rwnd = 0;
michael@0 447 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 448 SCTP_LOG_EVENT_RWND,
michael@0 449 from,
michael@0 450 sctp_clog.x.misc.log1,
michael@0 451 sctp_clog.x.misc.log2,
michael@0 452 sctp_clog.x.misc.log3,
michael@0 453 sctp_clog.x.misc.log4);
michael@0 454 #endif
michael@0 455 }
michael@0 456
michael@0 457 void
michael@0 458 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
michael@0 459 {
michael@0 460 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 461 struct sctp_cwnd_log sctp_clog;
michael@0 462
michael@0 463 sctp_clog.x.rwnd.rwnd = peers_rwnd;
michael@0 464 sctp_clog.x.rwnd.send_size = flight_size;
michael@0 465 sctp_clog.x.rwnd.overhead = overhead;
michael@0 466 sctp_clog.x.rwnd.new_rwnd = a_rwndval;
michael@0 467 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 468 SCTP_LOG_EVENT_RWND,
michael@0 469 from,
michael@0 470 sctp_clog.x.misc.log1,
michael@0 471 sctp_clog.x.misc.log2,
michael@0 472 sctp_clog.x.misc.log3,
michael@0 473 sctp_clog.x.misc.log4);
michael@0 474 #endif
michael@0 475 }
michael@0 476
michael@0 477 void
michael@0 478 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
michael@0 479 {
michael@0 480 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 481 struct sctp_cwnd_log sctp_clog;
michael@0 482
michael@0 483 sctp_clog.x.mbcnt.total_queue_size = total_oq;
michael@0 484 sctp_clog.x.mbcnt.size_change = book;
michael@0 485 sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
michael@0 486 sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
michael@0 487 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 488 SCTP_LOG_EVENT_MBCNT,
michael@0 489 from,
michael@0 490 sctp_clog.x.misc.log1,
michael@0 491 sctp_clog.x.misc.log2,
michael@0 492 sctp_clog.x.misc.log3,
michael@0 493 sctp_clog.x.misc.log4);
michael@0 494 #endif
michael@0 495 }
michael@0 496
michael@0 497 void
michael@0 498 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
michael@0 499 {
michael@0 500 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 501 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 502 SCTP_LOG_MISC_EVENT,
michael@0 503 from,
michael@0 504 a, b, c, d);
michael@0 505 #endif
michael@0 506 }
michael@0 507
michael@0 508 void
michael@0 509 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
michael@0 510 {
michael@0 511 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 512 struct sctp_cwnd_log sctp_clog;
michael@0 513
michael@0 514 sctp_clog.x.wake.stcb = (void *)stcb;
michael@0 515 sctp_clog.x.wake.wake_cnt = wake_cnt;
michael@0 516 sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
michael@0 517 sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
michael@0 518 sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
michael@0 519
michael@0 520 if (stcb->asoc.stream_queue_cnt < 0xff)
michael@0 521 sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
michael@0 522 else
michael@0 523 sctp_clog.x.wake.stream_qcnt = 0xff;
michael@0 524
michael@0 525 if (stcb->asoc.chunks_on_out_queue < 0xff)
michael@0 526 sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
michael@0 527 else
michael@0 528 sctp_clog.x.wake.chunks_on_oque = 0xff;
michael@0 529
michael@0 530 sctp_clog.x.wake.sctpflags = 0;
michael@0 531 /* set in the deferred mode stuff */
michael@0 532 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
michael@0 533 sctp_clog.x.wake.sctpflags |= 1;
michael@0 534 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
michael@0 535 sctp_clog.x.wake.sctpflags |= 2;
michael@0 536 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
michael@0 537 sctp_clog.x.wake.sctpflags |= 4;
michael@0 538 /* what about the sb */
michael@0 539 if (stcb->sctp_socket) {
michael@0 540 struct socket *so = stcb->sctp_socket;
michael@0 541
michael@0 542 sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
michael@0 543 } else {
michael@0 544 sctp_clog.x.wake.sbflags = 0xff;
michael@0 545 }
michael@0 546 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 547 SCTP_LOG_EVENT_WAKE,
michael@0 548 from,
michael@0 549 sctp_clog.x.misc.log1,
michael@0 550 sctp_clog.x.misc.log2,
michael@0 551 sctp_clog.x.misc.log3,
michael@0 552 sctp_clog.x.misc.log4);
michael@0 553 #endif
michael@0 554 }
michael@0 555
michael@0 556 void
michael@0 557 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
michael@0 558 {
michael@0 559 #if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
michael@0 560 struct sctp_cwnd_log sctp_clog;
michael@0 561
michael@0 562 sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
michael@0 563 sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
michael@0 564 sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
michael@0 565 sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
michael@0 566 sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
michael@0 567 sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight/1024);
michael@0 568 sctp_clog.x.blk.sndlen = sendlen;
michael@0 569 SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
michael@0 570 SCTP_LOG_EVENT_BLOCK,
michael@0 571 from,
michael@0 572 sctp_clog.x.misc.log1,
michael@0 573 sctp_clog.x.misc.log2,
michael@0 574 sctp_clog.x.misc.log3,
michael@0 575 sctp_clog.x.misc.log4);
michael@0 576 #endif
michael@0 577 }
michael@0 578
michael@0 579 int
michael@0 580 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
michael@0 581 {
michael@0 582 /* May need to fix this if ktrdump does not work */
michael@0 583 return (0);
michael@0 584 }
michael@0 585
michael@0 586 #ifdef SCTP_AUDITING_ENABLED
michael@0 587 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
michael@0 588 static int sctp_audit_indx = 0;
michael@0 589
michael@0 590 static
michael@0 591 void
michael@0 592 sctp_print_audit_report(void)
michael@0 593 {
michael@0 594 int i;
michael@0 595 int cnt;
michael@0 596
michael@0 597 cnt = 0;
michael@0 598 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
michael@0 599 if ((sctp_audit_data[i][0] == 0xe0) &&
michael@0 600 (sctp_audit_data[i][1] == 0x01)) {
michael@0 601 cnt = 0;
michael@0 602 SCTP_PRINTF("\n");
michael@0 603 } else if (sctp_audit_data[i][0] == 0xf0) {
michael@0 604 cnt = 0;
michael@0 605 SCTP_PRINTF("\n");
michael@0 606 } else if ((sctp_audit_data[i][0] == 0xc0) &&
michael@0 607 (sctp_audit_data[i][1] == 0x01)) {
michael@0 608 SCTP_PRINTF("\n");
michael@0 609 cnt = 0;
michael@0 610 }
michael@0 611 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
michael@0 612 (uint32_t) sctp_audit_data[i][1]);
michael@0 613 cnt++;
michael@0 614 if ((cnt % 14) == 0)
michael@0 615 SCTP_PRINTF("\n");
michael@0 616 }
michael@0 617 for (i = 0; i < sctp_audit_indx; i++) {
michael@0 618 if ((sctp_audit_data[i][0] == 0xe0) &&
michael@0 619 (sctp_audit_data[i][1] == 0x01)) {
michael@0 620 cnt = 0;
michael@0 621 SCTP_PRINTF("\n");
michael@0 622 } else if (sctp_audit_data[i][0] == 0xf0) {
michael@0 623 cnt = 0;
michael@0 624 SCTP_PRINTF("\n");
michael@0 625 } else if ((sctp_audit_data[i][0] == 0xc0) &&
michael@0 626 (sctp_audit_data[i][1] == 0x01)) {
michael@0 627 SCTP_PRINTF("\n");
michael@0 628 cnt = 0;
michael@0 629 }
michael@0 630 SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
michael@0 631 (uint32_t) sctp_audit_data[i][1]);
michael@0 632 cnt++;
michael@0 633 if ((cnt % 14) == 0)
michael@0 634 SCTP_PRINTF("\n");
michael@0 635 }
michael@0 636 SCTP_PRINTF("\n");
michael@0 637 }
michael@0 638
michael@0 639 void
michael@0 640 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
michael@0 641 struct sctp_nets *net)
michael@0 642 {
michael@0 643 int resend_cnt, tot_out, rep, tot_book_cnt;
michael@0 644 struct sctp_nets *lnet;
michael@0 645 struct sctp_tmit_chunk *chk;
michael@0 646
michael@0 647 sctp_audit_data[sctp_audit_indx][0] = 0xAA;
michael@0 648 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
michael@0 649 sctp_audit_indx++;
michael@0 650 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
michael@0 651 sctp_audit_indx = 0;
michael@0 652 }
michael@0 653 if (inp == NULL) {
michael@0 654 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
michael@0 655 sctp_audit_data[sctp_audit_indx][1] = 0x01;
michael@0 656 sctp_audit_indx++;
michael@0 657 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
michael@0 658 sctp_audit_indx = 0;
michael@0 659 }
michael@0 660 return;
michael@0 661 }
michael@0 662 if (stcb == NULL) {
michael@0 663 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
michael@0 664 sctp_audit_data[sctp_audit_indx][1] = 0x02;
michael@0 665 sctp_audit_indx++;
michael@0 666 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
michael@0 667 sctp_audit_indx = 0;
michael@0 668 }
michael@0 669 return;
michael@0 670 }
michael@0 671 sctp_audit_data[sctp_audit_indx][0] = 0xA1;
michael@0 672 sctp_audit_data[sctp_audit_indx][1] =
michael@0 673 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
michael@0 674 sctp_audit_indx++;
michael@0 675 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
michael@0 676 sctp_audit_indx = 0;
michael@0 677 }
michael@0 678 rep = 0;
michael@0 679 tot_book_cnt = 0;
michael@0 680 resend_cnt = tot_out = 0;
michael@0 681 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
michael@0 682 if (chk->sent == SCTP_DATAGRAM_RESEND) {
michael@0 683 resend_cnt++;
michael@0 684 } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
michael@0 685 tot_out += chk->book_size;
michael@0 686 tot_book_cnt++;
michael@0 687 }
michael@0 688 }
michael@0 689 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
michael@0 690 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
michael@0 691 sctp_audit_data[sctp_audit_indx][1] = 0xA1;
michael@0 692 sctp_audit_indx++;
michael@0 693 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
michael@0 694 sctp_audit_indx = 0;
michael@0 695 }
michael@0 696 SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
michael@0 697 resend_cnt, stcb->asoc.sent_queue_retran_cnt);
michael@0 698 rep = 1;
michael@0 699 stcb->asoc.sent_queue_retran_cnt = resend_cnt;
michael@0 700 sctp_audit_data[sctp_audit_indx][0] = 0xA2;
michael@0 701 sctp_audit_data[sctp_audit_indx][1] =
michael@0 702 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
michael@0 703 sctp_audit_indx++;
michael@0 704 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
michael@0 705 sctp_audit_indx = 0;
michael@0 706 }
michael@0 707 }
michael@0 708 if (tot_out != stcb->asoc.total_flight) {
michael@0 709 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
michael@0 710 sctp_audit_data[sctp_audit_indx][1] = 0xA2;
michael@0 711 sctp_audit_indx++;
michael@0 712 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
michael@0 713 sctp_audit_indx = 0;
michael@0 714 }
michael@0 715 rep = 1;
michael@0 716 SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
michael@0 717 (int)stcb->asoc.total_flight);
michael@0 718 stcb->asoc.total_flight = tot_out;
michael@0 719 }
michael@0 720 if (tot_book_cnt != stcb->asoc.total_flight_count) {
michael@0 721 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
michael@0 722 sctp_audit_data[sctp_audit_indx][1] = 0xA5;
michael@0 723 sctp_audit_indx++;
michael@0 724 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
michael@0 725 sctp_audit_indx = 0;
michael@0 726 }
michael@0 727 rep = 1;
michael@0 728 SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
michael@0 729
michael@0 730 stcb->asoc.total_flight_count = tot_book_cnt;
michael@0 731 }
michael@0 732 tot_out = 0;
michael@0 733 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
michael@0 734 tot_out += lnet->flight_size;
michael@0 735 }
michael@0 736 if (tot_out != stcb->asoc.total_flight) {
michael@0 737 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
michael@0 738 sctp_audit_data[sctp_audit_indx][1] = 0xA3;
michael@0 739 sctp_audit_indx++;
michael@0 740 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
michael@0 741 sctp_audit_indx = 0;
michael@0 742 }
michael@0 743 rep = 1;
michael@0 744 SCTP_PRINTF("real flight:%d net total was %d\n",
michael@0 745 stcb->asoc.total_flight, tot_out);
michael@0 746 /* now corrective action */
michael@0 747 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
michael@0 748
michael@0 749 tot_out = 0;
michael@0 750 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
michael@0 751 if ((chk->whoTo == lnet) &&
michael@0 752 (chk->sent < SCTP_DATAGRAM_RESEND)) {
michael@0 753 tot_out += chk->book_size;
michael@0 754 }
michael@0 755 }
michael@0 756 if (lnet->flight_size != tot_out) {
michael@0 757 SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
michael@0 758 (void *)lnet, lnet->flight_size,
michael@0 759 tot_out);
michael@0 760 lnet->flight_size = tot_out;
michael@0 761 }
michael@0 762 }
michael@0 763 }
michael@0 764 if (rep) {
michael@0 765 sctp_print_audit_report();
michael@0 766 }
michael@0 767 }
michael@0 768
michael@0 769 void
michael@0 770 sctp_audit_log(uint8_t ev, uint8_t fd)
michael@0 771 {
michael@0 772
michael@0 773 sctp_audit_data[sctp_audit_indx][0] = ev;
michael@0 774 sctp_audit_data[sctp_audit_indx][1] = fd;
michael@0 775 sctp_audit_indx++;
michael@0 776 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
michael@0 777 sctp_audit_indx = 0;
michael@0 778 }
michael@0 779 }
michael@0 780
michael@0 781 #endif
michael@0 782
michael@0 783 /*
michael@0 784 * sctp_stop_timers_for_shutdown() should be called
michael@0 785 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
michael@0 786 * state to make sure that all timers are stopped.
michael@0 787 */
michael@0 788 void
michael@0 789 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
michael@0 790 {
michael@0 791 struct sctp_association *asoc;
michael@0 792 struct sctp_nets *net;
michael@0 793
michael@0 794 asoc = &stcb->asoc;
michael@0 795
michael@0 796 (void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
michael@0 797 (void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
michael@0 798 (void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
michael@0 799 (void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
michael@0 800 (void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
michael@0 801 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
michael@0 802 (void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
michael@0 803 (void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
michael@0 804 }
michael@0 805 }
michael@0 806
michael@0 807 /*
michael@0 808 * A list of sizes based on typical MTUs, used only if the next-hop size is
michael@0 809 * not returned.
michael@0 810 */
michael@0 811 static uint32_t sctp_mtu_sizes[] = {
michael@0 812 68,
michael@0 813 296,
michael@0 814 508,
michael@0 815 512,
michael@0 816 544,
michael@0 817 576,
michael@0 818 1006,
michael@0 819 1492,
michael@0 820 1500,
michael@0 821 1536,
michael@0 822 2002,
michael@0 823 2048,
michael@0 824 4352,
michael@0 825 4464,
michael@0 826 8166,
michael@0 827 17914,
michael@0 828 32000,
michael@0 829 65535
michael@0 830 };
michael@0 831
michael@0 832 /*
michael@0 833 * Return the largest MTU smaller than val. If there is no
michael@0 834 * entry, just return val.
michael@0 835 */
michael@0 836 uint32_t
michael@0 837 sctp_get_prev_mtu(uint32_t val)
michael@0 838 {
michael@0 839 uint32_t i;
michael@0 840
michael@0 841 if (val <= sctp_mtu_sizes[0]) {
michael@0 842 return (val);
michael@0 843 }
michael@0 844 for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
michael@0 845 if (val <= sctp_mtu_sizes[i]) {
michael@0 846 break;
michael@0 847 }
michael@0 848 }
michael@0 849 return (sctp_mtu_sizes[i - 1]);
michael@0 850 }
michael@0 851
michael@0 852 /*
michael@0 853 * Return the smallest MTU larger than val. If there is no
michael@0 854 * entry, just return val.
michael@0 855 */
michael@0 856 uint32_t
michael@0 857 sctp_get_next_mtu(uint32_t val)
michael@0 858 {
michael@0 859 /* select another MTU that is just bigger than this one */
michael@0 860 uint32_t i;
michael@0 861
michael@0 862 for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
michael@0 863 if (val < sctp_mtu_sizes[i]) {
michael@0 864 return (sctp_mtu_sizes[i]);
michael@0 865 }
michael@0 866 }
michael@0 867 return (val);
michael@0 868 }
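/*
 * Worked example of the two helpers above, assuming the sctp_mtu_sizes[]
 * table as listed:
 *
 *     sctp_get_prev_mtu(1500)  == 1492   (largest table entry below 1500)
 *     sctp_get_next_mtu(1500)  == 1536   (smallest table entry above 1500)
 *     sctp_get_prev_mtu(68)    == 68     (at or below the smallest entry)
 *     sctp_get_next_mtu(65535) == 65535  (nothing larger; value unchanged)
 */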
michael@0 869
michael@0 870 void
michael@0 871 sctp_fill_random_store(struct sctp_pcb *m)
michael@0 872 {
michael@0 873 /*
michael@0 874 * Here we use MD5/SHA-1 to hash our good random numbers together with
michael@0 875 * our counter. The result becomes our good random numbers and we
michael@0 876 * then set up to give these out. Note that we do no locking to
michael@0 877 * protect this. This is OK, since if competing callers race here we
michael@0 878 * will get more gobbledygook in the random store, which is what we
michael@0 879 * want. There is a danger that two callers will use the same random
michael@0 880 * numbers, but that's OK too, since that is random as well :->
michael@0 881 */
michael@0 882 m->store_at = 0;
michael@0 883 (void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
michael@0 884 sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
michael@0 885 sizeof(m->random_counter), (uint8_t *)m->random_store);
michael@0 886 m->random_counter++;
michael@0 887 }
michael@0 888
michael@0 889 uint32_t
michael@0 890 sctp_select_initial_TSN(struct sctp_pcb *inp)
michael@0 891 {
michael@0 892 /*
michael@0 893 * A true implementation should use a random selection process to get
michael@0 894 * the initial stream sequence number, using RFC1750 as a good
michael@0 895 * guideline
michael@0 896 */
michael@0 897 uint32_t x, *xp;
michael@0 898 uint8_t *p;
michael@0 899 int store_at, new_store;
michael@0 900
michael@0 901 if (inp->initial_sequence_debug != 0) {
michael@0 902 uint32_t ret;
michael@0 903
michael@0 904 ret = inp->initial_sequence_debug;
michael@0 905 inp->initial_sequence_debug++;
michael@0 906 return (ret);
michael@0 907 }
michael@0 908 retry:
michael@0 909 store_at = inp->store_at;
michael@0 910 new_store = store_at + sizeof(uint32_t);
michael@0 911 if (new_store >= (SCTP_SIGNATURE_SIZE-3)) {
michael@0 912 new_store = 0;
michael@0 913 }
michael@0 914 if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
michael@0 915 goto retry;
michael@0 916 }
michael@0 917 if (new_store == 0) {
michael@0 918 /* Refill the random store */
michael@0 919 sctp_fill_random_store(inp);
michael@0 920 }
michael@0 921 p = &inp->random_store[store_at];
michael@0 922 xp = (uint32_t *)p;
michael@0 923 x = *xp;
michael@0 924 return (x);
michael@0 925 }
michael@0 926
michael@0 927 uint32_t
michael@0 928 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
michael@0 929 {
michael@0 930 uint32_t x;
michael@0 931 struct timeval now;
michael@0 932
michael@0 933 if (check) {
michael@0 934 (void)SCTP_GETTIME_TIMEVAL(&now);
michael@0 935 }
michael@0 936 for (;;) {
michael@0 937 x = sctp_select_initial_TSN(&inp->sctp_ep);
michael@0 938 if (x == 0) {
michael@0 939 /* we never use 0 */
michael@0 940 continue;
michael@0 941 }
michael@0 942 if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
michael@0 943 break;
michael@0 944 }
michael@0 945 }
michael@0 946 return (x);
michael@0 947 }
michael@0 948
michael@0 949 int
michael@0 950 sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
michael@0 951 uint32_t override_tag, uint32_t vrf_id)
michael@0 952 {
michael@0 953 struct sctp_association *asoc;
michael@0 954 /*
michael@0 955 * Anything set to zero is taken care of by the allocation routine's
michael@0 956 * bzero
michael@0 957 */
michael@0 958
michael@0 959 /*
michael@0 960 * Up front, select what scoping to apply to the addresses I tell my peer.
michael@0 961 * Not sure what to do with these right now; we will need to come up
michael@0 962 * with a way to set them. We may need to pass them through from the
michael@0 963 * caller in the sctp_aloc_assoc() function.
michael@0 964 */
michael@0 965 int i;
michael@0 966
michael@0 967 asoc = &stcb->asoc;
michael@0 968 /* init all variables to a known value. */
michael@0 969 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
michael@0 970 asoc->max_burst = inp->sctp_ep.max_burst;
michael@0 971 asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
michael@0 972 asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
michael@0 973 asoc->cookie_life = inp->sctp_ep.def_cookie_life;
michael@0 974 asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
michael@0 975 asoc->ecn_allowed = inp->sctp_ecn_enable;
michael@0 976 asoc->sctp_nr_sack_on_off = (uint8_t)SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
michael@0 977 asoc->sctp_cmt_pf = (uint8_t)0;
michael@0 978 asoc->sctp_frag_point = inp->sctp_frag_point;
michael@0 979 asoc->sctp_features = inp->sctp_features;
michael@0 980 asoc->default_dscp = inp->sctp_ep.default_dscp;
michael@0 981 #ifdef INET6
michael@0 982 if (inp->sctp_ep.default_flowlabel) {
michael@0 983 asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
michael@0 984 } else {
michael@0 985 if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
michael@0 986 asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
michael@0 987 asoc->default_flowlabel &= 0x000fffff;
michael@0 988 asoc->default_flowlabel |= 0x80000000;
michael@0 989 } else {
michael@0 990 asoc->default_flowlabel = 0;
michael@0 991 }
michael@0 992 }
michael@0 993 #endif
michael@0 994 asoc->sb_send_resv = 0;
michael@0 995 if (override_tag) {
michael@0 996 asoc->my_vtag = override_tag;
michael@0 997 } else {
michael@0 998 asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
michael@0 999 }
michael@0 1000 /* Get the nonce tags */
michael@0 1001 asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
michael@0 1002 asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
michael@0 1003 asoc->vrf_id = vrf_id;
michael@0 1004
michael@0 1005 #ifdef SCTP_ASOCLOG_OF_TSNS
michael@0 1006 asoc->tsn_in_at = 0;
michael@0 1007 asoc->tsn_out_at = 0;
michael@0 1008 asoc->tsn_in_wrapped = 0;
michael@0 1009 asoc->tsn_out_wrapped = 0;
michael@0 1010 asoc->cumack_log_at = 0;
michael@0 1011 asoc->cumack_log_atsnt = 0;
michael@0 1012 #endif
michael@0 1013 #ifdef SCTP_FS_SPEC_LOG
michael@0 1014 asoc->fs_index = 0;
michael@0 1015 #endif
michael@0 1016 asoc->refcnt = 0;
michael@0 1017 asoc->assoc_up_sent = 0;
michael@0 1018 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
michael@0 1019 sctp_select_initial_TSN(&inp->sctp_ep);
michael@0 1020 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
michael@0 1021 /* we are optimistic here */
michael@0 1022 asoc->peer_supports_pktdrop = 1;
michael@0 1023 asoc->peer_supports_nat = 0;
michael@0 1024 asoc->sent_queue_retran_cnt = 0;
michael@0 1025
michael@0 1026 /* for CMT */
michael@0 1027 asoc->last_net_cmt_send_started = NULL;
michael@0 1028
michael@0 1029 /* This will need to be adjusted */
michael@0 1030 asoc->last_acked_seq = asoc->init_seq_number - 1;
michael@0 1031 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
michael@0 1032 asoc->asconf_seq_in = asoc->last_acked_seq;
michael@0 1033
michael@0 1034 /* here we are different, we hold the next one we expect */
michael@0 1035 asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
michael@0 1036
michael@0 1037 asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
michael@0 1038 asoc->initial_rto = inp->sctp_ep.initial_rto;
michael@0 1039
michael@0 1040 asoc->max_init_times = inp->sctp_ep.max_init_times;
michael@0 1041 asoc->max_send_times = inp->sctp_ep.max_send_times;
michael@0 1042 asoc->def_net_failure = inp->sctp_ep.def_net_failure;
michael@0 1043 asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
michael@0 1044 asoc->free_chunk_cnt = 0;
michael@0 1045
michael@0 1046 asoc->iam_blocking = 0;
michael@0 1047 asoc->context = inp->sctp_context;
michael@0 1048 asoc->local_strreset_support = inp->local_strreset_support;
michael@0 1049 asoc->def_send = inp->def_send;
michael@0 1050 asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
michael@0 1051 asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
michael@0 1052 asoc->pr_sctp_cnt = 0;
michael@0 1053 asoc->total_output_queue_size = 0;
michael@0 1054
michael@0 1055 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
michael@0 1056 asoc->scope.ipv6_addr_legal = 1;
michael@0 1057 if (SCTP_IPV6_V6ONLY(inp) == 0) {
michael@0 1058 asoc->scope.ipv4_addr_legal = 1;
michael@0 1059 } else {
michael@0 1060 asoc->scope.ipv4_addr_legal = 0;
michael@0 1061 }
michael@0 1062 #if defined(__Userspace__)
michael@0 1063 asoc->scope.conn_addr_legal = 0;
michael@0 1064 #endif
michael@0 1065 } else {
michael@0 1066 asoc->scope.ipv6_addr_legal = 0;
michael@0 1067 #if defined(__Userspace__)
michael@0 1068 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
michael@0 1069 asoc->scope.conn_addr_legal = 1;
michael@0 1070 asoc->scope.ipv4_addr_legal = 0;
michael@0 1071 } else {
michael@0 1072 asoc->scope.conn_addr_legal = 0;
michael@0 1073 asoc->scope.ipv4_addr_legal = 1;
michael@0 1074 }
michael@0 1075 #else
michael@0 1076 asoc->scope.ipv4_addr_legal = 1;
michael@0 1077 #endif
michael@0 1078 }
michael@0 1079
michael@0 1080 asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
michael@0 1081 asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
michael@0 1082
michael@0 1083 asoc->smallest_mtu = inp->sctp_frag_point;
michael@0 1084 asoc->minrto = inp->sctp_ep.sctp_minrto;
michael@0 1085 asoc->maxrto = inp->sctp_ep.sctp_maxrto;
michael@0 1086
michael@0 1087 asoc->locked_on_sending = NULL;
michael@0 1088 asoc->stream_locked_on = 0;
michael@0 1089 asoc->ecn_echo_cnt_onq = 0;
michael@0 1090 asoc->stream_locked = 0;
michael@0 1091
michael@0 1092 asoc->send_sack = 1;
michael@0 1093
michael@0 1094 LIST_INIT(&asoc->sctp_restricted_addrs);
michael@0 1095
michael@0 1096 TAILQ_INIT(&asoc->nets);
michael@0 1097 TAILQ_INIT(&asoc->pending_reply_queue);
michael@0 1098 TAILQ_INIT(&asoc->asconf_ack_sent);
michael@0 1099 /* Set up to fill the HB random cache at the first HB */
michael@0 1100 asoc->hb_random_idx = 4;
michael@0 1101
michael@0 1102 asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
michael@0 1103
michael@0 1104 stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
michael@0 1105 stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
michael@0 1106
michael@0 1107 stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
michael@0 1108 stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
michael@0 1109
michael@0 1110 /*
michael@0 1111 * Now the stream parameters, here we allocate space for all streams
michael@0 1112 * that we request by default.
michael@0 1113 */
michael@0 1114 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
michael@0 1115 inp->sctp_ep.pre_open_stream_count;
michael@0 1116 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
michael@0 1117 asoc->streamoutcnt * sizeof(struct sctp_stream_out),
michael@0 1118 SCTP_M_STRMO);
michael@0 1119 if (asoc->strmout == NULL) {
michael@0 1120 /* big trouble no memory */
michael@0 1121 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
michael@0 1122 return (ENOMEM);
michael@0 1123 }
michael@0 1124 for (i = 0; i < asoc->streamoutcnt; i++) {
michael@0 1125 /*
michael@0 1126 * The inbound side must be set to 0xffff. Also NOTE that when we get
michael@0 1127 * the INIT-ACK back (for the INIT sender) we MUST reduce the
michael@0 1128 * count (streamoutcnt), but first check whether we sent to any of
michael@0 1129 * the upper streams that were dropped (if some were). Those
michael@0 1130 * that were dropped must be reported to the upper layer as
michael@0 1131 * failed to send.
michael@0 1132 */
michael@0 1133 asoc->strmout[i].next_sequence_send = 0x0;
michael@0 1134 TAILQ_INIT(&asoc->strmout[i].outqueue);
michael@0 1135 asoc->strmout[i].chunks_on_queues = 0;
michael@0 1136 asoc->strmout[i].stream_no = i;
michael@0 1137 asoc->strmout[i].last_msg_incomplete = 0;
michael@0 1138 asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
michael@0 1139 }
michael@0 1140 asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
michael@0 1141
michael@0 1142 /* Now the mapping array */
michael@0 1143 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
michael@0 1144 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
michael@0 1145 SCTP_M_MAP);
michael@0 1146 if (asoc->mapping_array == NULL) {
michael@0 1147 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
michael@0 1148 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
michael@0 1149 return (ENOMEM);
michael@0 1150 }
michael@0 1151 memset(asoc->mapping_array, 0, asoc->mapping_array_size);
michael@0 1152 SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
michael@0 1153 SCTP_M_MAP);
michael@0 1154 if (asoc->nr_mapping_array == NULL) {
michael@0 1155 SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
michael@0 1156 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
michael@0 1157 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
michael@0 1158 return (ENOMEM);
michael@0 1159 }
michael@0 1160 memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
michael@0 1161
michael@0 1162 /* Now the init of the other outqueues */
michael@0 1163 TAILQ_INIT(&asoc->free_chunks);
michael@0 1164 TAILQ_INIT(&asoc->control_send_queue);
michael@0 1165 TAILQ_INIT(&asoc->asconf_send_queue);
michael@0 1166 TAILQ_INIT(&asoc->send_queue);
michael@0 1167 TAILQ_INIT(&asoc->sent_queue);
michael@0 1168 TAILQ_INIT(&asoc->reasmqueue);
michael@0 1169 TAILQ_INIT(&asoc->resetHead);
michael@0 1170 asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
michael@0 1171 TAILQ_INIT(&asoc->asconf_queue);
michael@0 1172 /* authentication fields */
michael@0 1173 asoc->authinfo.random = NULL;
michael@0 1174 asoc->authinfo.active_keyid = 0;
michael@0 1175 asoc->authinfo.assoc_key = NULL;
michael@0 1176 asoc->authinfo.assoc_keyid = 0;
michael@0 1177 asoc->authinfo.recv_key = NULL;
michael@0 1178 asoc->authinfo.recv_keyid = 0;
michael@0 1179 LIST_INIT(&asoc->shared_keys);
michael@0 1180 asoc->marked_retrans = 0;
michael@0 1181 asoc->port = inp->sctp_ep.port;
michael@0 1182 asoc->timoinit = 0;
michael@0 1183 asoc->timodata = 0;
michael@0 1184 asoc->timosack = 0;
michael@0 1185 asoc->timoshutdown = 0;
michael@0 1186 asoc->timoheartbeat = 0;
michael@0 1187 asoc->timocookie = 0;
michael@0 1188 asoc->timoshutdownack = 0;
michael@0 1189 (void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
michael@0 1190 asoc->discontinuity_time = asoc->start_time;
michael@0 1191 /* sa_ignore MEMLEAK {memory is put in the assoc mapping array and freed later when
michael@0 1192 * the association is freed.
michael@0 1193 */
michael@0 1194 return (0);
michael@0 1195 }
michael@0 1196
michael@0 1197 void
michael@0 1198 sctp_print_mapping_array(struct sctp_association *asoc)
michael@0 1199 {
michael@0 1200 unsigned int i, limit;
michael@0 1201
michael@0 1202 SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
michael@0 1203 asoc->mapping_array_size,
michael@0 1204 asoc->mapping_array_base_tsn,
michael@0 1205 asoc->cumulative_tsn,
michael@0 1206 asoc->highest_tsn_inside_map,
michael@0 1207 asoc->highest_tsn_inside_nr_map);
michael@0 1208 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
michael@0 1209 if (asoc->mapping_array[limit - 1] != 0) {
michael@0 1210 break;
michael@0 1211 }
michael@0 1212 }
michael@0 1213 SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
michael@0 1214 for (i = 0; i < limit; i++) {
michael@0 1215 SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
michael@0 1216 }
michael@0 1217 if (limit % 16)
michael@0 1218 SCTP_PRINTF("\n");
michael@0 1219 for (limit = asoc->mapping_array_size; limit > 1; limit--) {
michael@0 1220 if (asoc->nr_mapping_array[limit - 1]) {
michael@0 1221 break;
michael@0 1222 }
michael@0 1223 }
michael@0 1224 SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
michael@0 1225 for (i = 0; i < limit; i++) {
michael@0 1226 SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ': '\n');
michael@0 1227 }
michael@0 1228 if (limit % 16)
michael@0 1229 SCTP_PRINTF("\n");
michael@0 1230 }
michael@0 1231
michael@0 1232 int
michael@0 1233 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
michael@0 1234 {
michael@0 1235 /* mapping array needs to grow */
michael@0 1236 uint8_t *new_array1, *new_array2;
michael@0 1237 uint32_t new_size;
michael@0 1238
michael@0 1239 new_size = asoc->mapping_array_size + ((needed+7)/8 + SCTP_MAPPING_ARRAY_INCR);
michael@0 1240 SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
michael@0 1241 SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
michael@0 1242 if ((new_array1 == NULL) || (new_array2 == NULL)) {
michael@0 1243 /* can't get more, forget it */
michael@0 1244 SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
michael@0 1245 if (new_array1) {
michael@0 1246 SCTP_FREE(new_array1, SCTP_M_MAP);
michael@0 1247 }
michael@0 1248 if (new_array2) {
michael@0 1249 SCTP_FREE(new_array2, SCTP_M_MAP);
michael@0 1250 }
michael@0 1251 return (-1);
michael@0 1252 }
michael@0 1253 memset(new_array1, 0, new_size);
michael@0 1254 memset(new_array2, 0, new_size);
michael@0 1255 memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
michael@0 1256 memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
michael@0 1257 SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
michael@0 1258 SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
michael@0 1259 asoc->mapping_array = new_array1;
michael@0 1260 asoc->nr_mapping_array = new_array2;
michael@0 1261 asoc->mapping_array_size = new_size;
michael@0 1262 return (0);
michael@0 1263 }
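/*
 * Worked example of the growth formula above: enough whole bytes are added
 * to cover `needed` extra bits, rounded up, plus SCTP_MAPPING_ARRAY_INCR
 * bytes of slack.  With mapping_array_size == 16 and needed == 20:
 *
 *     (20 + 7) / 8 = 3 bytes
 *     new_size     = 16 + 3 + SCTP_MAPPING_ARRAY_INCR
 *
 * Both the renegable and non-renegable arrays are reallocated together, so
 * they always remain the same size.
 */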
michael@0 1264
michael@0 1265
michael@0 1266 static void
michael@0 1267 sctp_iterator_work(struct sctp_iterator *it)
michael@0 1268 {
michael@0 1269 int iteration_count = 0;
michael@0 1270 int inp_skip = 0;
michael@0 1271 int first_in = 1;
michael@0 1272 struct sctp_inpcb *tinp;
michael@0 1273
michael@0 1274 SCTP_INP_INFO_RLOCK();
michael@0 1275 SCTP_ITERATOR_LOCK();
michael@0 1276 if (it->inp) {
michael@0 1277 SCTP_INP_RLOCK(it->inp);
michael@0 1278 SCTP_INP_DECR_REF(it->inp);
michael@0 1279 }
michael@0 1280 if (it->inp == NULL) {
michael@0 1281 /* iterator is complete */
michael@0 1282 done_with_iterator:
michael@0 1283 SCTP_ITERATOR_UNLOCK();
michael@0 1284 SCTP_INP_INFO_RUNLOCK();
michael@0 1285 if (it->function_atend != NULL) {
michael@0 1286 (*it->function_atend) (it->pointer, it->val);
michael@0 1287 }
michael@0 1288 SCTP_FREE(it, SCTP_M_ITER);
michael@0 1289 return;
michael@0 1290 }
michael@0 1291 select_a_new_ep:
michael@0 1292 if (first_in) {
michael@0 1293 first_in = 0;
michael@0 1294 } else {
michael@0 1295 SCTP_INP_RLOCK(it->inp);
michael@0 1296 }
michael@0 1297 while (((it->pcb_flags) &&
michael@0 1298 ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
michael@0 1299 ((it->pcb_features) &&
michael@0 1300 ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
michael@0 1301 /* endpoint flags or features don't match, so keep looking */
michael@0 1302 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
michael@0 1303 SCTP_INP_RUNLOCK(it->inp);
michael@0 1304 goto done_with_iterator;
michael@0 1305 }
michael@0 1306 tinp = it->inp;
michael@0 1307 it->inp = LIST_NEXT(it->inp, sctp_list);
michael@0 1308 SCTP_INP_RUNLOCK(tinp);
michael@0 1309 if (it->inp == NULL) {
michael@0 1310 goto done_with_iterator;
michael@0 1311 }
michael@0 1312 SCTP_INP_RLOCK(it->inp);
michael@0 1313 }
michael@0 1314 /* now go through each assoc which is in the desired state */
michael@0 1315 if (it->done_current_ep == 0) {
michael@0 1316 if (it->function_inp != NULL)
michael@0 1317 inp_skip = (*it->function_inp)(it->inp, it->pointer, it->val);
michael@0 1318 it->done_current_ep = 1;
michael@0 1319 }
michael@0 1320 if (it->stcb == NULL) {
michael@0 1321 /* run the per instance function */
michael@0 1322 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
michael@0 1323 }
michael@0 1324 if ((inp_skip) || it->stcb == NULL) {
michael@0 1325 if (it->function_inp_end != NULL) {
michael@0 1326 inp_skip = (*it->function_inp_end)(it->inp,
michael@0 1327 it->pointer,
michael@0 1328 it->val);
michael@0 1329 }
michael@0 1330 SCTP_INP_RUNLOCK(it->inp);
michael@0 1331 goto no_stcb;
michael@0 1332 }
michael@0 1333 while (it->stcb) {
michael@0 1334 SCTP_TCB_LOCK(it->stcb);
michael@0 1335 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
michael@0 1336 /* not in the right state... keep looking */
michael@0 1337 SCTP_TCB_UNLOCK(it->stcb);
michael@0 1338 goto next_assoc;
michael@0 1339 }
michael@0 1340 /* see if we have reached the iterator loop limit */
michael@0 1341 iteration_count++;
michael@0 1342 if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
michael@0 1343 /* Pause to let others grab the lock */
michael@0 1344 atomic_add_int(&it->stcb->asoc.refcnt, 1);
michael@0 1345 SCTP_TCB_UNLOCK(it->stcb);
michael@0 1346 SCTP_INP_INCR_REF(it->inp);
michael@0 1347 SCTP_INP_RUNLOCK(it->inp);
michael@0 1348 SCTP_ITERATOR_UNLOCK();
michael@0 1349 SCTP_INP_INFO_RUNLOCK();
michael@0 1350 SCTP_INP_INFO_RLOCK();
michael@0 1351 SCTP_ITERATOR_LOCK();
michael@0 1352 if (sctp_it_ctl.iterator_flags) {
michael@0 1353 /* We won't be staying here */
michael@0 1354 SCTP_INP_DECR_REF(it->inp);
michael@0 1355 atomic_add_int(&it->stcb->asoc.refcnt, -1);
michael@0 1356 #if !defined(__FreeBSD__)
michael@0 1357 if (sctp_it_ctl.iterator_flags &
michael@0 1358 SCTP_ITERATOR_MUST_EXIT) {
michael@0 1359 goto done_with_iterator;
michael@0 1360 }
michael@0 1361 #endif
michael@0 1362 if (sctp_it_ctl.iterator_flags &
michael@0 1363 SCTP_ITERATOR_STOP_CUR_IT) {
michael@0 1364 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
michael@0 1365 goto done_with_iterator;
michael@0 1366 }
michael@0 1367 if (sctp_it_ctl.iterator_flags &
michael@0 1368 SCTP_ITERATOR_STOP_CUR_INP) {
michael@0 1369 sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
michael@0 1370 goto no_stcb;
michael@0 1371 }
michael@0 1372 /* If we reach here huh? */
michael@0 1373 SCTP_PRINTF("Unknown it ctl flag %x\n",
michael@0 1374 sctp_it_ctl.iterator_flags);
michael@0 1375 sctp_it_ctl.iterator_flags = 0;
michael@0 1376 }
michael@0 1377 SCTP_INP_RLOCK(it->inp);
michael@0 1378 SCTP_INP_DECR_REF(it->inp);
michael@0 1379 SCTP_TCB_LOCK(it->stcb);
michael@0 1380 atomic_add_int(&it->stcb->asoc.refcnt, -1);
michael@0 1381 iteration_count = 0;
michael@0 1382 }
michael@0 1383
michael@0 1384 /* run function on this one */
michael@0 1385 (*it->function_assoc)(it->inp, it->stcb, it->pointer, it->val);
michael@0 1386
michael@0 1387 /*
michael@0 1388 * we lie here, it really needs to have its own type but
michael@0 1389 * first I must verify that this won't affect things :-0
michael@0 1390 */
michael@0 1391 if (it->no_chunk_output == 0)
michael@0 1392 sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
michael@0 1393
michael@0 1394 SCTP_TCB_UNLOCK(it->stcb);
michael@0 1395 next_assoc:
michael@0 1396 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
michael@0 1397 if (it->stcb == NULL) {
michael@0 1398 /* Run last function */
michael@0 1399 if (it->function_inp_end != NULL) {
michael@0 1400 inp_skip = (*it->function_inp_end)(it->inp,
michael@0 1401 it->pointer,
michael@0 1402 it->val);
michael@0 1403 }
michael@0 1404 }
michael@0 1405 }
michael@0 1406 SCTP_INP_RUNLOCK(it->inp);
michael@0 1407 no_stcb:
michael@0 1408 /* done with all assocs on this endpoint, move on to next endpoint */
michael@0 1409 it->done_current_ep = 0;
michael@0 1410 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
michael@0 1411 it->inp = NULL;
michael@0 1412 } else {
michael@0 1413 it->inp = LIST_NEXT(it->inp, sctp_list);
michael@0 1414 }
michael@0 1415 if (it->inp == NULL) {
michael@0 1416 goto done_with_iterator;
michael@0 1417 }
michael@0 1418 goto select_a_new_ep;
michael@0 1419 }
michael@0 1420
michael@0 1421 void
michael@0 1422 sctp_iterator_worker(void)
michael@0 1423 {
michael@0 1424 struct sctp_iterator *it, *nit;
michael@0 1425
michael@0 1426 /* This function is called with the WQ lock in place */
michael@0 1427
michael@0 1428 sctp_it_ctl.iterator_running = 1;
michael@0 1429 TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
michael@0 1430 sctp_it_ctl.cur_it = it;
michael@0 1431 /* now let's work on this one */
michael@0 1432 TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
michael@0 1433 SCTP_IPI_ITERATOR_WQ_UNLOCK();
michael@0 1434 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
michael@0 1435 CURVNET_SET(it->vn);
michael@0 1436 #endif
michael@0 1437 sctp_iterator_work(it);
michael@0 1438 sctp_it_ctl.cur_it = NULL;
michael@0 1439 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
michael@0 1440 CURVNET_RESTORE();
michael@0 1441 #endif
michael@0 1442 SCTP_IPI_ITERATOR_WQ_LOCK();
michael@0 1443 #if !defined(__FreeBSD__)
michael@0 1444 if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
michael@0 1445 break;
michael@0 1446 }
michael@0 1447 #endif
michael@0 1448 /*sa_ignore FREED_MEMORY*/
michael@0 1449 }
michael@0 1450 sctp_it_ctl.iterator_running = 0;
michael@0 1451 return;
michael@0 1452 }
michael@0 1453
michael@0 1454
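/*
 * sctp_handle_addr_wq() below drains the global address work queue
 * SCTP_BASE_INFO(addr_wq) into a private list while holding
 * SCTP_WQ_ADDR_LOCK, releases the lock, and only then starts an asconf
 * iterator over all bound-all endpoints, so the potentially long walk
 * never runs under the work-queue lock.
 */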
michael@0 1455 static void
michael@0 1456 sctp_handle_addr_wq(void)
michael@0 1457 {
michael@0 1458 /* deal with the ADDR wq from the rtsock calls */
michael@0 1459 struct sctp_laddr *wi, *nwi;
michael@0 1460 struct sctp_asconf_iterator *asc;
michael@0 1461
michael@0 1462 SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
michael@0 1463 sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
michael@0 1464 if (asc == NULL) {
michael@0 1465 /* Try later, no memory */
michael@0 1466 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
michael@0 1467 (struct sctp_inpcb *)NULL,
michael@0 1468 (struct sctp_tcb *)NULL,
michael@0 1469 (struct sctp_nets *)NULL);
michael@0 1470 return;
michael@0 1471 }
michael@0 1472 LIST_INIT(&asc->list_of_work);
michael@0 1473 asc->cnt = 0;
michael@0 1474
michael@0 1475 SCTP_WQ_ADDR_LOCK();
michael@0 1476 LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
michael@0 1477 LIST_REMOVE(wi, sctp_nxt_addr);
michael@0 1478 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
michael@0 1479 asc->cnt++;
michael@0 1480 }
michael@0 1481 SCTP_WQ_ADDR_UNLOCK();
michael@0 1482
michael@0 1483 if (asc->cnt == 0) {
michael@0 1484 SCTP_FREE(asc, SCTP_M_ASC_IT);
michael@0 1485 } else {
michael@0 1486 (void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
michael@0 1487 sctp_asconf_iterator_stcb,
michael@0 1488 NULL, /* No ep end for boundall */
michael@0 1489 SCTP_PCB_FLAGS_BOUNDALL,
michael@0 1490 SCTP_PCB_ANY_FEATURES,
michael@0 1491 SCTP_ASOC_ANY_STATE,
michael@0 1492 (void *)asc, 0,
michael@0 1493 sctp_asconf_iterator_end, NULL, 0);
michael@0 1494 }
michael@0 1495 }
michael@0 1496
michael@0 1497 void
michael@0 1498 sctp_timeout_handler(void *t)
michael@0 1499 {
michael@0 1500 struct sctp_inpcb *inp;
michael@0 1501 struct sctp_tcb *stcb;
michael@0 1502 struct sctp_nets *net;
michael@0 1503 struct sctp_timer *tmr;
michael@0 1504 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 1505 struct socket *so;
michael@0 1506 #endif
michael@0 1507 int did_output, type;
michael@0 1508
michael@0 1509 tmr = (struct sctp_timer *)t;
michael@0 1510 inp = (struct sctp_inpcb *)tmr->ep;
michael@0 1511 stcb = (struct sctp_tcb *)tmr->tcb;
michael@0 1512 net = (struct sctp_nets *)tmr->net;
michael@0 1513 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
michael@0 1514 CURVNET_SET((struct vnet *)tmr->vnet);
michael@0 1515 #endif
michael@0 1516 did_output = 1;
michael@0 1517
michael@0 1518 #ifdef SCTP_AUDITING_ENABLED
michael@0 1519 sctp_audit_log(0xF0, (uint8_t) tmr->type);
michael@0 1520 sctp_auditing(3, inp, stcb, net);
michael@0 1521 #endif
michael@0 1522
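/*
 * The tmr->stopped_from assignments below (0xa001 .. 0xa006) act as
 * breadcrumbs recording how far the handler progressed before bailing
 * out, presumably to help debug timers that fire but never reach their
 * type-specific case.
 */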
michael@0 1523 /* sanity checks... */
michael@0 1524 if (tmr->self != (void *)tmr) {
michael@0 1525 /*
michael@0 1526 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
michael@0 1527 * (void *)tmr);
michael@0 1528 */
michael@0 1529 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
michael@0 1530 CURVNET_RESTORE();
michael@0 1531 #endif
michael@0 1532 return;
michael@0 1533 }
michael@0 1534 tmr->stopped_from = 0xa001;
michael@0 1535 if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
michael@0 1536 /*
michael@0 1537 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
michael@0 1538 * tmr->type);
michael@0 1539 */
michael@0 1540 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
michael@0 1541 CURVNET_RESTORE();
michael@0 1542 #endif
michael@0 1543 return;
michael@0 1544 }
michael@0 1545 tmr->stopped_from = 0xa002;
michael@0 1546 if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
michael@0 1547 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
michael@0 1548 CURVNET_RESTORE();
michael@0 1549 #endif
michael@0 1550 return;
michael@0 1551 }
michael@0 1552 /* note the timer type and take a reference on the inp, if any */
michael@0 1553 tmr->stopped_from = 0xa003;
michael@0 1554 type = tmr->type;
michael@0 1555 if (inp) {
michael@0 1556 SCTP_INP_INCR_REF(inp);
michael@0 1557 if ((inp->sctp_socket == NULL) &&
michael@0 1558 ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
michael@0 1559 (tmr->type != SCTP_TIMER_TYPE_INIT) &&
michael@0 1560 (tmr->type != SCTP_TIMER_TYPE_SEND) &&
michael@0 1561 (tmr->type != SCTP_TIMER_TYPE_RECV) &&
michael@0 1562 (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
michael@0 1563 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
michael@0 1564 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
michael@0 1565 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
michael@0 1566 (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
michael@0 1567 ) {
michael@0 1568 SCTP_INP_DECR_REF(inp);
michael@0 1569 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
michael@0 1570 CURVNET_RESTORE();
michael@0 1571 #endif
michael@0 1572 return;
michael@0 1573 }
michael@0 1574 }
michael@0 1575 tmr->stopped_from = 0xa004;
michael@0 1576 if (stcb) {
michael@0 1577 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 1578 if (stcb->asoc.state == 0) {
michael@0 1579 atomic_add_int(&stcb->asoc.refcnt, -1);
michael@0 1580 if (inp) {
michael@0 1581 SCTP_INP_DECR_REF(inp);
michael@0 1582 }
michael@0 1583 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
michael@0 1584 CURVNET_RESTORE();
michael@0 1585 #endif
michael@0 1586 return;
michael@0 1587 }
michael@0 1588 }
michael@0 1589 tmr->stopped_from = 0xa005;
michael@0 1590 SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
michael@0 1591 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
michael@0 1592 if (inp) {
michael@0 1593 SCTP_INP_DECR_REF(inp);
michael@0 1594 }
michael@0 1595 if (stcb) {
michael@0 1596 atomic_add_int(&stcb->asoc.refcnt, -1);
michael@0 1597 }
michael@0 1598 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
michael@0 1599 CURVNET_RESTORE();
michael@0 1600 #endif
michael@0 1601 return;
michael@0 1602 }
michael@0 1603 tmr->stopped_from = 0xa006;
michael@0 1604
michael@0 1605 if (stcb) {
michael@0 1606 SCTP_TCB_LOCK(stcb);
michael@0 1607 atomic_add_int(&stcb->asoc.refcnt, -1);
michael@0 1608 if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
michael@0 1609 ((stcb->asoc.state == 0) ||
michael@0 1610 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
michael@0 1611 SCTP_TCB_UNLOCK(stcb);
michael@0 1612 if (inp) {
michael@0 1613 SCTP_INP_DECR_REF(inp);
michael@0 1614 }
michael@0 1615 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
michael@0 1616 CURVNET_RESTORE();
michael@0 1617 #endif
michael@0 1618 return;
michael@0 1619 }
michael@0 1620 }
michael@0 1621 /* record in stopped_from which timeout occurred */
michael@0 1622 tmr->stopped_from = tmr->type;
michael@0 1623
michael@0 1624 /* mark as being serviced now */
michael@0 1625 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
michael@0 1626 /*
michael@0 1627 * Callout has been rescheduled.
michael@0 1628 */
michael@0 1629 goto get_out;
michael@0 1630 }
michael@0 1631 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
michael@0 1632 /*
michael@0 1633 * Not active, so no action.
michael@0 1634 */
michael@0 1635 goto get_out;
michael@0 1636 }
michael@0 1637 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
michael@0 1638
michael@0 1639 /* call the handler for the appropriate timer type */
michael@0 1640 switch (tmr->type) {
michael@0 1641 case SCTP_TIMER_TYPE_ZERO_COPY:
michael@0 1642 if (inp == NULL) {
michael@0 1643 break;
michael@0 1644 }
michael@0 1645 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
michael@0 1646 SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
michael@0 1647 }
michael@0 1648 break;
michael@0 1649 case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
michael@0 1650 if (inp == NULL) {
michael@0 1651 break;
michael@0 1652 }
michael@0 1653 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
michael@0 1654 SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
michael@0 1655 }
michael@0 1656 break;
michael@0 1657 case SCTP_TIMER_TYPE_ADDR_WQ:
michael@0 1658 sctp_handle_addr_wq();
michael@0 1659 break;
michael@0 1660 case SCTP_TIMER_TYPE_SEND:
michael@0 1661 if ((stcb == NULL) || (inp == NULL)) {
michael@0 1662 break;
michael@0 1663 }
michael@0 1664 SCTP_STAT_INCR(sctps_timodata);
michael@0 1665 stcb->asoc.timodata++;
michael@0 1666 stcb->asoc.num_send_timers_up--;
michael@0 1667 if (stcb->asoc.num_send_timers_up < 0) {
michael@0 1668 stcb->asoc.num_send_timers_up = 0;
michael@0 1669 }
michael@0 1670 SCTP_TCB_LOCK_ASSERT(stcb);
michael@0 1671 if (sctp_t3rxt_timer(inp, stcb, net)) {
michael@0 1672 /* no need to unlock on tcb, it's gone */
michael@0 1673
michael@0 1674 goto out_decr;
michael@0 1675 }
michael@0 1676 SCTP_TCB_LOCK_ASSERT(stcb);
michael@0 1677 #ifdef SCTP_AUDITING_ENABLED
michael@0 1678 sctp_auditing(4, inp, stcb, net);
michael@0 1679 #endif
michael@0 1680 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
michael@0 1681 if ((stcb->asoc.num_send_timers_up == 0) &&
michael@0 1682 (stcb->asoc.sent_queue_cnt > 0)) {
michael@0 1683 struct sctp_tmit_chunk *chk;
michael@0 1684
michael@0 1685 /*
michael@0 1686 * Safeguard: if there are chunks on the sent queue
michael@0 1687 * but no send timers running, something is
michael@0 1688 * wrong, so we start a timer on the first chunk
michael@0 1689 * of the sent queue, on whatever net it was sent to.
michael@0 1690 */
michael@0 1691 chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
michael@0 1692 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
michael@0 1693 chk->whoTo);
michael@0 1694 }
michael@0 1695 break;
michael@0 1696 case SCTP_TIMER_TYPE_INIT:
michael@0 1697 if ((stcb == NULL) || (inp == NULL)) {
michael@0 1698 break;
michael@0 1699 }
michael@0 1700 SCTP_STAT_INCR(sctps_timoinit);
michael@0 1701 stcb->asoc.timoinit++;
michael@0 1702 if (sctp_t1init_timer(inp, stcb, net)) {
michael@0 1703 /* no need to unlock on tcb, it's gone */
michael@0 1704 goto out_decr;
michael@0 1705 }
michael@0 1706 /* We do output but not here */
michael@0 1707 did_output = 0;
michael@0 1708 break;
michael@0 1709 case SCTP_TIMER_TYPE_RECV:
michael@0 1710 if ((stcb == NULL) || (inp == NULL)) {
michael@0 1711 break;
michael@0 1712 }
michael@0 1713 SCTP_STAT_INCR(sctps_timosack);
michael@0 1714 stcb->asoc.timosack++;
michael@0 1715 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
michael@0 1716 #ifdef SCTP_AUDITING_ENABLED
michael@0 1717 sctp_auditing(4, inp, stcb, net);
michael@0 1718 #endif
michael@0 1719 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
michael@0 1720 break;
michael@0 1721 case SCTP_TIMER_TYPE_SHUTDOWN:
michael@0 1722 if ((stcb == NULL) || (inp == NULL)) {
michael@0 1723 break;
michael@0 1724 }
michael@0 1725 if (sctp_shutdown_timer(inp, stcb, net)) {
michael@0 1726 /* no need to unlock on tcb, it's gone */
michael@0 1727 goto out_decr;
michael@0 1728 }
michael@0 1729 SCTP_STAT_INCR(sctps_timoshutdown);
michael@0 1730 stcb->asoc.timoshutdown++;
michael@0 1731 #ifdef SCTP_AUDITING_ENABLED
michael@0 1732 sctp_auditing(4, inp, stcb, net);
michael@0 1733 #endif
michael@0 1734 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
michael@0 1735 break;
michael@0 1736 case SCTP_TIMER_TYPE_HEARTBEAT:
michael@0 1737 if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
michael@0 1738 break;
michael@0 1739 }
michael@0 1740 SCTP_STAT_INCR(sctps_timoheartbeat);
michael@0 1741 stcb->asoc.timoheartbeat++;
michael@0 1742 if (sctp_heartbeat_timer(inp, stcb, net)) {
michael@0 1743 /* no need to unlock on tcb, it's gone */
michael@0 1744 goto out_decr;
michael@0 1745 }
michael@0 1746 #ifdef SCTP_AUDITING_ENABLED
michael@0 1747 sctp_auditing(4, inp, stcb, net);
michael@0 1748 #endif
michael@0 1749 if (!(net->dest_state & SCTP_ADDR_NOHB)) {
michael@0 1750 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
michael@0 1751 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
michael@0 1752 }
michael@0 1753 break;
michael@0 1754 case SCTP_TIMER_TYPE_COOKIE:
michael@0 1755 if ((stcb == NULL) || (inp == NULL)) {
michael@0 1756 break;
michael@0 1757 }
michael@0 1758
michael@0 1759 if (sctp_cookie_timer(inp, stcb, net)) {
michael@0 1760 /* no need to unlock on tcb, it's gone */
michael@0 1761 goto out_decr;
michael@0 1762 }
michael@0 1763 SCTP_STAT_INCR(sctps_timocookie);
michael@0 1764 stcb->asoc.timocookie++;
michael@0 1765 #ifdef SCTP_AUDITING_ENABLED
michael@0 1766 sctp_auditing(4, inp, stcb, net);
michael@0 1767 #endif
michael@0 1768 /*
michael@0 1769 * We consider T3 and Cookie timer pretty much the same with
michael@0 1770 * respect to where from in chunk_output.
michael@0 1771 */
michael@0 1772 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
michael@0 1773 break;
michael@0 1774 case SCTP_TIMER_TYPE_NEWCOOKIE:
michael@0 1775 {
michael@0 1776 struct timeval tv;
michael@0 1777 int i, secret;
michael@0 1778 if (inp == NULL) {
michael@0 1779 break;
michael@0 1780 }
michael@0 1781 SCTP_STAT_INCR(sctps_timosecret);
michael@0 1782 (void)SCTP_GETTIME_TIMEVAL(&tv);
michael@0 1783 SCTP_INP_WLOCK(inp);
michael@0 1784 inp->sctp_ep.time_of_secret_change = tv.tv_sec;
michael@0 1785 inp->sctp_ep.last_secret_number =
michael@0 1786 inp->sctp_ep.current_secret_number;
michael@0 1787 inp->sctp_ep.current_secret_number++;
michael@0 1788 if (inp->sctp_ep.current_secret_number >=
michael@0 1789 SCTP_HOW_MANY_SECRETS) {
michael@0 1790 inp->sctp_ep.current_secret_number = 0;
michael@0 1791 }
michael@0 1792 secret = (int)inp->sctp_ep.current_secret_number;
michael@0 1793 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
michael@0 1794 inp->sctp_ep.secret_key[secret][i] =
michael@0 1795 sctp_select_initial_TSN(&inp->sctp_ep);
michael@0 1796 }
michael@0 1797 SCTP_INP_WUNLOCK(inp);
michael@0 1798 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
michael@0 1799 }
michael@0 1800 did_output = 0;
michael@0 1801 break;
michael@0 1802 case SCTP_TIMER_TYPE_PATHMTURAISE:
michael@0 1803 if ((stcb == NULL) || (inp == NULL)) {
michael@0 1804 break;
michael@0 1805 }
michael@0 1806 SCTP_STAT_INCR(sctps_timopathmtu);
michael@0 1807 sctp_pathmtu_timer(inp, stcb, net);
michael@0 1808 did_output = 0;
michael@0 1809 break;
michael@0 1810 case SCTP_TIMER_TYPE_SHUTDOWNACK:
michael@0 1811 if ((stcb == NULL) || (inp == NULL)) {
michael@0 1812 break;
michael@0 1813 }
michael@0 1814 if (sctp_shutdownack_timer(inp, stcb, net)) {
michael@0 1815 /* no need to unlock on tcb, it's gone */
michael@0 1816 goto out_decr;
michael@0 1817 }
michael@0 1818 SCTP_STAT_INCR(sctps_timoshutdownack);
michael@0 1819 stcb->asoc.timoshutdownack++;
michael@0 1820 #ifdef SCTP_AUDITING_ENABLED
michael@0 1821 sctp_auditing(4, inp, stcb, net);
michael@0 1822 #endif
michael@0 1823 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
michael@0 1824 break;
michael@0 1825 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
michael@0 1826 if ((stcb == NULL) || (inp == NULL)) {
michael@0 1827 break;
michael@0 1828 }
michael@0 1829 SCTP_STAT_INCR(sctps_timoshutdownguard);
michael@0 1830 sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
michael@0 1831 /* no need to unlock on tcb, it's gone */
michael@0 1832 goto out_decr;
michael@0 1833
michael@0 1834 case SCTP_TIMER_TYPE_STRRESET:
michael@0 1835 if ((stcb == NULL) || (inp == NULL)) {
michael@0 1836 break;
michael@0 1837 }
michael@0 1838 if (sctp_strreset_timer(inp, stcb, net)) {
michael@0 1839 /* no need to unlock on tcb, it's gone */
michael@0 1840 goto out_decr;
michael@0 1841 }
michael@0 1842 SCTP_STAT_INCR(sctps_timostrmrst);
michael@0 1843 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
michael@0 1844 break;
michael@0 1845 case SCTP_TIMER_TYPE_ASCONF:
michael@0 1846 if ((stcb == NULL) || (inp == NULL)) {
michael@0 1847 break;
michael@0 1848 }
michael@0 1849 if (sctp_asconf_timer(inp, stcb, net)) {
michael@0 1850 /* no need to unlock on tcb, it's gone */
michael@0 1851 goto out_decr;
michael@0 1852 }
michael@0 1853 SCTP_STAT_INCR(sctps_timoasconf);
michael@0 1854 #ifdef SCTP_AUDITING_ENABLED
michael@0 1855 sctp_auditing(4, inp, stcb, net);
michael@0 1856 #endif
michael@0 1857 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
michael@0 1858 break;
michael@0 1859 case SCTP_TIMER_TYPE_PRIM_DELETED:
michael@0 1860 if ((stcb == NULL) || (inp == NULL)) {
michael@0 1861 break;
michael@0 1862 }
michael@0 1863 sctp_delete_prim_timer(inp, stcb, net);
michael@0 1864 SCTP_STAT_INCR(sctps_timodelprim);
michael@0 1865 break;
michael@0 1866
michael@0 1867 case SCTP_TIMER_TYPE_AUTOCLOSE:
michael@0 1868 if ((stcb == NULL) || (inp == NULL)) {
michael@0 1869 break;
michael@0 1870 }
michael@0 1871 SCTP_STAT_INCR(sctps_timoautoclose);
michael@0 1872 sctp_autoclose_timer(inp, stcb, net);
michael@0 1873 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
michael@0 1874 did_output = 0;
michael@0 1875 break;
michael@0 1876 case SCTP_TIMER_TYPE_ASOCKILL:
michael@0 1877 if ((stcb == NULL) || (inp == NULL)) {
michael@0 1878 break;
michael@0 1879 }
michael@0 1880 SCTP_STAT_INCR(sctps_timoassockill);
michael@0 1881 /* Can we free it yet? */
michael@0 1882 SCTP_INP_DECR_REF(inp);
michael@0 1883 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL+SCTP_LOC_1);
michael@0 1884 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 1885 so = SCTP_INP_SO(inp);
michael@0 1886 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 1887 SCTP_TCB_UNLOCK(stcb);
michael@0 1888 SCTP_SOCKET_LOCK(so, 1);
michael@0 1889 SCTP_TCB_LOCK(stcb);
michael@0 1890 atomic_subtract_int(&stcb->asoc.refcnt, 1);
michael@0 1891 #endif
michael@0 1892 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL+SCTP_LOC_2);
michael@0 1893 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 1894 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 1895 #endif
michael@0 1896 /*
michael@0 1897 * free asoc, always unlocks (or destroys) so prevent
michael@0 1898 * duplicate unlock or unlock of a free mtx :-0
michael@0 1899 */
michael@0 1900 stcb = NULL;
michael@0 1901 goto out_no_decr;
michael@0 1902 case SCTP_TIMER_TYPE_INPKILL:
michael@0 1903 SCTP_STAT_INCR(sctps_timoinpkill);
michael@0 1904 if (inp == NULL) {
michael@0 1905 break;
michael@0 1906 }
michael@0 1907 /*
michael@0 1908 * special case, take away our increment since WE are the
michael@0 1909 * killer
michael@0 1910 */
michael@0 1911 SCTP_INP_DECR_REF(inp);
michael@0 1912 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL+SCTP_LOC_3);
michael@0 1913 #if defined(__APPLE__)
michael@0 1914 SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
michael@0 1915 #endif
michael@0 1916 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
michael@0 1917 SCTP_CALLED_FROM_INPKILL_TIMER);
michael@0 1918 #if defined(__APPLE__)
michael@0 1919 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
michael@0 1920 #endif
michael@0 1921 inp = NULL;
michael@0 1922 goto out_no_decr;
michael@0 1923 default:
michael@0 1924 SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
michael@0 1925 tmr->type);
michael@0 1926 break;
michael@0 1927 }
michael@0 1928 #ifdef SCTP_AUDITING_ENABLED
michael@0 1929 sctp_audit_log(0xF1, (uint8_t) tmr->type);
michael@0 1930 if (inp)
michael@0 1931 sctp_auditing(5, inp, stcb, net);
michael@0 1932 #endif
michael@0 1933 if ((did_output) && stcb) {
michael@0 1934 /*
michael@0 1935 * Now we need to clean up the control chunk chain if an
michael@0 1936 * ECNE is on it. It must be marked as UNSENT again so next
michael@0 1937 * call will continue to send it until such time that we get
michael@0 1938 * a CWR to remove it. It is, however, unlikely that we
michael@0 1939 * will find an ECN echo on the chain.
michael@0 1940 */
michael@0 1941 sctp_fix_ecn_echo(&stcb->asoc);
michael@0 1942 }
michael@0 1943 get_out:
michael@0 1944 if (stcb) {
michael@0 1945 SCTP_TCB_UNLOCK(stcb);
michael@0 1946 }
michael@0 1947
michael@0 1948 out_decr:
michael@0 1949 if (inp) {
michael@0 1950 SCTP_INP_DECR_REF(inp);
michael@0 1951 }
michael@0 1952
michael@0 1953 out_no_decr:
michael@0 1954 SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
michael@0 1955 type);
michael@0 1956 #if defined(__FreeBSD__) && __FreeBSD_version >= 801000
michael@0 1957 CURVNET_RESTORE();
michael@0 1958 #endif
michael@0 1959 }
michael@0 1960
michael@0 1961 void
michael@0 1962 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
michael@0 1963 struct sctp_nets *net)
michael@0 1964 {
michael@0 1965 uint32_t to_ticks;
michael@0 1966 struct sctp_timer *tmr;
michael@0 1967
michael@0 1968 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
michael@0 1969 return;
michael@0 1970
michael@0 1971 tmr = NULL;
michael@0 1972 if (stcb) {
michael@0 1973 SCTP_TCB_LOCK_ASSERT(stcb);
michael@0 1974 }
michael@0 1975 switch (t_type) {
michael@0 1976 case SCTP_TIMER_TYPE_ZERO_COPY:
michael@0 1977 tmr = &inp->sctp_ep.zero_copy_timer;
michael@0 1978 to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
michael@0 1979 break;
michael@0 1980 case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
michael@0 1981 tmr = &inp->sctp_ep.zero_copy_sendq_timer;
michael@0 1982 to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
michael@0 1983 break;
michael@0 1984 case SCTP_TIMER_TYPE_ADDR_WQ:
michael@0 1985 /* Only 1 tick away :-) */
michael@0 1986 tmr = &SCTP_BASE_INFO(addr_wq_timer);
michael@0 1987 to_ticks = SCTP_ADDRESS_TICK_DELAY;
michael@0 1988 break;
michael@0 1989 case SCTP_TIMER_TYPE_SEND:
michael@0 1990 /* Here we use the RTO timer */
michael@0 1991 {
michael@0 1992 int rto_val;
michael@0 1993
michael@0 1994 if ((stcb == NULL) || (net == NULL)) {
michael@0 1995 return;
michael@0 1996 }
michael@0 1997 tmr = &net->rxt_timer;
michael@0 1998 if (net->RTO == 0) {
michael@0 1999 rto_val = stcb->asoc.initial_rto;
michael@0 2000 } else {
michael@0 2001 rto_val = net->RTO;
michael@0 2002 }
michael@0 2003 to_ticks = MSEC_TO_TICKS(rto_val);
michael@0 2004 }
michael@0 2005 break;
michael@0 2006 case SCTP_TIMER_TYPE_INIT:
michael@0 2007 /*
michael@0 2008 * Here we use the INIT timer default, usually about 1
michael@0 2009 * minute.
michael@0 2010 */
michael@0 2011 if ((stcb == NULL) || (net == NULL)) {
michael@0 2012 return;
michael@0 2013 }
michael@0 2014 tmr = &net->rxt_timer;
michael@0 2015 if (net->RTO == 0) {
michael@0 2016 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
michael@0 2017 } else {
michael@0 2018 to_ticks = MSEC_TO_TICKS(net->RTO);
michael@0 2019 }
michael@0 2020 break;
michael@0 2021 case SCTP_TIMER_TYPE_RECV:
michael@0 2022 /*
michael@0 2023 * Here we use the delayed-ACK timer value from the inp,
michael@0 2024 * usually about 200 ms.
michael@0 2025 */
michael@0 2026 if (stcb == NULL) {
michael@0 2027 return;
michael@0 2028 }
michael@0 2029 tmr = &stcb->asoc.dack_timer;
michael@0 2030 to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
michael@0 2031 break;
michael@0 2032 case SCTP_TIMER_TYPE_SHUTDOWN:
michael@0 2033 /* Here we use the RTO of the destination. */
michael@0 2034 if ((stcb == NULL) || (net == NULL)) {
michael@0 2035 return;
michael@0 2036 }
michael@0 2037 if (net->RTO == 0) {
michael@0 2038 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
michael@0 2039 } else {
michael@0 2040 to_ticks = MSEC_TO_TICKS(net->RTO);
michael@0 2041 }
michael@0 2042 tmr = &net->rxt_timer;
michael@0 2043 break;
michael@0 2044 case SCTP_TIMER_TYPE_HEARTBEAT:
michael@0 2045 /*
michael@0 2046 * the net is used here so that we can base the delay on the
michael@0 2047 * RTO, even though we use a different timer. We also add the
michael@0 2048 * heartbeat delay PLUS a random jitter.
michael@0 2049 */
michael@0 2050 if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
michael@0 2051 return;
michael@0 2052 } else {
michael@0 2053 uint32_t rndval;
michael@0 2054 uint32_t jitter;
michael@0 2055
michael@0 2056 if ((net->dest_state & SCTP_ADDR_NOHB) &&
michael@0 2057 !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
michael@0 2058 return;
michael@0 2059 }
michael@0 2060 if (net->RTO == 0) {
michael@0 2061 to_ticks = stcb->asoc.initial_rto;
michael@0 2062 } else {
michael@0 2063 to_ticks = net->RTO;
michael@0 2064 }
michael@0 2065 rndval = sctp_select_initial_TSN(&inp->sctp_ep);
michael@0 2066 jitter = rndval % to_ticks;
michael@0 2067 if (jitter >= (to_ticks >> 1)) {
michael@0 2068 to_ticks = to_ticks + (jitter - (to_ticks >> 1));
michael@0 2069 } else {
michael@0 2070 to_ticks = to_ticks - jitter;
michael@0 2071 }
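/*
 * Worked example (numbers chosen for illustration only): with a base of
 * to_ticks = 1000 ms, jitter is in [0, 999]. If rndval % 1000 == 700,
 * then jitter >= 500 and to_ticks becomes 1000 + (700 - 500) = 1200 ms;
 * if rndval % 1000 == 300, to_ticks becomes 1000 - 300 = 700 ms. The
 * result therefore stays within +/- RTO/2 of the base before the
 * per-destination heartbeat delay is added below.
 */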
michael@0 2072 if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
michael@0 2073 !(net->dest_state & SCTP_ADDR_PF)) {
michael@0 2074 to_ticks += net->heart_beat_delay;
michael@0 2075 }
michael@0 2076 /*
michael@0 2077 * Now we must convert the to_ticks that are now in
michael@0 2078 * ms to ticks.
michael@0 2079 */
michael@0 2080 to_ticks = MSEC_TO_TICKS(to_ticks);
michael@0 2081 tmr = &net->hb_timer;
michael@0 2082 }
michael@0 2083 break;
michael@0 2084 case SCTP_TIMER_TYPE_COOKIE:
michael@0 2085 /*
michael@0 2086 * Here we can use the RTO timer from the network since one
michael@0 2087 * RTT was completed. If a retransmission happened then we will
michael@0 2088 * be using the initial RTO value.
michael@0 2089 */
michael@0 2090 if ((stcb == NULL) || (net == NULL)) {
michael@0 2091 return;
michael@0 2092 }
michael@0 2093 if (net->RTO == 0) {
michael@0 2094 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
michael@0 2095 } else {
michael@0 2096 to_ticks = MSEC_TO_TICKS(net->RTO);
michael@0 2097 }
michael@0 2098 tmr = &net->rxt_timer;
michael@0 2099 break;
michael@0 2100 case SCTP_TIMER_TYPE_NEWCOOKIE:
michael@0 2101 /*
michael@0 2102 * nothing needed but the endpoint here, usually about 60
michael@0 2103 * minutes.
michael@0 2104 */
michael@0 2105 if (inp == NULL) {
michael@0 2106 return;
michael@0 2107 }
michael@0 2108 tmr = &inp->sctp_ep.signature_change;
michael@0 2109 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
michael@0 2110 break;
michael@0 2111 case SCTP_TIMER_TYPE_ASOCKILL:
michael@0 2112 if (stcb == NULL) {
michael@0 2113 return;
michael@0 2114 }
michael@0 2115 tmr = &stcb->asoc.strreset_timer;
michael@0 2116 to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
michael@0 2117 break;
michael@0 2118 case SCTP_TIMER_TYPE_INPKILL:
michael@0 2119 /*
michael@0 2120 * The inp is set up to die. We re-use the signature_change
michael@0 2121 * timer since that has stopped and we are in the GONE
michael@0 2122 * state.
michael@0 2123 */
michael@0 2124 if (inp == NULL) {
michael@0 2125 return;
michael@0 2126 }
michael@0 2127 tmr = &inp->sctp_ep.signature_change;
michael@0 2128 to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
michael@0 2129 break;
michael@0 2130 case SCTP_TIMER_TYPE_PATHMTURAISE:
michael@0 2131 /*
michael@0 2132 * Here we use the value found in the EP for PMTU, usually
michael@0 2133 * about 10 minutes.
michael@0 2134 */
michael@0 2135 if ((stcb == NULL) || (inp == NULL)) {
michael@0 2136 return;
michael@0 2137 }
michael@0 2138 if (net == NULL) {
michael@0 2139 return;
michael@0 2140 }
michael@0 2141 if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
michael@0 2142 return;
michael@0 2143 }
michael@0 2144 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
michael@0 2145 tmr = &net->pmtu_timer;
michael@0 2146 break;
michael@0 2147 case SCTP_TIMER_TYPE_SHUTDOWNACK:
michael@0 2148 /* Here we use the RTO of the destination */
michael@0 2149 if ((stcb == NULL) || (net == NULL)) {
michael@0 2150 return;
michael@0 2151 }
michael@0 2152 if (net->RTO == 0) {
michael@0 2153 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
michael@0 2154 } else {
michael@0 2155 to_ticks = MSEC_TO_TICKS(net->RTO);
michael@0 2156 }
michael@0 2157 tmr = &net->rxt_timer;
michael@0 2158 break;
michael@0 2159 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
michael@0 2160 /*
michael@0 2161 * Here we use the endpoint's shutdown guard timer, usually
michael@0 2162 * about 3 minutes.
michael@0 2163 */
michael@0 2164 if ((inp == NULL) || (stcb == NULL)) {
michael@0 2165 return;
michael@0 2166 }
michael@0 2167 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
michael@0 2168 tmr = &stcb->asoc.shut_guard_timer;
michael@0 2169 break;
michael@0 2170 case SCTP_TIMER_TYPE_STRRESET:
michael@0 2171 /*
michael@0 2172 * Here the timer comes from the stcb but its value is from
michael@0 2173 * the net's RTO.
michael@0 2174 */
michael@0 2175 if ((stcb == NULL) || (net == NULL)) {
michael@0 2176 return;
michael@0 2177 }
michael@0 2178 if (net->RTO == 0) {
michael@0 2179 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
michael@0 2180 } else {
michael@0 2181 to_ticks = MSEC_TO_TICKS(net->RTO);
michael@0 2182 }
michael@0 2183 tmr = &stcb->asoc.strreset_timer;
michael@0 2184 break;
michael@0 2185 case SCTP_TIMER_TYPE_ASCONF:
michael@0 2186 /*
michael@0 2187 * Here the timer comes from the stcb but its value is from
michael@0 2188 * the net's RTO.
michael@0 2189 */
michael@0 2190 if ((stcb == NULL) || (net == NULL)) {
michael@0 2191 return;
michael@0 2192 }
michael@0 2193 if (net->RTO == 0) {
michael@0 2194 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
michael@0 2195 } else {
michael@0 2196 to_ticks = MSEC_TO_TICKS(net->RTO);
michael@0 2197 }
michael@0 2198 tmr = &stcb->asoc.asconf_timer;
michael@0 2199 break;
michael@0 2200 case SCTP_TIMER_TYPE_PRIM_DELETED:
michael@0 2201 if ((stcb == NULL) || (net != NULL)) {
michael@0 2202 return;
michael@0 2203 }
michael@0 2204 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
michael@0 2205 tmr = &stcb->asoc.delete_prim_timer;
michael@0 2206 break;
michael@0 2207 case SCTP_TIMER_TYPE_AUTOCLOSE:
michael@0 2208 if (stcb == NULL) {
michael@0 2209 return;
michael@0 2210 }
michael@0 2211 if (stcb->asoc.sctp_autoclose_ticks == 0) {
michael@0 2212 /*
michael@0 2213 * Really an error since stcb is NOT set to
michael@0 2214 * autoclose
michael@0 2215 */
michael@0 2216 return;
michael@0 2217 }
michael@0 2218 to_ticks = stcb->asoc.sctp_autoclose_ticks;
michael@0 2219 tmr = &stcb->asoc.autoclose_timer;
michael@0 2220 break;
michael@0 2221 default:
michael@0 2222 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
michael@0 2223 __FUNCTION__, t_type);
michael@0 2224 return;
michael@0 2225 break;
michael@0 2226 }
michael@0 2227 if ((to_ticks <= 0) || (tmr == NULL)) {
michael@0 2228 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
michael@0 2229 __FUNCTION__, t_type, to_ticks, (void *)tmr);
michael@0 2230 return;
michael@0 2231 }
michael@0 2232 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
michael@0 2233 /*
michael@0 2234 * we do NOT allow the timer to be started if it is already
michael@0 2235 * running; if it is, we leave the current one up unchanged
michael@0 2236 */
michael@0 2237 return;
michael@0 2238 }
michael@0 2239 /* At this point we can proceed */
michael@0 2240 if (t_type == SCTP_TIMER_TYPE_SEND) {
michael@0 2241 stcb->asoc.num_send_timers_up++;
michael@0 2242 }
michael@0 2243 tmr->stopped_from = 0;
michael@0 2244 tmr->type = t_type;
michael@0 2245 tmr->ep = (void *)inp;
michael@0 2246 tmr->tcb = (void *)stcb;
michael@0 2247 tmr->net = (void *)net;
michael@0 2248 tmr->self = (void *)tmr;
michael@0 2249 #if defined(__FreeBSD__) && __FreeBSD_version >= 800000
michael@0 2250 tmr->vnet = (void *)curvnet;
michael@0 2251 #endif
michael@0 2252 #ifndef __Panda__
michael@0 2253 tmr->ticks = sctp_get_tick_count();
michael@0 2254 #endif
michael@0 2255 (void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
michael@0 2256 return;
michael@0 2257 }
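/*
 * Typical usage, mirroring call sites elsewhere in this file
 * (illustrative only):
 *
 *	sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
 *	...
 *	sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
 *	                SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
 *
 * The SCTP_FROM_* + SCTP_LOC_* value passed to sctp_timer_stop() is
 * recorded in tmr->stopped_from, identifying which call site stopped
 * the timer.
 */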
michael@0 2258
michael@0 2259 void
michael@0 2260 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
michael@0 2261 struct sctp_nets *net, uint32_t from)
michael@0 2262 {
michael@0 2263 struct sctp_timer *tmr;
michael@0 2264
michael@0 2265 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
michael@0 2266 (inp == NULL))
michael@0 2267 return;
michael@0 2268
michael@0 2269 tmr = NULL;
michael@0 2270 if (stcb) {
michael@0 2271 SCTP_TCB_LOCK_ASSERT(stcb);
michael@0 2272 }
michael@0 2273 switch (t_type) {
michael@0 2274 case SCTP_TIMER_TYPE_ZERO_COPY:
michael@0 2275 tmr = &inp->sctp_ep.zero_copy_timer;
michael@0 2276 break;
michael@0 2277 case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
michael@0 2278 tmr = &inp->sctp_ep.zero_copy_sendq_timer;
michael@0 2279 break;
michael@0 2280 case SCTP_TIMER_TYPE_ADDR_WQ:
michael@0 2281 tmr = &SCTP_BASE_INFO(addr_wq_timer);
michael@0 2282 break;
michael@0 2283 case SCTP_TIMER_TYPE_SEND:
michael@0 2284 if ((stcb == NULL) || (net == NULL)) {
michael@0 2285 return;
michael@0 2286 }
michael@0 2287 tmr = &net->rxt_timer;
michael@0 2288 break;
michael@0 2289 case SCTP_TIMER_TYPE_INIT:
michael@0 2290 if ((stcb == NULL) || (net == NULL)) {
michael@0 2291 return;
michael@0 2292 }
michael@0 2293 tmr = &net->rxt_timer;
michael@0 2294 break;
michael@0 2295 case SCTP_TIMER_TYPE_RECV:
michael@0 2296 if (stcb == NULL) {
michael@0 2297 return;
michael@0 2298 }
michael@0 2299 tmr = &stcb->asoc.dack_timer;
michael@0 2300 break;
michael@0 2301 case SCTP_TIMER_TYPE_SHUTDOWN:
michael@0 2302 if ((stcb == NULL) || (net == NULL)) {
michael@0 2303 return;
michael@0 2304 }
michael@0 2305 tmr = &net->rxt_timer;
michael@0 2306 break;
michael@0 2307 case SCTP_TIMER_TYPE_HEARTBEAT:
michael@0 2308 if ((stcb == NULL) || (net == NULL)) {
michael@0 2309 return;
michael@0 2310 }
michael@0 2311 tmr = &net->hb_timer;
michael@0 2312 break;
michael@0 2313 case SCTP_TIMER_TYPE_COOKIE:
michael@0 2314 if ((stcb == NULL) || (net == NULL)) {
michael@0 2315 return;
michael@0 2316 }
michael@0 2317 tmr = &net->rxt_timer;
michael@0 2318 break;
michael@0 2319 case SCTP_TIMER_TYPE_NEWCOOKIE:
michael@0 2320 /* nothing needed but the endpoint here */
michael@0 2321 tmr = &inp->sctp_ep.signature_change;
michael@0 2322 /*
michael@0 2323 * We re-use the newcookie timer for the INP kill timer. We
michael@0 2324 * must ensure that we do not kill it by accident.
michael@0 2325 */
michael@0 2326 break;
michael@0 2327 case SCTP_TIMER_TYPE_ASOCKILL:
michael@0 2328 /*
michael@0 2329 * Stop the asoc kill timer.
michael@0 2330 */
michael@0 2331 if (stcb == NULL) {
michael@0 2332 return;
michael@0 2333 }
michael@0 2334 tmr = &stcb->asoc.strreset_timer;
michael@0 2335 break;
michael@0 2336
michael@0 2337 case SCTP_TIMER_TYPE_INPKILL:
michael@0 2338 /*
michael@0 2339 * The inp is set up to die. We re-use the signature_change
michael@0 2340 * timer since that has stopped and we are in the GONE
michael@0 2341 * state.
michael@0 2342 */
michael@0 2343 tmr = &inp->sctp_ep.signature_change;
michael@0 2344 break;
michael@0 2345 case SCTP_TIMER_TYPE_PATHMTURAISE:
michael@0 2346 if ((stcb == NULL) || (net == NULL)) {
michael@0 2347 return;
michael@0 2348 }
michael@0 2349 tmr = &net->pmtu_timer;
michael@0 2350 break;
michael@0 2351 case SCTP_TIMER_TYPE_SHUTDOWNACK:
michael@0 2352 if ((stcb == NULL) || (net == NULL)) {
michael@0 2353 return;
michael@0 2354 }
michael@0 2355 tmr = &net->rxt_timer;
michael@0 2356 break;
michael@0 2357 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
michael@0 2358 if (stcb == NULL) {
michael@0 2359 return;
michael@0 2360 }
michael@0 2361 tmr = &stcb->asoc.shut_guard_timer;
michael@0 2362 break;
michael@0 2363 case SCTP_TIMER_TYPE_STRRESET:
michael@0 2364 if (stcb == NULL) {
michael@0 2365 return;
michael@0 2366 }
michael@0 2367 tmr = &stcb->asoc.strreset_timer;
michael@0 2368 break;
michael@0 2369 case SCTP_TIMER_TYPE_ASCONF:
michael@0 2370 if (stcb == NULL) {
michael@0 2371 return;
michael@0 2372 }
michael@0 2373 tmr = &stcb->asoc.asconf_timer;
michael@0 2374 break;
michael@0 2375 case SCTP_TIMER_TYPE_PRIM_DELETED:
michael@0 2376 if (stcb == NULL) {
michael@0 2377 return;
michael@0 2378 }
michael@0 2379 tmr = &stcb->asoc.delete_prim_timer;
michael@0 2380 break;
michael@0 2381 case SCTP_TIMER_TYPE_AUTOCLOSE:
michael@0 2382 if (stcb == NULL) {
michael@0 2383 return;
michael@0 2384 }
michael@0 2385 tmr = &stcb->asoc.autoclose_timer;
michael@0 2386 break;
michael@0 2387 default:
michael@0 2388 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
michael@0 2389 __FUNCTION__, t_type);
michael@0 2390 break;
michael@0 2391 }
michael@0 2392 if (tmr == NULL) {
michael@0 2393 return;
michael@0 2394 }
michael@0 2395 if ((tmr->type != t_type) && tmr->type) {
michael@0 2396 /*
michael@0 2397 * OK, we have a timer that is under joint use, for example the
michael@0 2398 * cookie timer sharing its slot with the SEND timer. We are
michael@0 2399 * therefore NOT running the timer that the caller wants stopped,
michael@0 2400 * so just return.
michael@0 2401 */
michael@0 2402 return;
michael@0 2403 }
michael@0 2404 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
michael@0 2405 stcb->asoc.num_send_timers_up--;
michael@0 2406 if (stcb->asoc.num_send_timers_up < 0) {
michael@0 2407 stcb->asoc.num_send_timers_up = 0;
michael@0 2408 }
michael@0 2409 }
michael@0 2410 tmr->self = NULL;
michael@0 2411 tmr->stopped_from = from;
michael@0 2412 (void)SCTP_OS_TIMER_STOP(&tmr->timer);
michael@0 2413 return;
michael@0 2414 }
michael@0 2415
michael@0 2416 uint32_t
michael@0 2417 sctp_calculate_len(struct mbuf *m)
michael@0 2418 {
michael@0 2419 uint32_t tlen = 0;
michael@0 2420 struct mbuf *at;
michael@0 2421
michael@0 2422 at = m;
michael@0 2423 while (at) {
michael@0 2424 tlen += SCTP_BUF_LEN(at);
michael@0 2425 at = SCTP_BUF_NEXT(at);
michael@0 2426 }
michael@0 2427 return (tlen);
michael@0 2428 }
michael@0 2429
michael@0 2430 void
michael@0 2431 sctp_mtu_size_reset(struct sctp_inpcb *inp,
michael@0 2432 struct sctp_association *asoc, uint32_t mtu)
michael@0 2433 {
michael@0 2434 /*
michael@0 2435 * Reset the P-MTU size on this association. This involves changing
michael@0 2436 * the asoc MTU and marking ANY chunk whose size plus overhead is
michael@0 2437 * larger than the mtu so that the DF flag can be cleared on it.
michael@0 2438 */
michael@0 2439 struct sctp_tmit_chunk *chk;
michael@0 2440 unsigned int eff_mtu, ovh;
michael@0 2441
michael@0 2442 asoc->smallest_mtu = mtu;
michael@0 2443 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
michael@0 2444 ovh = SCTP_MIN_OVERHEAD;
michael@0 2445 } else {
michael@0 2446 ovh = SCTP_MIN_V4_OVERHEAD;
michael@0 2447 }
michael@0 2448 eff_mtu = mtu - ovh;
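/*
 * Example (illustrative, assuming SCTP_MIN_V4_OVERHEAD covers the
 * 20-byte IPv4 header plus the 12-byte SCTP common header): with
 * mtu = 1500 over IPv4, eff_mtu = 1500 - 32 = 1468, so any queued chunk
 * whose send_size exceeds 1468 is marked CHUNK_FLAGS_FRAGMENT_OK below.
 */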
michael@0 2449 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
michael@0 2450 if (chk->send_size > eff_mtu) {
michael@0 2451 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
michael@0 2452 }
michael@0 2453 }
michael@0 2454 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
michael@0 2455 if (chk->send_size > eff_mtu) {
michael@0 2456 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
michael@0 2457 }
michael@0 2458 }
michael@0 2459 }
michael@0 2460
michael@0 2461
michael@0 2462 /*
michael@0 2463 * given an association and the starting time of the current RTT period,
michael@0 2464 * return the RTO in number of msecs; net should point to the current network
michael@0 2465 */
michael@0 2466
michael@0 2467 uint32_t
michael@0 2468 sctp_calculate_rto(struct sctp_tcb *stcb,
michael@0 2469 struct sctp_association *asoc,
michael@0 2470 struct sctp_nets *net,
michael@0 2471 struct timeval *told,
michael@0 2472 int safe, int rtt_from_sack)
michael@0 2473 {
michael@0 2474 /*-
michael@0 2475 * given an association and the starting time of the current RTT
michael@0 2476 * period (in *told), return the RTO in number of msecs.
michael@0 2477 */
michael@0 2478 int32_t rtt; /* RTT in ms */
michael@0 2479 uint32_t new_rto;
michael@0 2480 int first_measure = 0;
michael@0 2481 struct timeval now, then, *old;
michael@0 2482
michael@0 2483 /* Copy it out for sparc64 */
michael@0 2484 if (safe == sctp_align_unsafe_makecopy) {
michael@0 2485 old = &then;
michael@0 2486 memcpy(&then, told, sizeof(struct timeval));
michael@0 2487 } else if (safe == sctp_align_safe_nocopy) {
michael@0 2488 old = told;
michael@0 2489 } else {
michael@0 2490 /* error */
michael@0 2491 SCTP_PRINTF("Huh, bad rto calc call\n");
michael@0 2492 return (0);
michael@0 2493 }
michael@0 2494 /************************/
michael@0 2495 /* 1. calculate new RTT */
michael@0 2496 /************************/
michael@0 2497 /* get the current time */
michael@0 2498 if (stcb->asoc.use_precise_time) {
michael@0 2499 (void)SCTP_GETPTIME_TIMEVAL(&now);
michael@0 2500 } else {
michael@0 2501 (void)SCTP_GETTIME_TIMEVAL(&now);
michael@0 2502 }
michael@0 2503 timevalsub(&now, old);
michael@0 2504 /* store the current RTT in us */
michael@0 2505 net->rtt = (uint64_t)1000000 * (uint64_t)now.tv_sec +
michael@0 2506 (uint64_t)now.tv_usec;
michael@0 2507 /* compute rtt in ms */
michael@0 2508 rtt = net->rtt / 1000;
michael@0 2509 if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
michael@0 2510 /* Tell the CC module that a new update has just occurred from a sack */
michael@0 2511 (*asoc->cc_functions.sctp_rtt_calculated)(stcb, net, &now);
michael@0 2512 }
michael@0 2513 /* Do we need to determine the LAN type? We do this only
michael@0 2514 * on SACKs, i.e. when the RTT is determined from data and not
michael@0 2515 * from non-data (HB/INIT->INIT-ACK) exchanges.
michael@0 2516 */
michael@0 2517 if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
michael@0 2518 (net->lan_type == SCTP_LAN_UNKNOWN)) {
michael@0 2519 if (net->rtt > SCTP_LOCAL_LAN_RTT) {
michael@0 2520 net->lan_type = SCTP_LAN_INTERNET;
michael@0 2521 } else {
michael@0 2522 net->lan_type = SCTP_LAN_LOCAL;
michael@0 2523 }
michael@0 2524 }
michael@0 2525
michael@0 2526 /***************************/
michael@0 2527 /* 2. update RTTVAR & SRTT */
michael@0 2528 /***************************/
michael@0 2529 /*-
michael@0 2530 * Compute the scaled average lastsa and the
michael@0 2531 * scaled variance lastsv as described in Van Jacobson's
michael@0 2532 * paper "Congestion Avoidance and Control", Annex A.
michael@0 2533 *
michael@0 2534 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
michael@0 2535 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
michael@0 2536 */
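/*
 * Worked example (illustrative, assuming SCTP_RTT_SHIFT == 3 and
 * SCTP_RTT_VAR_SHIFT == 2): with srtt = 100 ms (lastsa = 800),
 * rttvar = 20 ms (lastsv = 80) and a new measurement of 140 ms:
 *	rtt    = 140 - (800 >> 3) = 40
 *	lastsa = 800 + 40 = 840		(srtt   -> 840 >> 3 = 105 ms)
 *	rtt    = |40| - (80 >> 2) = 20
 *	lastsv = 80 + 20 = 100		(rttvar -> 100 >> 2 = 25 ms)
 * which matches the classic update with alpha = 1/8 and beta = 1/4.
 */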
michael@0 2537 if (net->RTO_measured) {
michael@0 2538 rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
michael@0 2539 net->lastsa += rtt;
michael@0 2540 if (rtt < 0) {
michael@0 2541 rtt = -rtt;
michael@0 2542 }
michael@0 2543 rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
michael@0 2544 net->lastsv += rtt;
michael@0 2545 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
michael@0 2546 rto_logging(net, SCTP_LOG_RTTVAR);
michael@0 2547 }
michael@0 2548 } else {
michael@0 2549 /* First RTO measurement */
michael@0 2550 net->RTO_measured = 1;
michael@0 2551 first_measure = 1;
michael@0 2552 net->lastsa = rtt << SCTP_RTT_SHIFT;
michael@0 2553 net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
michael@0 2554 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
michael@0 2555 rto_logging(net, SCTP_LOG_INITIAL_RTT);
michael@0 2556 }
michael@0 2557 }
michael@0 2558 if (net->lastsv == 0) {
michael@0 2559 net->lastsv = SCTP_CLOCK_GRANULARITY;
michael@0 2560 }
michael@0 2561 new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
michael@0 2562 if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
michael@0 2563 (stcb->asoc.sat_network_lockout == 0)) {
michael@0 2564 stcb->asoc.sat_network = 1;
michael@0 2565 } else if ((!first_measure) && stcb->asoc.sat_network) {
michael@0 2566 stcb->asoc.sat_network = 0;
michael@0 2567 stcb->asoc.sat_network_lockout = 1;
michael@0 2568 }
michael@0 2569 /* bound it, per C6/C7 in Section 5.3.1 */
michael@0 2570 if (new_rto < stcb->asoc.minrto) {
michael@0 2571 new_rto = stcb->asoc.minrto;
michael@0 2572 }
michael@0 2573 if (new_rto > stcb->asoc.maxrto) {
michael@0 2574 new_rto = stcb->asoc.maxrto;
michael@0 2575 }
michael@0 2576 /* we are now returning the RTO */
michael@0 2577 return (new_rto);
michael@0 2578 }
michael@0 2579
michael@0 2580 /*
michael@0 2581 * return a pointer to a contiguous piece of data from the given mbuf chain
michael@0 2582 * starting at 'off' for 'len' bytes. If the desired piece spans more than
michael@0 2583 * one mbuf, a copy is made at 'ptr'. The caller must ensure that the buffer
michael@0 2584 * size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
michael@0 2585 */
michael@0 2586 caddr_t
michael@0 2587 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
michael@0 2588 {
michael@0 2589 uint32_t count;
michael@0 2590 uint8_t *ptr;
michael@0 2591
michael@0 2592 ptr = in_ptr;
michael@0 2593 if ((off < 0) || (len <= 0))
michael@0 2594 return (NULL);
michael@0 2595
michael@0 2596 /* find the desired start location */
michael@0 2597 while ((m != NULL) && (off > 0)) {
michael@0 2598 if (off < SCTP_BUF_LEN(m))
michael@0 2599 break;
michael@0 2600 off -= SCTP_BUF_LEN(m);
michael@0 2601 m = SCTP_BUF_NEXT(m);
michael@0 2602 }
michael@0 2603 if (m == NULL)
michael@0 2604 return (NULL);
michael@0 2605
michael@0 2606 /* is the current mbuf large enough (eg. contiguous)? */
michael@0 2607 if ((SCTP_BUF_LEN(m) - off) >= len) {
michael@0 2608 return (mtod(m, caddr_t) + off);
michael@0 2609 } else {
michael@0 2610 /* else, it spans more than one mbuf, so save a temp copy... */
michael@0 2611 while ((m != NULL) && (len > 0)) {
michael@0 2612 count = min(SCTP_BUF_LEN(m) - off, len);
michael@0 2613 bcopy(mtod(m, caddr_t) + off, ptr, count);
michael@0 2614 len -= count;
michael@0 2615 ptr += count;
michael@0 2616 off = 0;
michael@0 2617 m = SCTP_BUF_NEXT(m);
michael@0 2618 }
michael@0 2619 if ((m == NULL) && (len > 0))
michael@0 2620 return (NULL);
michael@0 2621 else
michael@0 2622 return ((caddr_t)in_ptr);
michael@0 2623 }
michael@0 2624 }
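/*
 * Typical caller pattern (illustrative): pull a fixed-size header out of
 * the chain into a stack buffer, getting back the in-mbuf pointer when
 * the bytes happen to be contiguous:
 *
 *	struct sctp_chunkhdr chunk_buf;
 *	struct sctp_chunkhdr *ch;
 *
 *	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
 *	        sizeof(struct sctp_chunkhdr), (uint8_t *)&chunk_buf);
 *	if (ch == NULL) {
 *		// fewer than sizeof(struct sctp_chunkhdr) bytes remain
 *	}
 *
 * sctp_get_next_param() below is the same idea with a typed signature.
 */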
michael@0 2625
michael@0 2626
michael@0 2627
michael@0 2628 struct sctp_paramhdr *
michael@0 2629 sctp_get_next_param(struct mbuf *m,
michael@0 2630 int offset,
michael@0 2631 struct sctp_paramhdr *pull,
michael@0 2632 int pull_limit)
michael@0 2633 {
michael@0 2634 /* This just provides a typed signature to Peter's Pull routine */
michael@0 2635 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
michael@0 2636 (uint8_t *) pull));
michael@0 2637 }
michael@0 2638
michael@0 2639
michael@0 2640 int
michael@0 2641 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
michael@0 2642 {
michael@0 2643 /*
michael@0 2644 * add padlen bytes of zero-filled padding to the end of the mbuf. If
michael@0 2645 * padlen is > 3 this routine will fail.
michael@0 2646 */
michael@0 2647 uint8_t *dp;
michael@0 2648 int i;
michael@0 2649
michael@0 2650 if (padlen > 3) {
michael@0 2651 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
michael@0 2652 return (ENOBUFS);
michael@0 2653 }
michael@0 2654 if (padlen <= M_TRAILINGSPACE(m)) {
michael@0 2655 /*
michael@0 2656 * The easy way. We hope the majority of the time we hit
michael@0 2657 * here :)
michael@0 2658 */
michael@0 2659 dp = (uint8_t *) (mtod(m, caddr_t) + SCTP_BUF_LEN(m));
michael@0 2660 SCTP_BUF_LEN(m) += padlen;
michael@0 2661 } else {
michael@0 2662 /* Hard way we must grow the mbuf */
michael@0 2663 struct mbuf *tmp;
michael@0 2664
michael@0 2665 tmp = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
michael@0 2666 if (tmp == NULL) {
michael@0 2667 /* Out of space GAK! we are in big trouble. */
michael@0 2668 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
michael@0 2669 return (ENOBUFS);
michael@0 2670 }
michael@0 2671 /* set up the pad mbuf and link it in after m */
michael@0 2672 SCTP_BUF_LEN(tmp) = padlen;
michael@0 2673 SCTP_BUF_NEXT(tmp) = NULL;
michael@0 2674 SCTP_BUF_NEXT(m) = tmp;
michael@0 2675 dp = mtod(tmp, uint8_t *);
michael@0 2676 }
michael@0 2677 /* zero out the pad */
michael@0 2678 for (i = 0; i < padlen; i++) {
michael@0 2679 *dp = 0;
michael@0 2680 dp++;
michael@0 2681 }
michael@0 2682 return (0);
michael@0 2683 }
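/*
 * Padding arithmetic (illustrative): SCTP chunks are padded to a 4-byte
 * boundary, so a caller with a chunk of length len would typically do
 *
 *	if (len % 4) {
 *		(void)sctp_add_pad_tombuf(m, 4 - (len % 4));
 *	}
 *
 * e.g. a 17-byte chunk needs 3 pad bytes; padlen can therefore never
 * legitimately exceed 3, which is why larger values are rejected above.
 */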
michael@0 2684
michael@0 2685 int
michael@0 2686 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
michael@0 2687 {
michael@0 2688 /* find the last mbuf in chain and pad it */
michael@0 2689 struct mbuf *m_at;
michael@0 2690
michael@0 2691 if (last_mbuf) {
michael@0 2692 return (sctp_add_pad_tombuf(last_mbuf, padval));
michael@0 2693 } else {
michael@0 2694 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
michael@0 2695 if (SCTP_BUF_NEXT(m_at) == NULL) {
michael@0 2696 return (sctp_add_pad_tombuf(m_at, padval));
michael@0 2697 }
michael@0 2698 }
michael@0 2699 }
michael@0 2700 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
michael@0 2701 return (EFAULT);
michael@0 2702 }
michael@0 2703
michael@0 2704 static void
michael@0 2705 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
michael@0 2706 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
michael@0 2707 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
michael@0 2708 SCTP_UNUSED
michael@0 2709 #endif
michael@0 2710 )
michael@0 2711 {
michael@0 2712 struct mbuf *m_notify;
michael@0 2713 struct sctp_assoc_change *sac;
michael@0 2714 struct sctp_queued_to_read *control;
michael@0 2715 size_t notif_len, abort_len;
michael@0 2716 unsigned int i;
michael@0 2717 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 2718 struct socket *so;
michael@0 2719 #endif
michael@0 2720
michael@0 2721 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
michael@0 2722 notif_len = sizeof(struct sctp_assoc_change);
michael@0 2723 if (abort != NULL) {
michael@0 2724 abort_len = ntohs(abort->ch.chunk_length);
michael@0 2725 } else {
michael@0 2726 abort_len = 0;
michael@0 2727 }
michael@0 2728 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
michael@0 2729 notif_len += SCTP_ASSOC_SUPPORTS_MAX;
michael@0 2730 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
michael@0 2731 notif_len += abort_len;
michael@0 2732 }
michael@0 2733 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
michael@0 2734 if (m_notify == NULL) {
michael@0 2735 /* Retry with smaller value. */
michael@0 2736 notif_len = sizeof(struct sctp_assoc_change);
michael@0 2737 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
michael@0 2738 if (m_notify == NULL) {
michael@0 2739 goto set_error;
michael@0 2740 }
michael@0 2741 }
michael@0 2742 SCTP_BUF_NEXT(m_notify) = NULL;
michael@0 2743 sac = mtod(m_notify, struct sctp_assoc_change *);
michael@0 2744 sac->sac_type = SCTP_ASSOC_CHANGE;
michael@0 2745 sac->sac_flags = 0;
michael@0 2746 sac->sac_length = sizeof(struct sctp_assoc_change);
michael@0 2747 sac->sac_state = state;
michael@0 2748 sac->sac_error = error;
michael@0 2749 /* XXX verify these stream counts */
michael@0 2750 sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
michael@0 2751 sac->sac_inbound_streams = stcb->asoc.streamincnt;
michael@0 2752 sac->sac_assoc_id = sctp_get_associd(stcb);
michael@0 2753 if (notif_len > sizeof(struct sctp_assoc_change)) {
michael@0 2754 if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
michael@0 2755 i = 0;
michael@0 2756 if (stcb->asoc.peer_supports_prsctp) {
michael@0 2757 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
michael@0 2758 }
michael@0 2759 if (stcb->asoc.peer_supports_auth) {
michael@0 2760 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
michael@0 2761 }
michael@0 2762 if (stcb->asoc.peer_supports_asconf) {
michael@0 2763 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
michael@0 2764 }
michael@0 2765 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
michael@0 2766 if (stcb->asoc.peer_supports_strreset) {
michael@0 2767 sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
michael@0 2768 }
michael@0 2769 sac->sac_length += i;
michael@0 2770 } else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
michael@0 2771 memcpy(sac->sac_info, abort, abort_len);
michael@0 2772 sac->sac_length += abort_len;
michael@0 2773 }
michael@0 2774 }
michael@0 2775 SCTP_BUF_LEN(m_notify) = sac->sac_length;
michael@0 2776 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
michael@0 2777 0, 0, stcb->asoc.context, 0, 0, 0,
michael@0 2778 m_notify);
michael@0 2779 if (control != NULL) {
michael@0 2780 control->length = SCTP_BUF_LEN(m_notify);
michael@0 2781 /* not that we need this */
michael@0 2782 control->tail_mbuf = m_notify;
michael@0 2783 control->spec_flags = M_NOTIFICATION;
michael@0 2784 sctp_add_to_readq(stcb->sctp_ep, stcb,
michael@0 2785 control,
michael@0 2786 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
michael@0 2787 so_locked);
michael@0 2788 } else {
michael@0 2789 sctp_m_freem(m_notify);
michael@0 2790 }
michael@0 2791 }
michael@0 2792 /*
michael@0 2793 * For 1-to-1 style sockets, we send up an error when an ABORT
michael@0 2794 * comes in.
michael@0 2795 */
michael@0 2796 set_error:
michael@0 2797 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
michael@0 2798 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
michael@0 2799 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
michael@0 2800 SOCK_LOCK(stcb->sctp_socket);
michael@0 2801 if (from_peer) {
michael@0 2802 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
michael@0 2803 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
michael@0 2804 stcb->sctp_socket->so_error = ECONNREFUSED;
michael@0 2805 } else {
michael@0 2806 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
michael@0 2807 stcb->sctp_socket->so_error = ECONNRESET;
michael@0 2808 }
michael@0 2809 } else {
michael@0 2810 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
michael@0 2811 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
michael@0 2812 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
michael@0 2813 stcb->sctp_socket->so_error = ETIMEDOUT;
michael@0 2814 } else {
michael@0 2815 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
michael@0 2816 stcb->sctp_socket->so_error = ECONNABORTED;
michael@0 2817 }
michael@0 2818 }
michael@0 2819 }
michael@0 2820 /* Wake ANY sleepers */
michael@0 2821 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 2822 so = SCTP_INP_SO(stcb->sctp_ep);
michael@0 2823 if (!so_locked) {
michael@0 2824 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 2825 SCTP_TCB_UNLOCK(stcb);
michael@0 2826 SCTP_SOCKET_LOCK(so, 1);
michael@0 2827 SCTP_TCB_LOCK(stcb);
michael@0 2828 atomic_subtract_int(&stcb->asoc.refcnt, 1);
michael@0 2829 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
michael@0 2830 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 2831 return;
michael@0 2832 }
michael@0 2833 }
michael@0 2834 #endif
michael@0 2835 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
michael@0 2836 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
michael@0 2837 ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
michael@0 2838 #if defined(__APPLE__)
michael@0 2839 socantrcvmore(stcb->sctp_socket);
michael@0 2840 #else
michael@0 2841 socantrcvmore_locked(stcb->sctp_socket);
michael@0 2842 #endif
michael@0 2843 }
michael@0 2844 sorwakeup(stcb->sctp_socket);
michael@0 2845 sowwakeup(stcb->sctp_socket);
michael@0 2846 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 2847 if (!so_locked) {
michael@0 2848 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 2849 }
michael@0 2850 #endif
michael@0 2851 }
michael@0 2852
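/*
 * Build an SCTP_PEER_ADDR_CHANGE notification for the given peer address
 * and state (e.g. SCTP_ADDR_AVAILABLE, SCTP_ADDR_UNREACHABLE) and append
 * it to the socket receive queue, provided the user has enabled the
 * SCTP_PCB_FLAGS_RECVPADDREVNT feature. When SCTP_EMBEDDED_V6_SCOPE is
 * defined, link-local IPv6 addresses either get their scope id recovered
 * for the user or have the embedded scope cleared before delivery.
 */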
michael@0 2853 static void
michael@0 2854 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
michael@0 2855 struct sockaddr *sa, uint32_t error)
michael@0 2856 {
michael@0 2857 struct mbuf *m_notify;
michael@0 2858 struct sctp_paddr_change *spc;
michael@0 2859 struct sctp_queued_to_read *control;
michael@0 2860
michael@0 2861 if ((stcb == NULL) ||
michael@0 2862 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
michael@0 2863 /* event not enabled */
michael@0 2864 return;
michael@0 2865 }
michael@0 2866 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
michael@0 2867 if (m_notify == NULL)
michael@0 2868 return;
michael@0 2869 SCTP_BUF_LEN(m_notify) = 0;
michael@0 2870 spc = mtod(m_notify, struct sctp_paddr_change *);
michael@0 2871 spc->spc_type = SCTP_PEER_ADDR_CHANGE;
michael@0 2872 spc->spc_flags = 0;
michael@0 2873 spc->spc_length = sizeof(struct sctp_paddr_change);
michael@0 2874 switch (sa->sa_family) {
michael@0 2875 #ifdef INET
michael@0 2876 case AF_INET:
michael@0 2877 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
michael@0 2878 break;
michael@0 2879 #endif
michael@0 2880 #ifdef INET6
michael@0 2881 case AF_INET6:
michael@0 2882 {
michael@0 2883 #ifdef SCTP_EMBEDDED_V6_SCOPE
michael@0 2884 struct sockaddr_in6 *sin6;
michael@0 2885 #endif /* SCTP_EMBEDDED_V6_SCOPE */
michael@0 2886 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
michael@0 2887
michael@0 2888 #ifdef SCTP_EMBEDDED_V6_SCOPE
michael@0 2889 sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
michael@0 2890 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
michael@0 2891 if (sin6->sin6_scope_id == 0) {
michael@0 2892 /* recover scope_id for user */
michael@0 2893 #ifdef SCTP_KAME
michael@0 2894 (void)sa6_recoverscope(sin6);
michael@0 2895 #else
michael@0 2896 (void)in6_recoverscope(sin6, &sin6->sin6_addr,
michael@0 2897 NULL);
michael@0 2898 #endif
michael@0 2899 } else {
michael@0 2900 /* clear embedded scope_id for user */
michael@0 2901 in6_clearscope(&sin6->sin6_addr);
michael@0 2902 }
michael@0 2903 }
michael@0 2904 #endif /* SCTP_EMBEDDED_V6_SCOPE */
michael@0 2905 break;
michael@0 2906 }
michael@0 2907 #endif
michael@0 2908 #if defined(__Userspace__)
michael@0 2909 case AF_CONN:
michael@0 2910 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_conn));
michael@0 2911 break;
michael@0 2912 #endif
michael@0 2913 default:
michael@0 2914 /* TSNH */
michael@0 2915 break;
michael@0 2916 }
michael@0 2917 spc->spc_state = state;
michael@0 2918 spc->spc_error = error;
michael@0 2919 spc->spc_assoc_id = sctp_get_associd(stcb);
michael@0 2920
michael@0 2921 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
michael@0 2922 SCTP_BUF_NEXT(m_notify) = NULL;
michael@0 2923
michael@0 2924 /* append to socket */
michael@0 2925 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
michael@0 2926 0, 0, stcb->asoc.context, 0, 0, 0,
michael@0 2927 m_notify);
michael@0 2928 if (control == NULL) {
michael@0 2929 /* no memory */
michael@0 2930 sctp_m_freem(m_notify);
michael@0 2931 return;
michael@0 2932 }
michael@0 2933 control->length = SCTP_BUF_LEN(m_notify);
michael@0 2934 control->spec_flags = M_NOTIFICATION;
michael@0 2935 /* not that we need this */
michael@0 2936 control->tail_mbuf = m_notify;
michael@0 2937 sctp_add_to_readq(stcb->sctp_ep, stcb,
michael@0 2938 control,
michael@0 2939 &stcb->sctp_socket->so_rcv, 1,
michael@0 2940 SCTP_READ_LOCK_NOT_HELD,
michael@0 2941 SCTP_SO_NOT_LOCKED);
michael@0 2942 }
michael@0 2943
michael@0 2944
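/*
 * Queue an SCTP_SEND_FAILED or SCTP_SEND_FAILED_EVENT notification for a
 * chunk that was (sent != 0) or was not (sent == 0) put on the wire.
 * Which of the two notification formats is used depends on whether the
 * extended RECVNSENDFAILEVNT event is enabled. The data chunk header is
 * trimmed off and the remaining payload is handed back to the user as
 * part of the notification.
 */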
michael@0 2945 static void
michael@0 2946 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
michael@0 2947 struct sctp_tmit_chunk *chk, int so_locked
michael@0 2948 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
michael@0 2949 SCTP_UNUSED
michael@0 2950 #endif
michael@0 2951 )
michael@0 2952 {
michael@0 2953 struct mbuf *m_notify;
michael@0 2954 struct sctp_send_failed *ssf;
michael@0 2955 struct sctp_send_failed_event *ssfe;
michael@0 2956 struct sctp_queued_to_read *control;
michael@0 2957 int length;
michael@0 2958
michael@0 2959 if ((stcb == NULL) ||
michael@0 2960 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
michael@0 2961 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
michael@0 2962 /* event not enabled */
michael@0 2963 return;
michael@0 2964 }
michael@0 2965
michael@0 2966 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
michael@0 2967 length = sizeof(struct sctp_send_failed_event);
michael@0 2968 } else {
michael@0 2969 length = sizeof(struct sctp_send_failed);
michael@0 2970 }
michael@0 2971 m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
michael@0 2972 if (m_notify == NULL)
michael@0 2973 /* no space left */
michael@0 2974 return;
michael@0 2975 length += chk->send_size;
michael@0 2976 length -= sizeof(struct sctp_data_chunk);
michael@0 2977 SCTP_BUF_LEN(m_notify) = 0;
michael@0 2978 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
michael@0 2979 ssfe = mtod(m_notify, struct sctp_send_failed_event *);
michael@0 2980 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
michael@0 2981 if (sent) {
michael@0 2982 ssfe->ssfe_flags = SCTP_DATA_SENT;
michael@0 2983 } else {
michael@0 2984 ssfe->ssfe_flags = SCTP_DATA_UNSENT;
michael@0 2985 }
michael@0 2986 ssfe->ssfe_length = length;
michael@0 2987 ssfe->ssfe_error = error;
michael@0 2988 /* not exactly what the user sent in, but should be close :) */
michael@0 2989 bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
michael@0 2990 ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
michael@0 2991 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
michael@0 2992 ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
michael@0 2993 ssfe->ssfe_info.snd_context = chk->rec.data.context;
michael@0 2994 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
michael@0 2995 ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
michael@0 2996 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
michael@0 2997 } else {
michael@0 2998 ssf = mtod(m_notify, struct sctp_send_failed *);
michael@0 2999 ssf->ssf_type = SCTP_SEND_FAILED;
michael@0 3000 if (sent) {
michael@0 3001 ssf->ssf_flags = SCTP_DATA_SENT;
michael@0 3002 } else {
michael@0 3003 ssf->ssf_flags = SCTP_DATA_UNSENT;
michael@0 3004 }
michael@0 3005 ssf->ssf_length = length;
michael@0 3006 ssf->ssf_error = error;
michael@0 3007 /* not exactly what the user sent in, but should be close :) */
michael@0 3008 bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
michael@0 3009 ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
michael@0 3010 ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
michael@0 3011 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
michael@0 3012 ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
michael@0 3013 ssf->ssf_info.sinfo_context = chk->rec.data.context;
michael@0 3014 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
michael@0 3015 ssf->ssf_assoc_id = sctp_get_associd(stcb);
michael@0 3016 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
michael@0 3017 }
michael@0 3018 if (chk->data) {
michael@0 3019 /*
michael@0 3020 		 * trim off the sctp chunk header (it should
michael@0 3021 		 * be there)
michael@0 3022 */
michael@0 3023 if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
michael@0 3024 m_adj(chk->data, sizeof(struct sctp_data_chunk));
michael@0 3025 sctp_mbuf_crush(chk->data);
michael@0 3026 chk->send_size -= sizeof(struct sctp_data_chunk);
michael@0 3027 }
michael@0 3028 }
michael@0 3029 SCTP_BUF_NEXT(m_notify) = chk->data;
michael@0 3030 /* Steal off the mbuf */
michael@0 3031 chk->data = NULL;
michael@0 3032 /*
michael@0 3033 	 * For this case, we check the actual socket buffer; since the assoc
michael@0 3034 	 * is going away, we don't want to overfill the socket buffer for a
michael@0 3035 	 * non-reader.
michael@0 3036 */
michael@0 3037 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
michael@0 3038 sctp_m_freem(m_notify);
michael@0 3039 return;
michael@0 3040 }
michael@0 3041 /* append to socket */
michael@0 3042 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
michael@0 3043 0, 0, stcb->asoc.context, 0, 0, 0,
michael@0 3044 m_notify);
michael@0 3045 if (control == NULL) {
michael@0 3046 /* no memory */
michael@0 3047 sctp_m_freem(m_notify);
michael@0 3048 return;
michael@0 3049 }
michael@0 3050 control->spec_flags = M_NOTIFICATION;
michael@0 3051 sctp_add_to_readq(stcb->sctp_ep, stcb,
michael@0 3052 control,
michael@0 3053 &stcb->sctp_socket->so_rcv, 1,
michael@0 3054 SCTP_READ_LOCK_NOT_HELD,
michael@0 3055 so_locked);
michael@0 3056 }
michael@0 3057
michael@0 3058
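/*
 * Same as sctp_notify_send_failed(), but for a stream queue pending
 * entry (data that never left the stream output queue). The message is
 * always flagged SCTP_DATA_UNSENT and the queued data is attached to the
 * notification so the user can recover it.
 */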
michael@0 3059 static void
michael@0 3060 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
michael@0 3061 struct sctp_stream_queue_pending *sp, int so_locked
michael@0 3062 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
michael@0 3063 SCTP_UNUSED
michael@0 3064 #endif
michael@0 3065 )
michael@0 3066 {
michael@0 3067 struct mbuf *m_notify;
michael@0 3068 struct sctp_send_failed *ssf;
michael@0 3069 struct sctp_send_failed_event *ssfe;
michael@0 3070 struct sctp_queued_to_read *control;
michael@0 3071 int length;
michael@0 3072
michael@0 3073 if ((stcb == NULL) ||
michael@0 3074 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
michael@0 3075 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
michael@0 3076 /* event not enabled */
michael@0 3077 return;
michael@0 3078 }
michael@0 3079 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
michael@0 3080 length = sizeof(struct sctp_send_failed_event);
michael@0 3081 } else {
michael@0 3082 length = sizeof(struct sctp_send_failed);
michael@0 3083 }
michael@0 3084 m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
michael@0 3085 if (m_notify == NULL) {
michael@0 3086 /* no space left */
michael@0 3087 return;
michael@0 3088 }
michael@0 3089 length += sp->length;
michael@0 3090 SCTP_BUF_LEN(m_notify) = 0;
michael@0 3091 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
michael@0 3092 ssfe = mtod(m_notify, struct sctp_send_failed_event *);
michael@0 3093 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
michael@0 3094 ssfe->ssfe_flags = SCTP_DATA_UNSENT;
michael@0 3095 ssfe->ssfe_length = length;
michael@0 3096 ssfe->ssfe_error = error;
michael@0 3097 /* not exactly what the user sent in, but should be close :) */
michael@0 3098 bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
michael@0 3099 ssfe->ssfe_info.snd_sid = sp->stream;
michael@0 3100 if (sp->some_taken) {
michael@0 3101 ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
michael@0 3102 } else {
michael@0 3103 ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
michael@0 3104 }
michael@0 3105 ssfe->ssfe_info.snd_ppid = sp->ppid;
michael@0 3106 ssfe->ssfe_info.snd_context = sp->context;
michael@0 3107 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
michael@0 3108 ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
michael@0 3109 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
michael@0 3110 } else {
michael@0 3111 ssf = mtod(m_notify, struct sctp_send_failed *);
michael@0 3112 ssf->ssf_type = SCTP_SEND_FAILED;
michael@0 3113 ssf->ssf_flags = SCTP_DATA_UNSENT;
michael@0 3114 ssf->ssf_length = length;
michael@0 3115 ssf->ssf_error = error;
michael@0 3116 /* not exactly what the user sent in, but should be close :) */
michael@0 3117 bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
michael@0 3118 ssf->ssf_info.sinfo_stream = sp->stream;
michael@0 3119 ssf->ssf_info.sinfo_ssn = 0;
michael@0 3120 if (sp->some_taken) {
michael@0 3121 ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
michael@0 3122 } else {
michael@0 3123 ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
michael@0 3124 }
michael@0 3125 ssf->ssf_info.sinfo_ppid = sp->ppid;
michael@0 3126 ssf->ssf_info.sinfo_context = sp->context;
michael@0 3127 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
michael@0 3128 ssf->ssf_assoc_id = sctp_get_associd(stcb);
michael@0 3129 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
michael@0 3130 }
michael@0 3131 SCTP_BUF_NEXT(m_notify) = sp->data;
michael@0 3132
michael@0 3133 /* Steal off the mbuf */
michael@0 3134 sp->data = NULL;
michael@0 3135 /*
michael@0 3136 	 * For this case, we check the actual socket buffer; since the assoc
michael@0 3137 	 * is going away, we don't want to overfill the socket buffer for a
michael@0 3138 	 * non-reader.
michael@0 3139 */
michael@0 3140 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
michael@0 3141 sctp_m_freem(m_notify);
michael@0 3142 return;
michael@0 3143 }
michael@0 3144 /* append to socket */
michael@0 3145 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
michael@0 3146 0, 0, stcb->asoc.context, 0, 0, 0,
michael@0 3147 m_notify);
michael@0 3148 if (control == NULL) {
michael@0 3149 /* no memory */
michael@0 3150 sctp_m_freem(m_notify);
michael@0 3151 return;
michael@0 3152 }
michael@0 3153 control->spec_flags = M_NOTIFICATION;
michael@0 3154 sctp_add_to_readq(stcb->sctp_ep, stcb,
michael@0 3155 control,
michael@0 3156 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
michael@0 3157 }
michael@0 3158
michael@0 3159
michael@0 3160
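/*
 * Deliver an SCTP_ADAPTATION_INDICATION notification carrying the peer's
 * adaptation layer indication, if SCTP_PCB_FLAGS_ADAPTATIONEVNT is on.
 */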
michael@0 3161 static void
michael@0 3162 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
michael@0 3163 {
michael@0 3164 struct mbuf *m_notify;
michael@0 3165 struct sctp_adaptation_event *sai;
michael@0 3166 struct sctp_queued_to_read *control;
michael@0 3167
michael@0 3168 if ((stcb == NULL) ||
michael@0 3169 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
michael@0 3170 /* event not enabled */
michael@0 3171 return;
michael@0 3172 }
michael@0 3173
michael@0 3174 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_NOWAIT, 1, MT_DATA);
michael@0 3175 if (m_notify == NULL)
michael@0 3176 /* no space left */
michael@0 3177 return;
michael@0 3178 SCTP_BUF_LEN(m_notify) = 0;
michael@0 3179 sai = mtod(m_notify, struct sctp_adaptation_event *);
michael@0 3180 sai->sai_type = SCTP_ADAPTATION_INDICATION;
michael@0 3181 sai->sai_flags = 0;
michael@0 3182 sai->sai_length = sizeof(struct sctp_adaptation_event);
michael@0 3183 sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
michael@0 3184 sai->sai_assoc_id = sctp_get_associd(stcb);
michael@0 3185
michael@0 3186 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
michael@0 3187 SCTP_BUF_NEXT(m_notify) = NULL;
michael@0 3188
michael@0 3189 /* append to socket */
michael@0 3190 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
michael@0 3191 0, 0, stcb->asoc.context, 0, 0, 0,
michael@0 3192 m_notify);
michael@0 3193 if (control == NULL) {
michael@0 3194 /* no memory */
michael@0 3195 sctp_m_freem(m_notify);
michael@0 3196 return;
michael@0 3197 }
michael@0 3198 control->length = SCTP_BUF_LEN(m_notify);
michael@0 3199 control->spec_flags = M_NOTIFICATION;
michael@0 3200 /* not that we need this */
michael@0 3201 control->tail_mbuf = m_notify;
michael@0 3202 sctp_add_to_readq(stcb->sctp_ep, stcb,
michael@0 3203 control,
michael@0 3204 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
michael@0 3205 }
michael@0 3206
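/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification. The 32-bit 'val'
 * argument packs the stream number in the upper 16 bits and the stream
 * sequence number in the lower 16 bits. The entry is charged to the
 * socket buffer and linked directly into the inp read queue, right
 * after the active partial-delivery control block when there is one.
 */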
michael@0 3207 /* This must always be called with the read-queue LOCKED in the INP */
michael@0 3208 static void
michael@0 3209 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
michael@0 3210 uint32_t val, int so_locked
michael@0 3211 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
michael@0 3212 SCTP_UNUSED
michael@0 3213 #endif
michael@0 3214 )
michael@0 3215 {
michael@0 3216 struct mbuf *m_notify;
michael@0 3217 struct sctp_pdapi_event *pdapi;
michael@0 3218 struct sctp_queued_to_read *control;
michael@0 3219 struct sockbuf *sb;
michael@0 3220
michael@0 3221 if ((stcb == NULL) ||
michael@0 3222 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
michael@0 3223 /* event not enabled */
michael@0 3224 return;
michael@0 3225 }
michael@0 3226 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
michael@0 3227 return;
michael@0 3228 }
michael@0 3229
michael@0 3230 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
michael@0 3231 if (m_notify == NULL)
michael@0 3232 /* no space left */
michael@0 3233 return;
michael@0 3234 SCTP_BUF_LEN(m_notify) = 0;
michael@0 3235 pdapi = mtod(m_notify, struct sctp_pdapi_event *);
michael@0 3236 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
michael@0 3237 pdapi->pdapi_flags = 0;
michael@0 3238 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
michael@0 3239 pdapi->pdapi_indication = error;
michael@0 3240 pdapi->pdapi_stream = (val >> 16);
michael@0 3241 pdapi->pdapi_seq = (val & 0x0000ffff);
michael@0 3242 pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
michael@0 3243
michael@0 3244 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
michael@0 3245 SCTP_BUF_NEXT(m_notify) = NULL;
michael@0 3246 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
michael@0 3247 0, 0, stcb->asoc.context, 0, 0, 0,
michael@0 3248 m_notify);
michael@0 3249 if (control == NULL) {
michael@0 3250 /* no memory */
michael@0 3251 sctp_m_freem(m_notify);
michael@0 3252 return;
michael@0 3253 }
michael@0 3254 control->spec_flags = M_NOTIFICATION;
michael@0 3255 control->length = SCTP_BUF_LEN(m_notify);
michael@0 3256 /* not that we need this */
michael@0 3257 control->tail_mbuf = m_notify;
michael@0 3258 control->held_length = 0;
michael@0 3259 control->length = 0;
michael@0 3260 sb = &stcb->sctp_socket->so_rcv;
michael@0 3261 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
michael@0 3262 sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
michael@0 3263 }
michael@0 3264 sctp_sballoc(stcb, sb, m_notify);
michael@0 3265 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
michael@0 3266 sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
michael@0 3267 }
michael@0 3268 atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
michael@0 3269 control->end_added = 1;
michael@0 3270 if (stcb->asoc.control_pdapi)
michael@0 3271 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
michael@0 3272 else {
michael@0 3273 /* we really should not see this case */
michael@0 3274 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
michael@0 3275 }
michael@0 3276 if (stcb->sctp_ep && stcb->sctp_socket) {
michael@0 3277 /* This should always be the case */
michael@0 3278 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 3279 struct socket *so;
michael@0 3280
michael@0 3281 so = SCTP_INP_SO(stcb->sctp_ep);
michael@0 3282 if (!so_locked) {
michael@0 3283 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 3284 SCTP_TCB_UNLOCK(stcb);
michael@0 3285 SCTP_SOCKET_LOCK(so, 1);
michael@0 3286 SCTP_TCB_LOCK(stcb);
michael@0 3287 atomic_subtract_int(&stcb->asoc.refcnt, 1);
michael@0 3288 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
michael@0 3289 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 3290 return;
michael@0 3291 }
michael@0 3292 }
michael@0 3293 #endif
michael@0 3294 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
michael@0 3295 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 3296 if (!so_locked) {
michael@0 3297 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 3298 }
michael@0 3299 #endif
michael@0 3300 }
michael@0 3301 }
michael@0 3302
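/*
 * Handle a peer SHUTDOWN: for 1-to-1 style sockets (and 1-to-many
 * sockets in the TCP pool) mark the socket as unable to send more data,
 * then queue an SCTP_SHUTDOWN_EVENT notification if the user has
 * enabled SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT.
 */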
michael@0 3303 static void
michael@0 3304 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
michael@0 3305 {
michael@0 3306 struct mbuf *m_notify;
michael@0 3307 struct sctp_shutdown_event *sse;
michael@0 3308 struct sctp_queued_to_read *control;
michael@0 3309
michael@0 3310 /*
michael@0 3311 * For TCP model AND UDP connected sockets we will send an error up
michael@0 3312 	 * when a SHUTDOWN completes
michael@0 3313 */
michael@0 3314 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
michael@0 3315 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
michael@0 3316 /* mark socket closed for read/write and wakeup! */
michael@0 3317 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 3318 struct socket *so;
michael@0 3319
michael@0 3320 so = SCTP_INP_SO(stcb->sctp_ep);
michael@0 3321 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 3322 SCTP_TCB_UNLOCK(stcb);
michael@0 3323 SCTP_SOCKET_LOCK(so, 1);
michael@0 3324 SCTP_TCB_LOCK(stcb);
michael@0 3325 atomic_subtract_int(&stcb->asoc.refcnt, 1);
michael@0 3326 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
michael@0 3327 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 3328 return;
michael@0 3329 }
michael@0 3330 #endif
michael@0 3331 socantsendmore(stcb->sctp_socket);
michael@0 3332 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 3333 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 3334 #endif
michael@0 3335 }
michael@0 3336 if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
michael@0 3337 /* event not enabled */
michael@0 3338 return;
michael@0 3339 }
michael@0 3340
michael@0 3341 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
michael@0 3342 if (m_notify == NULL)
michael@0 3343 /* no space left */
michael@0 3344 return;
michael@0 3345 sse = mtod(m_notify, struct sctp_shutdown_event *);
michael@0 3346 sse->sse_type = SCTP_SHUTDOWN_EVENT;
michael@0 3347 sse->sse_flags = 0;
michael@0 3348 sse->sse_length = sizeof(struct sctp_shutdown_event);
michael@0 3349 sse->sse_assoc_id = sctp_get_associd(stcb);
michael@0 3350
michael@0 3351 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
michael@0 3352 SCTP_BUF_NEXT(m_notify) = NULL;
michael@0 3353
michael@0 3354 /* append to socket */
michael@0 3355 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
michael@0 3356 0, 0, stcb->asoc.context, 0, 0, 0,
michael@0 3357 m_notify);
michael@0 3358 if (control == NULL) {
michael@0 3359 /* no memory */
michael@0 3360 sctp_m_freem(m_notify);
michael@0 3361 return;
michael@0 3362 }
michael@0 3363 control->spec_flags = M_NOTIFICATION;
michael@0 3364 control->length = SCTP_BUF_LEN(m_notify);
michael@0 3365 /* not that we need this */
michael@0 3366 control->tail_mbuf = m_notify;
michael@0 3367 sctp_add_to_readq(stcb->sctp_ep, stcb,
michael@0 3368 control,
michael@0 3369 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
michael@0 3370 }
michael@0 3371
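/*
 * Queue an SCTP_SENDER_DRY_EVENT notification, used to tell the
 * application that no user data remains outstanding or queued, if
 * SCTP_PCB_FLAGS_DRYEVNT is enabled.
 */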
michael@0 3372 static void
michael@0 3373 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
michael@0 3374 int so_locked
michael@0 3375 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
michael@0 3376 SCTP_UNUSED
michael@0 3377 #endif
michael@0 3378 )
michael@0 3379 {
michael@0 3380 struct mbuf *m_notify;
michael@0 3381 struct sctp_sender_dry_event *event;
michael@0 3382 struct sctp_queued_to_read *control;
michael@0 3383
michael@0 3384 if ((stcb == NULL) ||
michael@0 3385 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
michael@0 3386 /* event not enabled */
michael@0 3387 return;
michael@0 3388 }
michael@0 3389
michael@0 3390 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
michael@0 3391 if (m_notify == NULL) {
michael@0 3392 /* no space left */
michael@0 3393 return;
michael@0 3394 }
michael@0 3395 SCTP_BUF_LEN(m_notify) = 0;
michael@0 3396 event = mtod(m_notify, struct sctp_sender_dry_event *);
michael@0 3397 event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
michael@0 3398 event->sender_dry_flags = 0;
michael@0 3399 event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
michael@0 3400 event->sender_dry_assoc_id = sctp_get_associd(stcb);
michael@0 3401
michael@0 3402 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
michael@0 3403 SCTP_BUF_NEXT(m_notify) = NULL;
michael@0 3404
michael@0 3405 /* append to socket */
michael@0 3406 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
michael@0 3407 0, 0, stcb->asoc.context, 0, 0, 0,
michael@0 3408 m_notify);
michael@0 3409 if (control == NULL) {
michael@0 3410 /* no memory */
michael@0 3411 sctp_m_freem(m_notify);
michael@0 3412 return;
michael@0 3413 }
michael@0 3414 control->length = SCTP_BUF_LEN(m_notify);
michael@0 3415 control->spec_flags = M_NOTIFICATION;
michael@0 3416 /* not that we need this */
michael@0 3417 control->tail_mbuf = m_notify;
michael@0 3418 sctp_add_to_readq(stcb->sctp_ep, stcb, control,
michael@0 3419 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
michael@0 3420 }
michael@0 3421
michael@0 3422
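/*
 * Queue an SCTP_STREAM_CHANGE_EVENT notification reporting the number of
 * inbound and outbound streams after a stream add/change request. If the
 * peer originated the request (peer_req_out is set and 'flag' is
 * non-zero), the local user is not told about it.
 */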
michael@0 3423 void
michael@0 3424 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
michael@0 3425 {
michael@0 3426 struct mbuf *m_notify;
michael@0 3427 struct sctp_queued_to_read *control;
michael@0 3428 struct sctp_stream_change_event *stradd;
michael@0 3429 int len;
michael@0 3430
michael@0 3431 if ((stcb == NULL) ||
michael@0 3432 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
michael@0 3433 /* event not enabled */
michael@0 3434 return;
michael@0 3435 }
michael@0 3436 if ((stcb->asoc.peer_req_out) && flag) {
michael@0 3437 /* Peer made the request, don't tell the local user */
michael@0 3438 stcb->asoc.peer_req_out = 0;
michael@0 3439 return;
michael@0 3440 }
michael@0 3441 stcb->asoc.peer_req_out = 0;
michael@0 3442 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
michael@0 3443 if (m_notify == NULL)
michael@0 3444 /* no space left */
michael@0 3445 return;
michael@0 3446 SCTP_BUF_LEN(m_notify) = 0;
michael@0 3447 len = sizeof(struct sctp_stream_change_event);
michael@0 3448 if (len > M_TRAILINGSPACE(m_notify)) {
michael@0 3449 /* never enough room */
michael@0 3450 sctp_m_freem(m_notify);
michael@0 3451 return;
michael@0 3452 }
michael@0 3453 stradd = mtod(m_notify, struct sctp_stream_change_event *);
michael@0 3454 stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
michael@0 3455 stradd->strchange_flags = flag;
michael@0 3456 stradd->strchange_length = len;
michael@0 3457 stradd->strchange_assoc_id = sctp_get_associd(stcb);
michael@0 3458 stradd->strchange_instrms = numberin;
michael@0 3459 stradd->strchange_outstrms = numberout;
michael@0 3460 SCTP_BUF_LEN(m_notify) = len;
michael@0 3461 SCTP_BUF_NEXT(m_notify) = NULL;
michael@0 3462 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
michael@0 3463 /* no space */
michael@0 3464 sctp_m_freem(m_notify);
michael@0 3465 return;
michael@0 3466 }
michael@0 3467 /* append to socket */
michael@0 3468 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
michael@0 3469 0, 0, stcb->asoc.context, 0, 0, 0,
michael@0 3470 m_notify);
michael@0 3471 if (control == NULL) {
michael@0 3472 /* no memory */
michael@0 3473 sctp_m_freem(m_notify);
michael@0 3474 return;
michael@0 3475 }
michael@0 3476 control->spec_flags = M_NOTIFICATION;
michael@0 3477 control->length = SCTP_BUF_LEN(m_notify);
michael@0 3478 /* not that we need this */
michael@0 3479 control->tail_mbuf = m_notify;
michael@0 3480 sctp_add_to_readq(stcb->sctp_ep, stcb,
michael@0 3481 control,
michael@0 3482 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
michael@0 3483 }
michael@0 3484
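/*
 * Queue an SCTP_ASSOC_RESET_EVENT notification carrying the new local
 * (sending) and remote (receiving) TSNs after an association-level TSN
 * reset.
 */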
michael@0 3485 void
michael@0 3486 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
michael@0 3487 {
michael@0 3488 struct mbuf *m_notify;
michael@0 3489 struct sctp_queued_to_read *control;
michael@0 3490 struct sctp_assoc_reset_event *strasoc;
michael@0 3491 int len;
michael@0 3492
michael@0 3493 if ((stcb == NULL) ||
michael@0 3494 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
michael@0 3495 /* event not enabled */
michael@0 3496 return;
michael@0 3497 }
michael@0 3498 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
michael@0 3499 if (m_notify == NULL)
michael@0 3500 /* no space left */
michael@0 3501 return;
michael@0 3502 SCTP_BUF_LEN(m_notify) = 0;
michael@0 3503 len = sizeof(struct sctp_assoc_reset_event);
michael@0 3504 if (len > M_TRAILINGSPACE(m_notify)) {
michael@0 3505 /* never enough room */
michael@0 3506 sctp_m_freem(m_notify);
michael@0 3507 return;
michael@0 3508 }
michael@0 3509 strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
michael@0 3510 strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
michael@0 3511 strasoc->assocreset_flags = flag;
michael@0 3512 strasoc->assocreset_length = len;
michael@0 3513 strasoc->assocreset_assoc_id= sctp_get_associd(stcb);
michael@0 3514 strasoc->assocreset_local_tsn = sending_tsn;
michael@0 3515 strasoc->assocreset_remote_tsn = recv_tsn;
michael@0 3516 SCTP_BUF_LEN(m_notify) = len;
michael@0 3517 SCTP_BUF_NEXT(m_notify) = NULL;
michael@0 3518 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
michael@0 3519 /* no space */
michael@0 3520 sctp_m_freem(m_notify);
michael@0 3521 return;
michael@0 3522 }
michael@0 3523 /* append to socket */
michael@0 3524 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
michael@0 3525 0, 0, stcb->asoc.context, 0, 0, 0,
michael@0 3526 m_notify);
michael@0 3527 if (control == NULL) {
michael@0 3528 /* no memory */
michael@0 3529 sctp_m_freem(m_notify);
michael@0 3530 return;
michael@0 3531 }
michael@0 3532 control->spec_flags = M_NOTIFICATION;
michael@0 3533 control->length = SCTP_BUF_LEN(m_notify);
michael@0 3534 /* not that we need this */
michael@0 3535 control->tail_mbuf = m_notify;
michael@0 3536 sctp_add_to_readq(stcb->sctp_ep, stcb,
michael@0 3537 control,
michael@0 3538 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
michael@0 3539 }
michael@0 3540
michael@0 3541
michael@0 3542
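/*
 * Queue an SCTP_STREAM_RESET_EVENT notification listing the affected
 * stream numbers (converted to host byte order) together with the
 * result flags for an incoming or outgoing stream reset.
 */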
michael@0 3543 static void
michael@0 3544 sctp_notify_stream_reset(struct sctp_tcb *stcb,
michael@0 3545 int number_entries, uint16_t * list, int flag)
michael@0 3546 {
michael@0 3547 struct mbuf *m_notify;
michael@0 3548 struct sctp_queued_to_read *control;
michael@0 3549 struct sctp_stream_reset_event *strreset;
michael@0 3550 int len;
michael@0 3551
michael@0 3552 if ((stcb == NULL) ||
michael@0 3553 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
michael@0 3554 /* event not enabled */
michael@0 3555 return;
michael@0 3556 }
michael@0 3557
michael@0 3558 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
michael@0 3559 if (m_notify == NULL)
michael@0 3560 /* no space left */
michael@0 3561 return;
michael@0 3562 SCTP_BUF_LEN(m_notify) = 0;
michael@0 3563 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
michael@0 3564 if (len > M_TRAILINGSPACE(m_notify)) {
michael@0 3565 /* never enough room */
michael@0 3566 sctp_m_freem(m_notify);
michael@0 3567 return;
michael@0 3568 }
michael@0 3569 strreset = mtod(m_notify, struct sctp_stream_reset_event *);
michael@0 3570 strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
michael@0 3571 strreset->strreset_flags = flag;
michael@0 3572 strreset->strreset_length = len;
michael@0 3573 strreset->strreset_assoc_id = sctp_get_associd(stcb);
michael@0 3574 if (number_entries) {
michael@0 3575 int i;
michael@0 3576
michael@0 3577 for (i = 0; i < number_entries; i++) {
michael@0 3578 strreset->strreset_stream_list[i] = ntohs(list[i]);
michael@0 3579 }
michael@0 3580 }
michael@0 3581 SCTP_BUF_LEN(m_notify) = len;
michael@0 3582 SCTP_BUF_NEXT(m_notify) = NULL;
michael@0 3583 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
michael@0 3584 /* no space */
michael@0 3585 sctp_m_freem(m_notify);
michael@0 3586 return;
michael@0 3587 }
michael@0 3588 /* append to socket */
michael@0 3589 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
michael@0 3590 0, 0, stcb->asoc.context, 0, 0, 0,
michael@0 3591 m_notify);
michael@0 3592 if (control == NULL) {
michael@0 3593 /* no memory */
michael@0 3594 sctp_m_freem(m_notify);
michael@0 3595 return;
michael@0 3596 }
michael@0 3597 control->spec_flags = M_NOTIFICATION;
michael@0 3598 control->length = SCTP_BUF_LEN(m_notify);
michael@0 3599 /* not that we need this */
michael@0 3600 control->tail_mbuf = m_notify;
michael@0 3601 sctp_add_to_readq(stcb->sctp_ep, stcb,
michael@0 3602 control,
michael@0 3603 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
michael@0 3604 }
michael@0 3605
michael@0 3606
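/*
 * Queue an SCTP_REMOTE_ERROR notification for an ERROR chunk received
 * from the peer. When there is room, the raw error chunk is copied into
 * sre_data so the application can inspect the cause.
 */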
michael@0 3607 static void
michael@0 3608 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
michael@0 3609 {
michael@0 3610 struct mbuf *m_notify;
michael@0 3611 struct sctp_remote_error *sre;
michael@0 3612 struct sctp_queued_to_read *control;
michael@0 3613 size_t notif_len, chunk_len;
michael@0 3614
michael@0 3615 if ((stcb == NULL) ||
michael@0 3616 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
michael@0 3617 return;
michael@0 3618 }
michael@0 3619 if (chunk != NULL) {
michael@0 3620 chunk_len = ntohs(chunk->ch.chunk_length);
michael@0 3621 } else {
michael@0 3622 chunk_len = 0;
michael@0 3623 }
michael@0 3624 notif_len = sizeof(struct sctp_remote_error) + chunk_len;
michael@0 3625 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
michael@0 3626 if (m_notify == NULL) {
michael@0 3627 /* Retry with smaller value. */
michael@0 3628 notif_len = sizeof(struct sctp_remote_error);
michael@0 3629 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
michael@0 3630 if (m_notify == NULL) {
michael@0 3631 return;
michael@0 3632 }
michael@0 3633 }
michael@0 3634 SCTP_BUF_NEXT(m_notify) = NULL;
michael@0 3635 sre = mtod(m_notify, struct sctp_remote_error *);
michael@0 3636 sre->sre_type = SCTP_REMOTE_ERROR;
michael@0 3637 sre->sre_flags = 0;
michael@0 3638 sre->sre_length = sizeof(struct sctp_remote_error);
michael@0 3639 sre->sre_error = error;
michael@0 3640 sre->sre_assoc_id = sctp_get_associd(stcb);
michael@0 3641 if (notif_len > sizeof(struct sctp_remote_error)) {
michael@0 3642 memcpy(sre->sre_data, chunk, chunk_len);
michael@0 3643 sre->sre_length += chunk_len;
michael@0 3644 }
michael@0 3645 SCTP_BUF_LEN(m_notify) = sre->sre_length;
michael@0 3646 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
michael@0 3647 0, 0, stcb->asoc.context, 0, 0, 0,
michael@0 3648 m_notify);
michael@0 3649 if (control != NULL) {
michael@0 3650 control->length = SCTP_BUF_LEN(m_notify);
michael@0 3651 /* not that we need this */
michael@0 3652 control->tail_mbuf = m_notify;
michael@0 3653 control->spec_flags = M_NOTIFICATION;
michael@0 3654 sctp_add_to_readq(stcb->sctp_ep, stcb,
michael@0 3655 control,
michael@0 3656 &stcb->sctp_socket->so_rcv, 1,
michael@0 3657 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
michael@0 3658 } else {
michael@0 3659 sctp_m_freem(m_notify);
michael@0 3660 }
michael@0 3661 }
michael@0 3662
michael@0 3663
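/*
 * Central dispatcher for ULP notifications. Callers pass one of the
 * SCTP_NOTIFY_* codes together with an optional error and a payload
 * pointer whose type depends on the notification (e.g. a struct
 * sctp_nets * for interface events, a struct sctp_tmit_chunk * for send
 * failures), for example, as used by sctp_report_all_outbound() below:
 *
 *	sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, error, chk, so_locked);
 *
 * Notifications are suppressed when the socket is gone or cannot receive
 * any more data, and interface events are suppressed while the
 * association is still in a COOKIE state.
 */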
michael@0 3664 void
michael@0 3665 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
michael@0 3666 uint32_t error, void *data, int so_locked
michael@0 3667 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
michael@0 3668 SCTP_UNUSED
michael@0 3669 #endif
michael@0 3670 )
michael@0 3671 {
michael@0 3672 if ((stcb == NULL) ||
michael@0 3673 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
michael@0 3674 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
michael@0 3675 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
michael@0 3676 /* If the socket is gone we are out of here */
michael@0 3677 return;
michael@0 3678 }
michael@0 3679 #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
michael@0 3680 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
michael@0 3681 #else
michael@0 3682 if (stcb->sctp_socket->so_state & SS_CANTRCVMORE) {
michael@0 3683 #endif
michael@0 3684 return;
michael@0 3685 }
michael@0 3686 #if defined(__APPLE__)
michael@0 3687 if (so_locked) {
michael@0 3688 sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
michael@0 3689 } else {
michael@0 3690 sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
michael@0 3691 }
michael@0 3692 #endif
michael@0 3693 if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
michael@0 3694 (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
michael@0 3695 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
michael@0 3696 (notification == SCTP_NOTIFY_INTERFACE_UP) ||
michael@0 3697 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
michael@0 3698 /* Don't report these in front states */
michael@0 3699 return;
michael@0 3700 }
michael@0 3701 }
michael@0 3702 switch (notification) {
michael@0 3703 case SCTP_NOTIFY_ASSOC_UP:
michael@0 3704 if (stcb->asoc.assoc_up_sent == 0) {
michael@0 3705 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
michael@0 3706 stcb->asoc.assoc_up_sent = 1;
michael@0 3707 }
michael@0 3708 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
michael@0 3709 sctp_notify_adaptation_layer(stcb);
michael@0 3710 }
michael@0 3711 if (stcb->asoc.peer_supports_auth == 0) {
michael@0 3712 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
michael@0 3713 NULL, so_locked);
michael@0 3714 }
michael@0 3715 break;
michael@0 3716 case SCTP_NOTIFY_ASSOC_DOWN:
michael@0 3717 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
michael@0 3718 #if defined(__Userspace__)
michael@0 3719 if (stcb->sctp_ep->recv_callback) {
michael@0 3720 if (stcb->sctp_socket) {
michael@0 3721 union sctp_sockstore addr;
michael@0 3722 struct sctp_rcvinfo rcv;
michael@0 3723
michael@0 3724 memset(&addr, 0, sizeof(union sctp_sockstore));
michael@0 3725 memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
michael@0 3726 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 3727 SCTP_TCB_UNLOCK(stcb);
michael@0 3728 stcb->sctp_ep->recv_callback(stcb->sctp_socket, addr, NULL, 0, rcv, 0, stcb->sctp_ep->ulp_info);
michael@0 3729 SCTP_TCB_LOCK(stcb);
michael@0 3730 atomic_subtract_int(&stcb->asoc.refcnt, 1);
michael@0 3731 }
michael@0 3732 }
michael@0 3733 #endif
michael@0 3734 break;
michael@0 3735 case SCTP_NOTIFY_INTERFACE_DOWN:
michael@0 3736 {
michael@0 3737 struct sctp_nets *net;
michael@0 3738
michael@0 3739 net = (struct sctp_nets *)data;
michael@0 3740 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
michael@0 3741 (struct sockaddr *)&net->ro._l_addr, error);
michael@0 3742 break;
michael@0 3743 }
michael@0 3744 case SCTP_NOTIFY_INTERFACE_UP:
michael@0 3745 {
michael@0 3746 struct sctp_nets *net;
michael@0 3747
michael@0 3748 net = (struct sctp_nets *)data;
michael@0 3749 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
michael@0 3750 (struct sockaddr *)&net->ro._l_addr, error);
michael@0 3751 break;
michael@0 3752 }
michael@0 3753 case SCTP_NOTIFY_INTERFACE_CONFIRMED:
michael@0 3754 {
michael@0 3755 struct sctp_nets *net;
michael@0 3756
michael@0 3757 net = (struct sctp_nets *)data;
michael@0 3758 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
michael@0 3759 (struct sockaddr *)&net->ro._l_addr, error);
michael@0 3760 break;
michael@0 3761 }
michael@0 3762 case SCTP_NOTIFY_SPECIAL_SP_FAIL:
michael@0 3763 sctp_notify_send_failed2(stcb, error,
michael@0 3764 (struct sctp_stream_queue_pending *)data, so_locked);
michael@0 3765 break;
michael@0 3766 case SCTP_NOTIFY_SENT_DG_FAIL:
michael@0 3767 sctp_notify_send_failed(stcb, 1, error,
michael@0 3768 (struct sctp_tmit_chunk *)data, so_locked);
michael@0 3769 break;
michael@0 3770 case SCTP_NOTIFY_UNSENT_DG_FAIL:
michael@0 3771 sctp_notify_send_failed(stcb, 0, error,
michael@0 3772 (struct sctp_tmit_chunk *)data, so_locked);
michael@0 3773 break;
michael@0 3774 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
michael@0 3775 {
michael@0 3776 uint32_t val;
michael@0 3777 val = *((uint32_t *)data);
michael@0 3778
michael@0 3779 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
michael@0 3780 break;
michael@0 3781 }
michael@0 3782 case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
michael@0 3783 if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
michael@0 3784 ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
michael@0 3785 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
michael@0 3786 } else {
michael@0 3787 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
michael@0 3788 }
michael@0 3789 break;
michael@0 3790 case SCTP_NOTIFY_ASSOC_REM_ABORTED:
michael@0 3791 if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
michael@0 3792 ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
michael@0 3793 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
michael@0 3794 } else {
michael@0 3795 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
michael@0 3796 }
michael@0 3797 break;
michael@0 3798 case SCTP_NOTIFY_ASSOC_RESTART:
michael@0 3799 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
michael@0 3800 if (stcb->asoc.peer_supports_auth == 0) {
michael@0 3801 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
michael@0 3802 NULL, so_locked);
michael@0 3803 }
michael@0 3804 break;
michael@0 3805 case SCTP_NOTIFY_STR_RESET_SEND:
michael@0 3806 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
michael@0 3807 break;
michael@0 3808 case SCTP_NOTIFY_STR_RESET_RECV:
michael@0 3809 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
michael@0 3810 break;
michael@0 3811 case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
michael@0 3812 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
michael@0 3813 (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_FAILED));
michael@0 3814 break;
michael@0 3815 case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
michael@0 3816 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
michael@0 3817 (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_DENIED));
michael@0 3818 break;
michael@0 3819 case SCTP_NOTIFY_STR_RESET_FAILED_IN:
michael@0 3820 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
michael@0 3821 (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_FAILED));
michael@0 3822 break;
michael@0 3823 case SCTP_NOTIFY_STR_RESET_DENIED_IN:
michael@0 3824 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
michael@0 3825 (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_DENIED));
michael@0 3826 break;
michael@0 3827 case SCTP_NOTIFY_ASCONF_ADD_IP:
michael@0 3828 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
michael@0 3829 error);
michael@0 3830 break;
michael@0 3831 case SCTP_NOTIFY_ASCONF_DELETE_IP:
michael@0 3832 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
michael@0 3833 error);
michael@0 3834 break;
michael@0 3835 case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
michael@0 3836 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
michael@0 3837 error);
michael@0 3838 break;
michael@0 3839 case SCTP_NOTIFY_PEER_SHUTDOWN:
michael@0 3840 sctp_notify_shutdown_event(stcb);
michael@0 3841 break;
michael@0 3842 case SCTP_NOTIFY_AUTH_NEW_KEY:
michael@0 3843 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
michael@0 3844 (uint16_t)(uintptr_t)data,
michael@0 3845 so_locked);
michael@0 3846 break;
michael@0 3847 case SCTP_NOTIFY_AUTH_FREE_KEY:
michael@0 3848 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
michael@0 3849 (uint16_t)(uintptr_t)data,
michael@0 3850 so_locked);
michael@0 3851 break;
michael@0 3852 case SCTP_NOTIFY_NO_PEER_AUTH:
michael@0 3853 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
michael@0 3854 (uint16_t)(uintptr_t)data,
michael@0 3855 so_locked);
michael@0 3856 break;
michael@0 3857 case SCTP_NOTIFY_SENDER_DRY:
michael@0 3858 sctp_notify_sender_dry_event(stcb, so_locked);
michael@0 3859 break;
michael@0 3860 case SCTP_NOTIFY_REMOTE_ERROR:
michael@0 3861 sctp_notify_remote_error(stcb, error, data);
michael@0 3862 break;
michael@0 3863 default:
michael@0 3864 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
michael@0 3865 __FUNCTION__, notification, notification);
michael@0 3866 break;
michael@0 3867 } /* end switch */
michael@0 3868 }
michael@0 3869
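/*
 * Walk the sent, send and per-stream output queues and fail every
 * pending chunk, issuing SENT_DG_FAIL, UNSENT_DG_FAIL or SPECIAL_SP_FAIL
 * notifications as appropriate, before the association goes away.
 * 'holds_lock' indicates whether the caller already holds the TCB send
 * lock.
 */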
michael@0 3870 void
michael@0 3871 sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
michael@0 3872 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
michael@0 3873 SCTP_UNUSED
michael@0 3874 #endif
michael@0 3875 )
michael@0 3876 {
michael@0 3877 struct sctp_association *asoc;
michael@0 3878 struct sctp_stream_out *outs;
michael@0 3879 struct sctp_tmit_chunk *chk, *nchk;
michael@0 3880 struct sctp_stream_queue_pending *sp, *nsp;
michael@0 3881 int i;
michael@0 3882
michael@0 3883 if (stcb == NULL) {
michael@0 3884 return;
michael@0 3885 }
michael@0 3886 asoc = &stcb->asoc;
michael@0 3887 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
michael@0 3888 /* already being freed */
michael@0 3889 return;
michael@0 3890 }
michael@0 3891 #if defined(__APPLE__)
michael@0 3892 if (so_locked) {
michael@0 3893 sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
michael@0 3894 } else {
michael@0 3895 sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
michael@0 3896 }
michael@0 3897 #endif
michael@0 3898 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
michael@0 3899 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
michael@0 3900 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
michael@0 3901 return;
michael@0 3902 }
michael@0 3903 	/* now go through all the gunk, freeing chunks */
michael@0 3904 if (holds_lock == 0) {
michael@0 3905 SCTP_TCB_SEND_LOCK(stcb);
michael@0 3906 }
michael@0 3907 /* sent queue SHOULD be empty */
michael@0 3908 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
michael@0 3909 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
michael@0 3910 asoc->sent_queue_cnt--;
michael@0 3911 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
michael@0 3912 if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
michael@0 3913 asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
michael@0 3914 #ifdef INVARIANTS
michael@0 3915 } else {
michael@0 3916 panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
michael@0 3917 #endif
michael@0 3918 }
michael@0 3919 }
michael@0 3920 if (chk->data != NULL) {
michael@0 3921 sctp_free_bufspace(stcb, asoc, chk, 1);
michael@0 3922 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
michael@0 3923 error, chk, so_locked);
michael@0 3924 if (chk->data) {
michael@0 3925 sctp_m_freem(chk->data);
michael@0 3926 chk->data = NULL;
michael@0 3927 }
michael@0 3928 }
michael@0 3929 sctp_free_a_chunk(stcb, chk, so_locked);
michael@0 3930 /*sa_ignore FREED_MEMORY*/
michael@0 3931 }
michael@0 3932 /* pending send queue SHOULD be empty */
michael@0 3933 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
michael@0 3934 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
michael@0 3935 asoc->send_queue_cnt--;
michael@0 3936 if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
michael@0 3937 asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
michael@0 3938 #ifdef INVARIANTS
michael@0 3939 } else {
michael@0 3940 panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
michael@0 3941 #endif
michael@0 3942 }
michael@0 3943 if (chk->data != NULL) {
michael@0 3944 sctp_free_bufspace(stcb, asoc, chk, 1);
michael@0 3945 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
michael@0 3946 error, chk, so_locked);
michael@0 3947 if (chk->data) {
michael@0 3948 sctp_m_freem(chk->data);
michael@0 3949 chk->data = NULL;
michael@0 3950 }
michael@0 3951 }
michael@0 3952 sctp_free_a_chunk(stcb, chk, so_locked);
michael@0 3953 /*sa_ignore FREED_MEMORY*/
michael@0 3954 }
michael@0 3955 for (i = 0; i < asoc->streamoutcnt; i++) {
michael@0 3956 /* For each stream */
michael@0 3957 outs = &asoc->strmout[i];
michael@0 3958 /* clean up any sends there */
michael@0 3959 asoc->locked_on_sending = NULL;
michael@0 3960 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
michael@0 3961 asoc->stream_queue_cnt--;
michael@0 3962 TAILQ_REMOVE(&outs->outqueue, sp, next);
michael@0 3963 sctp_free_spbufspace(stcb, asoc, sp);
michael@0 3964 if (sp->data) {
michael@0 3965 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
michael@0 3966 error, (void *)sp, so_locked);
michael@0 3967 if (sp->data) {
michael@0 3968 sctp_m_freem(sp->data);
michael@0 3969 sp->data = NULL;
michael@0 3970 sp->tail_mbuf = NULL;
michael@0 3971 sp->length = 0;
michael@0 3972 }
michael@0 3973 }
michael@0 3974 if (sp->net) {
michael@0 3975 sctp_free_remote_addr(sp->net);
michael@0 3976 sp->net = NULL;
michael@0 3977 }
michael@0 3978 /* Free the chunk */
michael@0 3979 sctp_free_a_strmoq(stcb, sp, so_locked);
michael@0 3980 /*sa_ignore FREED_MEMORY*/
michael@0 3981 }
michael@0 3982 }
michael@0 3983
michael@0 3984 if (holds_lock == 0) {
michael@0 3985 SCTP_TCB_SEND_UNLOCK(stcb);
michael@0 3986 }
michael@0 3987 }
michael@0 3988
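/*
 * Notify the ULP that the association has been aborted: report all
 * outbound data as failed and raise either SCTP_NOTIFY_ASSOC_REM_ABORTED
 * (the peer sent the ABORT) or SCTP_NOTIFY_ASSOC_LOC_ABORTED (we aborted
 * locally). 1-to-1 style connected sockets and sockets in the TCP pool
 * are additionally flagged SCTP_PCB_FLAGS_WAS_ABORTED.
 */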
michael@0 3989 void
michael@0 3990 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
michael@0 3991 struct sctp_abort_chunk *abort, int so_locked
michael@0 3992 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
michael@0 3993 SCTP_UNUSED
michael@0 3994 #endif
michael@0 3995 )
michael@0 3996 {
michael@0 3997 if (stcb == NULL) {
michael@0 3998 return;
michael@0 3999 }
michael@0 4000 #if defined(__APPLE__)
michael@0 4001 if (so_locked) {
michael@0 4002 sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
michael@0 4003 } else {
michael@0 4004 sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
michael@0 4005 }
michael@0 4006 #endif
michael@0 4007 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
michael@0 4008 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
michael@0 4009 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
michael@0 4010 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
michael@0 4011 }
michael@0 4012 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
michael@0 4013 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
michael@0 4014 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
michael@0 4015 return;
michael@0 4016 }
michael@0 4017 /* Tell them we lost the asoc */
michael@0 4018 sctp_report_all_outbound(stcb, error, 1, so_locked);
michael@0 4019 if (from_peer) {
michael@0 4020 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
michael@0 4021 } else {
michael@0 4022 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
michael@0 4023 }
michael@0 4024 }
michael@0 4025
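/*
 * Abort an association in response to an incoming packet: send an ABORT
 * back to the peer (using the peer's vtag and vrf id when a TCB exists),
 * deliver the abort notification, update the statistics and free the
 * TCB.
 */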
michael@0 4026 void
michael@0 4027 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
michael@0 4028 struct mbuf *m, int iphlen,
michael@0 4029 struct sockaddr *src, struct sockaddr *dst,
michael@0 4030 struct sctphdr *sh, struct mbuf *op_err,
michael@0 4031 #if defined(__FreeBSD__)
michael@0 4032 uint8_t use_mflowid, uint32_t mflowid,
michael@0 4033 #endif
michael@0 4034 uint32_t vrf_id, uint16_t port)
michael@0 4035 {
michael@0 4036 uint32_t vtag;
michael@0 4037 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 4038 struct socket *so;
michael@0 4039 #endif
michael@0 4040
michael@0 4041 vtag = 0;
michael@0 4042 if (stcb != NULL) {
michael@0 4043 /* We have a TCB to abort, send notification too */
michael@0 4044 vtag = stcb->asoc.peer_vtag;
michael@0 4045 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
michael@0 4046 /* get the assoc vrf id and table id */
michael@0 4047 vrf_id = stcb->asoc.vrf_id;
michael@0 4048 stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
michael@0 4049 }
michael@0 4050 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
michael@0 4051 #if defined(__FreeBSD__)
michael@0 4052 use_mflowid, mflowid,
michael@0 4053 #endif
michael@0 4054 vrf_id, port);
michael@0 4055 if (stcb != NULL) {
michael@0 4056 /* Ok, now lets free it */
michael@0 4057 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 4058 so = SCTP_INP_SO(inp);
michael@0 4059 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 4060 SCTP_TCB_UNLOCK(stcb);
michael@0 4061 SCTP_SOCKET_LOCK(so, 1);
michael@0 4062 SCTP_TCB_LOCK(stcb);
michael@0 4063 atomic_subtract_int(&stcb->asoc.refcnt, 1);
michael@0 4064 #endif
michael@0 4065 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
michael@0 4066 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
michael@0 4067 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
michael@0 4068 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
michael@0 4069 }
michael@0 4070 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL+SCTP_LOC_4);
michael@0 4071 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 4072 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 4073 #endif
michael@0 4074 }
michael@0 4075 }
michael@0 4076 #ifdef SCTP_ASOCLOG_OF_TSNS
michael@0 4077 void
michael@0 4078 sctp_print_out_track_log(struct sctp_tcb *stcb)
michael@0 4079 {
michael@0 4080 #ifdef NOSIY_PRINTS
michael@0 4081 int i;
michael@0 4082 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
michael@0 4083 SCTP_PRINTF("IN bound TSN log-aaa\n");
michael@0 4084 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
michael@0 4085 SCTP_PRINTF("None rcvd\n");
michael@0 4086 goto none_in;
michael@0 4087 }
michael@0 4088 if (stcb->asoc.tsn_in_wrapped) {
michael@0 4089 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
michael@0 4090 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
michael@0 4091 stcb->asoc.in_tsnlog[i].tsn,
michael@0 4092 stcb->asoc.in_tsnlog[i].strm,
michael@0 4093 stcb->asoc.in_tsnlog[i].seq,
michael@0 4094 stcb->asoc.in_tsnlog[i].flgs,
michael@0 4095 stcb->asoc.in_tsnlog[i].sz);
michael@0 4096 }
michael@0 4097 }
michael@0 4098 if (stcb->asoc.tsn_in_at) {
michael@0 4099 for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
michael@0 4100 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
michael@0 4101 stcb->asoc.in_tsnlog[i].tsn,
michael@0 4102 stcb->asoc.in_tsnlog[i].strm,
michael@0 4103 stcb->asoc.in_tsnlog[i].seq,
michael@0 4104 stcb->asoc.in_tsnlog[i].flgs,
michael@0 4105 stcb->asoc.in_tsnlog[i].sz);
michael@0 4106 }
michael@0 4107 }
michael@0 4108 none_in:
michael@0 4109 SCTP_PRINTF("OUT bound TSN log-aaa\n");
michael@0 4110 if ((stcb->asoc.tsn_out_at == 0) &&
michael@0 4111 (stcb->asoc.tsn_out_wrapped == 0)) {
michael@0 4112 SCTP_PRINTF("None sent\n");
michael@0 4113 }
michael@0 4114 if (stcb->asoc.tsn_out_wrapped) {
michael@0 4115 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
michael@0 4116 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
michael@0 4117 stcb->asoc.out_tsnlog[i].tsn,
michael@0 4118 stcb->asoc.out_tsnlog[i].strm,
michael@0 4119 stcb->asoc.out_tsnlog[i].seq,
michael@0 4120 stcb->asoc.out_tsnlog[i].flgs,
michael@0 4121 stcb->asoc.out_tsnlog[i].sz);
michael@0 4122 }
michael@0 4123 }
michael@0 4124 if (stcb->asoc.tsn_out_at) {
michael@0 4125 for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
michael@0 4126 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
michael@0 4127 stcb->asoc.out_tsnlog[i].tsn,
michael@0 4128 stcb->asoc.out_tsnlog[i].strm,
michael@0 4129 stcb->asoc.out_tsnlog[i].seq,
michael@0 4130 stcb->asoc.out_tsnlog[i].flgs,
michael@0 4131 stcb->asoc.out_tsnlog[i].sz);
michael@0 4132 }
michael@0 4133 }
michael@0 4134 #endif
michael@0 4135 }
michael@0 4136 #endif
michael@0 4137
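/*
 * Abort an existing association for which we hold the TCB: notify the ULP
 * (unless the socket is already gone), send an ABORT to the peer, bump the
 * abort statistics, and free the association. On platforms that need it
 * (__APPLE__ / SCTP_SO_LOCK_TESTING) the socket lock is taken around the
 * free when the caller did not already hold it.
 */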
michael@0 4138 void
michael@0 4139 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
michael@0 4140 struct mbuf *op_err,
michael@0 4141 int so_locked
michael@0 4142 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
michael@0 4143 SCTP_UNUSED
michael@0 4144 #endif
michael@0 4145 )
michael@0 4146 {
michael@0 4147 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 4148 struct socket *so;
michael@0 4149 #endif
michael@0 4150
michael@0 4151 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 4152 so = SCTP_INP_SO(inp);
michael@0 4153 #endif
michael@0 4154 #if defined(__APPLE__)
michael@0 4155 if (so_locked) {
michael@0 4156 sctp_lock_assert(SCTP_INP_SO(inp));
michael@0 4157 } else {
michael@0 4158 sctp_unlock_assert(SCTP_INP_SO(inp));
michael@0 4159 }
michael@0 4160 #endif
michael@0 4161 if (stcb == NULL) {
michael@0 4162 /* Got to have a TCB */
michael@0 4163 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
michael@0 4164 if (LIST_EMPTY(&inp->sctp_asoc_list)) {
michael@0 4165 #if defined(__APPLE__)
michael@0 4166 if (!so_locked) {
michael@0 4167 SCTP_SOCKET_LOCK(so, 1);
michael@0 4168 }
michael@0 4169 #endif
michael@0 4170 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
michael@0 4171 SCTP_CALLED_DIRECTLY_NOCMPSET);
michael@0 4172 #if defined(__APPLE__)
michael@0 4173 if (!so_locked) {
michael@0 4174 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 4175 }
michael@0 4176 #endif
michael@0 4177 }
michael@0 4178 }
michael@0 4179 return;
michael@0 4180 } else {
michael@0 4181 stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
michael@0 4182 }
michael@0 4183 /* notify the ulp */
michael@0 4184 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
michael@0 4185 sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
michael@0 4186 }
michael@0 4187 /* notify the peer */
michael@0 4188 sctp_send_abort_tcb(stcb, op_err, so_locked);
michael@0 4189 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
michael@0 4190 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
michael@0 4191 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
michael@0 4192 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
michael@0 4193 }
michael@0 4194 /* now free the asoc */
michael@0 4195 #ifdef SCTP_ASOCLOG_OF_TSNS
michael@0 4196 sctp_print_out_track_log(stcb);
michael@0 4197 #endif
michael@0 4198 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 4199 if (!so_locked) {
michael@0 4200 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 4201 SCTP_TCB_UNLOCK(stcb);
michael@0 4202 SCTP_SOCKET_LOCK(so, 1);
michael@0 4203 SCTP_TCB_LOCK(stcb);
michael@0 4204 atomic_subtract_int(&stcb->asoc.refcnt, 1);
michael@0 4205 }
michael@0 4206 #endif
michael@0 4207 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL+SCTP_LOC_5);
michael@0 4208 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 4209 if (!so_locked) {
michael@0 4210 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 4211 }
michael@0 4212 #endif
michael@0 4213 }
michael@0 4214
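/*
 * Handle an "out of the blue" packet, i.e. one that matches no existing
 * association. If the owning socket is already gone and no associations
 * remain, the inpcb is freed first. The chunks are then walked once:
 * COOKIE-ECHO, PACKET-DROPPED, ABORT and SHUTDOWN-COMPLETE are silently
 * ignored, SHUTDOWN-ACK is answered with a SHUTDOWN-COMPLETE, and anything
 * else falls through to an ABORT, subject to the sctp_blackhole sysctl
 * (a value of 1 suppresses the ABORT when the packet contained an INIT;
 * higher values suppress it entirely).
 */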
michael@0 4215 void
michael@0 4216 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
michael@0 4217 struct sockaddr *src, struct sockaddr *dst,
michael@0 4218 struct sctphdr *sh, struct sctp_inpcb *inp,
michael@0 4219 #if defined(__FreeBSD__)
michael@0 4220 uint8_t use_mflowid, uint32_t mflowid,
michael@0 4221 #endif
michael@0 4222 uint32_t vrf_id, uint16_t port)
michael@0 4223 {
michael@0 4224 struct sctp_chunkhdr *ch, chunk_buf;
michael@0 4225 unsigned int chk_length;
michael@0 4226 int contains_init_chunk;
michael@0 4227
michael@0 4228 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
michael@0 4229 /* Generate a TO address for future reference */
michael@0 4230 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
michael@0 4231 if (LIST_EMPTY(&inp->sctp_asoc_list)) {
michael@0 4232 #if defined(__APPLE__)
michael@0 4233 SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
michael@0 4234 #endif
michael@0 4235 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
michael@0 4236 SCTP_CALLED_DIRECTLY_NOCMPSET);
michael@0 4237 #if defined(__APPLE__)
michael@0 4238 SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
michael@0 4239 #endif
michael@0 4240 }
michael@0 4241 }
michael@0 4242 contains_init_chunk = 0;
michael@0 4243 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
michael@0 4244 sizeof(*ch), (uint8_t *) & chunk_buf);
michael@0 4245 while (ch != NULL) {
michael@0 4246 chk_length = ntohs(ch->chunk_length);
michael@0 4247 if (chk_length < sizeof(*ch)) {
michael@0 4248 /* break to abort land */
michael@0 4249 break;
michael@0 4250 }
michael@0 4251 switch (ch->chunk_type) {
michael@0 4252 case SCTP_INIT:
michael@0 4253 contains_init_chunk = 1;
michael@0 4254 break;
michael@0 4255 case SCTP_COOKIE_ECHO:
michael@0 4256 /* We hit here only if the assoc is being freed */
michael@0 4257 return;
michael@0 4258 case SCTP_PACKET_DROPPED:
michael@0 4259 /* we don't respond to pkt-dropped */
michael@0 4260 return;
michael@0 4261 case SCTP_ABORT_ASSOCIATION:
michael@0 4262 /* we don't respond with an ABORT to an ABORT */
michael@0 4263 return;
michael@0 4264 case SCTP_SHUTDOWN_COMPLETE:
michael@0 4265 /*
michael@0 4266 * we ignore it since we are not waiting for it and
michael@0 4267 * peer is gone
michael@0 4268 */
michael@0 4269 return;
michael@0 4270 case SCTP_SHUTDOWN_ACK:
michael@0 4271 sctp_send_shutdown_complete2(src, dst, sh,
michael@0 4272 #if defined(__FreeBSD__)
michael@0 4273 use_mflowid, mflowid,
michael@0 4274 #endif
michael@0 4275 vrf_id, port);
michael@0 4276 return;
michael@0 4277 default:
michael@0 4278 break;
michael@0 4279 }
michael@0 4280 offset += SCTP_SIZE32(chk_length);
michael@0 4281 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
michael@0 4282 sizeof(*ch), (uint8_t *) & chunk_buf);
michael@0 4283 }
michael@0 4284 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
michael@0 4285 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
michael@0 4286 (contains_init_chunk == 0))) {
michael@0 4287 sctp_send_abort(m, iphlen, src, dst, sh, 0, NULL,
michael@0 4288 #if defined(__FreeBSD__)
michael@0 4289 use_mflowid, mflowid,
michael@0 4290 #endif
michael@0 4291 vrf_id, port);
michael@0 4292 }
michael@0 4293 }
michael@0 4294
michael@0 4295 /*
michael@0 4296 * check the inbound datagram to make sure there is not an abort inside it,
michael@0 4297 * if there is return 1, else return 0.
michael@0 4298 */
michael@0 4299 int
michael@0 4300 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
michael@0 4301 {
michael@0 4302 struct sctp_chunkhdr *ch;
michael@0 4303 struct sctp_init_chunk *init_chk, chunk_buf;
michael@0 4304 int offset;
michael@0 4305 unsigned int chk_length;
michael@0 4306
michael@0 4307 offset = iphlen + sizeof(struct sctphdr);
michael@0 4308 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
michael@0 4309 (uint8_t *) & chunk_buf);
michael@0 4310 while (ch != NULL) {
michael@0 4311 chk_length = ntohs(ch->chunk_length);
michael@0 4312 if (chk_length < sizeof(*ch)) {
michael@0 4313 /* packet is probably corrupt */
michael@0 4314 break;
michael@0 4315 }
michael@0 4316 /* we seem to be ok, is it an abort? */
michael@0 4317 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
michael@0 4318 /* yep, tell them */
michael@0 4319 return (1);
michael@0 4320 }
michael@0 4321 if (ch->chunk_type == SCTP_INITIATION) {
michael@0 4322 /* need to update the Vtag */
michael@0 4323 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
michael@0 4324 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
michael@0 4325 if (init_chk != NULL) {
michael@0 4326 *vtagfill = ntohl(init_chk->init.initiate_tag);
michael@0 4327 }
michael@0 4328 }
michael@0 4329 /* Nope, move to the next chunk */
michael@0 4330 offset += SCTP_SIZE32(chk_length);
michael@0 4331 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
michael@0 4332 sizeof(*ch), (uint8_t *) & chunk_buf);
michael@0 4333 }
michael@0 4334 return (0);
michael@0 4335 }
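/*
 * Illustrative usage (hypothetical caller): a receive path that wants to know
 * up front whether the datagram carries an ABORT could do
 *
 *	uint32_t vtag = 0;
 *
 *	if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
 *		... packet contains an ABORT; if an INIT chunk was also
 *		    present, vtag now holds its initiate-tag ...
 *	}
 */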
michael@0 4336
michael@0 4337 /*
michael@0 4338  * Currently (2/02), ifa_addr embeds scope_ids and does not have sin6_scope_id
michael@0 4339  * set (i.e. it is 0), so this function exists to compare link-local scopes.
michael@0 4340 */
michael@0 4341 #ifdef INET6
michael@0 4342 uint32_t
michael@0 4343 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
michael@0 4344 {
michael@0 4345 #if defined(__Userspace__)
michael@0 4346 /*__Userspace__ Returning 1 here always */
michael@0 4347 #endif
michael@0 4348 #if defined(SCTP_EMBEDDED_V6_SCOPE)
michael@0 4349 struct sockaddr_in6 a, b;
michael@0 4350
michael@0 4351 /* save copies */
michael@0 4352 a = *addr1;
michael@0 4353 b = *addr2;
michael@0 4354
michael@0 4355 if (a.sin6_scope_id == 0)
michael@0 4356 #ifdef SCTP_KAME
michael@0 4357 if (sa6_recoverscope(&a)) {
michael@0 4358 #else
michael@0 4359 if (in6_recoverscope(&a, &a.sin6_addr, NULL)) {
michael@0 4360 #endif /* SCTP_KAME */
michael@0 4361 /* can't get scope, so can't match */
michael@0 4362 return (0);
michael@0 4363 }
michael@0 4364 if (b.sin6_scope_id == 0)
michael@0 4365 #ifdef SCTP_KAME
michael@0 4366 if (sa6_recoverscope(&b)) {
michael@0 4367 #else
michael@0 4368 if (in6_recoverscope(&b, &b.sin6_addr, NULL)) {
michael@0 4369 #endif /* SCTP_KAME */
michael@0 4370 /* can't get scope, so can't match */
michael@0 4371 return (0);
michael@0 4372 }
michael@0 4373 if (a.sin6_scope_id != b.sin6_scope_id)
michael@0 4374 return (0);
michael@0 4375 #else
michael@0 4376 if (addr1->sin6_scope_id != addr2->sin6_scope_id)
michael@0 4377 return (0);
michael@0 4378 #endif /* SCTP_EMBEDDED_V6_SCOPE */
michael@0 4379
michael@0 4380 return (1);
michael@0 4381 }
michael@0 4382
michael@0 4383 #if defined(SCTP_EMBEDDED_V6_SCOPE)
michael@0 4384 /*
michael@0 4385 * returns a sockaddr_in6 with embedded scope recovered and removed
michael@0 4386 */
michael@0 4387 struct sockaddr_in6 *
michael@0 4388 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
michael@0 4389 {
michael@0 4390 /* check and strip embedded scope junk */
michael@0 4391 if (addr->sin6_family == AF_INET6) {
michael@0 4392 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
michael@0 4393 if (addr->sin6_scope_id == 0) {
michael@0 4394 *store = *addr;
michael@0 4395 #ifdef SCTP_KAME
michael@0 4396 if (!sa6_recoverscope(store)) {
michael@0 4397 #else
michael@0 4398 if (!in6_recoverscope(store, &store->sin6_addr,
michael@0 4399 NULL)) {
michael@0 4400 #endif /* SCTP_KAME */
michael@0 4401 /* use the recovered scope */
michael@0 4402 addr = store;
michael@0 4403 }
michael@0 4404 } else {
michael@0 4405 /* else, return the original "to" addr */
michael@0 4406 in6_clearscope(&addr->sin6_addr);
michael@0 4407 }
michael@0 4408 }
michael@0 4409 }
michael@0 4410 return (addr);
michael@0 4411 }
michael@0 4412 #endif /* SCTP_EMBEDDED_V6_SCOPE */
michael@0 4413 #endif
michael@0 4414
michael@0 4415 /*
michael@0 4416 * are the two addresses the same? currently a "scopeless" check returns: 1
michael@0 4417 * if same, 0 if not
michael@0 4418 */
michael@0 4419 int
michael@0 4420 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
michael@0 4421 {
michael@0 4422
michael@0 4423 /* must be valid */
michael@0 4424 if (sa1 == NULL || sa2 == NULL)
michael@0 4425 return (0);
michael@0 4426
michael@0 4427 /* must be the same family */
michael@0 4428 if (sa1->sa_family != sa2->sa_family)
michael@0 4429 return (0);
michael@0 4430
michael@0 4431 switch (sa1->sa_family) {
michael@0 4432 #ifdef INET6
michael@0 4433 case AF_INET6:
michael@0 4434 {
michael@0 4435 /* IPv6 addresses */
michael@0 4436 struct sockaddr_in6 *sin6_1, *sin6_2;
michael@0 4437
michael@0 4438 sin6_1 = (struct sockaddr_in6 *)sa1;
michael@0 4439 sin6_2 = (struct sockaddr_in6 *)sa2;
michael@0 4440 return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
michael@0 4441 sin6_2));
michael@0 4442 }
michael@0 4443 #endif
michael@0 4444 #ifdef INET
michael@0 4445 case AF_INET:
michael@0 4446 {
michael@0 4447 /* IPv4 addresses */
michael@0 4448 struct sockaddr_in *sin_1, *sin_2;
michael@0 4449
michael@0 4450 sin_1 = (struct sockaddr_in *)sa1;
michael@0 4451 sin_2 = (struct sockaddr_in *)sa2;
michael@0 4452 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
michael@0 4453 }
michael@0 4454 #endif
michael@0 4455 #if defined(__Userspace__)
michael@0 4456 case AF_CONN:
michael@0 4457 {
michael@0 4458 struct sockaddr_conn *sconn_1, *sconn_2;
michael@0 4459
michael@0 4460 sconn_1 = (struct sockaddr_conn *)sa1;
michael@0 4461 sconn_2 = (struct sockaddr_conn *)sa2;
michael@0 4462 return (sconn_1->sconn_addr == sconn_2->sconn_addr);
michael@0 4463 }
michael@0 4464 #endif
michael@0 4465 default:
michael@0 4466 /* we don't do these... */
michael@0 4467 return (0);
michael@0 4468 }
michael@0 4469 }
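/*
 * Illustrative behavior (hypothetical values): sctp_cmpaddr() compares only
 * the address portion, so two sockaddr_in structures for 192.0.2.1 with
 * different ports still compare equal, while an AF_INET and an AF_INET6
 * sockaddr never compare equal, regardless of the addresses they carry.
 */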
michael@0 4470
michael@0 4471 void
michael@0 4472 sctp_print_address(struct sockaddr *sa)
michael@0 4473 {
michael@0 4474 #ifdef INET6
michael@0 4475 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
michael@0 4476 char ip6buf[INET6_ADDRSTRLEN];
michael@0 4477 #endif
michael@0 4478 #endif
michael@0 4479
michael@0 4480 switch (sa->sa_family) {
michael@0 4481 #ifdef INET6
michael@0 4482 case AF_INET6:
michael@0 4483 {
michael@0 4484 struct sockaddr_in6 *sin6;
michael@0 4485
michael@0 4486 sin6 = (struct sockaddr_in6 *)sa;
michael@0 4487 #if defined(__Userspace__)
michael@0 4488 SCTP_PRINTF("IPv6 address: %x:%x:%x:%x:%x:%x:%x:%x:port:%d scope:%u\n",
michael@0 4489 ntohs(sin6->sin6_addr.s6_addr16[0]),
michael@0 4490 ntohs(sin6->sin6_addr.s6_addr16[1]),
michael@0 4491 ntohs(sin6->sin6_addr.s6_addr16[2]),
michael@0 4492 ntohs(sin6->sin6_addr.s6_addr16[3]),
michael@0 4493 ntohs(sin6->sin6_addr.s6_addr16[4]),
michael@0 4494 ntohs(sin6->sin6_addr.s6_addr16[5]),
michael@0 4495 ntohs(sin6->sin6_addr.s6_addr16[6]),
michael@0 4496 ntohs(sin6->sin6_addr.s6_addr16[7]),
michael@0 4497 ntohs(sin6->sin6_port),
michael@0 4498 sin6->sin6_scope_id);
michael@0 4499 #else
michael@0 4500 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
michael@0 4501 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
michael@0 4502 ip6_sprintf(ip6buf, &sin6->sin6_addr),
michael@0 4503 ntohs(sin6->sin6_port),
michael@0 4504 sin6->sin6_scope_id);
michael@0 4505 #else
michael@0 4506 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
michael@0 4507 ip6_sprintf(&sin6->sin6_addr),
michael@0 4508 ntohs(sin6->sin6_port),
michael@0 4509 sin6->sin6_scope_id);
michael@0 4510 #endif
michael@0 4511 #endif
michael@0 4512 break;
michael@0 4513 }
michael@0 4514 #endif
michael@0 4515 #ifdef INET
michael@0 4516 case AF_INET:
michael@0 4517 {
michael@0 4518 struct sockaddr_in *sin;
michael@0 4519 unsigned char *p;
michael@0 4520
michael@0 4521 sin = (struct sockaddr_in *)sa;
michael@0 4522 p = (unsigned char *)&sin->sin_addr;
michael@0 4523 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
michael@0 4524 p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
michael@0 4525 break;
michael@0 4526 }
michael@0 4527 #endif
michael@0 4528 #if defined(__Userspace__)
michael@0 4529 case AF_CONN:
michael@0 4530 {
michael@0 4531 struct sockaddr_conn *sconn;
michael@0 4532
michael@0 4533 sconn = (struct sockaddr_conn *)sa;
michael@0 4534 SCTP_PRINTF("AF_CONN address: %p\n", sconn->sconn_addr);
michael@0 4535 break;
michael@0 4536 }
michael@0 4537 #endif
michael@0 4538 default:
michael@0 4539 SCTP_PRINTF("?\n");
michael@0 4540 break;
michael@0 4541 }
michael@0 4542 }
michael@0 4543
michael@0 4544 void
michael@0 4545 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
michael@0 4546 struct sctp_inpcb *new_inp,
michael@0 4547 struct sctp_tcb *stcb,
michael@0 4548 int waitflags)
michael@0 4549 {
michael@0 4550 /*
michael@0 4551 * go through our old INP and pull off any control structures that
michael@0 4552 	 * belong to stcb and move them to the new inp.
michael@0 4553 */
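/*
 * This is done in two passes: first, under the old inp's read lock (and with
 * the old socket's receive buffer sb-locked), matching controls are unhooked
 * onto a local tmp_queue and their mbufs are uncharged from the old so_rcv;
 * second, under the new inp's read lock, the same controls are appended to
 * the new read_queue and their mbufs are charged to the new so_rcv.
 */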
michael@0 4554 struct socket *old_so, *new_so;
michael@0 4555 struct sctp_queued_to_read *control, *nctl;
michael@0 4556 struct sctp_readhead tmp_queue;
michael@0 4557 struct mbuf *m;
michael@0 4558 int error = 0;
michael@0 4559
michael@0 4560 old_so = old_inp->sctp_socket;
michael@0 4561 new_so = new_inp->sctp_socket;
michael@0 4562 TAILQ_INIT(&tmp_queue);
michael@0 4563 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
michael@0 4564 SOCKBUF_LOCK(&(old_so->so_rcv));
michael@0 4565 #endif
michael@0 4566 #if defined(__FreeBSD__) || defined(__APPLE__)
michael@0 4567 error = sblock(&old_so->so_rcv, waitflags);
michael@0 4568 #endif
michael@0 4569 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
michael@0 4570 SOCKBUF_UNLOCK(&(old_so->so_rcv));
michael@0 4571 #endif
michael@0 4572 if (error) {
michael@0 4573 /* Gak, can't get sblock, we have a problem.
michael@0 4574 * Data will be left stranded, and we
michael@0 4575 * don't dare look at it since the
michael@0 4576 * other thread may be reading something.
michael@0 4577 * Oh well, it's a screwed up app that does
michael@0 4578 * a peeloff OR an accept while reading
michael@0 4579 * from the main socket... actually it's
michael@0 4580 * only the peeloff() case, since I think
michael@0 4581 * read will fail on a listening socket.
michael@0 4582 */
michael@0 4583 return;
michael@0 4584 }
michael@0 4585 /* lock the socket buffers */
michael@0 4586 SCTP_INP_READ_LOCK(old_inp);
michael@0 4587 TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
michael@0 4588 /* Pull off all for our target stcb */
michael@0 4589 if (control->stcb == stcb) {
michael@0 4590 /* remove it; we want it */
michael@0 4591 TAILQ_REMOVE(&old_inp->read_queue, control, next);
michael@0 4592 TAILQ_INSERT_TAIL(&tmp_queue, control, next);
michael@0 4593 m = control->data;
michael@0 4594 while (m) {
michael@0 4595 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
michael@0 4596 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE,SCTP_BUF_LEN(m));
michael@0 4597 }
michael@0 4598 sctp_sbfree(control, stcb, &old_so->so_rcv, m);
michael@0 4599 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
michael@0 4600 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
michael@0 4601 }
michael@0 4602 m = SCTP_BUF_NEXT(m);
michael@0 4603 }
michael@0 4604 }
michael@0 4605 }
michael@0 4606 SCTP_INP_READ_UNLOCK(old_inp);
michael@0 4607 /* Remove the sb-lock on the old socket */
michael@0 4608 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
michael@0 4609 SOCKBUF_LOCK(&(old_so->so_rcv));
michael@0 4610 #endif
michael@0 4611 #if defined(__APPLE__)
michael@0 4612 sbunlock(&old_so->so_rcv, 1);
michael@0 4613 #endif
michael@0 4614
michael@0 4615 #if defined(__FreeBSD__)
michael@0 4616 sbunlock(&old_so->so_rcv);
michael@0 4617 #endif
michael@0 4618 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
michael@0 4619 SOCKBUF_UNLOCK(&(old_so->so_rcv));
michael@0 4620 #endif
michael@0 4621 /* Now we move them over to the new socket buffer */
michael@0 4622 SCTP_INP_READ_LOCK(new_inp);
michael@0 4623 TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
michael@0 4624 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
michael@0 4625 m = control->data;
michael@0 4626 while (m) {
michael@0 4627 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
michael@0 4628 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
michael@0 4629 }
michael@0 4630 sctp_sballoc(stcb, &new_so->so_rcv, m);
michael@0 4631 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
michael@0 4632 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
michael@0 4633 }
michael@0 4634 m = SCTP_BUF_NEXT(m);
michael@0 4635 }
michael@0 4636 }
michael@0 4637 SCTP_INP_READ_UNLOCK(new_inp);
michael@0 4638 }
michael@0 4639
michael@0 4640 void
michael@0 4641 sctp_add_to_readq(struct sctp_inpcb *inp,
michael@0 4642 struct sctp_tcb *stcb,
michael@0 4643 struct sctp_queued_to_read *control,
michael@0 4644 struct sockbuf *sb,
michael@0 4645 int end,
michael@0 4646 int inp_read_lock_held,
michael@0 4647 int so_locked
michael@0 4648 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
michael@0 4649 SCTP_UNUSED
michael@0 4650 #endif
michael@0 4651 )
michael@0 4652 {
michael@0 4653 /*
michael@0 4654 * Here we must place the control on the end of the socket read
michael@0 4655 * queue AND increment sb_cc so that select will work properly on
michael@0 4656 * read.
michael@0 4657 */
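/*
 * Along the way, zero-length mbufs are stripped from the chain, each
 * remaining mbuf is charged to sb via sctp_sballoc(), and control->length is
 * accumulated. Once queued (or handed to the userspace recv_callback), the
 * reader is woken with sctp_sorwakeup() unless zero-copy events are enabled.
 */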
michael@0 4658 struct mbuf *m, *prev = NULL;
michael@0 4659
michael@0 4660 if (inp == NULL) {
michael@0 4661 /* Gak, TSNH!! */
michael@0 4662 #ifdef INVARIANTS
michael@0 4663 panic("Gak, inp NULL on add_to_readq");
michael@0 4664 #endif
michael@0 4665 return;
michael@0 4666 }
michael@0 4667 #if defined(__APPLE__)
michael@0 4668 if (so_locked) {
michael@0 4669 sctp_lock_assert(SCTP_INP_SO(inp));
michael@0 4670 } else {
michael@0 4671 sctp_unlock_assert(SCTP_INP_SO(inp));
michael@0 4672 }
michael@0 4673 #endif
michael@0 4674 if (inp_read_lock_held == 0)
michael@0 4675 SCTP_INP_READ_LOCK(inp);
michael@0 4676 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
michael@0 4677 sctp_free_remote_addr(control->whoFrom);
michael@0 4678 if (control->data) {
michael@0 4679 sctp_m_freem(control->data);
michael@0 4680 control->data = NULL;
michael@0 4681 }
michael@0 4682 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
michael@0 4683 if (inp_read_lock_held == 0)
michael@0 4684 SCTP_INP_READ_UNLOCK(inp);
michael@0 4685 return;
michael@0 4686 }
michael@0 4687 if (!(control->spec_flags & M_NOTIFICATION)) {
michael@0 4688 atomic_add_int(&inp->total_recvs, 1);
michael@0 4689 if (!control->do_not_ref_stcb) {
michael@0 4690 atomic_add_int(&stcb->total_recvs, 1);
michael@0 4691 }
michael@0 4692 }
michael@0 4693 m = control->data;
michael@0 4694 control->held_length = 0;
michael@0 4695 control->length = 0;
michael@0 4696 while (m) {
michael@0 4697 if (SCTP_BUF_LEN(m) == 0) {
michael@0 4698 /* Skip mbufs with NO length */
michael@0 4699 if (prev == NULL) {
michael@0 4700 /* First one */
michael@0 4701 control->data = sctp_m_free(m);
michael@0 4702 m = control->data;
michael@0 4703 } else {
michael@0 4704 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
michael@0 4705 m = SCTP_BUF_NEXT(prev);
michael@0 4706 }
michael@0 4707 if (m == NULL) {
michael@0 4708 control->tail_mbuf = prev;
michael@0 4709 }
michael@0 4710 continue;
michael@0 4711 }
michael@0 4712 prev = m;
michael@0 4713 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
michael@0 4714 sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
michael@0 4715 }
michael@0 4716 sctp_sballoc(stcb, sb, m);
michael@0 4717 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
michael@0 4718 sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
michael@0 4719 }
michael@0 4720 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
michael@0 4721 m = SCTP_BUF_NEXT(m);
michael@0 4722 }
michael@0 4723 if (prev != NULL) {
michael@0 4724 control->tail_mbuf = prev;
michael@0 4725 } else {
michael@0 4726 /* Everything got collapsed out?? */
michael@0 4727 sctp_free_remote_addr(control->whoFrom);
michael@0 4728 SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
michael@0 4729 if (inp_read_lock_held == 0)
michael@0 4730 SCTP_INP_READ_UNLOCK(inp);
michael@0 4731 return;
michael@0 4732 }
michael@0 4733 if (end) {
michael@0 4734 control->end_added = 1;
michael@0 4735 }
michael@0 4736 #if defined(__Userspace__)
michael@0 4737 if (inp->recv_callback) {
michael@0 4738 if (inp_read_lock_held == 0)
michael@0 4739 SCTP_INP_READ_UNLOCK(inp);
michael@0 4740 if (control->end_added == 1) {
michael@0 4741 struct socket *so;
michael@0 4742 struct mbuf *m;
michael@0 4743 char *buffer;
michael@0 4744 struct sctp_rcvinfo rcv;
michael@0 4745 union sctp_sockstore addr;
michael@0 4746 int flags;
michael@0 4747
michael@0 4748 if ((buffer = malloc(control->length)) == NULL) {
michael@0 4749 return;
michael@0 4750 }
michael@0 4751 so = stcb->sctp_socket;
michael@0 4752 for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
michael@0 4753 sctp_sbfree(control, control->stcb, &so->so_rcv, m);
michael@0 4754 }
michael@0 4755 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 4756 SCTP_TCB_UNLOCK(stcb);
michael@0 4757 m_copydata(control->data, 0, control->length, buffer);
michael@0 4758 memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
michael@0 4759 rcv.rcv_sid = control->sinfo_stream;
michael@0 4760 rcv.rcv_ssn = control->sinfo_ssn;
michael@0 4761 rcv.rcv_flags = control->sinfo_flags;
michael@0 4762 rcv.rcv_ppid = control->sinfo_ppid;
michael@0 4763 rcv.rcv_tsn = control->sinfo_tsn;
michael@0 4764 rcv.rcv_cumtsn = control->sinfo_cumtsn;
michael@0 4765 rcv.rcv_context = control->sinfo_context;
michael@0 4766 rcv.rcv_assoc_id = control->sinfo_assoc_id;
michael@0 4767 memset(&addr, 0, sizeof(union sctp_sockstore));
michael@0 4768 switch (control->whoFrom->ro._l_addr.sa.sa_family) {
michael@0 4769 #ifdef INET
michael@0 4770 case AF_INET:
michael@0 4771 addr.sin = control->whoFrom->ro._l_addr.sin;
michael@0 4772 break;
michael@0 4773 #endif
michael@0 4774 #ifdef INET6
michael@0 4775 case AF_INET6:
michael@0 4776 addr.sin6 = control->whoFrom->ro._l_addr.sin6;
michael@0 4777 break;
michael@0 4778 #endif
michael@0 4779 case AF_CONN:
michael@0 4780 addr.sconn = control->whoFrom->ro._l_addr.sconn;
michael@0 4781 break;
michael@0 4782 default:
michael@0 4783 addr.sa = control->whoFrom->ro._l_addr.sa;
michael@0 4784 break;
michael@0 4785 }
michael@0 4786 flags = MSG_EOR;
michael@0 4787 if (control->spec_flags & M_NOTIFICATION) {
michael@0 4788 flags |= MSG_NOTIFICATION;
michael@0 4789 }
michael@0 4790 inp->recv_callback(so, addr, buffer, control->length, rcv, flags, inp->ulp_info);
michael@0 4791 SCTP_TCB_LOCK(stcb);
michael@0 4792 atomic_subtract_int(&stcb->asoc.refcnt, 1);
michael@0 4793 sctp_free_remote_addr(control->whoFrom);
michael@0 4794 control->whoFrom = NULL;
michael@0 4795 sctp_m_freem(control->data);
michael@0 4796 control->data = NULL;
michael@0 4797 control->length = 0;
michael@0 4798 sctp_free_a_readq(stcb, control);
michael@0 4799 }
michael@0 4800 return;
michael@0 4801 }
michael@0 4802 #endif
michael@0 4803 TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
michael@0 4804 if (inp_read_lock_held == 0)
michael@0 4805 SCTP_INP_READ_UNLOCK(inp);
michael@0 4806 if (inp && inp->sctp_socket) {
michael@0 4807 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
michael@0 4808 SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
michael@0 4809 } else {
michael@0 4810 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 4811 struct socket *so;
michael@0 4812
michael@0 4813 so = SCTP_INP_SO(inp);
michael@0 4814 if (!so_locked) {
michael@0 4815 if (stcb) {
michael@0 4816 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 4817 SCTP_TCB_UNLOCK(stcb);
michael@0 4818 }
michael@0 4819 SCTP_SOCKET_LOCK(so, 1);
michael@0 4820 if (stcb) {
michael@0 4821 SCTP_TCB_LOCK(stcb);
michael@0 4822 atomic_subtract_int(&stcb->asoc.refcnt, 1);
michael@0 4823 }
michael@0 4824 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
michael@0 4825 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 4826 return;
michael@0 4827 }
michael@0 4828 }
michael@0 4829 #endif
michael@0 4830 sctp_sorwakeup(inp, inp->sctp_socket);
michael@0 4831 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 4832 if (!so_locked) {
michael@0 4833 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 4834 }
michael@0 4835 #endif
michael@0 4836 }
michael@0 4837 }
michael@0 4838 }
michael@0 4839
michael@0 4840
michael@0 4841 int
michael@0 4842 sctp_append_to_readq(struct sctp_inpcb *inp,
michael@0 4843 struct sctp_tcb *stcb,
michael@0 4844 struct sctp_queued_to_read *control,
michael@0 4845 struct mbuf *m,
michael@0 4846 int end,
michael@0 4847 int ctls_cumack,
michael@0 4848 struct sockbuf *sb)
michael@0 4849 {
michael@0 4850 /*
michael@0 4851 * A partial delivery API event is underway. OR we are appending on
michael@0 4852 * the reassembly queue.
michael@0 4853 *
michael@0 4854 * If PDAPI this means we need to add m to the end of the data.
michael@0 4855 * Increase the length in the control AND increment the sb_cc.
michael@0 4856 * Otherwise sb is NULL and all we need to do is put it at the end
michael@0 4857 * of the mbuf chain.
michael@0 4858 */
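/*
 * Returns 0 on success and -1 if there is nothing to append to (no control,
 * nothing left after stripping empty mbufs, or the control was already
 * complete). As in sctp_add_to_readq(), zero-length mbufs are dropped and
 * the remaining ones are charged to sb when one is supplied.
 */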
michael@0 4859 int len = 0;
michael@0 4860 struct mbuf *mm, *tail = NULL, *prev = NULL;
michael@0 4861
michael@0 4862 if (inp) {
michael@0 4863 SCTP_INP_READ_LOCK(inp);
michael@0 4864 }
michael@0 4865 if (control == NULL) {
michael@0 4866 get_out:
michael@0 4867 if (inp) {
michael@0 4868 SCTP_INP_READ_UNLOCK(inp);
michael@0 4869 }
michael@0 4870 return (-1);
michael@0 4871 }
michael@0 4872 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
michael@0 4873 SCTP_INP_READ_UNLOCK(inp);
michael@0 4874 return (0);
michael@0 4875 }
michael@0 4876 if (control->end_added) {
michael@0 4877 /* huh this one is complete? */
michael@0 4878 goto get_out;
michael@0 4879 }
michael@0 4880 mm = m;
michael@0 4881 if (mm == NULL) {
michael@0 4882 goto get_out;
michael@0 4883 }
michael@0 4884
michael@0 4885 while (mm) {
michael@0 4886 if (SCTP_BUF_LEN(mm) == 0) {
michael@0 4887 /* Skip mbufs with NO length */
michael@0 4888 if (prev == NULL) {
michael@0 4889 /* First one */
michael@0 4890 m = sctp_m_free(mm);
michael@0 4891 mm = m;
michael@0 4892 } else {
michael@0 4893 SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
michael@0 4894 mm = SCTP_BUF_NEXT(prev);
michael@0 4895 }
michael@0 4896 continue;
michael@0 4897 }
michael@0 4898 prev = mm;
michael@0 4899 len += SCTP_BUF_LEN(mm);
michael@0 4900 if (sb) {
michael@0 4901 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
michael@0 4902 sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
michael@0 4903 }
michael@0 4904 sctp_sballoc(stcb, sb, mm);
michael@0 4905 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
michael@0 4906 sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
michael@0 4907 }
michael@0 4908 }
michael@0 4909 mm = SCTP_BUF_NEXT(mm);
michael@0 4910 }
michael@0 4911 if (prev) {
michael@0 4912 tail = prev;
michael@0 4913 } else {
michael@0 4914 /* Really there should always be a prev */
michael@0 4915 if (m == NULL) {
michael@0 4916 /* Huh nothing left? */
michael@0 4917 #ifdef INVARIANTS
michael@0 4918 panic("Nothing left to add?");
michael@0 4919 #else
michael@0 4920 goto get_out;
michael@0 4921 #endif
michael@0 4922 }
michael@0 4923 tail = m;
michael@0 4924 }
michael@0 4925 if (control->tail_mbuf) {
michael@0 4926 /* append */
michael@0 4927 SCTP_BUF_NEXT(control->tail_mbuf) = m;
michael@0 4928 control->tail_mbuf = tail;
michael@0 4929 } else {
michael@0 4930 /* nothing there */
michael@0 4931 #ifdef INVARIANTS
michael@0 4932 if (control->data != NULL) {
michael@0 4933 panic("This should NOT happen");
michael@0 4934 }
michael@0 4935 #endif
michael@0 4936 control->data = m;
michael@0 4937 control->tail_mbuf = tail;
michael@0 4938 }
michael@0 4939 atomic_add_int(&control->length, len);
michael@0 4940 if (end) {
michael@0 4941 /* message is complete */
michael@0 4942 if (stcb && (control == stcb->asoc.control_pdapi)) {
michael@0 4943 stcb->asoc.control_pdapi = NULL;
michael@0 4944 }
michael@0 4945 control->held_length = 0;
michael@0 4946 control->end_added = 1;
michael@0 4947 }
michael@0 4948 if (stcb == NULL) {
michael@0 4949 control->do_not_ref_stcb = 1;
michael@0 4950 }
michael@0 4951 /*
michael@0 4952 * When we are appending in partial delivery, the cum-ack is used
michael@0 4953 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
michael@0 4954 * is populated in the outbound sinfo structure from the true cumack
michael@0 4955 * if the association exists...
michael@0 4956 */
michael@0 4957 control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
michael@0 4958 #if defined(__Userspace__)
michael@0 4959 if (inp->recv_callback) {
michael@0 4960 uint32_t pd_point, length;
michael@0 4961
michael@0 4962 length = control->length;
michael@0 4963 if (stcb != NULL && stcb->sctp_socket != NULL) {
michael@0 4964 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
michael@0 4965 stcb->sctp_ep->partial_delivery_point);
michael@0 4966 } else {
michael@0 4967 pd_point = inp->partial_delivery_point;
michael@0 4968 }
michael@0 4969 if ((control->end_added == 1) || (length >= pd_point)) {
michael@0 4970 struct socket *so;
michael@0 4971 char *buffer;
michael@0 4972 struct sctp_rcvinfo rcv;
michael@0 4973 union sctp_sockstore addr;
michael@0 4974 int flags;
michael@0 4975
michael@0 4976 if ((buffer = malloc(control->length)) == NULL) {
michael@0 4977 return (-1);
michael@0 4978 }
michael@0 4979 so = stcb->sctp_socket;
michael@0 4980 for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
michael@0 4981 sctp_sbfree(control, control->stcb, &so->so_rcv, m);
michael@0 4982 }
michael@0 4983 m_copydata(control->data, 0, control->length, buffer);
michael@0 4984 memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
michael@0 4985 rcv.rcv_sid = control->sinfo_stream;
michael@0 4986 rcv.rcv_ssn = control->sinfo_ssn;
michael@0 4987 rcv.rcv_flags = control->sinfo_flags;
michael@0 4988 rcv.rcv_ppid = control->sinfo_ppid;
michael@0 4989 rcv.rcv_tsn = control->sinfo_tsn;
michael@0 4990 rcv.rcv_cumtsn = control->sinfo_cumtsn;
michael@0 4991 rcv.rcv_context = control->sinfo_context;
michael@0 4992 rcv.rcv_assoc_id = control->sinfo_assoc_id;
michael@0 4993 memset(&addr, 0, sizeof(union sctp_sockstore));
michael@0 4994 switch (control->whoFrom->ro._l_addr.sa.sa_family) {
michael@0 4995 #ifdef INET
michael@0 4996 case AF_INET:
michael@0 4997 addr.sin = control->whoFrom->ro._l_addr.sin;
michael@0 4998 break;
michael@0 4999 #endif
michael@0 5000 #ifdef INET6
michael@0 5001 case AF_INET6:
michael@0 5002 addr.sin6 = control->whoFrom->ro._l_addr.sin6;
michael@0 5003 break;
michael@0 5004 #endif
michael@0 5005 case AF_CONN:
michael@0 5006 addr.sconn = control->whoFrom->ro._l_addr.sconn;
michael@0 5007 break;
michael@0 5008 default:
michael@0 5009 addr.sa = control->whoFrom->ro._l_addr.sa;
michael@0 5010 break;
michael@0 5011 }
michael@0 5012 flags = 0;
michael@0 5013 if (control->end_added == 1) {
michael@0 5014 flags |= MSG_EOR;
michael@0 5015 }
michael@0 5016 if (control->spec_flags & M_NOTIFICATION) {
michael@0 5017 flags |= MSG_NOTIFICATION;
michael@0 5018 }
michael@0 5019 sctp_m_freem(control->data);
michael@0 5020 control->data = NULL;
michael@0 5021 control->tail_mbuf = NULL;
michael@0 5022 control->length = 0;
michael@0 5023 if (control->end_added) {
michael@0 5024 sctp_free_remote_addr(control->whoFrom);
michael@0 5025 control->whoFrom = NULL;
michael@0 5026 sctp_free_a_readq(stcb, control);
michael@0 5027 } else {
michael@0 5028 control->some_taken = 1;
michael@0 5029 }
michael@0 5030 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 5031 SCTP_TCB_UNLOCK(stcb);
michael@0 5032 inp->recv_callback(so, addr, buffer, length, rcv, flags, inp->ulp_info);
michael@0 5033 SCTP_TCB_LOCK(stcb);
michael@0 5034 atomic_subtract_int(&stcb->asoc.refcnt, 1);
michael@0 5035 }
michael@0 5036 if (inp)
michael@0 5037 SCTP_INP_READ_UNLOCK(inp);
michael@0 5038 return (0);
michael@0 5039 }
michael@0 5040 #endif
michael@0 5041 if (inp) {
michael@0 5042 SCTP_INP_READ_UNLOCK(inp);
michael@0 5043 }
michael@0 5044 if (inp && inp->sctp_socket) {
michael@0 5045 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
michael@0 5046 SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
michael@0 5047 } else {
michael@0 5048 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 5049 struct socket *so;
michael@0 5050
michael@0 5051 so = SCTP_INP_SO(inp);
michael@0 5052 if (stcb) {
michael@0 5053 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 5054 SCTP_TCB_UNLOCK(stcb);
michael@0 5055 }
michael@0 5056 SCTP_SOCKET_LOCK(so, 1);
michael@0 5057 if (stcb) {
michael@0 5058 SCTP_TCB_LOCK(stcb);
michael@0 5059 atomic_subtract_int(&stcb->asoc.refcnt, 1);
michael@0 5060 }
michael@0 5061 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
michael@0 5062 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 5063 return (0);
michael@0 5064 }
michael@0 5065 #endif
michael@0 5066 sctp_sorwakeup(inp, inp->sctp_socket);
michael@0 5067 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 5068 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 5069 #endif
michael@0 5070 }
michael@0 5071 }
michael@0 5072 return (0);
michael@0 5073 }
michael@0 5074
michael@0 5075
michael@0 5076
michael@0 5077 /*************HOLD THIS COMMENT FOR PATCH FILE OF
michael@0 5078 *************ALTERNATE ROUTING CODE
michael@0 5079 */
michael@0 5080
michael@0 5081 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
michael@0 5082 *************ALTERNATE ROUTING CODE
michael@0 5083 */
michael@0 5084
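/*
 * Build an mbuf holding a single parameter header whose type is the supplied
 * error cause (e.g. an invalid mandatory parameter indication) and whose
 * length covers just the header, intended for use as an op_err cause.
 * Returns NULL if no mbuf could be allocated.
 */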
michael@0 5085 struct mbuf *
michael@0 5086 sctp_generate_invmanparam(int err)
michael@0 5087 {
michael@0 5088 /* Return a MBUF with a invalid mandatory parameter */
michael@0 5089 struct mbuf *m;
michael@0 5090
michael@0 5091 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA);
michael@0 5092 if (m) {
michael@0 5093 struct sctp_paramhdr *ph;
michael@0 5094
michael@0 5095 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
michael@0 5096 ph = mtod(m, struct sctp_paramhdr *);
michael@0 5097 ph->param_length = htons(sizeof(struct sctp_paramhdr));
michael@0 5098 ph->param_type = htons(err);
michael@0 5099 }
michael@0 5100 return (m);
michael@0 5101 }
michael@0 5102
michael@0 5103 #ifdef SCTP_MBCNT_LOGGING
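/*
 * Release the space accounting held by a chunk that is being freed: decrement
 * chunks_on_out_queue, log the change when MBCNT logging is enabled, and
 * clamp total_output_queue_size and the send socket buffer count at zero
 * rather than letting them wrap.
 */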
michael@0 5104 void
michael@0 5105 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
michael@0 5106 struct sctp_tmit_chunk *tp1, int chk_cnt)
michael@0 5107 {
michael@0 5108 if (tp1->data == NULL) {
michael@0 5109 return;
michael@0 5110 }
michael@0 5111 asoc->chunks_on_out_queue -= chk_cnt;
michael@0 5112 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
michael@0 5113 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
michael@0 5114 asoc->total_output_queue_size,
michael@0 5115 tp1->book_size,
michael@0 5116 0,
michael@0 5117 tp1->mbcnt);
michael@0 5118 }
michael@0 5119 if (asoc->total_output_queue_size >= tp1->book_size) {
michael@0 5120 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
michael@0 5121 } else {
michael@0 5122 asoc->total_output_queue_size = 0;
michael@0 5123 }
michael@0 5124
michael@0 5125 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
michael@0 5126 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
michael@0 5127 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
michael@0 5128 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
michael@0 5129 } else {
michael@0 5130 stcb->sctp_socket->so_snd.sb_cc = 0;
michael@0 5131
michael@0 5132 }
michael@0 5133 }
michael@0 5134 }
michael@0 5135
michael@0 5136 #endif
michael@0 5137
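/*
 * Give up on a (partially) transmitted PR-SCTP message. Starting from tp1,
 * every fragment of the same stream/sequence is stripped of its data,
 * removed from flight accounting, reported to the ULP as a sent or unsent
 * failure, and marked SCTP_FORWARD_TSN_SKIP so a FORWARD-TSN can cover it.
 * If the tail of the message is still sitting on the stream-out queue, the
 * pending data is discarded and, when necessary, a placeholder chunk carrying
 * the LAST_FRAG bit is queued so the peer sees a consistent TSN. Returns the
 * number of bytes released (book size).
 */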
michael@0 5138 int
michael@0 5139 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
michael@0 5140 uint8_t sent, int so_locked
michael@0 5141 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
michael@0 5142 SCTP_UNUSED
michael@0 5143 #endif
michael@0 5144 )
michael@0 5145 {
michael@0 5146 struct sctp_stream_out *strq;
michael@0 5147 struct sctp_tmit_chunk *chk = NULL, *tp2;
michael@0 5148 struct sctp_stream_queue_pending *sp;
michael@0 5149 uint16_t stream = 0, seq = 0;
michael@0 5150 uint8_t foundeom = 0;
michael@0 5151 int ret_sz = 0;
michael@0 5152 int notdone;
michael@0 5153 int do_wakeup_routine = 0;
michael@0 5154 #if defined(__APPLE__)
michael@0 5155 if (so_locked) {
michael@0 5156 sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
michael@0 5157 } else {
michael@0 5158 sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
michael@0 5159 }
michael@0 5160 #endif
michael@0 5161 stream = tp1->rec.data.stream_number;
michael@0 5162 seq = tp1->rec.data.stream_seq;
michael@0 5163 do {
michael@0 5164 ret_sz += tp1->book_size;
michael@0 5165 if (tp1->data != NULL) {
michael@0 5166 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
michael@0 5167 sctp_flight_size_decrease(tp1);
michael@0 5168 sctp_total_flight_decrease(stcb, tp1);
michael@0 5169 }
michael@0 5170 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
michael@0 5171 stcb->asoc.peers_rwnd += tp1->send_size;
michael@0 5172 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
michael@0 5173 if (sent) {
michael@0 5174 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
michael@0 5175 } else {
michael@0 5176 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
michael@0 5177 }
michael@0 5178 if (tp1->data) {
michael@0 5179 sctp_m_freem(tp1->data);
michael@0 5180 tp1->data = NULL;
michael@0 5181 }
michael@0 5182 do_wakeup_routine = 1;
michael@0 5183 if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
michael@0 5184 stcb->asoc.sent_queue_cnt_removeable--;
michael@0 5185 }
michael@0 5186 }
michael@0 5187 tp1->sent = SCTP_FORWARD_TSN_SKIP;
michael@0 5188 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
michael@0 5189 SCTP_DATA_NOT_FRAG) {
michael@0 5190 /* not frag'ed, we are done */
michael@0 5191 notdone = 0;
michael@0 5192 foundeom = 1;
michael@0 5193 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
michael@0 5194 /* end of frag, we are done */
michael@0 5195 notdone = 0;
michael@0 5196 foundeom = 1;
michael@0 5197 } else {
michael@0 5198 /*
michael@0 5199 * It's a begin or middle piece; we must mark all of
michael@0 5200 * it
michael@0 5201 */
michael@0 5202 notdone = 1;
michael@0 5203 tp1 = TAILQ_NEXT(tp1, sctp_next);
michael@0 5204 }
michael@0 5205 } while (tp1 && notdone);
michael@0 5206 if (foundeom == 0) {
michael@0 5207 /*
michael@0 5208 * The multi-part message was scattered across the send and
michael@0 5209 * sent queue.
michael@0 5210 */
michael@0 5211 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
michael@0 5212 if ((tp1->rec.data.stream_number != stream) ||
michael@0 5213 (tp1->rec.data.stream_seq != seq)) {
michael@0 5214 break;
michael@0 5215 }
michael@0 5216 /* Save it to chk in case we have some on the stream out
michael@0 5217 * queue. If so, and we have an un-transmitted one,
michael@0 5218 * we don't have to fudge the TSN.
michael@0 5219 */
michael@0 5220 chk = tp1;
michael@0 5221 ret_sz += tp1->book_size;
michael@0 5222 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
michael@0 5223 if (sent) {
michael@0 5224 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
michael@0 5225 } else {
michael@0 5226 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
michael@0 5227 }
michael@0 5228 if (tp1->data) {
michael@0 5229 sctp_m_freem(tp1->data);
michael@0 5230 tp1->data = NULL;
michael@0 5231 }
michael@0 5232 /* No flight involved here; book the size to 0 */
michael@0 5233 tp1->book_size = 0;
michael@0 5234 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
michael@0 5235 foundeom = 1;
michael@0 5236 }
michael@0 5237 do_wakeup_routine = 1;
michael@0 5238 tp1->sent = SCTP_FORWARD_TSN_SKIP;
michael@0 5239 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
michael@0 5240 /* on to the sent queue so we can wait for it to be passed by. */
michael@0 5241 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
michael@0 5242 sctp_next);
michael@0 5243 stcb->asoc.send_queue_cnt--;
michael@0 5244 stcb->asoc.sent_queue_cnt++;
michael@0 5245 }
michael@0 5246 }
michael@0 5247 if (foundeom == 0) {
michael@0 5248 /*
michael@0 5249 * Still no eom found. That means there
michael@0 5250 * is stuff left on the stream out queue.. yuck.
michael@0 5251 */
michael@0 5252 SCTP_TCB_SEND_LOCK(stcb);
michael@0 5253 strq = &stcb->asoc.strmout[stream];
michael@0 5254 sp = TAILQ_FIRST(&strq->outqueue);
michael@0 5255 if (sp != NULL) {
michael@0 5256 sp->discard_rest = 1;
michael@0 5257 /*
michael@0 5258 * We may need to put a chunk on the
michael@0 5259 * queue that holds the TSN that
michael@0 5260 * would have been sent with the LAST
michael@0 5261 * bit.
michael@0 5262 */
michael@0 5263 if (chk == NULL) {
michael@0 5264 /* Yep, we have to */
michael@0 5265 sctp_alloc_a_chunk(stcb, chk);
michael@0 5266 if (chk == NULL) {
michael@0 5267 /* we are hosed. All we can
michael@0 5268 * do is nothing.. which will
michael@0 5269 * cause an abort if the peer is
michael@0 5270 * paying attention.
michael@0 5271 */
michael@0 5272 goto oh_well;
michael@0 5273 }
michael@0 5274 memset(chk, 0, sizeof(*chk));
michael@0 5275 chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
michael@0 5276 chk->sent = SCTP_FORWARD_TSN_SKIP;
michael@0 5277 chk->asoc = &stcb->asoc;
michael@0 5278 chk->rec.data.stream_seq = strq->next_sequence_send;
michael@0 5279 chk->rec.data.stream_number = sp->stream;
michael@0 5280 chk->rec.data.payloadtype = sp->ppid;
michael@0 5281 chk->rec.data.context = sp->context;
michael@0 5282 chk->flags = sp->act_flags;
michael@0 5283 if (sp->net)
michael@0 5284 chk->whoTo = sp->net;
michael@0 5285 else
michael@0 5286 chk->whoTo = stcb->asoc.primary_destination;
michael@0 5287 atomic_add_int(&chk->whoTo->ref_count, 1);
michael@0 5288 #if defined(__FreeBSD__) || defined(__Panda__)
michael@0 5289 chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
michael@0 5290 #else
michael@0 5291 chk->rec.data.TSN_seq = stcb->asoc.sending_seq++;
michael@0 5292 #endif
michael@0 5293 stcb->asoc.pr_sctp_cnt++;
michael@0 5294 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
michael@0 5295 stcb->asoc.sent_queue_cnt++;
michael@0 5296 stcb->asoc.pr_sctp_cnt++;
michael@0 5297 } else {
michael@0 5298 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
michael@0 5299 }
michael@0 5300 strq->next_sequence_send++;
michael@0 5301 oh_well:
michael@0 5302 if (sp->data) {
michael@0 5303 /* Pull any data to free up the SB and
michael@0 5304 * allow the sender to "add more" while we
michael@0 5305 * throw this away :-)
michael@0 5306 */
michael@0 5307 sctp_free_spbufspace(stcb, &stcb->asoc, sp);
michael@0 5308 ret_sz += sp->length;
michael@0 5309 do_wakeup_routine = 1;
michael@0 5310 sp->some_taken = 1;
michael@0 5311 sctp_m_freem(sp->data);
michael@0 5312 sp->data = NULL;
michael@0 5313 sp->tail_mbuf = NULL;
michael@0 5314 sp->length = 0;
michael@0 5315 }
michael@0 5316 }
michael@0 5317 SCTP_TCB_SEND_UNLOCK(stcb);
michael@0 5318 }
michael@0 5319 if (do_wakeup_routine) {
michael@0 5320 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 5321 struct socket *so;
michael@0 5322
michael@0 5323 so = SCTP_INP_SO(stcb->sctp_ep);
michael@0 5324 if (!so_locked) {
michael@0 5325 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 5326 SCTP_TCB_UNLOCK(stcb);
michael@0 5327 SCTP_SOCKET_LOCK(so, 1);
michael@0 5328 SCTP_TCB_LOCK(stcb);
michael@0 5329 atomic_subtract_int(&stcb->asoc.refcnt, 1);
michael@0 5330 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
michael@0 5331 /* assoc was freed while we were unlocked */
michael@0 5332 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 5333 return (ret_sz);
michael@0 5334 }
michael@0 5335 }
michael@0 5336 #endif
michael@0 5337 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
michael@0 5338 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
michael@0 5339 if (!so_locked) {
michael@0 5340 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 5341 }
michael@0 5342 #endif
michael@0 5343 }
michael@0 5344 return (ret_sz);
michael@0 5345 }
michael@0 5346
michael@0 5347 /*
michael@0 5348 * checks to see if the given address, sa, is one that is currently known by
michael@0 5349 * the kernel note: can't distinguish the same address on multiple interfaces
michael@0 5350 * and doesn't handle multiple addresses with different zone/scope id's note:
michael@0 5351 * ifa_ifwithaddr() compares the entire sockaddr struct
michael@0 5352 */
michael@0 5353 struct sctp_ifa *
michael@0 5354 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
michael@0 5355 int holds_lock)
michael@0 5356 {
michael@0 5357 struct sctp_laddr *laddr;
michael@0 5358
michael@0 5359 if (holds_lock == 0) {
michael@0 5360 SCTP_INP_RLOCK(inp);
michael@0 5361 }
michael@0 5362
michael@0 5363 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
michael@0 5364 if (laddr->ifa == NULL)
michael@0 5365 continue;
michael@0 5366 if (addr->sa_family != laddr->ifa->address.sa.sa_family)
michael@0 5367 continue;
michael@0 5368 #ifdef INET
michael@0 5369 if (addr->sa_family == AF_INET) {
michael@0 5370 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
michael@0 5371 laddr->ifa->address.sin.sin_addr.s_addr) {
michael@0 5372 /* found him. */
michael@0 5373 if (holds_lock == 0) {
michael@0 5374 SCTP_INP_RUNLOCK(inp);
michael@0 5375 }
michael@0 5376 return (laddr->ifa);
michael@0 5377 break;
michael@0 5378 }
michael@0 5379 }
michael@0 5380 #endif
michael@0 5381 #ifdef INET6
michael@0 5382 if (addr->sa_family == AF_INET6) {
michael@0 5383 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
michael@0 5384 &laddr->ifa->address.sin6)) {
michael@0 5385 /* found him. */
michael@0 5386 if (holds_lock == 0) {
michael@0 5387 SCTP_INP_RUNLOCK(inp);
michael@0 5388 }
michael@0 5389 return (laddr->ifa);
michael@0 5390 break;
michael@0 5391 }
michael@0 5392 }
michael@0 5393 #endif
michael@0 5394 #if defined(__Userspace__)
michael@0 5395 if (addr->sa_family == AF_CONN) {
michael@0 5396 if (((struct sockaddr_conn *)addr)->sconn_addr == laddr->ifa->address.sconn.sconn_addr) {
michael@0 5397 /* found him. */
michael@0 5398 if (holds_lock == 0) {
michael@0 5399 SCTP_INP_RUNLOCK(inp);
michael@0 5400 }
michael@0 5401 return (laddr->ifa);
michael@0 5402 break;
michael@0 5403 }
michael@0 5404 }
michael@0 5405 #endif
michael@0 5406 }
michael@0 5407 if (holds_lock == 0) {
michael@0 5408 SCTP_INP_RUNLOCK(inp);
michael@0 5409 }
michael@0 5410 return (NULL);
michael@0 5411 }
michael@0 5412
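/*
 * Hash a sockaddr down to 32 bits for the VRF address hash table. For IPv4
 * the value is s_addr ^ (s_addr >> 16); for IPv6 the four 32-bit words of the
 * address are summed and then folded the same way; for AF_CONN the pointer
 * value is folded. Unknown families hash to 0.
 */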
michael@0 5413 uint32_t
michael@0 5414 sctp_get_ifa_hash_val(struct sockaddr *addr)
michael@0 5415 {
michael@0 5416 switch (addr->sa_family) {
michael@0 5417 #ifdef INET
michael@0 5418 case AF_INET:
michael@0 5419 {
michael@0 5420 struct sockaddr_in *sin;
michael@0 5421
michael@0 5422 sin = (struct sockaddr_in *)addr;
michael@0 5423 return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
michael@0 5424 }
michael@0 5425 #endif
michael@0 5426 #ifdef INET6
michael@0 5427 case AF_INET6:
michael@0 5428 {
michael@0 5429 struct sockaddr_in6 *sin6;
michael@0 5430 uint32_t hash_of_addr;
michael@0 5431
michael@0 5432 sin6 = (struct sockaddr_in6 *)addr;
michael@0 5433 #if !defined(__Windows__) && !defined(__Userspace_os_FreeBSD) && !defined(__Userspace_os_Darwin) && !defined(__Userspace_os_Windows)
michael@0 5434 hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
michael@0 5435 sin6->sin6_addr.s6_addr32[1] +
michael@0 5436 sin6->sin6_addr.s6_addr32[2] +
michael@0 5437 sin6->sin6_addr.s6_addr32[3]);
michael@0 5438 #else
michael@0 5439 hash_of_addr = (((uint32_t *)&sin6->sin6_addr)[0] +
michael@0 5440 ((uint32_t *)&sin6->sin6_addr)[1] +
michael@0 5441 ((uint32_t *)&sin6->sin6_addr)[2] +
michael@0 5442 ((uint32_t *)&sin6->sin6_addr)[3]);
michael@0 5443 #endif
michael@0 5444 hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
michael@0 5445 return (hash_of_addr);
michael@0 5446 }
michael@0 5447 #endif
michael@0 5448 #if defined(__Userspace__)
michael@0 5449 case AF_CONN:
michael@0 5450 {
michael@0 5451 struct sockaddr_conn *sconn;
michael@0 5452 uintptr_t temp;
michael@0 5453
michael@0 5454 sconn = (struct sockaddr_conn *)addr;
michael@0 5455 temp = (uintptr_t)sconn->sconn_addr;
michael@0 5456 return ((uint32_t)(temp ^ (temp >> 16)));
michael@0 5457 }
michael@0 5458 #endif
michael@0 5459 default:
michael@0 5460 break;
michael@0 5461 }
michael@0 5462 return (0);
michael@0 5463 }
michael@0 5464
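/*
 * Look an address up in the given VRF's address hash table. The bucket is
 * chosen with sctp_get_ifa_hash_val(), and an entry must match both the
 * address family and the address itself. Returns the sctp_ifa or NULL,
 * taking the address-list read lock unless the caller already holds it.
 */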
michael@0 5465 struct sctp_ifa *
michael@0 5466 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
michael@0 5467 {
michael@0 5468 struct sctp_ifa *sctp_ifap;
michael@0 5469 struct sctp_vrf *vrf;
michael@0 5470 struct sctp_ifalist *hash_head;
michael@0 5471 uint32_t hash_of_addr;
michael@0 5472
michael@0 5473 if (holds_lock == 0)
michael@0 5474 SCTP_IPI_ADDR_RLOCK();
michael@0 5475
michael@0 5476 vrf = sctp_find_vrf(vrf_id);
michael@0 5477 if (vrf == NULL) {
michael@0 5478 stage_right:
michael@0 5479 if (holds_lock == 0)
michael@0 5480 SCTP_IPI_ADDR_RUNLOCK();
michael@0 5481 return (NULL);
michael@0 5482 }
michael@0 5483
michael@0 5484 hash_of_addr = sctp_get_ifa_hash_val(addr);
michael@0 5485
michael@0 5486 hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
michael@0 5487 if (hash_head == NULL) {
michael@0 5488 SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
michael@0 5489 hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
michael@0 5490 (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
michael@0 5491 sctp_print_address(addr);
michael@0 5492 SCTP_PRINTF("No such bucket for address\n");
michael@0 5493 if (holds_lock == 0)
michael@0 5494 SCTP_IPI_ADDR_RUNLOCK();
michael@0 5495
michael@0 5496 return (NULL);
michael@0 5497 }
michael@0 5498 LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
michael@0 5499 if (sctp_ifap == NULL) {
michael@0 5500 #ifdef INVARIANTS
michael@0 5501 panic("Huh LIST_FOREACH corrupt");
michael@0 5502 goto stage_right;
michael@0 5503 #else
michael@0 5504 SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
michael@0 5505 goto stage_right;
michael@0 5506 #endif
michael@0 5507 }
michael@0 5508 if (addr->sa_family != sctp_ifap->address.sa.sa_family)
michael@0 5509 continue;
michael@0 5510 #ifdef INET
michael@0 5511 if (addr->sa_family == AF_INET) {
michael@0 5512 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
michael@0 5513 sctp_ifap->address.sin.sin_addr.s_addr) {
michael@0 5514 /* found him. */
michael@0 5515 if (holds_lock == 0)
michael@0 5516 SCTP_IPI_ADDR_RUNLOCK();
michael@0 5517 return (sctp_ifap);
michael@0 5518 break;
michael@0 5519 }
michael@0 5520 }
michael@0 5521 #endif
michael@0 5522 #ifdef INET6
michael@0 5523 if (addr->sa_family == AF_INET6) {
michael@0 5524 if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
michael@0 5525 &sctp_ifap->address.sin6)) {
michael@0 5526 /* found him. */
michael@0 5527 if (holds_lock == 0)
michael@0 5528 SCTP_IPI_ADDR_RUNLOCK();
michael@0 5529 return (sctp_ifap);
michael@0 5530 break;
michael@0 5531 }
michael@0 5532 }
michael@0 5533 #endif
michael@0 5534 #if defined(__Userspace__)
michael@0 5535 if (addr->sa_family == AF_CONN) {
michael@0 5536 if (((struct sockaddr_conn *)addr)->sconn_addr == sctp_ifap->address.sconn.sconn_addr) {
michael@0 5537 /* found him. */
michael@0 5538 if (holds_lock == 0)
michael@0 5539 SCTP_IPI_ADDR_RUNLOCK();
michael@0 5540 return (sctp_ifap);
michael@0 5541 break;
michael@0 5542 }
michael@0 5543 }
michael@0 5544 #endif
michael@0 5545 }
michael@0 5546 if (holds_lock == 0)
michael@0 5547 SCTP_IPI_ADDR_RUNLOCK();
michael@0 5548 return (NULL);
michael@0 5549 }
michael@0 5550
michael@0 5551 static void
michael@0 5552 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
michael@0 5553 uint32_t rwnd_req)
michael@0 5554 {
michael@0 5555 /* User pulled some data, do we need a rwnd update? */
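/*
 * If the window has grown by at least rwnd_req since the last value we
 * reported to the peer, send a window-update SACK right away and stop the
 * pending recv timer; otherwise just remember how much has been freed so
 * far in freed_by_sorcv_sincelast.
 */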
michael@0 5556 int r_unlocked = 0;
michael@0 5557 uint32_t dif, rwnd;
michael@0 5558 struct socket *so = NULL;
michael@0 5559
michael@0 5560 if (stcb == NULL)
michael@0 5561 return;
michael@0 5562
michael@0 5563 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 5564
michael@0 5565 if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
michael@0 5566 SCTP_STATE_SHUTDOWN_RECEIVED |
michael@0 5567 SCTP_STATE_SHUTDOWN_ACK_SENT)) {
michael@0 5568 /* Pre-check: if we are freeing, no update */
michael@0 5569 goto no_lock;
michael@0 5570 }
michael@0 5571 SCTP_INP_INCR_REF(stcb->sctp_ep);
michael@0 5572 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
michael@0 5573 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
michael@0 5574 goto out;
michael@0 5575 }
michael@0 5576 so = stcb->sctp_socket;
michael@0 5577 if (so == NULL) {
michael@0 5578 goto out;
michael@0 5579 }
michael@0 5580 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
michael@0 5581 	/* Have you freed enough to make it worth a look? */
michael@0 5582 	*freed_so_far = 0;
michael@0 5583 	/* Yep, it's worth a look and the lock overhead */
michael@0 5584
michael@0 5585 /* Figure out what the rwnd would be */
michael@0 5586 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
michael@0 5587 if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
michael@0 5588 dif = rwnd - stcb->asoc.my_last_reported_rwnd;
michael@0 5589 } else {
michael@0 5590 dif = 0;
michael@0 5591 }
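	/*
	 * dif is how far the window has re-opened since the value we last
	 * reported to the peer. Only when it reaches rwnd_req (a fraction of
	 * the receive buffer, at least an MTU's worth, computed in
	 * sctp_sorecvmsg below) is it worth taking the TCB lock and pushing
	 * out a window-update SACK.
	 */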
michael@0 5592 if (dif >= rwnd_req) {
michael@0 5593 if (hold_rlock) {
michael@0 5594 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
michael@0 5595 r_unlocked = 1;
michael@0 5596 }
michael@0 5597 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
michael@0 5598 			/*
michael@0 5599 			 * One last check before we possibly allow the guy
michael@0 5600 			 * in. There is a race where the guy has not yet
michael@0 5601 			 * reached the gate; in that case just bail out.
michael@0 5602 			 */
michael@0 5603 goto out;
michael@0 5604 }
michael@0 5605 SCTP_TCB_LOCK(stcb);
michael@0 5606 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
michael@0 5607 /* No reports here */
michael@0 5608 SCTP_TCB_UNLOCK(stcb);
michael@0 5609 goto out;
michael@0 5610 }
michael@0 5611 SCTP_STAT_INCR(sctps_wu_sacks_sent);
michael@0 5612 sctp_send_sack(stcb, SCTP_SO_LOCKED);
michael@0 5613
michael@0 5614 sctp_chunk_output(stcb->sctp_ep, stcb,
michael@0 5615 SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
michael@0 5616 /* make sure no timer is running */
michael@0 5617 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL+SCTP_LOC_6);
michael@0 5618 SCTP_TCB_UNLOCK(stcb);
michael@0 5619 } else {
michael@0 5620 /* Update how much we have pending */
michael@0 5621 stcb->freed_by_sorcv_sincelast = dif;
michael@0 5622 }
michael@0 5623 out:
michael@0 5624 if (so && r_unlocked && hold_rlock) {
michael@0 5625 SCTP_INP_READ_LOCK(stcb->sctp_ep);
michael@0 5626 }
michael@0 5627
michael@0 5628 SCTP_INP_DECR_REF(stcb->sctp_ep);
michael@0 5629 no_lock:
michael@0 5630 atomic_add_int(&stcb->asoc.refcnt, -1);
michael@0 5631 return;
michael@0 5632 }
michael@0 5633
michael@0 5634 int
michael@0 5635 sctp_sorecvmsg(struct socket *so,
michael@0 5636 struct uio *uio,
michael@0 5637 struct mbuf **mp,
michael@0 5638 struct sockaddr *from,
michael@0 5639 int fromlen,
michael@0 5640 int *msg_flags,
michael@0 5641 struct sctp_sndrcvinfo *sinfo,
michael@0 5642 int filling_sinfo)
michael@0 5643 {
michael@0 5644 	/*
michael@0 5645 	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking I/O;
michael@0 5646 	 * MSG_PEEK - look, don't touch :-D (only valid withOUT an mbuf copy,
michael@0 5647 	 * i.e. mp == NULL, so uio is the copy method to userland); MSG_WAITALL - ??
michael@0 5648 	 * On the way out we may set any combination of:
michael@0 5649 	 * MSG_NOTIFICATION, MSG_EOR.
michael@0 5650 	 *
michael@0 5651 	 */
michael@0 5652 struct sctp_inpcb *inp = NULL;
michael@0 5653 int my_len = 0;
michael@0 5654 int cp_len = 0, error = 0;
michael@0 5655 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
michael@0 5656 struct mbuf *m = NULL;
michael@0 5657 struct sctp_tcb *stcb = NULL;
michael@0 5658 int wakeup_read_socket = 0;
michael@0 5659 int freecnt_applied = 0;
michael@0 5660 int out_flags = 0, in_flags = 0;
michael@0 5661 int block_allowed = 1;
michael@0 5662 uint32_t freed_so_far = 0;
michael@0 5663 uint32_t copied_so_far = 0;
michael@0 5664 int in_eeor_mode = 0;
michael@0 5665 int no_rcv_needed = 0;
michael@0 5666 uint32_t rwnd_req = 0;
michael@0 5667 int hold_sblock = 0;
michael@0 5668 int hold_rlock = 0;
michael@0 5669 int slen = 0;
michael@0 5670 uint32_t held_length = 0;
michael@0 5671 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
michael@0 5672 int sockbuf_lock = 0;
michael@0 5673 #endif
michael@0 5674
michael@0 5675 if (uio == NULL) {
michael@0 5676 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 5677 return (EINVAL);
michael@0 5678 }
michael@0 5679
michael@0 5680 if (msg_flags) {
michael@0 5681 in_flags = *msg_flags;
michael@0 5682 if (in_flags & MSG_PEEK)
michael@0 5683 SCTP_STAT_INCR(sctps_read_peeks);
michael@0 5684 } else {
michael@0 5685 in_flags = 0;
michael@0 5686 }
michael@0 5687 #if defined(__APPLE__)
michael@0 5688 #if defined(APPLE_LEOPARD)
michael@0 5689 slen = uio->uio_resid;
michael@0 5690 #else
michael@0 5691 slen = uio_resid(uio);
michael@0 5692 #endif
michael@0 5693 #else
michael@0 5694 slen = uio->uio_resid;
michael@0 5695 #endif
michael@0 5696
michael@0 5697 /* Pull in and set up our int flags */
michael@0 5698 if (in_flags & MSG_OOB) {
michael@0 5699 		/* Out-of-band data is NOT supported */
michael@0 5700 return (EOPNOTSUPP);
michael@0 5701 }
michael@0 5702 if ((in_flags & MSG_PEEK) && (mp != NULL)) {
michael@0 5703 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 5704 return (EINVAL);
michael@0 5705 }
michael@0 5706 if ((in_flags & (MSG_DONTWAIT
michael@0 5707 #if defined(__FreeBSD__) && __FreeBSD_version > 500000
michael@0 5708 | MSG_NBIO
michael@0 5709 #endif
michael@0 5710 )) ||
michael@0 5711 SCTP_SO_IS_NBIO(so)) {
michael@0 5712 block_allowed = 0;
michael@0 5713 }
michael@0 5714 /* setup the endpoint */
michael@0 5715 inp = (struct sctp_inpcb *)so->so_pcb;
michael@0 5716 if (inp == NULL) {
michael@0 5717 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
michael@0 5718 return (EFAULT);
michael@0 5719 }
michael@0 5720 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
michael@0 5721 	/* Must be at least an MTU's worth */
michael@0 5722 if (rwnd_req < SCTP_MIN_RWND)
michael@0 5723 rwnd_req = SCTP_MIN_RWND;
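	/*
	 * Illustration only (the actual value of SCTP_RWND_HIWAT_SHIFT is not
	 * assumed here): with a shift of 3, a 64 KB receive buffer would give
	 * an 8 KB rwnd_req, clamped below to SCTP_MIN_RWND. This threshold is
	 * what sctp_user_rcvd() uses to decide whether a window-update SACK
	 * is worth the locking overhead.
	 */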
michael@0 5724 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
michael@0 5725 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
michael@0 5726 #if defined(__APPLE__)
michael@0 5727 #if defined(APPLE_LEOPARD)
michael@0 5728 sctp_misc_ints(SCTP_SORECV_ENTER,
michael@0 5729 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
michael@0 5730 #else
michael@0 5731 sctp_misc_ints(SCTP_SORECV_ENTER,
michael@0 5732 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio_resid(uio));
michael@0 5733 #endif
michael@0 5734 #else
michael@0 5735 sctp_misc_ints(SCTP_SORECV_ENTER,
michael@0 5736 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
michael@0 5737 #endif
michael@0 5738 }
michael@0 5739 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
michael@0 5740 SOCKBUF_LOCK(&so->so_rcv);
michael@0 5741 hold_sblock = 1;
michael@0 5742 #endif
michael@0 5743 if (SCTP_BASE_SYSCTL(sctp_logging_level) &SCTP_RECV_RWND_LOGGING_ENABLE) {
michael@0 5744 #if defined(__APPLE__)
michael@0 5745 #if defined(APPLE_LEOPARD)
michael@0 5746 sctp_misc_ints(SCTP_SORECV_ENTERPL,
michael@0 5747 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
michael@0 5748 #else
michael@0 5749 sctp_misc_ints(SCTP_SORECV_ENTERPL,
michael@0 5750 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio_resid(uio));
michael@0 5751 #endif
michael@0 5752 #else
michael@0 5753 sctp_misc_ints(SCTP_SORECV_ENTERPL,
michael@0 5754 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
michael@0 5755 #endif
michael@0 5756 }
michael@0 5757
michael@0 5758 #if defined(__APPLE__)
michael@0 5759 error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
michael@0 5760 #endif
michael@0 5761
michael@0 5762 #if defined(__FreeBSD__)
michael@0 5763 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
michael@0 5764 #endif
michael@0 5765 if (error) {
michael@0 5766 goto release_unlocked;
michael@0 5767 }
michael@0 5768 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
michael@0 5769 sockbuf_lock = 1;
michael@0 5770 #endif
michael@0 5771 restart:
michael@0 5772 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
michael@0 5773 if (hold_sblock == 0) {
michael@0 5774 SOCKBUF_LOCK(&so->so_rcv);
michael@0 5775 hold_sblock = 1;
michael@0 5776 }
michael@0 5777 #endif
michael@0 5778 #if defined(__APPLE__)
michael@0 5779 sbunlock(&so->so_rcv, 1);
michael@0 5780 #endif
michael@0 5781
michael@0 5782 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
michael@0 5783 sbunlock(&so->so_rcv);
michael@0 5784 #endif
michael@0 5785
michael@0 5786 restart_nosblocks:
michael@0 5787 if (hold_sblock == 0) {
michael@0 5788 SOCKBUF_LOCK(&so->so_rcv);
michael@0 5789 hold_sblock = 1;
michael@0 5790 }
michael@0 5791 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
michael@0 5792 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
michael@0 5793 goto out;
michael@0 5794 }
michael@0 5795 #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
michael@0 5796 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
michael@0 5797 #else
michael@0 5798 if ((so->so_state & SS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
michael@0 5799 #endif
michael@0 5800 if (so->so_error) {
michael@0 5801 error = so->so_error;
michael@0 5802 if ((in_flags & MSG_PEEK) == 0)
michael@0 5803 so->so_error = 0;
michael@0 5804 goto out;
michael@0 5805 } else {
michael@0 5806 if (so->so_rcv.sb_cc == 0) {
michael@0 5807 /* indicate EOF */
michael@0 5808 error = 0;
michael@0 5809 goto out;
michael@0 5810 }
michael@0 5811 }
michael@0 5812 }
michael@0 5813 if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
michael@0 5814 /* we need to wait for data */
michael@0 5815 if ((so->so_rcv.sb_cc == 0) &&
michael@0 5816 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
michael@0 5817 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
michael@0 5818 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
michael@0 5819 				/* For the active open side, clear flags for re-use;
michael@0 5820 				 * the passive open side is blocked by connect.
michael@0 5821 				 */
michael@0 5822 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
michael@0 5823 /* You were aborted, passive side always hits here */
michael@0 5824 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
michael@0 5825 error = ECONNRESET;
michael@0 5826 }
michael@0 5827 so->so_state &= ~(SS_ISCONNECTING |
michael@0 5828 SS_ISDISCONNECTING |
michael@0 5829 SS_ISCONFIRMING |
michael@0 5830 SS_ISCONNECTED);
michael@0 5831 if (error == 0) {
michael@0 5832 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
michael@0 5833 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
michael@0 5834 error = ENOTCONN;
michael@0 5835 }
michael@0 5836 }
michael@0 5837 goto out;
michael@0 5838 }
michael@0 5839 }
michael@0 5840 error = sbwait(&so->so_rcv);
michael@0 5841 if (error) {
michael@0 5842 goto out;
michael@0 5843 }
michael@0 5844 held_length = 0;
michael@0 5845 goto restart_nosblocks;
michael@0 5846 } else if (so->so_rcv.sb_cc == 0) {
michael@0 5847 if (so->so_error) {
michael@0 5848 error = so->so_error;
michael@0 5849 if ((in_flags & MSG_PEEK) == 0)
michael@0 5850 so->so_error = 0;
michael@0 5851 } else {
michael@0 5852 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
michael@0 5853 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
michael@0 5854 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
michael@0 5855 					/* For the active open side, clear flags for re-use;
michael@0 5856 					 * the passive open side is blocked by connect.
michael@0 5857 					 */
michael@0 5858 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
michael@0 5859 /* You were aborted, passive side always hits here */
michael@0 5860 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
michael@0 5861 error = ECONNRESET;
michael@0 5862 }
michael@0 5863 so->so_state &= ~(SS_ISCONNECTING |
michael@0 5864 SS_ISDISCONNECTING |
michael@0 5865 SS_ISCONFIRMING |
michael@0 5866 SS_ISCONNECTED);
michael@0 5867 if (error == 0) {
michael@0 5868 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
michael@0 5869 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
michael@0 5870 error = ENOTCONN;
michael@0 5871 }
michael@0 5872 }
michael@0 5873 goto out;
michael@0 5874 }
michael@0 5875 }
michael@0 5876 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
michael@0 5877 error = EWOULDBLOCK;
michael@0 5878 }
michael@0 5879 goto out;
michael@0 5880 }
michael@0 5881 if (hold_sblock == 1) {
michael@0 5882 SOCKBUF_UNLOCK(&so->so_rcv);
michael@0 5883 hold_sblock = 0;
michael@0 5884 }
michael@0 5885 #if defined(__APPLE__)
michael@0 5886 error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
michael@0 5887 #endif
michael@0 5888 #if defined(__FreeBSD__) && __FreeBSD_version < 700000
michael@0 5889 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
michael@0 5890 #endif
michael@0 5891 /* we possibly have data we can read */
michael@0 5892 /*sa_ignore FREED_MEMORY*/
michael@0 5893 control = TAILQ_FIRST(&inp->read_queue);
michael@0 5894 if (control == NULL) {
michael@0 5895 		/* This could be happening since
michael@0 5896 		 * the appender did the increment but has not
michael@0 5897 		 * yet done the tailq insert onto the read_queue.
michael@0 5898 		 */
michael@0 5899 if (hold_rlock == 0) {
michael@0 5900 SCTP_INP_READ_LOCK(inp);
michael@0 5901 }
michael@0 5902 control = TAILQ_FIRST(&inp->read_queue);
michael@0 5903 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
michael@0 5904 #ifdef INVARIANTS
michael@0 5905 panic("Huh, its non zero and nothing on control?");
michael@0 5906 #endif
michael@0 5907 so->so_rcv.sb_cc = 0;
michael@0 5908 }
michael@0 5909 SCTP_INP_READ_UNLOCK(inp);
michael@0 5910 hold_rlock = 0;
michael@0 5911 goto restart;
michael@0 5912 }
michael@0 5913
michael@0 5914 if ((control->length == 0) &&
michael@0 5915 (control->do_not_ref_stcb)) {
michael@0 5916 		/* Clean-up code for freeing an assoc that left behind a pdapi...
michael@0 5917 		 * maybe a peer in EEOR mode that just closed after sending and
michael@0 5918 		 * never indicated an EOR.
michael@0 5919 		 */
michael@0 5920 if (hold_rlock == 0) {
michael@0 5921 hold_rlock = 1;
michael@0 5922 SCTP_INP_READ_LOCK(inp);
michael@0 5923 }
michael@0 5924 control->held_length = 0;
michael@0 5925 if (control->data) {
michael@0 5926 /* Hmm there is data here .. fix */
michael@0 5927 struct mbuf *m_tmp;
michael@0 5928 int cnt = 0;
michael@0 5929 m_tmp = control->data;
michael@0 5930 while (m_tmp) {
michael@0 5931 cnt += SCTP_BUF_LEN(m_tmp);
michael@0 5932 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
michael@0 5933 control->tail_mbuf = m_tmp;
michael@0 5934 control->end_added = 1;
michael@0 5935 }
michael@0 5936 m_tmp = SCTP_BUF_NEXT(m_tmp);
michael@0 5937 }
michael@0 5938 control->length = cnt;
michael@0 5939 } else {
michael@0 5940 /* remove it */
michael@0 5941 TAILQ_REMOVE(&inp->read_queue, control, next);
michael@0 5942 			/* Add back any hidden data */
michael@0 5943 sctp_free_remote_addr(control->whoFrom);
michael@0 5944 sctp_free_a_readq(stcb, control);
michael@0 5945 }
michael@0 5946 if (hold_rlock) {
michael@0 5947 hold_rlock = 0;
michael@0 5948 SCTP_INP_READ_UNLOCK(inp);
michael@0 5949 }
michael@0 5950 goto restart;
michael@0 5951 }
michael@0 5952 if ((control->length == 0) &&
michael@0 5953 (control->end_added == 1)) {
michael@0 5954 /* Do we also need to check for (control->pdapi_aborted == 1)? */
michael@0 5955 if (hold_rlock == 0) {
michael@0 5956 hold_rlock = 1;
michael@0 5957 SCTP_INP_READ_LOCK(inp);
michael@0 5958 }
michael@0 5959 TAILQ_REMOVE(&inp->read_queue, control, next);
michael@0 5960 if (control->data) {
michael@0 5961 #ifdef INVARIANTS
michael@0 5962 panic("control->data not null but control->length == 0");
michael@0 5963 #else
michael@0 5964 SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
michael@0 5965 sctp_m_freem(control->data);
michael@0 5966 control->data = NULL;
michael@0 5967 #endif
michael@0 5968 }
michael@0 5969 if (control->aux_data) {
michael@0 5970 sctp_m_free (control->aux_data);
michael@0 5971 control->aux_data = NULL;
michael@0 5972 }
michael@0 5973 sctp_free_remote_addr(control->whoFrom);
michael@0 5974 sctp_free_a_readq(stcb, control);
michael@0 5975 if (hold_rlock) {
michael@0 5976 hold_rlock = 0;
michael@0 5977 SCTP_INP_READ_UNLOCK(inp);
michael@0 5978 }
michael@0 5979 goto restart;
michael@0 5980 }
michael@0 5981 if (control->length == 0) {
michael@0 5982 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
michael@0 5983 (filling_sinfo)) {
michael@0 5984 			/* find a more suitable one than this */
michael@0 5985 ctl = TAILQ_NEXT(control, next);
michael@0 5986 while (ctl) {
michael@0 5987 if ((ctl->stcb != control->stcb) && (ctl->length) &&
michael@0 5988 (ctl->some_taken ||
michael@0 5989 (ctl->spec_flags & M_NOTIFICATION) ||
michael@0 5990 ((ctl->do_not_ref_stcb == 0) &&
michael@0 5991 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
michael@0 5992 ) {
michael@0 5993 					/*-
michael@0 5994 					 * If we have a different TCB next, and there is data
michael@0 5995 					 * present: if we have already taken some (pdapi), OR we can
michael@0 5996 					 * ref the tcb and no delivery has started on this stream, we
michael@0 5997 					 * take it. Note we allow a notification on a different
michael@0 5998 					 * assoc to be delivered.
michael@0 5999 					 */
michael@0 6000 control = ctl;
michael@0 6001 goto found_one;
michael@0 6002 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
michael@0 6003 (ctl->length) &&
michael@0 6004 ((ctl->some_taken) ||
michael@0 6005 ((ctl->do_not_ref_stcb == 0) &&
michael@0 6006 ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
michael@0 6007 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
michael@0 6008 					/*-
michael@0 6009 					 * If we have the same tcb, there is data present, and we
michael@0 6010 					 * have the stream interleave feature present: then if we have
michael@0 6011 					 * taken some (pdapi) or we can refer to that tcb AND we have
michael@0 6012 					 * not started a delivery for this stream, we can take it.
michael@0 6013 					 * Note we do NOT allow a notification on the same assoc to
michael@0 6014 					 * be delivered.
michael@0 6015 					 */
michael@0 6016 control = ctl;
michael@0 6017 goto found_one;
michael@0 6018 }
michael@0 6019 ctl = TAILQ_NEXT(ctl, next);
michael@0 6020 }
michael@0 6021 }
michael@0 6022 			/*
michael@0 6023 			 * if we reach here, no suitable replacement is available
michael@0 6024 			 * <or> fragment interleave is NOT on. So stuff the sb_cc
michael@0 6025 			 * into our held count, and it's time to sleep again.
michael@0 6026 			 */
michael@0 6027 held_length = so->so_rcv.sb_cc;
michael@0 6028 control->held_length = so->so_rcv.sb_cc;
michael@0 6029 goto restart;
michael@0 6030 }
michael@0 6031 /* Clear the held length since there is something to read */
michael@0 6032 control->held_length = 0;
michael@0 6033 if (hold_rlock) {
michael@0 6034 SCTP_INP_READ_UNLOCK(inp);
michael@0 6035 hold_rlock = 0;
michael@0 6036 }
michael@0 6037 found_one:
michael@0 6038 /*
michael@0 6039 	 * If we reach here, control has some data for us to read off.
michael@0 6040 * Note that stcb COULD be NULL.
michael@0 6041 */
michael@0 6042 control->some_taken++;
michael@0 6043 if (hold_sblock) {
michael@0 6044 SOCKBUF_UNLOCK(&so->so_rcv);
michael@0 6045 hold_sblock = 0;
michael@0 6046 }
michael@0 6047 stcb = control->stcb;
michael@0 6048 if (stcb) {
michael@0 6049 if ((control->do_not_ref_stcb == 0) &&
michael@0 6050 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
michael@0 6051 if (freecnt_applied == 0)
michael@0 6052 stcb = NULL;
michael@0 6053 } else if (control->do_not_ref_stcb == 0) {
michael@0 6054 /* you can't free it on me please */
michael@0 6055 /*
michael@0 6056 * The lock on the socket buffer protects us so the
michael@0 6057 * free code will stop. But since we used the socketbuf
michael@0 6058 * lock and the sender uses the tcb_lock to increment,
michael@0 6059 * we need to use the atomic add to the refcnt
michael@0 6060 */
michael@0 6061 if (freecnt_applied) {
michael@0 6062 #ifdef INVARIANTS
michael@0 6063 panic("refcnt already incremented");
michael@0 6064 #else
michael@0 6065 SCTP_PRINTF("refcnt already incremented?\n");
michael@0 6066 #endif
michael@0 6067 } else {
michael@0 6068 atomic_add_int(&stcb->asoc.refcnt, 1);
michael@0 6069 freecnt_applied = 1;
michael@0 6070 }
michael@0 6071 			/*
michael@0 6072 			 * Set up to remember how much we have not yet told
michael@0 6073 			 * the peer our rwnd has opened up. Note we grab
michael@0 6074 			 * the value from the tcb from last time.
michael@0 6075 			 * Note too that sack sending clears this when a sack
michael@0 6076 			 * is sent, which is fine. Once we hit the rwnd_req,
michael@0 6077 			 * we will then go to sctp_user_rcvd(), which will
michael@0 6078 			 * not lock until it KNOWS it MUST send a WUP-SACK.
michael@0 6079 			 */
michael@0 6080 freed_so_far = stcb->freed_by_sorcv_sincelast;
michael@0 6081 stcb->freed_by_sorcv_sincelast = 0;
michael@0 6082 }
michael@0 6083 }
michael@0 6084 if (stcb &&
michael@0 6085 ((control->spec_flags & M_NOTIFICATION) == 0) &&
michael@0 6086 control->do_not_ref_stcb == 0) {
michael@0 6087 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
michael@0 6088 }
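	/*
	 * delivery_started marks that we have begun handing this stream's
	 * current message to the application; the selection logic earlier in
	 * this function generally declines to jump to another control on a
	 * stream whose delivery has already started, which keeps a partially
	 * delivered message from being interleaved with another one.
	 */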
michael@0 6089
michael@0 6090 	/* First let's copy out the sinfo and sockaddr info */
michael@0 6091 if ((sinfo) && filling_sinfo) {
michael@0 6092 memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
michael@0 6093 nxt = TAILQ_NEXT(control, next);
michael@0 6094 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
michael@0 6095 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
michael@0 6096 struct sctp_extrcvinfo *s_extra;
michael@0 6097 s_extra = (struct sctp_extrcvinfo *)sinfo;
michael@0 6098 if ((nxt) &&
michael@0 6099 (nxt->length)) {
michael@0 6100 s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
michael@0 6101 if (nxt->sinfo_flags & SCTP_UNORDERED) {
michael@0 6102 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
michael@0 6103 }
michael@0 6104 if (nxt->spec_flags & M_NOTIFICATION) {
michael@0 6105 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
michael@0 6106 }
michael@0 6107 s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
michael@0 6108 s_extra->sreinfo_next_length = nxt->length;
michael@0 6109 s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
michael@0 6110 s_extra->sreinfo_next_stream = nxt->sinfo_stream;
michael@0 6111 if (nxt->tail_mbuf != NULL) {
michael@0 6112 if (nxt->end_added) {
michael@0 6113 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
michael@0 6114 }
michael@0 6115 }
michael@0 6116 } else {
michael@0 6117 			/* we explicitly zero this, since the memcpy picked up
michael@0 6118 			 * some other things beyond the older sinfo_ fields
michael@0 6119 			 * that are on the control structure :-D
michael@0 6120 			 */
michael@0 6121 nxt = NULL;
michael@0 6122 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
michael@0 6123 s_extra->sreinfo_next_aid = 0;
michael@0 6124 s_extra->sreinfo_next_length = 0;
michael@0 6125 s_extra->sreinfo_next_ppid = 0;
michael@0 6126 s_extra->sreinfo_next_stream = 0;
michael@0 6127 }
michael@0 6128 }
michael@0 6129 /*
michael@0 6130 		 * update from the real current cum-ack, if we have an stcb.
michael@0 6131 */
michael@0 6132 if ((control->do_not_ref_stcb == 0) && stcb)
michael@0 6133 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
michael@0 6134 /*
michael@0 6135 * mask off the high bits, we keep the actual chunk bits in
michael@0 6136 * there.
michael@0 6137 */
michael@0 6138 sinfo->sinfo_flags &= 0x00ff;
michael@0 6139 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
michael@0 6140 sinfo->sinfo_flags |= SCTP_UNORDERED;
michael@0 6141 }
michael@0 6142 }
michael@0 6143 #ifdef SCTP_ASOCLOG_OF_TSNS
michael@0 6144 {
michael@0 6145 int index, newindex;
michael@0 6146 struct sctp_pcbtsn_rlog *entry;
michael@0 6147 do {
michael@0 6148 index = inp->readlog_index;
michael@0 6149 newindex = index + 1;
michael@0 6150 if (newindex >= SCTP_READ_LOG_SIZE) {
michael@0 6151 newindex = 0;
michael@0 6152 }
michael@0 6153 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
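		/*
		 * The do/while above claims a slot in the read-log ring without a
		 * lock: if another thread advanced readlog_index first, the
		 * atomic_cmpset_int() fails and we retry with the freshly read
		 * index, wrapping at SCTP_READ_LOG_SIZE.
		 */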
michael@0 6154 entry = &inp->readlog[index];
michael@0 6155 entry->vtag = control->sinfo_assoc_id;
michael@0 6156 entry->strm = control->sinfo_stream;
michael@0 6157 entry->seq = control->sinfo_ssn;
michael@0 6158 entry->sz = control->length;
michael@0 6159 entry->flgs = control->sinfo_flags;
michael@0 6160 }
michael@0 6161 #endif
michael@0 6162 if (fromlen && from) {
michael@0 6163 #ifdef HAVE_SA_LEN
michael@0 6164 cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len);
michael@0 6165 #endif
michael@0 6166 switch (control->whoFrom->ro._l_addr.sa.sa_family) {
michael@0 6167 #ifdef INET6
michael@0 6168 case AF_INET6:
michael@0 6169 #ifndef HAVE_SA_LEN
michael@0 6170 cp_len = min((size_t)fromlen, sizeof(struct sockaddr_in6));
michael@0 6171 #endif
michael@0 6172 ((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
michael@0 6173 break;
michael@0 6174 #endif
michael@0 6175 #ifdef INET
michael@0 6176 case AF_INET:
michael@0 6177 #ifndef HAVE_SA_LEN
michael@0 6178 cp_len = min((size_t)fromlen, sizeof(struct sockaddr_in));
michael@0 6179 #endif
michael@0 6180 ((struct sockaddr_in *)from)->sin_port = control->port_from;
michael@0 6181 break;
michael@0 6182 #endif
michael@0 6183 #if defined(__Userspace__)
michael@0 6184 case AF_CONN:
michael@0 6185 #ifndef HAVE_SA_LEN
michael@0 6186 cp_len = min((size_t)fromlen, sizeof(struct sockaddr_conn));
michael@0 6187 #endif
michael@0 6188 ((struct sockaddr_conn *)from)->sconn_port = control->port_from;
michael@0 6189 break;
michael@0 6190 #endif
michael@0 6191 default:
michael@0 6192 #ifndef HAVE_SA_LEN
michael@0 6193 cp_len = min((size_t)fromlen, sizeof(struct sockaddr));
michael@0 6194 #endif
michael@0 6195 break;
michael@0 6196 }
michael@0 6197 memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
michael@0 6198
michael@0 6199 #if defined(INET) && defined(INET6)
michael@0 6200 if ((sctp_is_feature_on(inp,SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
michael@0 6201 (from->sa_family == AF_INET) &&
michael@0 6202 ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
michael@0 6203 struct sockaddr_in *sin;
michael@0 6204 struct sockaddr_in6 sin6;
michael@0 6205
michael@0 6206 sin = (struct sockaddr_in *)from;
michael@0 6207 bzero(&sin6, sizeof(sin6));
michael@0 6208 sin6.sin6_family = AF_INET6;
michael@0 6209 #ifdef HAVE_SIN6_LEN
michael@0 6210 sin6.sin6_len = sizeof(struct sockaddr_in6);
michael@0 6211 #endif
michael@0 6212 #if defined(__Userspace_os_FreeBSD) || defined(__Userspace_os_Darwin) || defined(__Userspace_os_Windows)
michael@0 6213 ((uint32_t *)&sin6.sin6_addr)[2] = htonl(0xffff);
michael@0 6214 bcopy(&sin->sin_addr,
michael@0 6215 &(((uint32_t *)&sin6.sin6_addr)[3]),
michael@0 6216 sizeof(uint32_t));
michael@0 6217 #elif defined(__Windows__)
michael@0 6218 ((uint32_t *)&sin6.sin6_addr)[2] = htonl(0xffff);
michael@0 6219 bcopy(&sin->sin_addr,
michael@0 6220 &((uint32_t *)&sin6.sin6_addr)[3],
michael@0 6221 sizeof(uint32_t));
michael@0 6222 #else
michael@0 6223 sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
michael@0 6224 bcopy(&sin->sin_addr,
michael@0 6225 &sin6.sin6_addr.s6_addr32[3],
michael@0 6226 sizeof(sin6.sin6_addr.s6_addr32[3]));
michael@0 6227 #endif
michael@0 6228 sin6.sin6_port = sin->sin_port;
michael@0 6229 memcpy(from, &sin6, sizeof(struct sockaddr_in6));
michael@0 6230 }
michael@0 6231 #endif
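		/*
		 * When both INET and INET6 are compiled in and the application
		 * asked for mapped addresses, the block above rewrites an IPv4
		 * source address into its IPv4-mapped IPv6 form (::ffff:a.b.c.d),
		 * provided the caller's buffer can hold a sockaddr_in6.
		 */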
michael@0 6232 #if defined(SCTP_EMBEDDED_V6_SCOPE)
michael@0 6233 #ifdef INET6
michael@0 6234 {
michael@0 6235 struct sockaddr_in6 lsa6, *from6;
michael@0 6236
michael@0 6237 from6 = (struct sockaddr_in6 *)from;
michael@0 6238 sctp_recover_scope_mac(from6, (&lsa6));
michael@0 6239 }
michael@0 6240 #endif
michael@0 6241 #endif
michael@0 6242 }
michael@0 6243 /* now copy out what data we can */
michael@0 6244 if (mp == NULL) {
michael@0 6245 /* copy out each mbuf in the chain up to length */
michael@0 6246 get_more_data:
michael@0 6247 m = control->data;
michael@0 6248 while (m) {
michael@0 6249 /* Move out all we can */
michael@0 6250 #if defined(__APPLE__)
michael@0 6251 #if defined(APPLE_LEOPARD)
michael@0 6252 cp_len = (int)uio->uio_resid;
michael@0 6253 #else
michael@0 6254 cp_len = (int)uio_resid(uio);
michael@0 6255 #endif
michael@0 6256 #else
michael@0 6257 cp_len = (int)uio->uio_resid;
michael@0 6258 #endif
michael@0 6259 my_len = (int)SCTP_BUF_LEN(m);
michael@0 6260 if (cp_len > my_len) {
michael@0 6261 /* not enough in this buf */
michael@0 6262 cp_len = my_len;
michael@0 6263 }
michael@0 6264 if (hold_rlock) {
michael@0 6265 SCTP_INP_READ_UNLOCK(inp);
michael@0 6266 hold_rlock = 0;
michael@0 6267 }
michael@0 6268 #if defined(__APPLE__)
michael@0 6269 SCTP_SOCKET_UNLOCK(so, 0);
michael@0 6270 #endif
michael@0 6271 if (cp_len > 0)
michael@0 6272 error = uiomove(mtod(m, char *), cp_len, uio);
michael@0 6273 #if defined(__APPLE__)
michael@0 6274 SCTP_SOCKET_LOCK(so, 0);
michael@0 6275 #endif
michael@0 6276 /* re-read */
michael@0 6277 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
michael@0 6278 goto release;
michael@0 6279 }
michael@0 6280
michael@0 6281 if ((control->do_not_ref_stcb == 0) && stcb &&
michael@0 6282 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
michael@0 6283 no_rcv_needed = 1;
michael@0 6284 }
michael@0 6285 if (error) {
michael@0 6286 /* error we are out of here */
michael@0 6287 goto release;
michael@0 6288 }
michael@0 6289 if ((SCTP_BUF_NEXT(m) == NULL) &&
michael@0 6290 (cp_len >= SCTP_BUF_LEN(m)) &&
michael@0 6291 ((control->end_added == 0) ||
michael@0 6292 (control->end_added &&
michael@0 6293 (TAILQ_NEXT(control, next) == NULL)))
michael@0 6294 ) {
michael@0 6295 SCTP_INP_READ_LOCK(inp);
michael@0 6296 hold_rlock = 1;
michael@0 6297 }
michael@0 6298 if (cp_len == SCTP_BUF_LEN(m)) {
michael@0 6299 if ((SCTP_BUF_NEXT(m)== NULL) &&
michael@0 6300 (control->end_added)) {
michael@0 6301 out_flags |= MSG_EOR;
michael@0 6302 if ((control->do_not_ref_stcb == 0) &&
michael@0 6303 (control->stcb != NULL) &&
michael@0 6304 ((control->spec_flags & M_NOTIFICATION) == 0))
michael@0 6305 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
michael@0 6306 }
michael@0 6307 if (control->spec_flags & M_NOTIFICATION) {
michael@0 6308 out_flags |= MSG_NOTIFICATION;
michael@0 6309 }
michael@0 6310 /* we ate up the mbuf */
michael@0 6311 if (in_flags & MSG_PEEK) {
michael@0 6312 /* just looking */
michael@0 6313 m = SCTP_BUF_NEXT(m);
michael@0 6314 copied_so_far += cp_len;
michael@0 6315 } else {
michael@0 6316 /* dispose of the mbuf */
michael@0 6317 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
michael@0 6318 sctp_sblog(&so->so_rcv,
michael@0 6319 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
michael@0 6320 }
michael@0 6321 sctp_sbfree(control, stcb, &so->so_rcv, m);
michael@0 6322 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
michael@0 6323 sctp_sblog(&so->so_rcv,
michael@0 6324 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
michael@0 6325 }
michael@0 6326 copied_so_far += cp_len;
michael@0 6327 freed_so_far += cp_len;
michael@0 6328 freed_so_far += MSIZE;
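					/*
					 * Note: freed_so_far counts the payload plus MSIZE of
					 * per-mbuf overhead as a rough estimate of the
					 * socket-buffer space released; it serves as the trigger
					 * for calling sctp_user_rcvd() once rwnd_req worth has
					 * been freed.
					 */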
michael@0 6329 atomic_subtract_int(&control->length, cp_len);
michael@0 6330 control->data = sctp_m_free(m);
michael@0 6331 m = control->data;
michael@0 6332 					/* been through it all; we must hold the sb lock, so it is OK to null the tail */
michael@0 6333 if (control->data == NULL) {
michael@0 6334 #ifdef INVARIANTS
michael@0 6335 #if !defined(__APPLE__)
michael@0 6336 if ((control->end_added == 0) ||
michael@0 6337 (TAILQ_NEXT(control, next) == NULL)) {
michael@0 6338 /* If the end is not added, OR the
michael@0 6339 * next is NOT null we MUST have the lock.
michael@0 6340 */
michael@0 6341 if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
michael@0 6342 panic("Hmm we don't own the lock?");
michael@0 6343 }
michael@0 6344 }
michael@0 6345 #endif
michael@0 6346 #endif
michael@0 6347 control->tail_mbuf = NULL;
michael@0 6348 #ifdef INVARIANTS
michael@0 6349 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
michael@0 6350 panic("end_added, nothing left and no MSG_EOR");
michael@0 6351 }
michael@0 6352 #endif
michael@0 6353 }
michael@0 6354 }
michael@0 6355 } else {
michael@0 6356 /* Do we need to trim the mbuf? */
michael@0 6357 if (control->spec_flags & M_NOTIFICATION) {
michael@0 6358 out_flags |= MSG_NOTIFICATION;
michael@0 6359 }
michael@0 6360 if ((in_flags & MSG_PEEK) == 0) {
michael@0 6361 SCTP_BUF_RESV_UF(m, cp_len);
michael@0 6362 SCTP_BUF_LEN(m) -= cp_len;
michael@0 6363 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
michael@0 6364 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, cp_len);
michael@0 6365 }
michael@0 6366 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
michael@0 6367 if ((control->do_not_ref_stcb == 0) &&
michael@0 6368 stcb) {
michael@0 6369 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
michael@0 6370 }
michael@0 6371 copied_so_far += cp_len;
michael@0 6372 freed_so_far += cp_len;
michael@0 6373 freed_so_far += MSIZE;
michael@0 6374 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
michael@0 6375 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb,
michael@0 6376 SCTP_LOG_SBRESULT, 0);
michael@0 6377 }
michael@0 6378 atomic_subtract_int(&control->length, cp_len);
michael@0 6379 } else {
michael@0 6380 copied_so_far += cp_len;
michael@0 6381 }
michael@0 6382 }
michael@0 6383 #if defined(__APPLE__)
michael@0 6384 #if defined(APPLE_LEOPARD)
michael@0 6385 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
michael@0 6386 #else
michael@0 6387 if ((out_flags & MSG_EOR) || (uio_resid(uio) == 0)) {
michael@0 6388 #endif
michael@0 6389 #else
michael@0 6390 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
michael@0 6391 #endif
michael@0 6392 break;
michael@0 6393 }
michael@0 6394 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
michael@0 6395 (control->do_not_ref_stcb == 0) &&
michael@0 6396 (freed_so_far >= rwnd_req)) {
michael@0 6397 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
michael@0 6398 }
michael@0 6399 } /* end while(m) */
michael@0 6400 		/*
michael@0 6401 		 * At this point we have looked at it all and we either have
michael@0 6402 		 * MSG_EOR or have read all the user wants... <OR>
michael@0 6403 		 * control->length == 0.
michael@0 6404 		 */
michael@0 6405 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
michael@0 6406 /* we are done with this control */
michael@0 6407 if (control->length == 0) {
michael@0 6408 if (control->data) {
michael@0 6409 #ifdef INVARIANTS
michael@0 6410 panic("control->data not null at read eor?");
michael@0 6411 #else
michael@0 6412 					SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
michael@0 6413 sctp_m_freem(control->data);
michael@0 6414 control->data = NULL;
michael@0 6415 #endif
michael@0 6416 }
michael@0 6417 done_with_control:
michael@0 6418 if (TAILQ_NEXT(control, next) == NULL) {
michael@0 6419 				/* If we don't have a next we need the
michael@0 6420 				 * lock; if there is a next, the interrupt
michael@0 6421 				 * is filling ahead of us and we don't
michael@0 6422 				 * need the lock to remove this guy
michael@0 6423 				 * (which is the head of the queue).
michael@0 6424 				 */
michael@0 6425 if (hold_rlock == 0) {
michael@0 6426 SCTP_INP_READ_LOCK(inp);
michael@0 6427 hold_rlock = 1;
michael@0 6428 }
michael@0 6429 }
michael@0 6430 TAILQ_REMOVE(&inp->read_queue, control, next);
michael@0 6431 			/* Add back any hidden data */
michael@0 6432 if (control->held_length) {
michael@0 6433 held_length = 0;
michael@0 6434 control->held_length = 0;
michael@0 6435 wakeup_read_socket = 1;
michael@0 6436 }
michael@0 6437 if (control->aux_data) {
michael@0 6438 sctp_m_free (control->aux_data);
michael@0 6439 control->aux_data = NULL;
michael@0 6440 }
michael@0 6441 no_rcv_needed = control->do_not_ref_stcb;
michael@0 6442 sctp_free_remote_addr(control->whoFrom);
michael@0 6443 control->data = NULL;
michael@0 6444 sctp_free_a_readq(stcb, control);
michael@0 6445 control = NULL;
michael@0 6446 if ((freed_so_far >= rwnd_req) &&
michael@0 6447 (no_rcv_needed == 0))
michael@0 6448 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
michael@0 6449
michael@0 6450 } else {
michael@0 6451 /*
michael@0 6452 * The user did not read all of this
michael@0 6453 * message, turn off the returned MSG_EOR
michael@0 6454 * since we are leaving more behind on the
michael@0 6455 * control to read.
michael@0 6456 */
michael@0 6457 #ifdef INVARIANTS
michael@0 6458 if (control->end_added &&
michael@0 6459 (control->data == NULL) &&
michael@0 6460 (control->tail_mbuf == NULL)) {
michael@0 6461 panic("Gak, control->length is corrupt?");
michael@0 6462 }
michael@0 6463 #endif
michael@0 6464 no_rcv_needed = control->do_not_ref_stcb;
michael@0 6465 out_flags &= ~MSG_EOR;
michael@0 6466 }
michael@0 6467 }
michael@0 6468 if (out_flags & MSG_EOR) {
michael@0 6469 goto release;
michael@0 6470 }
michael@0 6471 #if defined(__APPLE__)
michael@0 6472 #if defined(APPLE_LEOPARD)
michael@0 6473 if ((uio->uio_resid == 0) ||
michael@0 6474 #else
michael@0 6475 if ((uio_resid(uio) == 0) ||
michael@0 6476 #endif
michael@0 6477 #else
michael@0 6478 if ((uio->uio_resid == 0) ||
michael@0 6479 #endif
michael@0 6480 ((in_eeor_mode) &&
michael@0 6481 (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
michael@0 6482 goto release;
michael@0 6483 }
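	/*
	 * The check above also lets explicit-EOR mode return early: once at
	 * least so_rcv.sb_lowat bytes have been copied we stop, even though
	 * no EOR has been seen yet.
	 */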
michael@0 6484 	/*
michael@0 6485 	 * If we hit here the receiver wants more and this message is
michael@0 6486 	 * NOT done (pd-api). So, two questions: can we block? If not,
michael@0 6487 	 * we are done. Did the user NOT set MSG_WAITALL?
michael@0 6488 	 */
michael@0 6489 if (block_allowed == 0) {
michael@0 6490 goto release;
michael@0 6491 }
michael@0 6492 	/*
michael@0 6493 	 * We need to wait for more data; a few things: - we don't
michael@0 6494 	 * sbunlock(), so we don't get someone else reading. - We
michael@0 6495 	 * must be sure to account for the case where what is added
michael@0 6496 	 * is NOT to our control when we wake up.
michael@0 6497 	 */
michael@0 6498
michael@0 6499 	/* Do we need to tell the transport an rwnd update might be
michael@0 6500 	 * needed before we go to sleep?
michael@0 6501 	 */
michael@0 6502 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
michael@0 6503 ((freed_so_far >= rwnd_req) &&
michael@0 6504 (control->do_not_ref_stcb == 0) &&
michael@0 6505 (no_rcv_needed == 0))) {
michael@0 6506 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
michael@0 6507 }
michael@0 6508 wait_some_more:
michael@0 6509 #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
michael@0 6510 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
michael@0 6511 goto release;
michael@0 6512 }
michael@0 6513 #else
michael@0 6514 if (so->so_state & SS_CANTRCVMORE) {
michael@0 6515 goto release;
michael@0 6516 }
michael@0 6517 #endif
michael@0 6518
michael@0 6519 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
michael@0 6520 goto release;
michael@0 6521
michael@0 6522 if (hold_rlock == 1) {
michael@0 6523 SCTP_INP_READ_UNLOCK(inp);
michael@0 6524 hold_rlock = 0;
michael@0 6525 }
michael@0 6526 if (hold_sblock == 0) {
michael@0 6527 SOCKBUF_LOCK(&so->so_rcv);
michael@0 6528 hold_sblock = 1;
michael@0 6529 }
michael@0 6530 #if defined(__APPLE__)
michael@0 6531 sbunlock(&so->so_rcv, 1);
michael@0 6532 #endif
michael@0 6533 if ((copied_so_far) && (control->length == 0) &&
michael@0 6534 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
michael@0 6535 goto release;
michael@0 6536 }
michael@0 6537 if (so->so_rcv.sb_cc <= control->held_length) {
michael@0 6538 error = sbwait(&so->so_rcv);
michael@0 6539 if (error) {
michael@0 6540 #if defined(__FreeBSD__)
michael@0 6541 goto release;
michael@0 6542 #else
michael@0 6543 goto release_unlocked;
michael@0 6544 #endif
michael@0 6545 }
michael@0 6546 control->held_length = 0;
michael@0 6547 }
michael@0 6548 #if defined(__APPLE__)
michael@0 6549 error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
michael@0 6550 #endif
michael@0 6551 if (hold_sblock) {
michael@0 6552 SOCKBUF_UNLOCK(&so->so_rcv);
michael@0 6553 hold_sblock = 0;
michael@0 6554 }
michael@0 6555 if (control->length == 0) {
michael@0 6556 /* still nothing here */
michael@0 6557 if (control->end_added == 1) {
michael@0 6558 				/* he aborted, or is done, i.e. did a shutdown */
michael@0 6559 out_flags |= MSG_EOR;
michael@0 6560 if (control->pdapi_aborted) {
michael@0 6561 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
michael@0 6562 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
michael@0 6563
michael@0 6564 out_flags |= MSG_TRUNC;
michael@0 6565 } else {
michael@0 6566 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
michael@0 6567 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
michael@0 6568 }
michael@0 6569 goto done_with_control;
michael@0 6570 }
michael@0 6571 if (so->so_rcv.sb_cc > held_length) {
michael@0 6572 control->held_length = so->so_rcv.sb_cc;
michael@0 6573 held_length = 0;
michael@0 6574 }
michael@0 6575 goto wait_some_more;
michael@0 6576 } else if (control->data == NULL) {
michael@0 6577 /* we must re-sync since data
michael@0 6578 * is probably being added
michael@0 6579 */
michael@0 6580 SCTP_INP_READ_LOCK(inp);
michael@0 6581 if ((control->length > 0) && (control->data == NULL)) {
michael@0 6582 				/* big trouble... we have the lock and it's corrupt? */
michael@0 6583 #ifdef INVARIANTS
michael@0 6584 panic ("Impossible data==NULL length !=0");
michael@0 6585 #endif
michael@0 6586 out_flags |= MSG_EOR;
michael@0 6587 out_flags |= MSG_TRUNC;
michael@0 6588 control->length = 0;
michael@0 6589 SCTP_INP_READ_UNLOCK(inp);
michael@0 6590 goto done_with_control;
michael@0 6591 }
michael@0 6592 SCTP_INP_READ_UNLOCK(inp);
michael@0 6593 			/* We will fall through to get more data */
michael@0 6594 }
michael@0 6595 goto get_more_data;
michael@0 6596 } else {
michael@0 6597 		/*-
michael@0 6598 		 * Give the caller back the mbuf chain,
michael@0 6599 		 * and store the length in uio_resid.
michael@0 6600 		 */
michael@0 6601 wakeup_read_socket = 0;
michael@0 6602 if ((control->end_added == 0) ||
michael@0 6603 (TAILQ_NEXT(control, next) == NULL)) {
michael@0 6604 /* Need to get rlock */
michael@0 6605 if (hold_rlock == 0) {
michael@0 6606 SCTP_INP_READ_LOCK(inp);
michael@0 6607 hold_rlock = 1;
michael@0 6608 }
michael@0 6609 }
michael@0 6610 if (control->end_added) {
michael@0 6611 out_flags |= MSG_EOR;
michael@0 6612 if ((control->do_not_ref_stcb == 0) &&
michael@0 6613 (control->stcb != NULL) &&
michael@0 6614 ((control->spec_flags & M_NOTIFICATION) == 0))
michael@0 6615 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
michael@0 6616 }
michael@0 6617 if (control->spec_flags & M_NOTIFICATION) {
michael@0 6618 out_flags |= MSG_NOTIFICATION;
michael@0 6619 }
michael@0 6620 #if defined(__APPLE__)
michael@0 6621 #if defined(APPLE_LEOPARD)
michael@0 6622 uio->uio_resid = control->length;
michael@0 6623 #else
michael@0 6624 uio_setresid(uio, control->length);
michael@0 6625 #endif
michael@0 6626 #else
michael@0 6627 uio->uio_resid = control->length;
michael@0 6628 #endif
michael@0 6629 *mp = control->data;
michael@0 6630 m = control->data;
michael@0 6631 while (m) {
michael@0 6632 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
michael@0 6633 sctp_sblog(&so->so_rcv,
michael@0 6634 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
michael@0 6635 }
michael@0 6636 sctp_sbfree(control, stcb, &so->so_rcv, m);
michael@0 6637 freed_so_far += SCTP_BUF_LEN(m);
michael@0 6638 freed_so_far += MSIZE;
michael@0 6639 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
michael@0 6640 sctp_sblog(&so->so_rcv,
michael@0 6641 control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
michael@0 6642 }
michael@0 6643 m = SCTP_BUF_NEXT(m);
michael@0 6644 }
michael@0 6645 control->data = control->tail_mbuf = NULL;
michael@0 6646 control->length = 0;
michael@0 6647 if (out_flags & MSG_EOR) {
michael@0 6648 /* Done with this control */
michael@0 6649 goto done_with_control;
michael@0 6650 }
michael@0 6651 }
michael@0 6652 release:
michael@0 6653 if (hold_rlock == 1) {
michael@0 6654 SCTP_INP_READ_UNLOCK(inp);
michael@0 6655 hold_rlock = 0;
michael@0 6656 }
michael@0 6657 #if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
michael@0 6658 if (hold_sblock == 0) {
michael@0 6659 SOCKBUF_LOCK(&so->so_rcv);
michael@0 6660 hold_sblock = 1;
michael@0 6661 }
michael@0 6662 #else
michael@0 6663 if (hold_sblock == 1) {
michael@0 6664 SOCKBUF_UNLOCK(&so->so_rcv);
michael@0 6665 hold_sblock = 0;
michael@0 6666 }
michael@0 6667 #endif
michael@0 6668 #if defined(__APPLE__)
michael@0 6669 sbunlock(&so->so_rcv, 1);
michael@0 6670 #endif
michael@0 6671
michael@0 6672 #if defined(__FreeBSD__)
michael@0 6673 sbunlock(&so->so_rcv);
michael@0 6674 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
michael@0 6675 sockbuf_lock = 0;
michael@0 6676 #endif
michael@0 6677 #endif
michael@0 6678
michael@0 6679 release_unlocked:
michael@0 6680 if (hold_sblock) {
michael@0 6681 SOCKBUF_UNLOCK(&so->so_rcv);
michael@0 6682 hold_sblock = 0;
michael@0 6683 }
michael@0 6684 if ((stcb) && (in_flags & MSG_PEEK) == 0) {
michael@0 6685 if ((freed_so_far >= rwnd_req) &&
michael@0 6686 (control && (control->do_not_ref_stcb == 0)) &&
michael@0 6687 (no_rcv_needed == 0))
michael@0 6688 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
michael@0 6689 }
michael@0 6690 out:
michael@0 6691 if (msg_flags) {
michael@0 6692 *msg_flags = out_flags;
michael@0 6693 }
michael@0 6694 if (((out_flags & MSG_EOR) == 0) &&
michael@0 6695 ((in_flags & MSG_PEEK) == 0) &&
michael@0 6696 (sinfo) &&
michael@0 6697 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
michael@0 6698 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
michael@0 6699 struct sctp_extrcvinfo *s_extra;
michael@0 6700 s_extra = (struct sctp_extrcvinfo *)sinfo;
michael@0 6701 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
michael@0 6702 }
michael@0 6703 if (hold_rlock == 1) {
michael@0 6704 SCTP_INP_READ_UNLOCK(inp);
michael@0 6705 }
michael@0 6706 if (hold_sblock) {
michael@0 6707 SOCKBUF_UNLOCK(&so->so_rcv);
michael@0 6708 }
michael@0 6709 #if defined(__FreeBSD__) && __FreeBSD_version >= 700000
michael@0 6710 if (sockbuf_lock) {
michael@0 6711 sbunlock(&so->so_rcv);
michael@0 6712 }
michael@0 6713 #endif
michael@0 6714
michael@0 6715 if (freecnt_applied) {
michael@0 6716 /*
michael@0 6717 * The lock on the socket buffer protects us so the free
michael@0 6718 * code will stop. But since we used the socketbuf lock and
michael@0 6719 * the sender uses the tcb_lock to increment, we need to use
michael@0 6720 * the atomic add to the refcnt.
michael@0 6721 */
michael@0 6722 if (stcb == NULL) {
michael@0 6723 #ifdef INVARIANTS
michael@0 6724 panic("stcb for refcnt has gone NULL?");
michael@0 6725 goto stage_left;
michael@0 6726 #else
michael@0 6727 goto stage_left;
michael@0 6728 #endif
michael@0 6729 }
michael@0 6730 atomic_add_int(&stcb->asoc.refcnt, -1);
michael@0 6731 /* Save the value back for next time */
michael@0 6732 stcb->freed_by_sorcv_sincelast = freed_so_far;
michael@0 6733 }
michael@0 6734 if (SCTP_BASE_SYSCTL(sctp_logging_level) &SCTP_RECV_RWND_LOGGING_ENABLE) {
michael@0 6735 if (stcb) {
michael@0 6736 sctp_misc_ints(SCTP_SORECV_DONE,
michael@0 6737 freed_so_far,
michael@0 6738 #if defined(__APPLE__)
michael@0 6739 #if defined(APPLE_LEOPARD)
michael@0 6740 ((uio) ? (slen - uio->uio_resid) : slen),
michael@0 6741 #else
michael@0 6742 ((uio) ? (slen - uio_resid(uio)) : slen),
michael@0 6743 #endif
michael@0 6744 #else
michael@0 6745 ((uio) ? (slen - uio->uio_resid) : slen),
michael@0 6746 #endif
michael@0 6747 stcb->asoc.my_rwnd,
michael@0 6748 so->so_rcv.sb_cc);
michael@0 6749 } else {
michael@0 6750 sctp_misc_ints(SCTP_SORECV_DONE,
michael@0 6751 freed_so_far,
michael@0 6752 #if defined(__APPLE__)
michael@0 6753 #if defined(APPLE_LEOPARD)
michael@0 6754 ((uio) ? (slen - uio->uio_resid) : slen),
michael@0 6755 #else
michael@0 6756 ((uio) ? (slen - uio_resid(uio)) : slen),
michael@0 6757 #endif
michael@0 6758 #else
michael@0 6759 ((uio) ? (slen - uio->uio_resid) : slen),
michael@0 6760 #endif
michael@0 6761 0,
michael@0 6762 so->so_rcv.sb_cc);
michael@0 6763 }
michael@0 6764 }
michael@0 6765 stage_left:
michael@0 6766 if (wakeup_read_socket) {
michael@0 6767 sctp_sorwakeup(inp, so);
michael@0 6768 }
michael@0 6769 return (error);
michael@0 6770 }
michael@0 6771
michael@0 6772
michael@0 6773 #ifdef SCTP_MBUF_LOGGING
michael@0 6774 struct mbuf *
michael@0 6775 sctp_m_free(struct mbuf *m)
michael@0 6776 {
michael@0 6777 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
michael@0 6778 if (SCTP_BUF_IS_EXTENDED(m)) {
michael@0 6779 sctp_log_mb(m, SCTP_MBUF_IFREE);
michael@0 6780 }
michael@0 6781 }
michael@0 6782 return (m_free(m));
michael@0 6783 }
michael@0 6784
michael@0 6785 void sctp_m_freem(struct mbuf *mb)
michael@0 6786 {
michael@0 6787 while (mb != NULL)
michael@0 6788 mb = sctp_m_free(mb);
michael@0 6789 }
michael@0 6790
michael@0 6791 #endif
michael@0 6792
michael@0 6793 int
michael@0 6794 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
michael@0 6795 {
michael@0 6796 	/* Given a local address, for all associations
michael@0 6797 	 * that hold the address, request a peer-set-primary.
michael@0 6798 	 */
michael@0 6799 struct sctp_ifa *ifa;
michael@0 6800 struct sctp_laddr *wi;
michael@0 6801
michael@0 6802 ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
michael@0 6803 if (ifa == NULL) {
michael@0 6804 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
michael@0 6805 return (EADDRNOTAVAIL);
michael@0 6806 }
michael@0 6807 /* Now that we have the ifa we must awaken the
michael@0 6808 * iterator with this message.
michael@0 6809 */
michael@0 6810 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
michael@0 6811 if (wi == NULL) {
michael@0 6812 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
michael@0 6813 return (ENOMEM);
michael@0 6814 }
michael@0 6815 	/* Now increment the count and init the wi structure */
michael@0 6816 SCTP_INCR_LADDR_COUNT();
michael@0 6817 bzero(wi, sizeof(*wi));
michael@0 6818 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
michael@0 6819 wi->ifa = ifa;
michael@0 6820 wi->action = SCTP_SET_PRIM_ADDR;
michael@0 6821 atomic_add_int(&ifa->refcount, 1);
michael@0 6822
michael@0 6823 /* Now add it to the work queue */
michael@0 6824 SCTP_WQ_ADDR_LOCK();
michael@0 6825 /*
michael@0 6826 * Should this really be a tailq? As it is we will process the
michael@0 6827 * newest first :-0
michael@0 6828 */
michael@0 6829 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
michael@0 6830 SCTP_WQ_ADDR_UNLOCK();
michael@0 6831 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
michael@0 6832 (struct sctp_inpcb *)NULL,
michael@0 6833 (struct sctp_tcb *)NULL,
michael@0 6834 (struct sctp_nets *)NULL);
michael@0 6835 return (0);
michael@0 6836 }
michael@0 6837
michael@0 6838 #if defined(__Userspace__)
michael@0 6839 /* no sctp_soreceive for __Userspace__ now */
michael@0 6840 #endif
michael@0 6841
michael@0 6842 #if !defined(__Userspace__)
michael@0 6843 int
michael@0 6844 sctp_soreceive( struct socket *so,
michael@0 6845 struct sockaddr **psa,
michael@0 6846 struct uio *uio,
michael@0 6847 struct mbuf **mp0,
michael@0 6848 struct mbuf **controlp,
michael@0 6849 int *flagsp)
michael@0 6850 {
michael@0 6851 int error, fromlen;
michael@0 6852 uint8_t sockbuf[256];
michael@0 6853 struct sockaddr *from;
michael@0 6854 struct sctp_extrcvinfo sinfo;
michael@0 6855 int filling_sinfo = 1;
michael@0 6856 struct sctp_inpcb *inp;
michael@0 6857
michael@0 6858 inp = (struct sctp_inpcb *)so->so_pcb;
michael@0 6859 	/* pick up the assoc we are reading from */
michael@0 6860 if (inp == NULL) {
michael@0 6861 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 6862 return (EINVAL);
michael@0 6863 }
michael@0 6864 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
michael@0 6865 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
michael@0 6866 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
michael@0 6867 (controlp == NULL)) {
michael@0 6868 /* user does not want the sndrcv ctl */
michael@0 6869 filling_sinfo = 0;
michael@0 6870 }
michael@0 6871 if (psa) {
michael@0 6872 from = (struct sockaddr *)sockbuf;
michael@0 6873 fromlen = sizeof(sockbuf);
michael@0 6874 #ifdef HAVE_SA_LEN
michael@0 6875 from->sa_len = 0;
michael@0 6876 #endif
michael@0 6877 } else {
michael@0 6878 from = NULL;
michael@0 6879 fromlen = 0;
michael@0 6880 }
michael@0 6881
michael@0 6882 #if defined(__APPLE__)
michael@0 6883 SCTP_SOCKET_LOCK(so, 1);
michael@0 6884 #endif
michael@0 6885 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
michael@0 6886 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
michael@0 6887 if ((controlp) && (filling_sinfo)) {
michael@0 6888 /* copy back the sinfo in a CMSG format */
michael@0 6889 if (filling_sinfo)
michael@0 6890 *controlp = sctp_build_ctl_nchunk(inp,
michael@0 6891 (struct sctp_sndrcvinfo *)&sinfo);
michael@0 6892 else
michael@0 6893 *controlp = NULL;
michael@0 6894 }
michael@0 6895 if (psa) {
michael@0 6896 /* copy back the address info */
michael@0 6897 #ifdef HAVE_SA_LEN
michael@0 6898 if (from && from->sa_len) {
michael@0 6899 #else
michael@0 6900 if (from) {
michael@0 6901 #endif
michael@0 6902 #if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
michael@0 6903 *psa = sodupsockaddr(from, M_NOWAIT);
michael@0 6904 #else
michael@0 6905 *psa = dup_sockaddr(from, mp0 == 0);
michael@0 6906 #endif
michael@0 6907 } else {
michael@0 6908 *psa = NULL;
michael@0 6909 }
michael@0 6910 }
michael@0 6911 #if defined(__APPLE__)
michael@0 6912 SCTP_SOCKET_UNLOCK(so, 1);
michael@0 6913 #endif
michael@0 6914 return (error);
michael@0 6915 }
michael@0 6916
michael@0 6917
michael@0 6918 #if (defined(__FreeBSD__) && __FreeBSD_version < 603000) || defined(__Windows__)
michael@0 6919 /*
michael@0 6920 * General routine to allocate a hash table with control of the memory flags.
michael@0 6921 * hashinit_flags() is in FreeBSD 7.0 and beyond for sure :-)
michael@0 6922 */
michael@0 6923 void *
michael@0 6924 sctp_hashinit_flags(int elements, struct malloc_type *type,
michael@0 6925 u_long *hashmask, int flags)
michael@0 6926 {
michael@0 6927 long hashsize;
michael@0 6928 LIST_HEAD(generic, generic) *hashtbl;
michael@0 6929 int i;
michael@0 6930
michael@0 6931
michael@0 6932 if (elements <= 0) {
michael@0 6933 #ifdef INVARIANTS
michael@0 6934 panic("hashinit: bad elements");
michael@0 6935 #else
michael@0 6936 SCTP_PRINTF("hashinit: bad elements?");
michael@0 6937 elements = 1;
michael@0 6938 #endif
michael@0 6939 }
michael@0 6940 for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
michael@0 6941 continue;
michael@0 6942 hashsize >>= 1;
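	/*
	 * The loop above rounds the table size down to the largest power of
	 * two that does not exceed 'elements' (e.g. elements = 100 gives
	 * hashsize = 64 and *hashmask = 63), so callers can pick a bucket
	 * with a simple "hash & hashmask".
	 */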
michael@0 6943 if (flags & HASH_WAITOK)
michael@0 6944 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
michael@0 6945 else if (flags & HASH_NOWAIT)
michael@0 6946 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_NOWAIT);
michael@0 6947 else {
michael@0 6948 #ifdef INVARIANTS
michael@0 6949 panic("flag incorrect in hashinit_flags");
michael@0 6950 #else
michael@0 6951 return (NULL);
michael@0 6952 #endif
michael@0 6953 }
michael@0 6954
michael@0 6955 /* no memory? */
michael@0 6956 if (hashtbl == NULL)
michael@0 6957 return (NULL);
michael@0 6958
michael@0 6959 for (i = 0; i < hashsize; i++)
michael@0 6960 LIST_INIT(&hashtbl[i]);
michael@0 6961 *hashmask = hashsize - 1;
michael@0 6962 return (hashtbl);
michael@0 6963 }
michael@0 6964 #endif
michael@0 6965
michael@0 6966 #else /* __Userspace__ ifdef above sctp_soreceive */
michael@0 6967 /*
michael@0 6968 * __Userspace__ Defining sctp_hashinit_flags() and sctp_hashdestroy() for userland.
michael@0 6969 * NOTE: We don't want multiple definitions here. So sctp_hashinit_flags() above for
michael@0 6970 * __FreeBSD__ must be excluded.
michael@0 6971 */
michael@0 6973
michael@0 6974 void *
michael@0 6975 sctp_hashinit_flags(int elements, struct malloc_type *type,
michael@0 6976 u_long *hashmask, int flags)
michael@0 6977 {
michael@0 6978 long hashsize;
michael@0 6979 LIST_HEAD(generic, generic) *hashtbl;
michael@0 6980 int i;
michael@0 6981
michael@0 6982 if (elements <= 0) {
michael@0 6983 SCTP_PRINTF("hashinit: bad elements?");
michael@0 6984 #ifdef INVARIANTS
michael@0 6985 return (NULL);
michael@0 6986 #else
michael@0 6987 elements = 1;
michael@0 6988 #endif
michael@0 6989 }
michael@0 6990 for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
michael@0 6991 continue;
michael@0 6992 hashsize >>= 1;
michael@0 6993 /* Cannot use MALLOC here because it has to be declared or defined
michael@0 6994 using MALLOC_DECLARE or MALLOC_DEFINE first. */
michael@0 6995 if (flags & HASH_WAITOK)
michael@0 6996 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
michael@0 6997 else if (flags & HASH_NOWAIT)
michael@0 6998 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
michael@0 6999 else {
michael@0 7000 #ifdef INVARIANTS
michael@0 7001 SCTP_PRINTF("flag incorrect in hashinit_flags.\n");
michael@0 7002 #endif
michael@0 7003 return (NULL);
michael@0 7004 }
michael@0 7005
michael@0 7006 /* no memory? */
michael@0 7007 if (hashtbl == NULL)
michael@0 7008 return (NULL);
michael@0 7009
michael@0 7010 for (i = 0; i < hashsize; i++)
michael@0 7011 LIST_INIT(&hashtbl[i]);
michael@0 7012 *hashmask = hashsize - 1;
michael@0 7013 return (hashtbl);
michael@0 7014 }
michael@0 7015
michael@0 7016
michael@0 7017 void
michael@0 7018 sctp_hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
michael@0 7019 {
michael@0 7020 LIST_HEAD(generic, generic) *hashtbl, *hp;
michael@0 7021
michael@0 7022 hashtbl = vhashtbl;
michael@0 7023 for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
michael@0 7024 if (!LIST_EMPTY(hp)) {
michael@0 7025 SCTP_PRINTF("hashdestroy: hash not empty.\n");
michael@0 7026 return;
michael@0 7027 }
michael@0 7028 FREE(hashtbl, type);
michael@0 7029 }
michael@0 7030
michael@0 7031
michael@0 7032 void
michael@0 7033 sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
michael@0 7034 {
michael@0 7035 LIST_HEAD(generic, generic) *hashtbl/*, *hp*/;
michael@0 7036 /*
michael@0 7037 LIST_ENTRY(type) *start, *temp;
michael@0 7038 */
michael@0 7039 hashtbl = vhashtbl;
michael@0 7040 /* Apparently temp is not dynamically allocated, so attempts to
michael@0 7041 free it result in an error.
michael@0 7042 for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
michael@0 7043 if (!LIST_EMPTY(hp)) {
michael@0 7044 start = LIST_FIRST(hp);
michael@0 7045 while (start != NULL) {
michael@0 7046 temp = start;
michael@0 7047 start = start->le_next;
michael@0 7048 SCTP_PRINTF("%s: %p \n", __func__, (void *)temp);
michael@0 7049 FREE(temp, type);
michael@0 7050 }
michael@0 7051 }
michael@0 7052 */
michael@0 7053 FREE(hashtbl, type);
michael@0 7054 }
michael@0 7055
michael@0 7056
michael@0 7057 #endif
michael@0 7058
michael@0 7059
michael@0 7060 int
michael@0 7061 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
michael@0 7062 int totaddr, int *error)
michael@0 7063 {
michael@0 7064 int added = 0;
michael@0 7065 int i;
michael@0 7066 struct sctp_inpcb *inp;
michael@0 7067 struct sockaddr *sa;
michael@0 7068 size_t incr = 0;
michael@0 7069 #ifdef INET
michael@0 7070 struct sockaddr_in *sin;
michael@0 7071 #endif
michael@0 7072 #ifdef INET6
michael@0 7073 struct sockaddr_in6 *sin6;
michael@0 7074 #endif
michael@0 7075
michael@0 7076 sa = addr;
michael@0 7077 inp = stcb->sctp_ep;
michael@0 7078 *error = 0;
michael@0 7079 for (i = 0; i < totaddr; i++) {
michael@0 7080 switch (sa->sa_family) {
michael@0 7081 #ifdef INET
michael@0 7082 case AF_INET:
michael@0 7083 incr = sizeof(struct sockaddr_in);
michael@0 7084 sin = (struct sockaddr_in *)sa;
michael@0 7085 if ((sin->sin_addr.s_addr == INADDR_ANY) ||
michael@0 7086 (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
michael@0 7087 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
michael@0 7088 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7089 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_7);
michael@0 7090 *error = EINVAL;
michael@0 7091 goto out_now;
michael@0 7092 }
michael@0 7093 if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
michael@0 7094 /* assoc gone no un-lock */
michael@0 7095 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
michael@0 7096 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_7);
michael@0 7097 *error = ENOBUFS;
michael@0 7098 goto out_now;
michael@0 7099 }
michael@0 7100 added++;
michael@0 7101 break;
michael@0 7102 #endif
michael@0 7103 #ifdef INET6
michael@0 7104 case AF_INET6:
michael@0 7105 incr = sizeof(struct sockaddr_in6);
michael@0 7106 sin6 = (struct sockaddr_in6 *)sa;
michael@0 7107 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
michael@0 7108 IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
michael@0 7109 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7110 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_8);
michael@0 7111 *error = EINVAL;
michael@0 7112 goto out_now;
michael@0 7113 }
michael@0 7114 if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
michael@0 7115 /* assoc gone no un-lock */
michael@0 7116 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
michael@0 7117 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_8);
michael@0 7118 *error = ENOBUFS;
michael@0 7119 goto out_now;
michael@0 7120 }
michael@0 7121 added++;
michael@0 7122 break;
michael@0 7123 #endif
michael@0 7124 #if defined(__Userspace__)
michael@0 7125 case AF_CONN:
michael@0 7126 incr = sizeof(struct sockaddr_in6);
michael@0 7127 if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
michael@0 7128 /* assoc gone no un-lock */
michael@0 7129 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
michael@0 7130 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ+SCTP_LOC_8);
michael@0 7131 *error = ENOBUFS;
michael@0 7132 goto out_now;
michael@0 7133 }
michael@0 7134 added++;
michael@0 7135 break;
michael@0 7136 #endif
michael@0 7137 default:
michael@0 7138 break;
michael@0 7139 }
michael@0 7140 sa = (struct sockaddr *)((caddr_t)sa + incr);
michael@0 7141 }
michael@0 7142 out_now:
michael@0 7143 return (added);
michael@0 7144 }
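/*
 * A minimal sketch, with illustrative names, of the packed address list
 * that sctp_connectx_helper_add() above walks: the sockaddrs sit back to
 * back in one buffer and the loop advances by the size of each one, which
 * is how mixed v4/v6 lists are handled:
 *
 *	char buf[sizeof(struct sockaddr_in) + sizeof(struct sockaddr_in6)];
 *	struct sockaddr_in *s4 = (struct sockaddr_in *)buf;
 *	struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)(buf + sizeof(*s4));
 *	int error, added;
 *
 *	memset(buf, 0, sizeof(buf));
 *	s4->sin_family = AF_INET;
 *	s6->sin6_family = AF_INET6;
 *	added = sctp_connectx_helper_add(stcb, (struct sockaddr *)buf, 2, &error);
 *
 * The port and address fields are elided above; on platforms with sa_len
 * the length fields would be filled in as well.
 */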
michael@0 7145
michael@0 7146 struct sctp_tcb *
michael@0 7147 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
michael@0 7148 int *totaddr, int *num_v4, int *num_v6, int *error,
michael@0 7149 int limit, int *bad_addr)
michael@0 7150 {
michael@0 7151 struct sockaddr *sa;
michael@0 7152 struct sctp_tcb *stcb = NULL;
michael@0 7153 size_t incr, at, i;
michael@0 7154 at = incr = 0;
michael@0 7155 sa = addr;
michael@0 7156
michael@0 7157 *error = *num_v6 = *num_v4 = 0;
michael@0 7158 /* account and validate addresses */
michael@0 7159 for (i = 0; i < (size_t)*totaddr; i++) {
michael@0 7160 switch (sa->sa_family) {
michael@0 7161 #ifdef INET
michael@0 7162 case AF_INET:
michael@0 7163 (*num_v4) += 1;
michael@0 7164 incr = sizeof(struct sockaddr_in);
michael@0 7165 #ifdef HAVE_SA_LEN
michael@0 7166 if (sa->sa_len != incr) {
michael@0 7167 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7168 *error = EINVAL;
michael@0 7169 *bad_addr = 1;
michael@0 7170 return (NULL);
michael@0 7171 }
michael@0 7172 #endif
michael@0 7173 break;
michael@0 7174 #endif
michael@0 7175 #ifdef INET6
michael@0 7176 case AF_INET6:
michael@0 7177 {
michael@0 7178 struct sockaddr_in6 *sin6;
michael@0 7179
michael@0 7180 sin6 = (struct sockaddr_in6 *)sa;
michael@0 7181 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
michael@0 7182 /* Must be non-mapped for connectx */
michael@0 7183 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7184 *error = EINVAL;
michael@0 7185 *bad_addr = 1;
michael@0 7186 return (NULL);
michael@0 7187 }
michael@0 7188 (*num_v6) += 1;
michael@0 7189 incr = sizeof(struct sockaddr_in6);
michael@0 7190 #ifdef HAVE_SA_LEN
michael@0 7191 if (sa->sa_len != incr) {
michael@0 7192 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7193 *error = EINVAL;
michael@0 7194 *bad_addr = 1;
michael@0 7195 return (NULL);
michael@0 7196 }
michael@0 7197 #endif
michael@0 7198 break;
michael@0 7199 }
michael@0 7200 #endif
michael@0 7201 default:
michael@0 7202 *totaddr = i;
michael@0 7203 /* we are done */
michael@0 7204 break;
michael@0 7205 }
michael@0 7206 if (i == (size_t)*totaddr) {
michael@0 7207 break;
michael@0 7208 }
michael@0 7209 SCTP_INP_INCR_REF(inp);
michael@0 7210 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
michael@0 7211 if (stcb != NULL) {
michael@0 7212 /* Already have or am bringing up an association */
michael@0 7213 return (stcb);
michael@0 7214 } else {
michael@0 7215 SCTP_INP_DECR_REF(inp);
michael@0 7216 }
michael@0 7217 if ((at + incr) > (size_t)limit) {
michael@0 7218 *totaddr = i;
michael@0 7219 break;
michael@0 7220 }
michael@0 7221 sa = (struct sockaddr *)((caddr_t)sa + incr);
michael@0 7222 }
michael@0 7223 return ((struct sctp_tcb *)NULL);
michael@0 7224 }
michael@0 7225
michael@0 7226 /*
michael@0 7227 * sctp_bindx(ADD) for one address.
michael@0 7228 * assumes all arguments are valid/checked by caller.
michael@0 7229 */
michael@0 7230 void
michael@0 7231 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
michael@0 7232 struct sockaddr *sa, sctp_assoc_t assoc_id,
michael@0 7233 uint32_t vrf_id, int *error, void *p)
michael@0 7234 {
michael@0 7235 struct sockaddr *addr_touse;
michael@0 7236 #ifdef INET6
michael@0 7237 struct sockaddr_in sin;
michael@0 7238 #endif
michael@0 7239 #ifdef SCTP_MVRF
michael@0 7240 int i, fnd = 0;
michael@0 7241 #endif
michael@0 7242
michael@0 7243 /* see if we're bound all already! */
michael@0 7244 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
michael@0 7245 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7246 *error = EINVAL;
michael@0 7247 return;
michael@0 7248 }
michael@0 7249 #ifdef SCTP_MVRF
michael@0 7250 /* Is the VRF one we have */
michael@0 7251 for (i = 0; i < inp->num_vrfs; i++) {
michael@0 7252 if (vrf_id == inp->m_vrf_ids[i]) {
michael@0 7253 fnd = 1;
michael@0 7254 break;
michael@0 7255 }
michael@0 7256 }
michael@0 7257 if (!fnd) {
michael@0 7258 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7259 *error = EINVAL;
michael@0 7260 return;
michael@0 7261 }
michael@0 7262 #endif
michael@0 7263 addr_touse = sa;
michael@0 7264 #ifdef INET6
michael@0 7265 if (sa->sa_family == AF_INET6) {
michael@0 7266 struct sockaddr_in6 *sin6;
michael@0 7267 #ifdef HAVE_SA_LEN
michael@0 7268 if (sa->sa_len != sizeof(struct sockaddr_in6)) {
michael@0 7269 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7270 *error = EINVAL;
michael@0 7271 return;
michael@0 7272 }
michael@0 7273 #endif
michael@0 7274 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
michael@0 7275 /* can only bind v6 on PF_INET6 sockets */
michael@0 7276 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7277 *error = EINVAL;
michael@0 7278 return;
michael@0 7279 }
michael@0 7280 sin6 = (struct sockaddr_in6 *)addr_touse;
michael@0 7281 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
michael@0 7282 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
michael@0 7283 SCTP_IPV6_V6ONLY(inp)) {
michael@0 7284 /* can't bind v4-mapped addrs on a PF_INET6 socket with IPV6_V6ONLY set */
michael@0 7285 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7286 *error = EINVAL;
michael@0 7287 return;
michael@0 7288 }
michael@0 7289 in6_sin6_2_sin(&sin, sin6);
michael@0 7290 addr_touse = (struct sockaddr *)&sin;
michael@0 7291 }
michael@0 7292 }
michael@0 7293 #endif
michael@0 7294 #ifdef INET
michael@0 7295 if (sa->sa_family == AF_INET) {
michael@0 7296 #ifdef HAVE_SA_LEN
michael@0 7297 if (sa->sa_len != sizeof(struct sockaddr_in)) {
michael@0 7298 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7299 *error = EINVAL;
michael@0 7300 return;
michael@0 7301 }
michael@0 7302 #endif
michael@0 7303 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
michael@0 7304 SCTP_IPV6_V6ONLY(inp)) {
michael@0 7305 /* can't bind v4 addrs on a PF_INET6 socket with IPV6_V6ONLY set */
michael@0 7306 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7307 *error = EINVAL;
michael@0 7308 return;
michael@0 7309 }
michael@0 7310 }
michael@0 7311 #endif
michael@0 7312 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
michael@0 7313 #if !(defined(__Panda__) || defined(__Windows__))
michael@0 7314 if (p == NULL) {
michael@0 7315 /* Can't get proc for Net/Open BSD */
michael@0 7316 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7317 *error = EINVAL;
michael@0 7318 return;
michael@0 7319 }
michael@0 7320 #endif
michael@0 7321 *error = sctp_inpcb_bind(so, addr_touse, NULL, p);
michael@0 7322 return;
michael@0 7323 }
michael@0 7324 /*
michael@0 7325 * No locks are required here since bind and mgmt_ep_sa
michael@0 7326 * do their own locking. If we ever do something for
michael@0 7327 * the FIX: below, we may need to lock in that case.
michael@0 7328 */
michael@0 7329 if (assoc_id == 0) {
michael@0 7330 /* add the address */
michael@0 7331 struct sctp_inpcb *lep;
michael@0 7332 struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
michael@0 7333
michael@0 7334 /* validate the incoming port */
michael@0 7335 if ((lsin->sin_port != 0) &&
michael@0 7336 (lsin->sin_port != inp->sctp_lport)) {
michael@0 7337 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7338 *error = EINVAL;
michael@0 7339 return;
michael@0 7340 } else {
michael@0 7341 /* user specified 0 port, set it to existing port */
michael@0 7342 lsin->sin_port = inp->sctp_lport;
michael@0 7343 }
michael@0 7344
michael@0 7345 lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
michael@0 7346 if (lep != NULL) {
michael@0 7347 /*
michael@0 7348 * We must decrement the refcount
michael@0 7349 * since we have the ep already and
michael@0 7350 * are binding. No remove going on
michael@0 7351 * here.
michael@0 7352 */
michael@0 7353 SCTP_INP_DECR_REF(lep);
michael@0 7354 }
michael@0 7355 if (lep == inp) {
michael@0 7356 /* already bound to it.. ok */
michael@0 7357 return;
michael@0 7358 } else if (lep == NULL) {
michael@0 7359 ((struct sockaddr_in *)addr_touse)->sin_port = 0;
michael@0 7360 *error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
michael@0 7361 SCTP_ADD_IP_ADDRESS,
michael@0 7362 vrf_id, NULL);
michael@0 7363 } else {
michael@0 7364 *error = EADDRINUSE;
michael@0 7365 }
michael@0 7366 if (*error)
michael@0 7367 return;
michael@0 7368 } else {
michael@0 7369 /*
michael@0 7370 * FIX: decide whether we allow assoc based
michael@0 7371 * bindx
michael@0 7372 */
michael@0 7373 }
michael@0 7374 }
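/*
 * A minimal sketch of the socket-API call that ends up in
 * sctp_bindx_add_address() above, assuming the RFC 6458 sctp_bindx()
 * wrapper is available; the descriptor and address are illustrative only:
 *
 *	struct sockaddr_in addr;
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.sin_family = AF_INET;
 *	addr.sin_port = htons(0);
 *	addr.sin_addr.s_addr = inet_addr("192.0.2.1");
 *	if (sctp_bindx(sd, (struct sockaddr *)&addr, 1, SCTP_BINDX_ADD_ADDR) < 0)
 *		perror("sctp_bindx");
 *
 * A zero port asks for the port the endpoint is already bound to, which is
 * exactly the case handled by the port validation above.
 */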
michael@0 7375
michael@0 7376 /*
michael@0 7377 * sctp_bindx(DELETE) for one address.
michael@0 7378 * assumes all arguments are valid/checked by caller.
michael@0 7379 */
michael@0 7380 void
michael@0 7381 sctp_bindx_delete_address(struct sctp_inpcb *inp,
michael@0 7382 struct sockaddr *sa, sctp_assoc_t assoc_id,
michael@0 7383 uint32_t vrf_id, int *error)
michael@0 7384 {
michael@0 7385 struct sockaddr *addr_touse;
michael@0 7386 #ifdef INET6
michael@0 7387 struct sockaddr_in sin;
michael@0 7388 #endif
michael@0 7389 #ifdef SCTP_MVRF
michael@0 7390 int i, fnd = 0;
michael@0 7391 #endif
michael@0 7392
michael@0 7393 /* see if we're bound all already! */
michael@0 7394 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
michael@0 7395 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7396 *error = EINVAL;
michael@0 7397 return;
michael@0 7398 }
michael@0 7399 #ifdef SCTP_MVRF
michael@0 7400 /* Is the VRF one we have */
michael@0 7401 for (i = 0; i < inp->num_vrfs; i++) {
michael@0 7402 if (vrf_id == inp->m_vrf_ids[i]) {
michael@0 7403 fnd = 1;
michael@0 7404 break;
michael@0 7405 }
michael@0 7406 }
michael@0 7407 if (!fnd) {
michael@0 7408 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7409 *error = EINVAL;
michael@0 7410 return;
michael@0 7411 }
michael@0 7412 #endif
michael@0 7413 addr_touse = sa;
michael@0 7414 #ifdef INET6
michael@0 7415 if (sa->sa_family == AF_INET6) {
michael@0 7416 struct sockaddr_in6 *sin6;
michael@0 7417 #ifdef HAVE_SA_LEN
michael@0 7418 if (sa->sa_len != sizeof(struct sockaddr_in6)) {
michael@0 7419 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7420 *error = EINVAL;
michael@0 7421 return;
michael@0 7422 }
michael@0 7423 #endif
michael@0 7424 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
michael@0 7425 /* can only bind v6 on PF_INET6 sockets */
michael@0 7426 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7427 *error = EINVAL;
michael@0 7428 return;
michael@0 7429 }
michael@0 7430 sin6 = (struct sockaddr_in6 *)addr_touse;
michael@0 7431 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
michael@0 7432 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
michael@0 7433 SCTP_IPV6_V6ONLY(inp)) {
michael@0 7434 /* can't bind v4-mapped addrs on a PF_INET6 socket with IPV6_V6ONLY set */
michael@0 7435 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7436 *error = EINVAL;
michael@0 7437 return;
michael@0 7438 }
michael@0 7439 in6_sin6_2_sin(&sin, sin6);
michael@0 7440 addr_touse = (struct sockaddr *)&sin;
michael@0 7441 }
michael@0 7442 }
michael@0 7443 #endif
michael@0 7444 #ifdef INET
michael@0 7445 if (sa->sa_family == AF_INET) {
michael@0 7446 #ifdef HAVE_SA_LEN
michael@0 7447 if (sa->sa_len != sizeof(struct sockaddr_in)) {
michael@0 7448 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7449 *error = EINVAL;
michael@0 7450 return;
michael@0 7451 }
michael@0 7452 #endif
michael@0 7453 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
michael@0 7454 SCTP_IPV6_V6ONLY(inp)) {
michael@0 7455 /* can't bind v4 addrs on a PF_INET6 socket with IPV6_V6ONLY set */
michael@0 7456 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
michael@0 7457 *error = EINVAL;
michael@0 7458 return;
michael@0 7459 }
michael@0 7460 }
michael@0 7461 #endif
michael@0 7462 /*
michael@0 7463 * No lock is required; mgmt_ep_sa does its own locking.
michael@0 7464 * If the FIX: below is ever changed we may need to
michael@0 7465 * lock before calling association level binding.
michael@0 7466 */
michael@0 7467 if (assoc_id == 0) {
michael@0 7468 /* delete the address */
michael@0 7469 *error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
michael@0 7470 SCTP_DEL_IP_ADDRESS,
michael@0 7471 vrf_id, NULL);
michael@0 7472 } else {
michael@0 7473 /*
michael@0 7474 * FIX: decide whether we allow assoc based
michael@0 7475 * bindx
michael@0 7476 */
michael@0 7477 }
michael@0 7478 }
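/*
 * The delete path above is the counterpart of the ADD case; at the
 * socket-API level it is reached with the SCTP_BINDX_REM_ADDR flag, e.g.
 * (descriptor and address illustrative only):
 *
 *	sctp_bindx(sd, (struct sockaddr *)&addr, 1, SCTP_BINDX_REM_ADDR);
 */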
michael@0 7479
michael@0 7480 /*
michael@0 7481 * returns the valid local address count for an assoc, taking into account
michael@0 7482 * all scoping rules
michael@0 7483 */
michael@0 7484 int
michael@0 7485 sctp_local_addr_count(struct sctp_tcb *stcb)
michael@0 7486 {
michael@0 7487 int loopback_scope;
michael@0 7488 #if defined(INET)
michael@0 7489 int ipv4_local_scope, ipv4_addr_legal;
michael@0 7490 #endif
michael@0 7491 #if defined (INET6)
michael@0 7492 int local_scope, site_scope, ipv6_addr_legal;
michael@0 7493 #endif
michael@0 7494 #if defined(__Userspace__)
michael@0 7495 int conn_addr_legal;
michael@0 7496 #endif
michael@0 7497 struct sctp_vrf *vrf;
michael@0 7498 struct sctp_ifn *sctp_ifn;
michael@0 7499 struct sctp_ifa *sctp_ifa;
michael@0 7500 int count = 0;
michael@0 7501
michael@0 7502 /* Turn on all the appropriate scopes */
michael@0 7503 loopback_scope = stcb->asoc.scope.loopback_scope;
michael@0 7504 #if defined(INET)
michael@0 7505 ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
michael@0 7506 ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
michael@0 7507 #endif
michael@0 7508 #if defined(INET6)
michael@0 7509 local_scope = stcb->asoc.scope.local_scope;
michael@0 7510 site_scope = stcb->asoc.scope.site_scope;
michael@0 7511 ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
michael@0 7512 #endif
michael@0 7513 #if defined(__Userspace__)
michael@0 7514 conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
michael@0 7515 #endif
michael@0 7516 SCTP_IPI_ADDR_RLOCK();
michael@0 7517 vrf = sctp_find_vrf(stcb->asoc.vrf_id);
michael@0 7518 if (vrf == NULL) {
michael@0 7519 /* no vrf, no addresses */
michael@0 7520 SCTP_IPI_ADDR_RUNLOCK();
michael@0 7521 return (0);
michael@0 7522 }
michael@0 7523
michael@0 7524 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
michael@0 7525 /*
michael@0 7526 * bound all case: go through all ifns on the vrf
michael@0 7527 */
michael@0 7528 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
michael@0 7529 if ((loopback_scope == 0) &&
michael@0 7530 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
michael@0 7531 continue;
michael@0 7532 }
michael@0 7533 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
michael@0 7534 if (sctp_is_addr_restricted(stcb, sctp_ifa))
michael@0 7535 continue;
michael@0 7536 switch (sctp_ifa->address.sa.sa_family) {
michael@0 7537 #ifdef INET
michael@0 7538 case AF_INET:
michael@0 7539 if (ipv4_addr_legal) {
michael@0 7540 struct sockaddr_in *sin;
michael@0 7541
michael@0 7542 sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
michael@0 7543 if (sin->sin_addr.s_addr == 0) {
michael@0 7544 /* skip unspecified addrs */
michael@0 7545 continue;
michael@0 7546 }
michael@0 7547 if ((ipv4_local_scope == 0) &&
michael@0 7548 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
michael@0 7549 continue;
michael@0 7550 }
michael@0 7551 /* count this one */
michael@0 7552 count++;
michael@0 7553 } else {
michael@0 7554 continue;
michael@0 7555 }
michael@0 7556 break;
michael@0 7557 #endif
michael@0 7558 #ifdef INET6
michael@0 7559 case AF_INET6:
michael@0 7560 if (ipv6_addr_legal) {
michael@0 7561 struct sockaddr_in6 *sin6;
michael@0 7562
michael@0 7563 #if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME)
michael@0 7564 struct sockaddr_in6 lsa6;
michael@0 7565 #endif
michael@0 7566 sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
michael@0 7567 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
michael@0 7568 continue;
michael@0 7569 }
michael@0 7570 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
michael@0 7571 if (local_scope == 0)
michael@0 7572 continue;
michael@0 7573 #if defined(SCTP_EMBEDDED_V6_SCOPE)
michael@0 7574 if (sin6->sin6_scope_id == 0) {
michael@0 7575 #ifdef SCTP_KAME
michael@0 7576 if (sa6_recoverscope(sin6) != 0)
michael@0 7577 /*
michael@0 7578 * bad link
michael@0 7579 * local
michael@0 7580 * address
michael@0 7581 */
michael@0 7582 continue;
michael@0 7583 #else
michael@0 7584 lsa6 = *sin6;
michael@0 7585 if (in6_recoverscope(&lsa6,
michael@0 7586 &lsa6.sin6_addr,
michael@0 7587 NULL))
michael@0 7588 /*
michael@0 7589 * bad link
michael@0 7590 * local
michael@0 7591 * address
michael@0 7592 */
michael@0 7593 continue;
michael@0 7594 sin6 = &lsa6;
michael@0 7595 #endif /* SCTP_KAME */
michael@0 7596 }
michael@0 7597 #endif /* SCTP_EMBEDDED_V6_SCOPE */
michael@0 7598 }
michael@0 7599 if ((site_scope == 0) &&
michael@0 7600 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
michael@0 7601 continue;
michael@0 7602 }
michael@0 7603 /* count this one */
michael@0 7604 count++;
michael@0 7605 }
michael@0 7606 break;
michael@0 7607 #endif
michael@0 7608 #if defined(__Userspace__)
michael@0 7609 case AF_CONN:
michael@0 7610 if (conn_addr_legal) {
michael@0 7611 count++;
michael@0 7612 }
michael@0 7613 break;
michael@0 7614 #endif
michael@0 7615 default:
michael@0 7616 /* TSNH */
michael@0 7617 break;
michael@0 7618 }
michael@0 7619 }
michael@0 7620 }
michael@0 7621 } else {
michael@0 7622 /*
michael@0 7623 * subset bound case
michael@0 7624 */
michael@0 7625 struct sctp_laddr *laddr;
michael@0 7626 LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
michael@0 7627 sctp_nxt_addr) {
michael@0 7628 if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
michael@0 7629 continue;
michael@0 7630 }
michael@0 7631 /* count this one */
michael@0 7632 count++;
michael@0 7633 }
michael@0 7634 }
michael@0 7635 SCTP_IPI_ADDR_RUNLOCK();
michael@0 7636 return (count);
michael@0 7637 }
michael@0 7638
michael@0 7639 #if defined(SCTP_LOCAL_TRACE_BUF)
michael@0 7640
michael@0 7641 void
michael@0 7642 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
michael@0 7643 {
michael@0 7644 uint32_t saveindex, newindex;
michael@0 7645
michael@0 7646 #if defined(__Windows__)
michael@0 7647 if (SCTP_BASE_SYSCTL(sctp_log) == NULL) {
michael@0 7648 return;
michael@0 7649 }
michael@0 7650 do {
michael@0 7651 saveindex = SCTP_BASE_SYSCTL(sctp_log)->index;
michael@0 7652 if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
michael@0 7653 newindex = 1;
michael@0 7654 } else {
michael@0 7655 newindex = saveindex + 1;
michael@0 7656 }
michael@0 7657 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log)->index, saveindex, newindex) == 0);
michael@0 7658 if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
michael@0 7659 saveindex = 0;
michael@0 7660 }
michael@0 7661 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
michael@0 7662 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].subsys = subsys;
michael@0 7663 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[0] = a;
michael@0 7664 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[1] = b;
michael@0 7665 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[2] = c;
michael@0 7666 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[3] = d;
michael@0 7667 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[4] = e;
michael@0 7668 SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[5] = f;
michael@0 7669 #else
michael@0 7670 do {
michael@0 7671 saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
michael@0 7672 if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
michael@0 7673 newindex = 1;
michael@0 7674 } else {
michael@0 7675 newindex = saveindex + 1;
michael@0 7676 }
michael@0 7677 } while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
michael@0 7678 if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
michael@0 7679 saveindex = 0;
michael@0 7680 }
michael@0 7681 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
michael@0 7682 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
michael@0 7683 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
michael@0 7684 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
michael@0 7685 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
michael@0 7686 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
michael@0 7687 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
michael@0 7688 SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
michael@0 7689 #endif
michael@0 7690 }
michael@0 7691
michael@0 7692 #endif
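/*
 * A minimal sketch of recording one entry in the trace ring above; the
 * subsystem id and parameter values are illustrative only.  The
 * atomic_cmpset_int() loop in sctp_log_trace() lets concurrent callers
 * reserve distinct slots without taking a lock:
 *
 *	uint32_t tsn = 0, cwnd = 0;
 *
 *	sctp_log_trace(1, __func__, tsn, cwnd, 0, 0, 0, 0);
 */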
michael@0 7693 #if defined(__FreeBSD__)
michael@0 7694 #if __FreeBSD_version >= 800044
michael@0 7695 static void
michael@0 7696 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
michael@0 7697 {
michael@0 7698 struct ip *iph;
michael@0 7699 #ifdef INET6
michael@0 7700 struct ip6_hdr *ip6;
michael@0 7701 #endif
michael@0 7702 struct mbuf *sp, *last;
michael@0 7703 struct udphdr *uhdr;
michael@0 7704 uint16_t port;
michael@0 7705
michael@0 7706 if ((m->m_flags & M_PKTHDR) == 0) {
michael@0 7707 /* Can't handle one that is not a pkt hdr */
michael@0 7708 goto out;
michael@0 7709 }
michael@0 7710 /* Pull the src port */
michael@0 7711 iph = mtod(m, struct ip *);
michael@0 7712 uhdr = (struct udphdr *)((caddr_t)iph + off);
michael@0 7713 port = uhdr->uh_sport;
michael@0 7714 /* Split out the mbuf chain. Leave the
michael@0 7715 * IP header in m, place the
michael@0 7716 * rest in sp.
michael@0 7717 */
michael@0 7718 sp = m_split(m, off, M_NOWAIT);
michael@0 7719 if (sp == NULL) {
michael@0 7720 /* Gak, drop packet, we can't do a split */
michael@0 7721 goto out;
michael@0 7722 }
michael@0 7723 if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
michael@0 7724 /* Gak, packet can't have an SCTP header in it - too small */
michael@0 7725 m_freem(sp);
michael@0 7726 goto out;
michael@0 7727 }
michael@0 7728 /* Now pull up the UDP header and SCTP header together */
michael@0 7729 sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
michael@0 7730 if (sp == NULL) {
michael@0 7731 /* Gak pullup failed */
michael@0 7732 goto out;
michael@0 7733 }
michael@0 7734 /* Trim out the UDP header */
michael@0 7735 m_adj(sp, sizeof(struct udphdr));
michael@0 7736
michael@0 7737 /* Now reconstruct the mbuf chain */
michael@0 7738 for (last = m; last->m_next; last = last->m_next);
michael@0 7739 last->m_next = sp;
michael@0 7740 m->m_pkthdr.len += sp->m_pkthdr.len;
michael@0 7741 iph = mtod(m, struct ip *);
michael@0 7742 switch (iph->ip_v) {
michael@0 7743 #ifdef INET
michael@0 7744 case IPVERSION:
michael@0 7745 #if __FreeBSD_version >= 1000000
michael@0 7746 iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
michael@0 7747 #else
michael@0 7748 iph->ip_len -= sizeof(struct udphdr);
michael@0 7749 #endif
michael@0 7750 sctp_input_with_port(m, off, port);
michael@0 7751 break;
michael@0 7752 #endif
michael@0 7753 #ifdef INET6
michael@0 7754 case IPV6_VERSION >> 4:
michael@0 7755 ip6 = mtod(m, struct ip6_hdr *);
michael@0 7756 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
michael@0 7757 sctp6_input_with_port(&m, &off, port);
michael@0 7758 break;
michael@0 7759 #endif
michael@0 7760 default:
michael@0 7761 goto out;
michael@0 7762 break;
michael@0 7763 }
michael@0 7764 return;
michael@0 7765 out:
michael@0 7766 m_freem(m);
michael@0 7767 }
michael@0 7768 #endif
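/*
 * For reference, the encapsulation handled by the function above; "off" is
 * the offset of the UDP header within the mbuf chain:
 *
 *	+---------------+---------+----------------------------+
 *	| IPv4/IPv6 hdr | UDP hdr | SCTP common hdr and chunks |
 *	+---------------+---------+----------------------------+
 *	0               off       off + sizeof(struct udphdr)
 *
 * m_split()/m_adj() strip the UDP header, the IP payload length is reduced
 * by sizeof(struct udphdr), and the result is handed to the normal SCTP
 * input path with the peer's UDP source port passed along.
 */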
michael@0 7769
michael@0 7770 void
michael@0 7771 sctp_over_udp_stop(void)
michael@0 7772 {
michael@0 7773 /*
michael@0 7774 * This function assumes the sysctl caller holds sctp_sysctl_info_lock() for writing!
michael@0 7775 */
michael@0 7776 #ifdef INET
michael@0 7777 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
michael@0 7778 soclose(SCTP_BASE_INFO(udp4_tun_socket));
michael@0 7779 SCTP_BASE_INFO(udp4_tun_socket) = NULL;
michael@0 7780 }
michael@0 7781 #endif
michael@0 7782 #ifdef INET6
michael@0 7783 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
michael@0 7784 soclose(SCTP_BASE_INFO(udp6_tun_socket));
michael@0 7785 SCTP_BASE_INFO(udp6_tun_socket) = NULL;
michael@0 7786 }
michael@0 7787 #endif
michael@0 7788 }
michael@0 7789
michael@0 7790 int
michael@0 7791 sctp_over_udp_start(void)
michael@0 7792 {
michael@0 7793 #if __FreeBSD_version >= 800044
michael@0 7794 uint16_t port;
michael@0 7795 int ret;
michael@0 7796 #ifdef INET
michael@0 7797 struct sockaddr_in sin;
michael@0 7798 #endif
michael@0 7799 #ifdef INET6
michael@0 7800 struct sockaddr_in6 sin6;
michael@0 7801 #endif
michael@0 7802 /*
michael@0 7803 * This function assumes the sysctl caller holds sctp_sysctl_info_lock() for writing!
michael@0 7804 */
michael@0 7805 port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
michael@0 7806 if (ntohs(port) == 0) {
michael@0 7807 /* Must have a port set */
michael@0 7808 return (EINVAL);
michael@0 7809 }
michael@0 7810 #ifdef INET
michael@0 7811 if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
michael@0 7812 /* Already running -- must stop first */
michael@0 7813 return (EALREADY);
michael@0 7814 }
michael@0 7815 #endif
michael@0 7816 #ifdef INET6
michael@0 7817 if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
michael@0 7818 /* Already running -- must stop first */
michael@0 7819 return (EALREADY);
michael@0 7820 }
michael@0 7821 #endif
michael@0 7822 #ifdef INET
michael@0 7823 if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
michael@0 7824 SOCK_DGRAM, IPPROTO_UDP,
michael@0 7825 curthread->td_ucred, curthread))) {
michael@0 7826 sctp_over_udp_stop();
michael@0 7827 return (ret);
michael@0 7828 }
michael@0 7829 /* Call the special UDP hook. */
michael@0 7830 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
michael@0 7831 sctp_recv_udp_tunneled_packet))) {
michael@0 7832 sctp_over_udp_stop();
michael@0 7833 return (ret);
michael@0 7834 }
michael@0 7835 /* Ok, we have a socket, bind it to the port. */
michael@0 7836 memset(&sin, 0, sizeof(struct sockaddr_in));
michael@0 7837 sin.sin_len = sizeof(struct sockaddr_in);
michael@0 7838 sin.sin_family = AF_INET;
michael@0 7839 sin.sin_port = htons(port);
michael@0 7840 if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
michael@0 7841 (struct sockaddr *)&sin, curthread))) {
michael@0 7842 sctp_over_udp_stop();
michael@0 7843 return (ret);
michael@0 7844 }
michael@0 7845 #endif
michael@0 7846 #ifdef INET6
michael@0 7847 if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
michael@0 7848 SOCK_DGRAM, IPPROTO_UDP,
michael@0 7849 curthread->td_ucred, curthread))) {
michael@0 7850 sctp_over_udp_stop();
michael@0 7851 return (ret);
michael@0 7852 }
michael@0 7853 /* Call the special UDP hook. */
michael@0 7854 if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
michael@0 7855 sctp_recv_udp_tunneled_packet))) {
michael@0 7856 sctp_over_udp_stop();
michael@0 7857 return (ret);
michael@0 7858 }
michael@0 7859 /* Ok, we have a socket, bind it to the port. */
michael@0 7860 memset(&sin6, 0, sizeof(struct sockaddr_in6));
michael@0 7861 sin6.sin6_len = sizeof(struct sockaddr_in6);
michael@0 7862 sin6.sin6_family = AF_INET6;
michael@0 7863 sin6.sin6_port = htons(port);
michael@0 7864 if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
michael@0 7865 (struct sockaddr *)&sin6, curthread))) {
michael@0 7866 sctp_over_udp_stop();
michael@0 7867 return (ret);
michael@0 7868 }
michael@0 7869 #endif
michael@0 7870 return (0);
michael@0 7871 #else
michael@0 7872 return (ENOTSUP);
michael@0 7873 #endif
michael@0 7874 }
michael@0 7875 #endif
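/*
 * On FreeBSD the tunneling sockets above are normally (re)started through
 * the sysctl backing SCTP_BASE_SYSCTL(sctp_udp_tunneling_port); the knob
 * name and the IANA SCTP-over-UDP port below are given only as an
 * illustration, e.g. from a userland tool:
 *
 *	uint32_t port = 9899;
 *
 *	(void)sysctlbyname("net.inet.sctp.udp_tunneling_port", NULL, NULL,
 *	                   &port, sizeof(port));
 *
 * The handler for that sysctl is expected to hold the sysctl info lock for
 * writing and to call sctp_over_udp_stop()/sctp_over_udp_start().
 */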
