Thu, 15 Jan 2015 15:59:08 +0100
Implement a real Private Browsing Mode condition by changing the API/ABI;
This solves Tor bug #9701, complying with disk avoidance documented in
https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
michael@0 | 1 | /*- |
michael@0 | 2 | * Copyright (c) 1982, 1986, 1990, 1993 |
michael@0 | 3 | * The Regents of the University of California. All rights reserved. |
michael@0 | 4 | * |
michael@0 | 5 | * Redistribution and use in source and binary forms, with or without |
michael@0 | 6 | * modification, are permitted provided that the following conditions |
michael@0 | 7 | * are met: |
michael@0 | 8 | * 1. Redistributions of source code must retain the above copyright |
michael@0 | 9 | * notice, this list of conditions and the following disclaimer. |
michael@0 | 10 | * 2. Redistributions in binary form must reproduce the above copyright |
michael@0 | 11 | * notice, this list of conditions and the following disclaimer in the |
michael@0 | 12 | * documentation and/or other materials provided with the distribution. |
michael@0 | 13 | * 4. Neither the name of the University nor the names of its contributors |
michael@0 | 14 | * may be used to endorse or promote products derived from this software |
michael@0 | 15 | * without specific prior written permission. |
michael@0 | 16 | * |
michael@0 | 17 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
michael@0 | 18 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
michael@0 | 19 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
michael@0 | 20 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
michael@0 | 21 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
michael@0 | 22 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
michael@0 | 23 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
michael@0 | 24 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
michael@0 | 25 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
michael@0 | 26 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
michael@0 | 27 | * SUCH DAMAGE. |
michael@0 | 28 | * |
michael@0 | 29 | */ |
michael@0 | 30 | |
michael@0 | 31 | /* __Userspace__ version of <sys/socketvar.h> goes here.*/ |
michael@0 | 32 | |
michael@0 | 33 | #ifndef _USER_SOCKETVAR_H_ |
michael@0 | 34 | #define _USER_SOCKETVAR_H_ |
michael@0 | 35 | |
michael@0 | 36 | #if defined(__Userspace_os_Darwin) |
michael@0 | 37 | #include <sys/types.h> |
michael@0 | 38 | #include <unistd.h> |
michael@0 | 39 | #endif |
michael@0 | 40 | |
michael@0 | 41 | /* #include <sys/selinfo.h> */ /*__Userspace__ alternative?*/ /* for struct selinfo */ |
michael@0 | 42 | /* #include <sys/_lock.h> was 0 byte file */ |
michael@0 | 43 | /* #include <sys/_mutex.h> was 0 byte file */ |
michael@0 | 44 | /* #include <sys/_sx.h> */ /*__Userspace__ alternative?*/ |
michael@0 | 45 | #if !defined(__Userspace_os_DragonFly) && !defined(__Userspace_os_FreeBSD) && !defined(__Userspace_os_NetBSD) && !defined(__Userspace_os_Windows) |
michael@0 | 46 | #include <sys/uio.h> |
michael@0 | 47 | #endif |
michael@0 | 48 | #define SOCK_MAXADDRLEN 255 |
michael@0 | 49 | #if !defined(MSG_NOTIFICATION) |
michael@0 | 50 | #define MSG_NOTIFICATION 0x2000 /* SCTP notification */ |
michael@0 | 51 | #endif |
michael@0 | 52 | #define SCTP_SO_LINGER 0x0001 |
michael@0 | 53 | #define SCTP_SO_ACCEPTCONN 0x0002 |
michael@0 | 54 | #define SS_CANTRCVMORE 0x020 |
michael@0 | 55 | #define SS_CANTSENDMORE 0x010 |
michael@0 | 56 | |
michael@0 | 57 | #if defined(__Userspace_os_Darwin) || defined(__Userspace_os_DragonFly) || defined(__Userspace_os_FreeBSD) || defined(__Userspace_os_OpenBSD) || defined (__Userspace_os_Windows) |
michael@0 | 58 | #define UIO_MAXIOV 1024 |
michael@0 | 59 | #define ERESTART (-1) |
michael@0 | 60 | #endif |
michael@0 | 61 | |
#if !defined(__Userspace_os_Darwin) && !defined(__Userspace_os_NetBSD) && !defined(__Userspace_os_OpenBSD)
/* Direction of a uio transfer; supplied here only on platforms whose
 * system headers do not already define it. */
enum uio_rw { UIO_READ, UIO_WRITE };
#endif
michael@0 | 65 | |
#if !defined(__Userspace_os_NetBSD) && !defined(__Userspace_os_OpenBSD)
/* Segment flag values: which address space a uio transfer touches.
 * Supplied here only where the system headers lack it. */
enum uio_seg {
	UIO_USERSPACE,		/* from user data space */
	UIO_SYSSPACE		/* from system space */
};
#endif
michael@0 | 73 | |
/* struct proc is only a placeholder in the userspace build; kernel-derived
 * code that carries a struct proc * compiles against this stub. */
struct proc {
	int	stub;	/* struct proc is a dummy for __Userspace__ */
};

/* Malloc type tags for accept filters, protocol control blocks and
 * socket name (sockaddr) allocations. */
MALLOC_DECLARE(M_ACCF);
MALLOC_DECLARE(M_PCB);
MALLOC_DECLARE(M_SONAME);
michael@0 | 81 | |
/* __Userspace__ Are these all the fields we need?
 * Removing struct thread *uio_td; owner field
 */
/* Scatter/gather I/O request descriptor, modeled on the kernel's
 * struct uio but without the owning-thread back pointer. */
struct uio {
	struct	iovec *uio_iov;		/* scatter/gather list */
	int	uio_iovcnt;		/* length of scatter/gather list */
	off_t	uio_offset;		/* offset in target object */
	int	uio_resid;		/* remaining bytes to process */
	enum	uio_seg uio_segflg;	/* address space */
	enum	uio_rw uio_rw;		/* operation */
};
michael@0 | 93 | |
michael@0 | 94 | |
michael@0 | 95 | /* __Userspace__ */ |
michael@0 | 96 | |
michael@0 | 97 | /* |
michael@0 | 98 | * Kernel structure per socket. |
michael@0 | 99 | * Contains send and receive buffer queues, |
michael@0 | 100 | * handle on protocol and pointer to protocol |
michael@0 | 101 | * private data and error information. |
michael@0 | 102 | */ |
#if defined (__Userspace_os_Windows)
/* Windows lacks these POSIX definitions; provide minimal substitutes. */
#define AF_ROUTE  17
typedef __int32 pid_t;
typedef unsigned __int32 uid_t;
/* Selects how a Windows condition-variable event is signaled:
 * wake one waiter, wake all waiters, or the event-array bound. */
enum sigType {
	SIGNAL = 0,
	BROADCAST = 1,
	MAX_EVENTS = 2
};
#endif
michael@0 | 113 | |
/*-
 * Locking key to struct socket:
 * (a) constant after allocation, no locking required.
 * (b) locked by SOCK_LOCK(so).
 * (c) locked by SOCKBUF_LOCK(&so->so_rcv).
 * (d) locked by SOCKBUF_LOCK(&so->so_snd).
 * (e) locked by ACCEPT_LOCK().
 * (f) not locked since integer reads/writes are atomic.
 * (g) used only as a sleep/wakeup address, no value.
 * (h) locked by global mutex so_global_mtx.
 */
/*
 * Kernel structure per socket: send/receive buffer queues, a handle on
 * the protocol control block, connection-queue linkage and error state.
 * Field order is ABI — do not reorder.
 */
struct socket {
	int	so_count;		/* (b) reference count */
	short	so_type;		/* (a) generic type, see socket.h */
	short	so_options;		/* from socket call, see socket.h */
	short	so_linger;		/* time to linger while closing */
	short	so_state;		/* (b) internal state flags SS_* */
	int	so_qstate;		/* (e) internal state flags SQ_* */
	void	*so_pcb;		/* protocol control block */
	int	so_dom;			/* presumably the address family/domain — inferred from name, confirm against users */
	/*
	 * Variables for connection queuing.
	 * Socket where accepts occur is so_head in all subsidiary sockets.
	 * If so_head is 0, socket is not related to an accept.
	 * For head socket so_incomp queues partially completed connections,
	 * while so_comp is a queue of connections ready to be accepted.
	 * If a connection is aborted and it has so_head set, then
	 * it has to be pulled out of either so_incomp or so_comp.
	 * We allow connections to queue up based on current queue lengths
	 * and limit on number of queued connections for this socket.
	 */
	struct	socket *so_head;	/* (e) back pointer to listen socket */
	TAILQ_HEAD(, socket) so_incomp;	/* (e) queue of partial unaccepted connections */
	TAILQ_HEAD(, socket) so_comp;	/* (e) queue of complete unaccepted connections */
	TAILQ_ENTRY(socket) so_list;	/* (e) list of unaccepted connections */
	u_short	so_qlen;		/* (e) number of unaccepted connections */
	u_short	so_incqlen;		/* (e) number of unaccepted incomplete
					   connections */
	u_short	so_qlimit;		/* (e) max number queued connections */
	short	so_timeo;		/* (g) connection timeout */
	userland_cond_t timeo_cond;	/* timeo_cond condition variable being used in wakeup */

	u_short	so_error;		/* (f) error affecting connection */
	struct	sigio *so_sigio;	/* [sg] information for async I/O or
					   out of band data (SIGURG) */
	u_long	so_oobmark;		/* (c) chars to oob mark */
	TAILQ_HEAD(, aiocblist) so_aiojobq; /* AIO ops waiting on socket */
	/*
	 * Variables for socket buffering.
	 */
	struct sockbuf {
		/* __Userspace__ Many of these fields may
		 * not be required for the sctp stack.
		 * Commenting out the following.
		 * Including pthread mutex and condition variable to be
		 * used by sbwait, sorwakeup and sowwakeup.
		 */
		/* struct	selinfo sb_sel;*/ /* process selecting read/write */
		/* struct	mtx sb_mtx;*/	/* sockbuf lock */
		/* struct	sx sb_sx;*/	/* prevent I/O interlacing */
		userland_cond_t sb_cond;	/* sockbuf condition variable */
		userland_mutex_t sb_mtx;	/* sockbuf lock associated with sb_cond */
		short	sb_state;	/* (c/d) socket state on sockbuf */
#define	sb_startzero	sb_mb		/* first field zeroed on buffer release */
		struct	mbuf *sb_mb;	/* (c/d) the mbuf chain */
		struct	mbuf *sb_mbtail; /* (c/d) the last mbuf in the chain */
		struct	mbuf *sb_lastrecord;	/* (c/d) first mbuf of last
						 * record in socket buffer */
		struct	mbuf *sb_sndptr; /* (c/d) pointer into mbuf chain */
		u_int	sb_sndptroff;	/* (c/d) byte offset of ptr into chain */
		u_int	sb_cc;		/* (c/d) actual chars in buffer */
		u_int	sb_hiwat;	/* (c/d) max actual char count */
		u_int	sb_mbcnt;	/* (c/d) chars of mbufs used */
		u_int	sb_mbmax;	/* (c/d) max chars of mbufs to use */
		u_int	sb_ctl;		/* (c/d) non-data chars in buffer */
		int	sb_lowat;	/* (c/d) low water mark */
		int	sb_timeo;	/* (c/d) timeout for read/write */
		short	sb_flags;	/* (c/d) flags, see below */
	} so_rcv, so_snd;
/*
 * Default sizing constants for struct sockbuf (not sb_flags values).
 */
#define	SB_MAX		(256*1024)	/* default for max chars in sockbuf */
#define	SB_RAW		(64*1024*2)	/* Aligning so->so_rcv.sb_hiwat with the receive buffer size of raw socket */
/*
 * Constants for sb_flags field of struct sockbuf.
 */
#define	SB_WAIT		0x04	/* someone is waiting for data/space */
#define	SB_SEL		0x08	/* someone is selecting */
#define	SB_ASYNC	0x10	/* ASYNC I/O, need signals */
#define	SB_UPCALL	0x20	/* someone wants an upcall */
#define	SB_NOINTR	0x40	/* operations not interruptible */
#define	SB_AIO		0x80	/* AIO operations queued */
#define	SB_KNOTE	0x100	/* kernel note attached */
#define	SB_AUTOSIZE	0x800	/* automatically size socket buffer */

	void	(*so_upcall)(struct socket *, void *, int);
	void	*so_upcallarg;
	struct	ucred *so_cred;		/* (a) user credentials */
	struct	label *so_label;	/* (b) MAC label for socket */
	struct	label *so_peerlabel;	/* (b) cached MAC label for peer */
	/* NB: generation count must not be first. */
	uint32_t so_gencnt;		/* (h) generation count */
	void	*so_emuldata;		/* (b) private data for emulators */
	struct so_accf {
		struct	accept_filter *so_accept_filter;
		void	*so_accept_filter_arg;	/* saved filter args */
		char	*so_accept_filter_str;	/* saved user args */
	} *so_accf;
};
michael@0 | 224 | |
/*
 * Once the mbuf chain of a socket buffer has been emptied (sb_mb is
 * NULL), reset the tail and last-record pointers so all three chain
 * pointers stay consistent.
 */
#define SB_EMPTY_FIXUP(sb) do {						\
	if ((sb)->sb_mb == NULL) {					\
		(sb)->sb_mbtail = NULL;					\
		(sb)->sb_lastrecord = NULL;				\
	}								\
} while (/*CONSTCOND*/0)
michael@0 | 231 | |
/*
 * Global accept mutex to serialize access to accept queues and
 * fields associated with multiple sockets.  This allows us to
 * avoid defining a lock order between listen and accept sockets
 * until such time as it proves to be a good idea.
 */
#if defined(__Userspace_os_Windows)
/* Windows build: critical section; the *_ASSERT macros compile to nothing. */
extern userland_mutex_t accept_mtx;
extern userland_cond_t accept_cond;
#define ACCEPT_LOCK_ASSERT()
#define ACCEPT_LOCK() do { \
	EnterCriticalSection(&accept_mtx); \
} while (0)
#define ACCEPT_UNLOCK() do { \
	LeaveCriticalSection(&accept_mtx); \
} while (0)
#define ACCEPT_UNLOCK_ASSERT()
#else
/* pthread build: the assertions probe lock state with trylock.
 * Note ACCEPT_UNLOCK_ASSERT() acquires the mutex via trylock and then
 * releases it, so it may only run where the mutex is expected unlocked. */
extern userland_mutex_t accept_mtx;
extern userland_cond_t accept_cond;
#define ACCEPT_LOCK_ASSERT() KASSERT(pthread_mutex_trylock(&accept_mtx) == EBUSY, ("%s: accept_mtx not locked", __func__))
#define ACCEPT_LOCK() (void)pthread_mutex_lock(&accept_mtx)
#define ACCEPT_UNLOCK() (void)pthread_mutex_unlock(&accept_mtx)
#define ACCEPT_UNLOCK_ASSERT() do{ \
	KASSERT(pthread_mutex_trylock(&accept_mtx) == 0, ("%s: accept_mtx locked", __func__)); \
	(void)pthread_mutex_unlock(&accept_mtx); \
} while (0)
#endif
michael@0 | 260 | |
/*
 * Per-socket buffer mutex used to protect most fields in the socket
 * buffer.  The macros below hide the platform difference between
 * Windows critical sections / condition variables and their pthread
 * equivalents; both sides present identical names.
 */
#define SOCKBUF_MTX(_sb) (&(_sb)->sb_mtx)
#if defined (__Userspace_os_Windows)
#define SOCKBUF_LOCK_INIT(_sb, _name) \
	InitializeCriticalSection(SOCKBUF_MTX(_sb))
#define SOCKBUF_LOCK_DESTROY(_sb) DeleteCriticalSection(SOCKBUF_MTX(_sb))
#define SOCKBUF_COND_INIT(_sb) InitializeConditionVariable((&(_sb)->sb_cond))
#define SOCKBUF_COND_DESTROY(_sb) DeleteConditionVariable((&(_sb)->sb_cond))
#define SOCK_COND_INIT(_so) InitializeConditionVariable((&(_so)->timeo_cond))
#define SOCK_COND_DESTROY(_so) DeleteConditionVariable((&(_so)->timeo_cond))
#define SOCK_COND(_so) (&(_so)->timeo_cond)
#else
/* _name is accepted for API symmetry with the kernel but unused here. */
#define SOCKBUF_LOCK_INIT(_sb, _name) \
	pthread_mutex_init(SOCKBUF_MTX(_sb), NULL)
#define SOCKBUF_LOCK_DESTROY(_sb) pthread_mutex_destroy(SOCKBUF_MTX(_sb))
#define SOCKBUF_COND_INIT(_sb) pthread_cond_init((&(_sb)->sb_cond), NULL)
#define SOCKBUF_COND_DESTROY(_sb) pthread_cond_destroy((&(_sb)->sb_cond))
#define SOCK_COND_INIT(_so) pthread_cond_init((&(_so)->timeo_cond), NULL)
#define SOCK_COND_DESTROY(_so) pthread_cond_destroy((&(_so)->timeo_cond))
#define SOCK_COND(_so) (&(_so)->timeo_cond)
#endif
michael@0 | 285 | /*__Userspace__ SOCKBUF_LOCK(_sb) is now defined in netinet/sctp_process_lock.h */ |
michael@0 | 286 | |
michael@0 | 287 | /* #define SOCKBUF_OWNED(_sb) mtx_owned(SOCKBUF_MTX(_sb)) unused */ |
michael@0 | 288 | /*__Userspace__ SOCKBUF_UNLOCK(_sb) is now defined in netinet/sctp_process_lock.h */ |
michael@0 | 289 | |
michael@0 | 290 | /*__Userspace__ SOCKBUF_LOCK_ASSERT(_sb) is now defined in netinet/sctp_process_lock.h */ |
michael@0 | 291 | |
michael@0 | 292 | /* #define SOCKBUF_UNLOCK_ASSERT(_sb) mtx_assert(SOCKBUF_MTX(_sb), MA_NOTOWNED) unused */ |
michael@0 | 293 | |
/*
 * Per-socket mutex: we reuse the receive socket buffer mutex for space
 * efficiency.  This decision should probably be revisited as we optimize
 * locking for the socket code.
 */
#define SOCK_MTX(_so) SOCKBUF_MTX(&(_so)->so_rcv)
/*__Userspace__ SOCK_LOCK(_so) is now defined in netinet/sctp_process_lock.h */

/* #define SOCK_OWNED(_so) SOCKBUF_OWNED(&(_so)->so_rcv) unused */
/*__Userspace__ SOCK_UNLOCK(_so) is now defined in netinet/sctp_process_lock.h */

/* Asserts the per-socket lock (the shared rcv-buffer lock) is held. */
#define SOCK_LOCK_ASSERT(_so) SOCKBUF_LOCK_ASSERT(&(_so)->so_rcv)
michael@0 | 306 | |
/*
 * Socket state bits.
 *
 * Historically, these bits were all kept in the so_state field.  For
 * locking reasons, they are now in multiple fields, as they are
 * locked differently.  so_state maintains basic socket state protected
 * by the socket lock.  so_qstate holds information about the socket
 * accept queues.  Each socket buffer also has a state field holding
 * information relevant to that socket buffer (can't send, rcv).  Many
 * fields will be read without locks to improve performance and avoid
 * lock order issues.  However, this approach must be used with caution.
 */
#define	SS_NOFDREF		0x0001	/* no file table ref any more */
#define	SS_ISCONNECTED		0x0002	/* socket connected to a peer */
#define	SS_ISCONNECTING		0x0004	/* in process of connecting to peer */
#define	SS_ISDISCONNECTING	0x0008	/* in process of disconnecting */
#define	SS_NBIO			0x0100	/* non-blocking ops */
#define	SS_ASYNC		0x0200	/* async i/o notify */
#define	SS_ISCONFIRMING		0x0400	/* deciding to accept connection req */
#define	SS_ISDISCONNECTED	0x2000	/* socket disconnected from peer */
/*
 * Protocols can mark a socket as SS_PROTOREF to indicate that, following
 * pru_detach, they still want the socket to persist, and will free it
 * themselves when they are done.  Protocols should only ever call sofree()
 * following setting this flag in pru_detach(), and never otherwise, as
 * sofree() bypasses socket reference counting.
 */
#define	SS_PROTOREF		0x4000	/* strong protocol reference */

/*
 * Socket state bits now stored in the socket buffer state field
 * (struct sockbuf sb_state).
 */
#define	SBS_CANTSENDMORE	0x0010	/* can't send more data to peer */
#define	SBS_CANTRCVMORE		0x0020	/* can't receive more data from peer */
#define	SBS_RCVATMARK		0x0040	/* at mark on input */

/*
 * Socket state bits stored in so_qstate.
 */
#define	SQ_INCOMP		0x0800	/* unaccepted, incomplete connection */
#define	SQ_COMP			0x1000	/* unaccepted, complete connection */
michael@0 | 348 | |
/*
 * Externalized form of struct socket used by the sysctl(3) interface.
 * Mirrors a subset of struct socket fields; layout is ABI for consumers
 * of the sysctl export — do not reorder.
 */
struct xsocket {
	size_t	xso_len;	/* length of this structure */
	struct	socket *xso_so;	/* makes a convenient handle sometimes */
	short	so_type;
	short	so_options;
	short	so_linger;
	short	so_state;
	caddr_t	so_pcb;		/* another convenient handle */
	int	xso_protocol;
	int	xso_family;
	u_short	so_qlen;
	u_short	so_incqlen;
	u_short	so_qlimit;
	short	so_timeo;
	u_short	so_error;
	pid_t	so_pgid;
	u_long	so_oobmark;
	/* Externalized snapshot of the per-direction struct sockbuf. */
	struct xsockbuf {
		u_int	sb_cc;
		u_int	sb_hiwat;
		u_int	sb_mbcnt;
		u_int	sb_mbmax;
		int	sb_lowat;
		int	sb_timeo;
		short	sb_flags;
	} so_rcv, so_snd;
	uid_t	so_uid;		/* XXX */
};
michael@0 | 380 | |
michael@0 | 381 | #if defined(_KERNEL) |
michael@0 | 382 | |
michael@0 | 383 | |
/*
 * Macros for sockets and socket buffering.
 */

/*
 * Do we need to notify the other side when I/O is possible?
 * True if anyone is waiting, selecting, or has async/upcall/AIO/knote
 * interest registered on the buffer.
 */
#define	sb_notify(sb)	(((sb)->sb_flags & (SB_WAIT | SB_SEL | SB_ASYNC | \
    SB_UPCALL | SB_AIO | SB_KNOTE)) != 0)

/*
 * How much space is there in a socket buffer (so->so_snd or so->so_rcv)?
 * This is problematical if the fields are unsigned, as the space might
 * still be negative (cc > hiwat or mbcnt > mbmax).  Should detect
 * overflow and return 0.  Should use "lmin" but it doesn't exist now.
 */
#define	sbspace(sb) \
    ((long) imin((int)((sb)->sb_hiwat - (sb)->sb_cc), \
	 (int)((sb)->sb_mbmax - (sb)->sb_mbcnt)))

/* do we have to send all at once on a socket? */
#define	sosendallatonce(so) \
    ((so)->so_proto->pr_flags & PR_ATOMIC)

/* can we read something from so? (data above low-water mark, EOF from
 * peer, a completed connection to accept, or a pending error) */
#define	soreadable(so) \
    ((so)->so_rcv.sb_cc >= (so)->so_rcv.sb_lowat || \
	((so)->so_rcv.sb_state & SBS_CANTRCVMORE) || \
	!TAILQ_EMPTY(&(so)->so_comp) || (so)->so_error)

/* can we write something to so? (space above low-water mark while
 * connected or connectionless, send side shut down, or pending error) */
#define	sowriteable(so) \
    ((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \
	(((so)->so_state&SS_ISCONNECTED) || \
	  ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) || \
     ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \
     (so)->so_error)
michael@0 | 421 | |
/*
 * Adjust counters in sb reflecting allocation of mbuf m: add its data
 * length to sb_cc (and sb_ctl for non-data mbufs) and charge MSIZE plus
 * any external-storage size against sb_mbcnt.
 *
 * Wrapped in do { } while (0) so the macro behaves as a single statement
 * (safe in unbraced if/else); the previous bare-brace form broke
 * "if (x) sballoc(sb, m); else ..." (CERT PRE10-C).
 */
#define	sballoc(sb, m) do {						\
	(sb)->sb_cc += (m)->m_len;					\
	if ((m)->m_type != MT_DATA && (m)->m_type != MT_OOBDATA)	\
		(sb)->sb_ctl += (m)->m_len;				\
	(sb)->sb_mbcnt += MSIZE;					\
	if ((m)->m_flags & M_EXT)					\
		(sb)->sb_mbcnt += (m)->m_ext.ext_size;			\
} while (0)
michael@0 | 431 | |
/*
 * Adjust counters in sb reflecting freeing of mbuf m: the inverse of
 * sballoc().  Also invalidates the cached send pointer if it referenced
 * m, and otherwise pulls m's length out of the cached send offset.
 *
 * Wrapped in do { } while (0) so the macro behaves as a single statement
 * (safe in unbraced if/else); the previous bare-brace form broke
 * "if (x) sbfree(sb, m); else ..." (CERT PRE10-C).
 */
#define	sbfree(sb, m) do {						\
	(sb)->sb_cc -= (m)->m_len;					\
	if ((m)->m_type != MT_DATA && (m)->m_type != MT_OOBDATA)	\
		(sb)->sb_ctl -= (m)->m_len;				\
	(sb)->sb_mbcnt -= MSIZE;					\
	if ((m)->m_flags & M_EXT)					\
		(sb)->sb_mbcnt -= (m)->m_ext.ext_size;			\
	if ((sb)->sb_sndptr == (m)) {					\
		(sb)->sb_sndptr = NULL;					\
		(sb)->sb_sndptroff = 0;					\
	}								\
	if ((sb)->sb_sndptroff != 0)					\
		(sb)->sb_sndptroff -= (m)->m_len;			\
} while (0)
michael@0 | 447 | |
/*
 * soref()/sorele() ref-count the socket structure.  Note that you must
 * still explicitly close the socket, but the last ref count will free
 * the structure.
 */
#define	soref(so) do {							\
	SOCK_LOCK_ASSERT(so);						\
	++(so)->so_count;						\
} while (0)

/* Drop a reference; frees via sofree() when the count hits zero.
 * sofree() is expected to release both locks itself, so they are only
 * explicitly dropped on the non-freeing path. */
#define	sorele(so) do {							\
	ACCEPT_LOCK_ASSERT();						\
	SOCK_LOCK_ASSERT(so);						\
	KASSERT((so)->so_count > 0, ("sorele"));			\
	if (--(so)->so_count == 0)					\
		sofree(so);						\
	else {								\
		SOCK_UNLOCK(so);					\
		ACCEPT_UNLOCK();					\
	}								\
} while (0)

/* Like sorele() but without dropping a reference: frees only if the
 * count is already zero, otherwise just releases both locks. */
#define	sotryfree(so) do {						\
	ACCEPT_LOCK_ASSERT();						\
	SOCK_LOCK_ASSERT(so);						\
	if ((so)->so_count == 0)					\
		sofree(so);						\
	else {								\
		SOCK_UNLOCK(so);					\
		ACCEPT_UNLOCK();					\
	}								\
} while(0)
michael@0 | 480 | |
/*
 * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to
 * avoid a non-atomic test-and-wakeup.  However, sowakeup is
 * responsible for releasing the lock if it is called.  We unlock only
 * if we don't call into sowakeup.  If any code is introduced that
 * directly invokes the underlying sowakeup() primitives, it must
 * maintain the same semantics.
 */
/* Caller already holds the rcv-buffer lock; sowakeup() (or the else
 * branch) releases it — the lock is always dropped on exit. */
#define	sorwakeup_locked(so) do {					\
	SOCKBUF_LOCK_ASSERT(&(so)->so_rcv);				\
	if (sb_notify(&(so)->so_rcv))					\
		sowakeup((so), &(so)->so_rcv);	 			\
	else								\
		SOCKBUF_UNLOCK(&(so)->so_rcv);				\
} while (0)

#define	sorwakeup(so) do {						\
	SOCKBUF_LOCK(&(so)->so_rcv);					\
	sorwakeup_locked(so);						\
} while (0)

/* Send-side twin of sorwakeup_locked(); same lock hand-off contract. */
#define	sowwakeup_locked(so) do {					\
	SOCKBUF_LOCK_ASSERT(&(so)->so_snd);				\
	if (sb_notify(&(so)->so_snd))					\
		sowakeup((so), &(so)->so_snd); 				\
	else								\
		SOCKBUF_UNLOCK(&(so)->so_snd);				\
} while (0)

#define	sowwakeup(so) do {						\
	SOCKBUF_LOCK(&(so)->so_snd);					\
	sowwakeup_locked(so);						\
} while (0)
michael@0 | 514 | |
/*
 * Argument structure for sosetopt et seq.  This is in the KERNEL
 * section because it will never be visible to user code.
 */
enum sopt_dir { SOPT_GET, SOPT_SET };
struct sockopt {
	enum	sopt_dir sopt_dir; /* is this a get or a set? */
	int	sopt_level;	/* second arg of [gs]etsockopt */
	int	sopt_name;	/* third arg of [gs]etsockopt */
	void	*sopt_val;	/* fourth arg of [gs]etsockopt */
	size_t	sopt_valsize;	/* (almost) fifth arg of [gs]etsockopt */
	struct	thread *sopt_td; /* calling thread or null if kernel */
};
michael@0 | 528 | |
/* Registered accept filter: a named set of callbacks consulted when
 * connections complete on a listen socket, linked on a global list. */
struct accept_filter {
	char	accf_name[16];			/* filter name used for lookup */
	void	(*accf_callback)		/* invoked on filter events */
		(struct socket *so, void *arg, int waitflag);
	void *	(*accf_create)			/* per-socket state constructor */
		(struct socket *so, char *arg);
	void	(*accf_destroy)			/* per-socket state destructor */
		(struct socket *so);
	SLIST_ENTRY(accept_filter) accf_next;	/* global filter list linkage */
};
michael@0 | 539 | |
michael@0 | 540 | extern int maxsockets; |
michael@0 | 541 | extern u_long sb_max; |
michael@0 | 542 | extern struct uma_zone *socket_zone; |
michael@0 | 543 | extern so_gen_t so_gencnt; |
michael@0 | 544 | |
michael@0 | 545 | struct mbuf; |
michael@0 | 546 | struct sockaddr; |
michael@0 | 547 | struct ucred; |
michael@0 | 548 | struct uio; |
michael@0 | 549 | |
michael@0 | 550 | /* |
michael@0 | 551 | * From uipc_socket and friends |
michael@0 | 552 | */ |
michael@0 | 553 | int do_getopt_accept_filter(struct socket *so, struct sockopt *sopt); |
michael@0 | 554 | int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt); |
michael@0 | 555 | int so_setsockopt(struct socket *so, int level, int optname, |
michael@0 | 556 | void *optval, size_t optlen); |
michael@0 | 557 | int sockargs(struct mbuf **mp, caddr_t buf, int buflen, int type); |
michael@0 | 558 | int getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len); |
michael@0 | 559 | void sbappend(struct sockbuf *sb, struct mbuf *m); |
michael@0 | 560 | void sbappend_locked(struct sockbuf *sb, struct mbuf *m); |
michael@0 | 561 | void sbappendstream(struct sockbuf *sb, struct mbuf *m); |
michael@0 | 562 | void sbappendstream_locked(struct sockbuf *sb, struct mbuf *m); |
michael@0 | 563 | int sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa, |
michael@0 | 564 | struct mbuf *m0, struct mbuf *control); |
michael@0 | 565 | int sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa, |
michael@0 | 566 | struct mbuf *m0, struct mbuf *control); |
michael@0 | 567 | int sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, |
michael@0 | 568 | struct mbuf *control); |
michael@0 | 569 | int sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0, |
michael@0 | 570 | struct mbuf *control); |
michael@0 | 571 | void sbappendrecord(struct sockbuf *sb, struct mbuf *m0); |
michael@0 | 572 | void sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0); |
michael@0 | 573 | void sbcheck(struct sockbuf *sb); |
michael@0 | 574 | void sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n); |
michael@0 | 575 | struct mbuf * |
michael@0 | 576 | sbcreatecontrol(caddr_t p, int size, int type, int level); |
michael@0 | 577 | void sbdestroy(struct sockbuf *sb, struct socket *so); |
michael@0 | 578 | void sbdrop(struct sockbuf *sb, int len); |
michael@0 | 579 | void sbdrop_locked(struct sockbuf *sb, int len); |
michael@0 | 580 | void sbdroprecord(struct sockbuf *sb); |
michael@0 | 581 | void sbdroprecord_locked(struct sockbuf *sb); |
michael@0 | 582 | void sbflush(struct sockbuf *sb); |
michael@0 | 583 | void sbflush_locked(struct sockbuf *sb); |
michael@0 | 584 | void sbrelease(struct sockbuf *sb, struct socket *so); |
michael@0 | 585 | void sbrelease_locked(struct sockbuf *sb, struct socket *so); |
michael@0 | 586 | int sbreserve(struct sockbuf *sb, u_long cc, struct socket *so, |
michael@0 | 587 | struct thread *td); |
michael@0 | 588 | int sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so, |
michael@0 | 589 | struct thread *td); |
michael@0 | 590 | struct mbuf * |
michael@0 | 591 | sbsndptr(struct sockbuf *sb, u_int off, u_int len, u_int *moff); |
michael@0 | 592 | void sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb); |
michael@0 | 593 | int sbwait(struct sockbuf *sb); |
michael@0 | 594 | int sblock(struct sockbuf *sb, int flags); |
michael@0 | 595 | void sbunlock(struct sockbuf *sb); |
michael@0 | 596 | void soabort(struct socket *so); |
michael@0 | 597 | int soaccept(struct socket *so, struct sockaddr **nam); |
michael@0 | 598 | int socheckuid(struct socket *so, uid_t uid); |
michael@0 | 599 | int sobind(struct socket *so, struct sockaddr *nam, struct thread *td); |
michael@0 | 600 | void socantrcvmore(struct socket *so); |
michael@0 | 601 | void socantrcvmore_locked(struct socket *so); |
michael@0 | 602 | void socantsendmore(struct socket *so); |
michael@0 | 603 | void socantsendmore_locked(struct socket *so); |
michael@0 | 604 | int soclose(struct socket *so); |
michael@0 | 605 | int soconnect(struct socket *so, struct sockaddr *nam, struct thread *td); |
michael@0 | 606 | int soconnect2(struct socket *so1, struct socket *so2); |
michael@0 | 607 | int socow_setup(struct mbuf *m0, struct uio *uio); |
michael@0 | 608 | int socreate(int dom, struct socket **aso, int type, int proto, |
michael@0 | 609 | struct ucred *cred, struct thread *td); |
michael@0 | 610 | int sodisconnect(struct socket *so); |
michael@0 | 611 | struct sockaddr *sodupsockaddr(const struct sockaddr *sa, int mflags); |
michael@0 | 612 | void sofree(struct socket *so); |
michael@0 | 613 | int sogetopt(struct socket *so, struct sockopt *sopt); |
michael@0 | 614 | void sohasoutofband(struct socket *so); |
michael@0 | 615 | void soisconnected(struct socket *so); |
michael@0 | 616 | void soisconnecting(struct socket *so); |
michael@0 | 617 | void soisdisconnected(struct socket *so); |
michael@0 | 618 | void soisdisconnecting(struct socket *so); |
michael@0 | 619 | int solisten(struct socket *so, int backlog, struct thread *td); |
michael@0 | 620 | void solisten_proto(struct socket *so, int backlog); |
michael@0 | 621 | int solisten_proto_check(struct socket *so); |
michael@0 | 622 | struct socket * |
michael@0 | 623 | sonewconn(struct socket *head, int connstatus); |
michael@0 | 624 | int sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen); |
michael@0 | 625 | int sooptcopyout(struct sockopt *sopt, const void *buf, size_t len); |
michael@0 | 626 | |
michael@0 | 627 | /* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */ |
michael@0 | 628 | int soopt_getm(struct sockopt *sopt, struct mbuf **mp); |
michael@0 | 629 | int soopt_mcopyin(struct sockopt *sopt, struct mbuf *m); |
michael@0 | 630 | int soopt_mcopyout(struct sockopt *sopt, struct mbuf *m); |
michael@0 | 631 | |
michael@0 | 632 | int sopoll(struct socket *so, int events, struct ucred *active_cred, |
michael@0 | 633 | struct thread *td); |
michael@0 | 634 | int sopoll_generic(struct socket *so, int events, |
michael@0 | 635 | struct ucred *active_cred, struct thread *td); |
michael@0 | 636 | int soreceive(struct socket *so, struct sockaddr **paddr, struct uio *uio, |
michael@0 | 637 | struct mbuf **mp0, struct mbuf **controlp, int *flagsp); |
michael@0 | 638 | int soreceive_generic(struct socket *so, struct sockaddr **paddr, |
michael@0 | 639 | struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, |
michael@0 | 640 | int *flagsp); |
michael@0 | 641 | int soreserve(struct socket *so, u_long sndcc, u_long rcvcc); |
michael@0 | 642 | void sorflush(struct socket *so); |
michael@0 | 643 | int sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, |
michael@0 | 644 | struct mbuf *top, struct mbuf *control, int flags, |
michael@0 | 645 | struct thread *td); |
michael@0 | 646 | int sosend_dgram(struct socket *so, struct sockaddr *addr, |
michael@0 | 647 | struct uio *uio, struct mbuf *top, struct mbuf *control, |
michael@0 | 648 | int flags, struct thread *td); |
michael@0 | 649 | int sosend_generic(struct socket *so, struct sockaddr *addr, |
michael@0 | 650 | struct uio *uio, struct mbuf *top, struct mbuf *control, |
michael@0 | 651 | int flags, struct thread *td); |
michael@0 | 652 | int sosetopt(struct socket *so, struct sockopt *sopt); |
michael@0 | 653 | int soshutdown(struct socket *so, int how); |
michael@0 | 654 | void sotoxsocket(struct socket *so, struct xsocket *xso); |
michael@0 | 655 | void sowakeup(struct socket *so, struct sockbuf *sb); |
michael@0 | 656 | |
/*
 * Socket-buffer consistency checks; compiled to no-ops unless
 * SOCKBUF_DEBUG is defined.
 */
#ifdef SOCKBUF_DEBUG
void sblastrecordchk(struct sockbuf *, const char *, int);
#define SBLASTRECORDCHK(sb) sblastrecordchk((sb), __FILE__, __LINE__)

void sblastmbufchk(struct sockbuf *, const char *, int);
#define SBLASTMBUFCHK(sb) sblastmbufchk((sb), __FILE__, __LINE__)
#else
#define SBLASTRECORDCHK(sb) /* nothing */
#define SBLASTMBUFCHK(sb) /* nothing */
#endif /* SOCKBUF_DEBUG */
michael@0 | 667 | |
/*
 * Accept filter functions.
 */
int accept_filt_add(struct accept_filter *filt);
int accept_filt_del(char *name);
struct accept_filter *accept_filt_get(char *name);
#ifdef ACCEPT_FILTER_MOD
#ifdef SYSCTL_DECL
SYSCTL_DECL(_net_inet_accf);
#endif
int accept_filt_generic_mod_event(module_t mod, int event, void *data);
#endif
michael@0 | 680 | |
michael@0 | 681 | #endif /* _KERNEL */ |
michael@0 | 682 | |
michael@0 | 683 | |
michael@0 | 684 | /*-------------------------------------------------------------*/ |
michael@0 | 685 | /*-------------------------------------------------------------*/ |
michael@0 | 686 | /* __Userspace__ */ |
michael@0 | 687 | /*-------------------------------------------------------------*/ |
michael@0 | 688 | /*-------------------------------------------------------------*/ |
michael@0 | 689 | /* this new __Userspace__ section is to copy portions of the _KERNEL block |
michael@0 | 690 | * above into, avoiding having to port the entire thing at once... |
michael@0 | 691 | * For function prototypes, the full bodies are in user_socket.c . |
michael@0 | 692 | */ |
michael@0 | 693 | #if defined(__Userspace__) |
michael@0 | 694 | |
/* ---------------------------------------------------------- */
/* --- function prototypes (implemented in user_socket.c) --- */
/* ---------------------------------------------------------- */
void soisconnecting(struct socket *so);
void soisdisconnecting(struct socket *so);
void soisconnected(struct socket *so);
struct socket *sonewconn(struct socket *head, int connstatus);
void socantrcvmore(struct socket *so);
void socantsendmore(struct socket *so);
michael@0 | 704 | |
michael@0 | 705 | |
michael@0 | 706 | |
/* -------------- */
/* --- macros --- */
/* -------------- */

/*
 * Take an additional reference on the socket.  Caller must hold the
 * socket lock (asserted).  NOTE(review): macro evaluates `so` twice.
 */
#define soref(so) do { \
	SOCK_LOCK_ASSERT(so); \
	++(so)->so_count; \
} while (0)
michael@0 | 715 | |
/*
 * Drop a reference on the socket.  On the last reference the socket is
 * freed via sofree(); otherwise both the socket lock and the accept
 * lock (asserted held on entry) are released here.
 */
#define sorele(so) do { \
	ACCEPT_LOCK_ASSERT(); \
	SOCK_LOCK_ASSERT(so); \
	KASSERT((so)->so_count > 0, ("sorele")); \
	if (--(so)->so_count == 0) \
		sofree(so); \
	else { \
		SOCK_UNLOCK(so); \
		ACCEPT_UNLOCK(); \
	} \
} while (0)
michael@0 | 727 | |
michael@0 | 728 | |
/*
 * Free space in the socket buffer, bounded by both the byte high-water
 * mark and the mbuf high-water mark.
 * Replacing imin with min (user_environment.h).
 */
#define sbspace(sb) \
	((long) min((int)((sb)->sb_hiwat - (sb)->sb_cc), \
	    (int)((sb)->sb_mbmax - (sb)->sb_mbcnt)))
michael@0 | 733 | |
/* Do we have to send all at once on a socket? */
#define sosendallatonce(so) \
	((so)->so_proto->pr_flags & PR_ATOMIC)
michael@0 | 737 | |
/*
 * Can we read something from so?  True when enough data is buffered,
 * the peer can send no more, a connection is waiting to be accepted,
 * or an error is pending.
 */
#define soreadable(so) \
	((int)((so)->so_rcv.sb_cc) >= (so)->so_rcv.sb_lowat || \
	    ((so)->so_rcv.sb_state & SBS_CANTRCVMORE) || \
	    !TAILQ_EMPTY(&(so)->so_comp) || (so)->so_error)
michael@0 | 743 | |
#if 0 /* original */
#define PR_CONNREQUIRED 0x04 /* from sys/protosw.h "needed" for sowriteable */
#define sowriteable(so) \
	((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \
	    (((so)->so_state&SS_ISCONNECTED) || \
	    ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) || \
	    ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \
	    (so)->so_error)
#else /* line with PR_CONNREQUIRED removed */
/*
 * Can we write something to so?  True when there is room above the
 * low-water mark on a connected socket, the send side is shut down,
 * or an error is pending.
 */
#define sowriteable(so) \
	((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \
	    (((so)->so_state&SS_ISCONNECTED))) || \
	    ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \
	    (so)->so_error)
#endif
michael@0 | 760 | |
michael@0 | 761 | extern void solisten_proto(struct socket *so, int backlog); |
michael@0 | 762 | extern int solisten_proto_check(struct socket *so); |
michael@0 | 763 | extern int sctp_listen(struct socket *so, int backlog, struct proc *p); |
michael@0 | 764 | extern void socantrcvmore_locked(struct socket *so); |
michael@0 | 765 | extern int sctp_bind(struct socket *so, struct sockaddr *addr); |
michael@0 | 766 | extern int sctp6_bind(struct socket *so, struct sockaddr *addr, void *proc); |
michael@0 | 767 | #if defined(__Userspace__) |
michael@0 | 768 | extern int sctpconn_bind(struct socket *so, struct sockaddr *addr); |
michael@0 | 769 | #endif |
michael@0 | 770 | extern int sctp_accept(struct socket *so, struct sockaddr **addr); |
michael@0 | 771 | extern int sctp_attach(struct socket *so, int proto, uint32_t vrf_id); |
michael@0 | 772 | extern int sctp6_attach(struct socket *so, int proto, uint32_t vrf_id); |
michael@0 | 773 | extern int sctp_abort(struct socket *so); |
michael@0 | 774 | extern int sctp6_abort(struct socket *so); |
michael@0 | 775 | extern void sctp_close(struct socket *so); |
michael@0 | 776 | extern int soaccept(struct socket *so, struct sockaddr **nam); |
michael@0 | 777 | extern int solisten(struct socket *so, int backlog); |
michael@0 | 778 | extern int soreserve(struct socket *so, u_long sndcc, u_long rcvcc); |
michael@0 | 779 | extern void sowakeup(struct socket *so, struct sockbuf *sb); |
michael@0 | 780 | extern void wakeup(void *ident, struct socket *so); /*__Userspace__ */ |
michael@0 | 781 | extern int uiomove(void *cp, int n, struct uio *uio); |
michael@0 | 782 | extern int sbwait(struct sockbuf *sb); |
michael@0 | 783 | extern int sodisconnect(struct socket *so); |
michael@0 | 784 | extern int soconnect(struct socket *so, struct sockaddr *nam); |
michael@0 | 785 | extern int sctp_disconnect(struct socket *so); |
michael@0 | 786 | extern int sctp_connect(struct socket *so, struct sockaddr *addr); |
michael@0 | 787 | extern int sctp6_connect(struct socket *so, struct sockaddr *addr); |
michael@0 | 788 | #if defined(__Userspace__) |
michael@0 | 789 | extern int sctpconn_connect(struct socket *so, struct sockaddr *addr); |
michael@0 | 790 | #endif |
michael@0 | 791 | extern void sctp_finish(void); |
michael@0 | 792 | |
/* ------------------------------------------------ */
/* ----- macros copied from above ---- */
/* ------------------------------------------------ */

/*
 * Do we need to notify the other side when I/O is possible?
 */
#define sb_notify(sb) (((sb)->sb_flags & (SB_WAIT | SB_SEL | SB_ASYNC | \
    SB_UPCALL | SB_AIO | SB_KNOTE)) != 0)
michael@0 | 802 | |
michael@0 | 803 | |
/*
 * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to
 * avoid a non-atomic test-and-wakeup.  However, sowakeup is
 * responsible for releasing the lock if it is called.  We unlock only
 * if we don't call into sowakeup.  If any code is introduced that
 * directly invokes the underlying sowakeup() primitives, it must
 * maintain the same semantics.
 */
#define sorwakeup_locked(so) do { \
	SOCKBUF_LOCK_ASSERT(&(so)->so_rcv); \
	if (sb_notify(&(so)->so_rcv)) \
		sowakeup((so), &(so)->so_rcv); \
	else \
		SOCKBUF_UNLOCK(&(so)->so_rcv); \
} while (0)
michael@0 | 819 | |
/* Lock the receive buffer, then test-and-wakeup (lock released inside). */
#define sorwakeup(so) do { \
	SOCKBUF_LOCK(&(so)->so_rcv); \
	sorwakeup_locked(so); \
} while (0)
michael@0 | 824 | |
/*
 * Send-side counterpart of sorwakeup_locked(); same locking contract:
 * sowakeup() releases the buffer lock when called, otherwise we do.
 */
#define sowwakeup_locked(so) do { \
	SOCKBUF_LOCK_ASSERT(&(so)->so_snd); \
	if (sb_notify(&(so)->so_snd)) \
		sowakeup((so), &(so)->so_snd); \
	else \
		SOCKBUF_UNLOCK(&(so)->so_snd); \
} while (0)
michael@0 | 832 | |
/* Lock the send buffer, then test-and-wakeup (lock released inside). */
#define sowwakeup(so) do { \
	SOCKBUF_LOCK(&(so)->so_snd); \
	sowwakeup_locked(so); \
} while (0)
michael@0 | 837 | |
michael@0 | 838 | |
michael@0 | 839 | |
michael@0 | 840 | #endif /* __Userspace__ */ |
michael@0 | 841 | |
michael@0 | 842 | #endif /* !_SYS_SOCKETVAR_H_ */ |