/*-
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
michael@0: * michael@0: */ michael@0: michael@0: /* __Userspace__ version of goes here.*/ michael@0: michael@0: #ifndef _USER_SOCKETVAR_H_ michael@0: #define _USER_SOCKETVAR_H_ michael@0: michael@0: #if defined(__Userspace_os_Darwin) michael@0: #include michael@0: #include michael@0: #endif michael@0: michael@0: /* #include */ /*__Userspace__ alternative?*/ /* for struct selinfo */ michael@0: /* #include was 0 byte file */ michael@0: /* #include was 0 byte file */ michael@0: /* #include */ /*__Userspace__ alternative?*/ michael@0: #if !defined(__Userspace_os_DragonFly) && !defined(__Userspace_os_FreeBSD) && !defined(__Userspace_os_NetBSD) && !defined(__Userspace_os_Windows) michael@0: #include michael@0: #endif michael@0: #define SOCK_MAXADDRLEN 255 michael@0: #if !defined(MSG_NOTIFICATION) michael@0: #define MSG_NOTIFICATION 0x2000 /* SCTP notification */ michael@0: #endif michael@0: #define SCTP_SO_LINGER 0x0001 michael@0: #define SCTP_SO_ACCEPTCONN 0x0002 michael@0: #define SS_CANTRCVMORE 0x020 michael@0: #define SS_CANTSENDMORE 0x010 michael@0: michael@0: #if defined(__Userspace_os_Darwin) || defined(__Userspace_os_DragonFly) || defined(__Userspace_os_FreeBSD) || defined(__Userspace_os_OpenBSD) || defined (__Userspace_os_Windows) michael@0: #define UIO_MAXIOV 1024 michael@0: #define ERESTART (-1) michael@0: #endif michael@0: michael@0: #if !defined(__Userspace_os_Darwin) && !defined(__Userspace_os_NetBSD) && !defined(__Userspace_os_OpenBSD) michael@0: enum uio_rw { UIO_READ, UIO_WRITE }; michael@0: #endif michael@0: michael@0: #if !defined(__Userspace_os_NetBSD) && !defined(__Userspace_os_OpenBSD) michael@0: /* Segment flag values. 
*/ michael@0: enum uio_seg { michael@0: UIO_USERSPACE, /* from user data space */ michael@0: UIO_SYSSPACE /* from system space */ michael@0: }; michael@0: #endif michael@0: michael@0: struct proc { michael@0: int stub; /* struct proc is a dummy for __Userspace__ */ michael@0: }; michael@0: michael@0: MALLOC_DECLARE(M_ACCF); michael@0: MALLOC_DECLARE(M_PCB); michael@0: MALLOC_DECLARE(M_SONAME); michael@0: michael@0: /* __Userspace__ Are these all the fields we need? michael@0: * Removing struct thread *uio_td; owner field michael@0: */ michael@0: struct uio { michael@0: struct iovec *uio_iov; /* scatter/gather list */ michael@0: int uio_iovcnt; /* length of scatter/gather list */ michael@0: off_t uio_offset; /* offset in target object */ michael@0: int uio_resid; /* remaining bytes to process */ michael@0: enum uio_seg uio_segflg; /* address space */ michael@0: enum uio_rw uio_rw; /* operation */ michael@0: }; michael@0: michael@0: michael@0: /* __Userspace__ */ michael@0: michael@0: /* michael@0: * Kernel structure per socket. michael@0: * Contains send and receive buffer queues, michael@0: * handle on protocol and pointer to protocol michael@0: * private data and error information. michael@0: */ michael@0: #if defined (__Userspace_os_Windows) michael@0: #define AF_ROUTE 17 michael@0: typedef __int32 pid_t; michael@0: typedef unsigned __int32 uid_t; michael@0: enum sigType { michael@0: SIGNAL = 0, michael@0: BROADCAST = 1, michael@0: MAX_EVENTS = 2 michael@0: }; michael@0: #endif michael@0: michael@0: /*- michael@0: * Locking key to struct socket: michael@0: * (a) constant after allocation, no locking required. michael@0: * (b) locked by SOCK_LOCK(so). michael@0: * (c) locked by SOCKBUF_LOCK(&so->so_rcv). michael@0: * (d) locked by SOCKBUF_LOCK(&so->so_snd). michael@0: * (e) locked by ACCEPT_LOCK(). michael@0: * (f) not locked since integer reads/writes are atomic. michael@0: * (g) used only as a sleep/wakeup address, no value. 
michael@0: * (h) locked by global mutex so_global_mtx. michael@0: */ michael@0: struct socket { michael@0: int so_count; /* (b) reference count */ michael@0: short so_type; /* (a) generic type, see socket.h */ michael@0: short so_options; /* from socket call, see socket.h */ michael@0: short so_linger; /* time to linger while closing */ michael@0: short so_state; /* (b) internal state flags SS_* */ michael@0: int so_qstate; /* (e) internal state flags SQ_* */ michael@0: void *so_pcb; /* protocol control block */ michael@0: int so_dom; michael@0: /* michael@0: * Variables for connection queuing. michael@0: * Socket where accepts occur is so_head in all subsidiary sockets. michael@0: * If so_head is 0, socket is not related to an accept. michael@0: * For head socket so_incomp queues partially completed connections, michael@0: * while so_comp is a queue of connections ready to be accepted. michael@0: * If a connection is aborted and it has so_head set, then michael@0: * it has to be pulled out of either so_incomp or so_comp. michael@0: * We allow connections to queue up based on current queue lengths michael@0: * and limit on number of queued connections for this socket. 
michael@0: */ michael@0: struct socket *so_head; /* (e) back pointer to listen socket */ michael@0: TAILQ_HEAD(, socket) so_incomp; /* (e) queue of partial unaccepted connections */ michael@0: TAILQ_HEAD(, socket) so_comp; /* (e) queue of complete unaccepted connections */ michael@0: TAILQ_ENTRY(socket) so_list; /* (e) list of unaccepted connections */ michael@0: u_short so_qlen; /* (e) number of unaccepted connections */ michael@0: u_short so_incqlen; /* (e) number of unaccepted incomplete michael@0: connections */ michael@0: u_short so_qlimit; /* (e) max number queued connections */ michael@0: short so_timeo; /* (g) connection timeout */ michael@0: userland_cond_t timeo_cond; /* timeo_cond condition variable being used in wakeup */ michael@0: michael@0: u_short so_error; /* (f) error affecting connection */ michael@0: struct sigio *so_sigio; /* [sg] information for async I/O or michael@0: out of band data (SIGURG) */ michael@0: u_long so_oobmark; /* (c) chars to oob mark */ michael@0: TAILQ_HEAD(, aiocblist) so_aiojobq; /* AIO ops waiting on socket */ michael@0: /* michael@0: * Variables for socket buffering. michael@0: */ michael@0: struct sockbuf { michael@0: /* __Userspace__ Many of these fields may michael@0: * not be required for the sctp stack. michael@0: * Commenting out the following. michael@0: * Including pthread mutex and condition variable to be michael@0: * used by sbwait, sorwakeup and sowwakeup. 
michael@0: */ michael@0: /* struct selinfo sb_sel;*/ /* process selecting read/write */ michael@0: /* struct mtx sb_mtx;*/ /* sockbuf lock */ michael@0: /* struct sx sb_sx;*/ /* prevent I/O interlacing */ michael@0: userland_cond_t sb_cond; /* sockbuf condition variable */ michael@0: userland_mutex_t sb_mtx; /* sockbuf lock associated with sb_cond */ michael@0: short sb_state; /* (c/d) socket state on sockbuf */ michael@0: #define sb_startzero sb_mb michael@0: struct mbuf *sb_mb; /* (c/d) the mbuf chain */ michael@0: struct mbuf *sb_mbtail; /* (c/d) the last mbuf in the chain */ michael@0: struct mbuf *sb_lastrecord; /* (c/d) first mbuf of last michael@0: * record in socket buffer */ michael@0: struct mbuf *sb_sndptr; /* (c/d) pointer into mbuf chain */ michael@0: u_int sb_sndptroff; /* (c/d) byte offset of ptr into chain */ michael@0: u_int sb_cc; /* (c/d) actual chars in buffer */ michael@0: u_int sb_hiwat; /* (c/d) max actual char count */ michael@0: u_int sb_mbcnt; /* (c/d) chars of mbufs used */ michael@0: u_int sb_mbmax; /* (c/d) max chars of mbufs to use */ michael@0: u_int sb_ctl; /* (c/d) non-data chars in buffer */ michael@0: int sb_lowat; /* (c/d) low water mark */ michael@0: int sb_timeo; /* (c/d) timeout for read/write */ michael@0: short sb_flags; /* (c/d) flags, see below */ michael@0: } so_rcv, so_snd; michael@0: /* michael@0: * Constants for sb_flags field of struct sockbuf. michael@0: */ michael@0: #define SB_MAX (256*1024) /* default for max chars in sockbuf */ michael@0: #define SB_RAW (64*1024*2) /*Aligning so->so_rcv.sb_hiwat with the receive buffer size of raw socket*/ michael@0: /* michael@0: * Constants for sb_flags field of struct sockbuf. 
michael@0: */ michael@0: #define SB_WAIT 0x04 /* someone is waiting for data/space */ michael@0: #define SB_SEL 0x08 /* someone is selecting */ michael@0: #define SB_ASYNC 0x10 /* ASYNC I/O, need signals */ michael@0: #define SB_UPCALL 0x20 /* someone wants an upcall */ michael@0: #define SB_NOINTR 0x40 /* operations not interruptible */ michael@0: #define SB_AIO 0x80 /* AIO operations queued */ michael@0: #define SB_KNOTE 0x100 /* kernel note attached */ michael@0: #define SB_AUTOSIZE 0x800 /* automatically size socket buffer */ michael@0: michael@0: void (*so_upcall)(struct socket *, void *, int); michael@0: void *so_upcallarg; michael@0: struct ucred *so_cred; /* (a) user credentials */ michael@0: struct label *so_label; /* (b) MAC label for socket */ michael@0: struct label *so_peerlabel; /* (b) cached MAC label for peer */ michael@0: /* NB: generation count must not be first. */ michael@0: uint32_t so_gencnt; /* (h) generation count */ michael@0: void *so_emuldata; /* (b) private data for emulators */ michael@0: struct so_accf { michael@0: struct accept_filter *so_accept_filter; michael@0: void *so_accept_filter_arg; /* saved filter args */ michael@0: char *so_accept_filter_str; /* saved user args */ michael@0: } *so_accf; michael@0: }; michael@0: michael@0: #define SB_EMPTY_FIXUP(sb) do { \ michael@0: if ((sb)->sb_mb == NULL) { \ michael@0: (sb)->sb_mbtail = NULL; \ michael@0: (sb)->sb_lastrecord = NULL; \ michael@0: } \ michael@0: } while (/*CONSTCOND*/0) michael@0: michael@0: /* michael@0: * Global accept mutex to serialize access to accept queues and michael@0: * fields associated with multiple sockets. This allows us to michael@0: * avoid defining a lock order between listen and accept sockets michael@0: * until such time as it proves to be a good idea. 
michael@0: */ michael@0: #if defined(__Userspace_os_Windows) michael@0: extern userland_mutex_t accept_mtx; michael@0: extern userland_cond_t accept_cond; michael@0: #define ACCEPT_LOCK_ASSERT() michael@0: #define ACCEPT_LOCK() do { \ michael@0: EnterCriticalSection(&accept_mtx); \ michael@0: } while (0) michael@0: #define ACCEPT_UNLOCK() do { \ michael@0: LeaveCriticalSection(&accept_mtx); \ michael@0: } while (0) michael@0: #define ACCEPT_UNLOCK_ASSERT() michael@0: #else michael@0: extern userland_mutex_t accept_mtx; michael@0: extern userland_cond_t accept_cond; michael@0: #define ACCEPT_LOCK_ASSERT() KASSERT(pthread_mutex_trylock(&accept_mtx) == EBUSY, ("%s: accept_mtx not locked", __func__)) michael@0: #define ACCEPT_LOCK() (void)pthread_mutex_lock(&accept_mtx) michael@0: #define ACCEPT_UNLOCK() (void)pthread_mutex_unlock(&accept_mtx) michael@0: #define ACCEPT_UNLOCK_ASSERT() do{ \ michael@0: KASSERT(pthread_mutex_trylock(&accept_mtx) == 0, ("%s: accept_mtx locked", __func__)); \ michael@0: (void)pthread_mutex_unlock(&accept_mtx); \ michael@0: } while (0) michael@0: #endif michael@0: michael@0: /* michael@0: * Per-socket buffer mutex used to protect most fields in the socket michael@0: * buffer. 
michael@0: */ michael@0: #define SOCKBUF_MTX(_sb) (&(_sb)->sb_mtx) michael@0: #if defined (__Userspace_os_Windows) michael@0: #define SOCKBUF_LOCK_INIT(_sb, _name) \ michael@0: InitializeCriticalSection(SOCKBUF_MTX(_sb)) michael@0: #define SOCKBUF_LOCK_DESTROY(_sb) DeleteCriticalSection(SOCKBUF_MTX(_sb)) michael@0: #define SOCKBUF_COND_INIT(_sb) InitializeConditionVariable((&(_sb)->sb_cond)) michael@0: #define SOCKBUF_COND_DESTROY(_sb) DeleteConditionVariable((&(_sb)->sb_cond)) michael@0: #define SOCK_COND_INIT(_so) InitializeConditionVariable((&(_so)->timeo_cond)) michael@0: #define SOCK_COND_DESTROY(_so) DeleteConditionVariable((&(_so)->timeo_cond)) michael@0: #define SOCK_COND(_so) (&(_so)->timeo_cond) michael@0: #else michael@0: #define SOCKBUF_LOCK_INIT(_sb, _name) \ michael@0: pthread_mutex_init(SOCKBUF_MTX(_sb), NULL) michael@0: #define SOCKBUF_LOCK_DESTROY(_sb) pthread_mutex_destroy(SOCKBUF_MTX(_sb)) michael@0: #define SOCKBUF_COND_INIT(_sb) pthread_cond_init((&(_sb)->sb_cond), NULL) michael@0: #define SOCKBUF_COND_DESTROY(_sb) pthread_cond_destroy((&(_sb)->sb_cond)) michael@0: #define SOCK_COND_INIT(_so) pthread_cond_init((&(_so)->timeo_cond), NULL) michael@0: #define SOCK_COND_DESTROY(_so) pthread_cond_destroy((&(_so)->timeo_cond)) michael@0: #define SOCK_COND(_so) (&(_so)->timeo_cond) michael@0: #endif michael@0: /*__Userspace__ SOCKBUF_LOCK(_sb) is now defined in netinet/sctp_process_lock.h */ michael@0: michael@0: /* #define SOCKBUF_OWNED(_sb) mtx_owned(SOCKBUF_MTX(_sb)) unused */ michael@0: /*__Userspace__ SOCKBUF_UNLOCK(_sb) is now defined in netinet/sctp_process_lock.h */ michael@0: michael@0: /*__Userspace__ SOCKBUF_LOCK_ASSERT(_sb) is now defined in netinet/sctp_process_lock.h */ michael@0: michael@0: /* #define SOCKBUF_UNLOCK_ASSERT(_sb) mtx_assert(SOCKBUF_MTX(_sb), MA_NOTOWNED) unused */ michael@0: michael@0: /* michael@0: * Per-socket mutex: we reuse the receive socket buffer mutex for space michael@0: * efficiency. 
This decision should probably be revisited as we optimize michael@0: * locking for the socket code. michael@0: */ michael@0: #define SOCK_MTX(_so) SOCKBUF_MTX(&(_so)->so_rcv) michael@0: /*__Userspace__ SOCK_LOCK(_so) is now defined in netinet/sctp_process_lock.h */ michael@0: michael@0: /* #define SOCK_OWNED(_so) SOCKBUF_OWNED(&(_so)->so_rcv) unused */ michael@0: /*__Userspace__ SOCK_UNLOCK(_so) is now defined in netinet/sctp_process_lock.h */ michael@0: michael@0: #define SOCK_LOCK_ASSERT(_so) SOCKBUF_LOCK_ASSERT(&(_so)->so_rcv) michael@0: michael@0: /* michael@0: * Socket state bits. michael@0: * michael@0: * Historically, this bits were all kept in the so_state field. For michael@0: * locking reasons, they are now in multiple fields, as they are michael@0: * locked differently. so_state maintains basic socket state protected michael@0: * by the socket lock. so_qstate holds information about the socket michael@0: * accept queues. Each socket buffer also has a state field holding michael@0: * information relevant to that socket buffer (can't send, rcv). Many michael@0: * fields will be read without locks to improve performance and avoid michael@0: * lock order issues. However, this approach must be used with caution. 
michael@0: */ michael@0: #define SS_NOFDREF 0x0001 /* no file table ref any more */ michael@0: #define SS_ISCONNECTED 0x0002 /* socket connected to a peer */ michael@0: #define SS_ISCONNECTING 0x0004 /* in process of connecting to peer */ michael@0: #define SS_ISDISCONNECTING 0x0008 /* in process of disconnecting */ michael@0: #define SS_NBIO 0x0100 /* non-blocking ops */ michael@0: #define SS_ASYNC 0x0200 /* async i/o notify */ michael@0: #define SS_ISCONFIRMING 0x0400 /* deciding to accept connection req */ michael@0: #define SS_ISDISCONNECTED 0x2000 /* socket disconnected from peer */ michael@0: /* michael@0: * Protocols can mark a socket as SS_PROTOREF to indicate that, following michael@0: * pru_detach, they still want the socket to persist, and will free it michael@0: * themselves when they are done. Protocols should only ever call sofree() michael@0: * following setting this flag in pru_detach(), and never otherwise, as michael@0: * sofree() bypasses socket reference counting. michael@0: */ michael@0: #define SS_PROTOREF 0x4000 /* strong protocol reference */ michael@0: michael@0: /* michael@0: * Socket state bits now stored in the socket buffer state field. michael@0: */ michael@0: #define SBS_CANTSENDMORE 0x0010 /* can't send more data to peer */ michael@0: #define SBS_CANTRCVMORE 0x0020 /* can't receive more data from peer */ michael@0: #define SBS_RCVATMARK 0x0040 /* at mark on input */ michael@0: michael@0: /* michael@0: * Socket state bits stored in so_qstate. michael@0: */ michael@0: #define SQ_INCOMP 0x0800 /* unaccepted, incomplete connection */ michael@0: #define SQ_COMP 0x1000 /* unaccepted, complete connection */ michael@0: michael@0: /* michael@0: * Externalized form of struct socket used by the sysctl(3) interface. 
michael@0: */ michael@0: struct xsocket { michael@0: size_t xso_len; /* length of this structure */ michael@0: struct socket *xso_so; /* makes a convenient handle sometimes */ michael@0: short so_type; michael@0: short so_options; michael@0: short so_linger; michael@0: short so_state; michael@0: caddr_t so_pcb; /* another convenient handle */ michael@0: int xso_protocol; michael@0: int xso_family; michael@0: u_short so_qlen; michael@0: u_short so_incqlen; michael@0: u_short so_qlimit; michael@0: short so_timeo; michael@0: u_short so_error; michael@0: pid_t so_pgid; michael@0: u_long so_oobmark; michael@0: struct xsockbuf { michael@0: u_int sb_cc; michael@0: u_int sb_hiwat; michael@0: u_int sb_mbcnt; michael@0: u_int sb_mbmax; michael@0: int sb_lowat; michael@0: int sb_timeo; michael@0: short sb_flags; michael@0: } so_rcv, so_snd; michael@0: uid_t so_uid; /* XXX */ michael@0: }; michael@0: michael@0: #if defined(_KERNEL) michael@0: michael@0: michael@0: /* michael@0: * Macros for sockets and socket buffering. michael@0: */ michael@0: michael@0: /* michael@0: * Do we need to notify the other side when I/O is possible? michael@0: */ michael@0: #define sb_notify(sb) (((sb)->sb_flags & (SB_WAIT | SB_SEL | SB_ASYNC | \ michael@0: SB_UPCALL | SB_AIO | SB_KNOTE)) != 0) michael@0: michael@0: /* michael@0: * How much space is there in a socket buffer (so->so_snd or so->so_rcv)? michael@0: * This is problematical if the fields are unsigned, as the space might michael@0: * still be negative (cc > hiwat or mbcnt > mbmax). Should detect michael@0: * overflow and return 0. Should use "lmin" but it doesn't exist now. michael@0: */ michael@0: #define sbspace(sb) \ michael@0: ((long) imin((int)((sb)->sb_hiwat - (sb)->sb_cc), \ michael@0: (int)((sb)->sb_mbmax - (sb)->sb_mbcnt))) michael@0: michael@0: /* do we have to send all at once on a socket? 
*/ michael@0: #define sosendallatonce(so) \ michael@0: ((so)->so_proto->pr_flags & PR_ATOMIC) michael@0: michael@0: /* can we read something from so? */ michael@0: #define soreadable(so) \ michael@0: ((so)->so_rcv.sb_cc >= (so)->so_rcv.sb_lowat || \ michael@0: ((so)->so_rcv.sb_state & SBS_CANTRCVMORE) || \ michael@0: !TAILQ_EMPTY(&(so)->so_comp) || (so)->so_error) michael@0: michael@0: /* can we write something to so? */ michael@0: #define sowriteable(so) \ michael@0: ((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \ michael@0: (((so)->so_state&SS_ISCONNECTED) || \ michael@0: ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) || \ michael@0: ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \ michael@0: (so)->so_error) michael@0: michael@0: /* adjust counters in sb reflecting allocation of m */ michael@0: #define sballoc(sb, m) { \ michael@0: (sb)->sb_cc += (m)->m_len; \ michael@0: if ((m)->m_type != MT_DATA && (m)->m_type != MT_OOBDATA) \ michael@0: (sb)->sb_ctl += (m)->m_len; \ michael@0: (sb)->sb_mbcnt += MSIZE; \ michael@0: if ((m)->m_flags & M_EXT) \ michael@0: (sb)->sb_mbcnt += (m)->m_ext.ext_size; \ michael@0: } michael@0: michael@0: /* adjust counters in sb reflecting freeing of m */ michael@0: #define sbfree(sb, m) { \ michael@0: (sb)->sb_cc -= (m)->m_len; \ michael@0: if ((m)->m_type != MT_DATA && (m)->m_type != MT_OOBDATA) \ michael@0: (sb)->sb_ctl -= (m)->m_len; \ michael@0: (sb)->sb_mbcnt -= MSIZE; \ michael@0: if ((m)->m_flags & M_EXT) \ michael@0: (sb)->sb_mbcnt -= (m)->m_ext.ext_size; \ michael@0: if ((sb)->sb_sndptr == (m)) { \ michael@0: (sb)->sb_sndptr = NULL; \ michael@0: (sb)->sb_sndptroff = 0; \ michael@0: } \ michael@0: if ((sb)->sb_sndptroff != 0) \ michael@0: (sb)->sb_sndptroff -= (m)->m_len; \ michael@0: } michael@0: michael@0: /* michael@0: * soref()/sorele() ref-count the socket structure. Note that you must michael@0: * still explicitly close the socket, but the last ref count will free michael@0: * the structure. 
michael@0: */ michael@0: #define soref(so) do { \ michael@0: SOCK_LOCK_ASSERT(so); \ michael@0: ++(so)->so_count; \ michael@0: } while (0) michael@0: michael@0: #define sorele(so) do { \ michael@0: ACCEPT_LOCK_ASSERT(); \ michael@0: SOCK_LOCK_ASSERT(so); \ michael@0: KASSERT((so)->so_count > 0, ("sorele")); \ michael@0: if (--(so)->so_count == 0) \ michael@0: sofree(so); \ michael@0: else { \ michael@0: SOCK_UNLOCK(so); \ michael@0: ACCEPT_UNLOCK(); \ michael@0: } \ michael@0: } while (0) michael@0: michael@0: #define sotryfree(so) do { \ michael@0: ACCEPT_LOCK_ASSERT(); \ michael@0: SOCK_LOCK_ASSERT(so); \ michael@0: if ((so)->so_count == 0) \ michael@0: sofree(so); \ michael@0: else { \ michael@0: SOCK_UNLOCK(so); \ michael@0: ACCEPT_UNLOCK(); \ michael@0: } \ michael@0: } while(0) michael@0: michael@0: /* michael@0: * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to michael@0: * avoid a non-atomic test-and-wakeup. However, sowakeup is michael@0: * responsible for releasing the lock if it is called. We unlock only michael@0: * if we don't call into sowakeup. If any code is introduced that michael@0: * directly invokes the underlying sowakeup() primitives, it must michael@0: * maintain the same semantics. 
michael@0: */ michael@0: #define sorwakeup_locked(so) do { \ michael@0: SOCKBUF_LOCK_ASSERT(&(so)->so_rcv); \ michael@0: if (sb_notify(&(so)->so_rcv)) \ michael@0: sowakeup((so), &(so)->so_rcv); \ michael@0: else \ michael@0: SOCKBUF_UNLOCK(&(so)->so_rcv); \ michael@0: } while (0) michael@0: michael@0: #define sorwakeup(so) do { \ michael@0: SOCKBUF_LOCK(&(so)->so_rcv); \ michael@0: sorwakeup_locked(so); \ michael@0: } while (0) michael@0: michael@0: #define sowwakeup_locked(so) do { \ michael@0: SOCKBUF_LOCK_ASSERT(&(so)->so_snd); \ michael@0: if (sb_notify(&(so)->so_snd)) \ michael@0: sowakeup((so), &(so)->so_snd); \ michael@0: else \ michael@0: SOCKBUF_UNLOCK(&(so)->so_snd); \ michael@0: } while (0) michael@0: michael@0: #define sowwakeup(so) do { \ michael@0: SOCKBUF_LOCK(&(so)->so_snd); \ michael@0: sowwakeup_locked(so); \ michael@0: } while (0) michael@0: michael@0: /* michael@0: * Argument structure for sosetopt et seq. This is in the KERNEL michael@0: * section because it will never be visible to user code. michael@0: */ michael@0: enum sopt_dir { SOPT_GET, SOPT_SET }; michael@0: struct sockopt { michael@0: enum sopt_dir sopt_dir; /* is this a get or a set? 
*/ michael@0: int sopt_level; /* second arg of [gs]etsockopt */ michael@0: int sopt_name; /* third arg of [gs]etsockopt */ michael@0: void *sopt_val; /* fourth arg of [gs]etsockopt */ michael@0: size_t sopt_valsize; /* (almost) fifth arg of [gs]etsockopt */ michael@0: struct thread *sopt_td; /* calling thread or null if kernel */ michael@0: }; michael@0: michael@0: struct accept_filter { michael@0: char accf_name[16]; michael@0: void (*accf_callback) michael@0: (struct socket *so, void *arg, int waitflag); michael@0: void * (*accf_create) michael@0: (struct socket *so, char *arg); michael@0: void (*accf_destroy) michael@0: (struct socket *so); michael@0: SLIST_ENTRY(accept_filter) accf_next; michael@0: }; michael@0: michael@0: extern int maxsockets; michael@0: extern u_long sb_max; michael@0: extern struct uma_zone *socket_zone; michael@0: extern so_gen_t so_gencnt; michael@0: michael@0: struct mbuf; michael@0: struct sockaddr; michael@0: struct ucred; michael@0: struct uio; michael@0: michael@0: /* michael@0: * From uipc_socket and friends michael@0: */ michael@0: int do_getopt_accept_filter(struct socket *so, struct sockopt *sopt); michael@0: int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt); michael@0: int so_setsockopt(struct socket *so, int level, int optname, michael@0: void *optval, size_t optlen); michael@0: int sockargs(struct mbuf **mp, caddr_t buf, int buflen, int type); michael@0: int getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len); michael@0: void sbappend(struct sockbuf *sb, struct mbuf *m); michael@0: void sbappend_locked(struct sockbuf *sb, struct mbuf *m); michael@0: void sbappendstream(struct sockbuf *sb, struct mbuf *m); michael@0: void sbappendstream_locked(struct sockbuf *sb, struct mbuf *m); michael@0: int sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa, michael@0: struct mbuf *m0, struct mbuf *control); michael@0: int sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa, michael@0: 
struct mbuf *m0, struct mbuf *control); michael@0: int sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, michael@0: struct mbuf *control); michael@0: int sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0, michael@0: struct mbuf *control); michael@0: void sbappendrecord(struct sockbuf *sb, struct mbuf *m0); michael@0: void sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0); michael@0: void sbcheck(struct sockbuf *sb); michael@0: void sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n); michael@0: struct mbuf * michael@0: sbcreatecontrol(caddr_t p, int size, int type, int level); michael@0: void sbdestroy(struct sockbuf *sb, struct socket *so); michael@0: void sbdrop(struct sockbuf *sb, int len); michael@0: void sbdrop_locked(struct sockbuf *sb, int len); michael@0: void sbdroprecord(struct sockbuf *sb); michael@0: void sbdroprecord_locked(struct sockbuf *sb); michael@0: void sbflush(struct sockbuf *sb); michael@0: void sbflush_locked(struct sockbuf *sb); michael@0: void sbrelease(struct sockbuf *sb, struct socket *so); michael@0: void sbrelease_locked(struct sockbuf *sb, struct socket *so); michael@0: int sbreserve(struct sockbuf *sb, u_long cc, struct socket *so, michael@0: struct thread *td); michael@0: int sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so, michael@0: struct thread *td); michael@0: struct mbuf * michael@0: sbsndptr(struct sockbuf *sb, u_int off, u_int len, u_int *moff); michael@0: void sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb); michael@0: int sbwait(struct sockbuf *sb); michael@0: int sblock(struct sockbuf *sb, int flags); michael@0: void sbunlock(struct sockbuf *sb); michael@0: void soabort(struct socket *so); michael@0: int soaccept(struct socket *so, struct sockaddr **nam); michael@0: int socheckuid(struct socket *so, uid_t uid); michael@0: int sobind(struct socket *so, struct sockaddr *nam, struct thread *td); michael@0: void socantrcvmore(struct socket *so); michael@0: void 
socantrcvmore_locked(struct socket *so); michael@0: void socantsendmore(struct socket *so); michael@0: void socantsendmore_locked(struct socket *so); michael@0: int soclose(struct socket *so); michael@0: int soconnect(struct socket *so, struct sockaddr *nam, struct thread *td); michael@0: int soconnect2(struct socket *so1, struct socket *so2); michael@0: int socow_setup(struct mbuf *m0, struct uio *uio); michael@0: int socreate(int dom, struct socket **aso, int type, int proto, michael@0: struct ucred *cred, struct thread *td); michael@0: int sodisconnect(struct socket *so); michael@0: struct sockaddr *sodupsockaddr(const struct sockaddr *sa, int mflags); michael@0: void sofree(struct socket *so); michael@0: int sogetopt(struct socket *so, struct sockopt *sopt); michael@0: void sohasoutofband(struct socket *so); michael@0: void soisconnected(struct socket *so); michael@0: void soisconnecting(struct socket *so); michael@0: void soisdisconnected(struct socket *so); michael@0: void soisdisconnecting(struct socket *so); michael@0: int solisten(struct socket *so, int backlog, struct thread *td); michael@0: void solisten_proto(struct socket *so, int backlog); michael@0: int solisten_proto_check(struct socket *so); michael@0: struct socket * michael@0: sonewconn(struct socket *head, int connstatus); michael@0: int sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen); michael@0: int sooptcopyout(struct sockopt *sopt, const void *buf, size_t len); michael@0: michael@0: /* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. 
*/ michael@0: int soopt_getm(struct sockopt *sopt, struct mbuf **mp); michael@0: int soopt_mcopyin(struct sockopt *sopt, struct mbuf *m); michael@0: int soopt_mcopyout(struct sockopt *sopt, struct mbuf *m); michael@0: michael@0: int sopoll(struct socket *so, int events, struct ucred *active_cred, michael@0: struct thread *td); michael@0: int sopoll_generic(struct socket *so, int events, michael@0: struct ucred *active_cred, struct thread *td); michael@0: int soreceive(struct socket *so, struct sockaddr **paddr, struct uio *uio, michael@0: struct mbuf **mp0, struct mbuf **controlp, int *flagsp); michael@0: int soreceive_generic(struct socket *so, struct sockaddr **paddr, michael@0: struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, michael@0: int *flagsp); michael@0: int soreserve(struct socket *so, u_long sndcc, u_long rcvcc); michael@0: void sorflush(struct socket *so); michael@0: int sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, michael@0: struct mbuf *top, struct mbuf *control, int flags, michael@0: struct thread *td); michael@0: int sosend_dgram(struct socket *so, struct sockaddr *addr, michael@0: struct uio *uio, struct mbuf *top, struct mbuf *control, michael@0: int flags, struct thread *td); michael@0: int sosend_generic(struct socket *so, struct sockaddr *addr, michael@0: struct uio *uio, struct mbuf *top, struct mbuf *control, michael@0: int flags, struct thread *td); michael@0: int sosetopt(struct socket *so, struct sockopt *sopt); michael@0: int soshutdown(struct socket *so, int how); michael@0: void sotoxsocket(struct socket *so, struct xsocket *xso); michael@0: void sowakeup(struct socket *so, struct sockbuf *sb); michael@0: michael@0: #ifdef SOCKBUF_DEBUG michael@0: void sblastrecordchk(struct sockbuf *, const char *, int); michael@0: #define SBLASTRECORDCHK(sb) sblastrecordchk((sb), __FILE__, __LINE__) michael@0: michael@0: void sblastmbufchk(struct sockbuf *, const char *, int); michael@0: #define SBLASTMBUFCHK(sb) 
sblastmbufchk((sb), __FILE__, __LINE__) michael@0: #else michael@0: #define SBLASTRECORDCHK(sb) /* nothing */ michael@0: #define SBLASTMBUFCHK(sb) /* nothing */ michael@0: #endif /* SOCKBUF_DEBUG */ michael@0: michael@0: /* michael@0: * Accept filter functions (duh). michael@0: */ michael@0: int accept_filt_add(struct accept_filter *filt); michael@0: int accept_filt_del(char *name); michael@0: struct accept_filter *accept_filt_get(char *name); michael@0: #ifdef ACCEPT_FILTER_MOD michael@0: #ifdef SYSCTL_DECL michael@0: SYSCTL_DECL(_net_inet_accf); michael@0: #endif michael@0: int accept_filt_generic_mod_event(module_t mod, int event, void *data); michael@0: #endif michael@0: michael@0: #endif /* _KERNEL */ michael@0: michael@0: michael@0: /*-------------------------------------------------------------*/ michael@0: /*-------------------------------------------------------------*/ michael@0: /* __Userspace__ */ michael@0: /*-------------------------------------------------------------*/ michael@0: /*-------------------------------------------------------------*/ michael@0: /* this new __Userspace__ section is to copy portions of the _KERNEL block michael@0: * above into, avoiding having to port the entire thing at once... michael@0: * For function prototypes, the full bodies are in user_socket.c . 
michael@0: */ michael@0: #if defined(__Userspace__) michael@0: michael@0: /* ---------------------------------------------------------- */ michael@0: /* --- function prototypes (implemented in user_socket.c) --- */ michael@0: /* ---------------------------------------------------------- */ michael@0: void soisconnecting(struct socket *so); michael@0: void soisdisconnecting(struct socket *so); michael@0: void soisconnected(struct socket *so); michael@0: struct socket * sonewconn(struct socket *head, int connstatus); michael@0: void socantrcvmore(struct socket *so); michael@0: void socantsendmore(struct socket *so); michael@0: michael@0: michael@0: michael@0: /* -------------- */ michael@0: /* --- macros --- */ michael@0: /* -------------- */ michael@0: michael@0: #define soref(so) do { \ michael@0: SOCK_LOCK_ASSERT(so); \ michael@0: ++(so)->so_count; \ michael@0: } while (0) michael@0: michael@0: #define sorele(so) do { \ michael@0: ACCEPT_LOCK_ASSERT(); \ michael@0: SOCK_LOCK_ASSERT(so); \ michael@0: KASSERT((so)->so_count > 0, ("sorele")); \ michael@0: if (--(so)->so_count == 0) \ michael@0: sofree(so); \ michael@0: else { \ michael@0: SOCK_UNLOCK(so); \ michael@0: ACCEPT_UNLOCK(); \ michael@0: } \ michael@0: } while (0) michael@0: michael@0: michael@0: /* replacing imin with min (user_environment.h) */ michael@0: #define sbspace(sb) \ michael@0: ((long) min((int)((sb)->sb_hiwat - (sb)->sb_cc), \ michael@0: (int)((sb)->sb_mbmax - (sb)->sb_mbcnt))) michael@0: michael@0: /* do we have to send all at once on a socket? */ michael@0: #define sosendallatonce(so) \ michael@0: ((so)->so_proto->pr_flags & PR_ATOMIC) michael@0: michael@0: /* can we read something from so? 
*/ michael@0: #define soreadable(so) \ michael@0: ((int)((so)->so_rcv.sb_cc) >= (so)->so_rcv.sb_lowat || \ michael@0: ((so)->so_rcv.sb_state & SBS_CANTRCVMORE) || \ michael@0: !TAILQ_EMPTY(&(so)->so_comp) || (so)->so_error) michael@0: michael@0: #if 0 /* original */ michael@0: #define PR_CONNREQUIRED 0x04 /* from sys/protosw.h "needed" for sowriteable */ michael@0: #define sowriteable(so) \ michael@0: ((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \ michael@0: (((so)->so_state&SS_ISCONNECTED) || \ michael@0: ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) || \ michael@0: ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \ michael@0: (so)->so_error) michael@0: #else /* line with PR_CONNREQUIRED removed */ michael@0: /* can we write something to so? */ michael@0: #define sowriteable(so) \ michael@0: ((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \ michael@0: (((so)->so_state&SS_ISCONNECTED))) || \ michael@0: ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \ michael@0: (so)->so_error) michael@0: #endif michael@0: michael@0: extern void solisten_proto(struct socket *so, int backlog); michael@0: extern int solisten_proto_check(struct socket *so); michael@0: extern int sctp_listen(struct socket *so, int backlog, struct proc *p); michael@0: extern void socantrcvmore_locked(struct socket *so); michael@0: extern int sctp_bind(struct socket *so, struct sockaddr *addr); michael@0: extern int sctp6_bind(struct socket *so, struct sockaddr *addr, void *proc); michael@0: #if defined(__Userspace__) michael@0: extern int sctpconn_bind(struct socket *so, struct sockaddr *addr); michael@0: #endif michael@0: extern int sctp_accept(struct socket *so, struct sockaddr **addr); michael@0: extern int sctp_attach(struct socket *so, int proto, uint32_t vrf_id); michael@0: extern int sctp6_attach(struct socket *so, int proto, uint32_t vrf_id); michael@0: extern int sctp_abort(struct socket *so); michael@0: extern int sctp6_abort(struct socket *so); michael@0: extern void 
sctp_close(struct socket *so); michael@0: extern int soaccept(struct socket *so, struct sockaddr **nam); michael@0: extern int solisten(struct socket *so, int backlog); michael@0: extern int soreserve(struct socket *so, u_long sndcc, u_long rcvcc); michael@0: extern void sowakeup(struct socket *so, struct sockbuf *sb); michael@0: extern void wakeup(void *ident, struct socket *so); /*__Userspace__ */ michael@0: extern int uiomove(void *cp, int n, struct uio *uio); michael@0: extern int sbwait(struct sockbuf *sb); michael@0: extern int sodisconnect(struct socket *so); michael@0: extern int soconnect(struct socket *so, struct sockaddr *nam); michael@0: extern int sctp_disconnect(struct socket *so); michael@0: extern int sctp_connect(struct socket *so, struct sockaddr *addr); michael@0: extern int sctp6_connect(struct socket *so, struct sockaddr *addr); michael@0: #if defined(__Userspace__) michael@0: extern int sctpconn_connect(struct socket *so, struct sockaddr *addr); michael@0: #endif michael@0: extern void sctp_finish(void); michael@0: michael@0: /* ------------------------------------------------ */ michael@0: /* ----- macros copied from above ---- */ michael@0: /* ------------------------------------------------ */ michael@0: michael@0: /* michael@0: * Do we need to notify the other side when I/O is possible? michael@0: */ michael@0: #define sb_notify(sb) (((sb)->sb_flags & (SB_WAIT | SB_SEL | SB_ASYNC | \ michael@0: SB_UPCALL | SB_AIO | SB_KNOTE)) != 0) michael@0: michael@0: michael@0: /* michael@0: * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to michael@0: * avoid a non-atomic test-and-wakeup. However, sowakeup is michael@0: * responsible for releasing the lock if it is called. We unlock only michael@0: * if we don't call into sowakeup. If any code is introduced that michael@0: * directly invokes the underlying sowakeup() primitives, it must michael@0: * maintain the same semantics. 
michael@0: */ michael@0: #define sorwakeup_locked(so) do { \ michael@0: SOCKBUF_LOCK_ASSERT(&(so)->so_rcv); \ michael@0: if (sb_notify(&(so)->so_rcv)) \ michael@0: sowakeup((so), &(so)->so_rcv); \ michael@0: else \ michael@0: SOCKBUF_UNLOCK(&(so)->so_rcv); \ michael@0: } while (0) michael@0: michael@0: #define sorwakeup(so) do { \ michael@0: SOCKBUF_LOCK(&(so)->so_rcv); \ michael@0: sorwakeup_locked(so); \ michael@0: } while (0) michael@0: michael@0: #define sowwakeup_locked(so) do { \ michael@0: SOCKBUF_LOCK_ASSERT(&(so)->so_snd); \ michael@0: if (sb_notify(&(so)->so_snd)) \ michael@0: sowakeup((so), &(so)->so_snd); \ michael@0: else \ michael@0: SOCKBUF_UNLOCK(&(so)->so_snd); \ michael@0: } while (0) michael@0: michael@0: #define sowwakeup(so) do { \ michael@0: SOCKBUF_LOCK(&(so)->so_snd); \ michael@0: sowwakeup_locked(so); \ michael@0: } while (0) michael@0: michael@0: michael@0: michael@0: #endif /* __Userspace__ */ michael@0: michael@0: #endif /* !_SYS_SOCKETVAR_H_ */