/*-
 * Copyright (c) 1982, 1986, 1990, 1993
 * The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/* __Userspace__ version of <sys/socketvar.h> goes here. */

#ifndef _USER_SOCKETVAR_H_
#define _USER_SOCKETVAR_H_

#if defined(__Userspace_os_Darwin)
#include <sys/types.h>
#include <unistd.h>
#endif

/* #include <sys/selinfo.h> */ /*__Userspace__ alternative?*/ /* for struct selinfo */
/* #include <sys/_lock.h> was 0 byte file */
/* #include <sys/_mutex.h> was 0 byte file */
/* #include <sys/_sx.h> */ /*__Userspace__ alternative?*/
#if !defined(__Userspace_os_DragonFly) && !defined(__Userspace_os_FreeBSD) && !defined(__Userspace_os_NetBSD) && !defined(__Userspace_os_Windows)
#include <sys/uio.h>
#endif
#define SOCK_MAXADDRLEN 255
#if !defined(MSG_NOTIFICATION)
#define MSG_NOTIFICATION 0x2000 /* SCTP notification */
#endif
#define SCTP_SO_LINGER 0x0001
#define SCTP_SO_ACCEPTCONN 0x0002
#define SS_CANTRCVMORE 0x020
#define SS_CANTSENDMORE 0x010

#if defined(__Userspace_os_Darwin) || defined(__Userspace_os_DragonFly) || defined(__Userspace_os_FreeBSD) || defined(__Userspace_os_OpenBSD) || defined(__Userspace_os_Windows)
#define UIO_MAXIOV 1024
#define ERESTART (-1)
#endif

#if !defined(__Userspace_os_Darwin) && !defined(__Userspace_os_NetBSD) && !defined(__Userspace_os_OpenBSD)
enum uio_rw { UIO_READ, UIO_WRITE };
#endif

#if !defined(__Userspace_os_NetBSD) && !defined(__Userspace_os_OpenBSD)
/* Segment flag values. */
enum uio_seg {
        UIO_USERSPACE, /* from user data space */
        UIO_SYSSPACE /* from system space */
};
#endif

struct proc {
        int stub; /* struct proc is a dummy for __Userspace__ */
};

MALLOC_DECLARE(M_ACCF);
MALLOC_DECLARE(M_PCB);
MALLOC_DECLARE(M_SONAME);

/* __Userspace__ Are these all the fields we need?
 * Removing struct thread *uio_td; owner field
 */
struct uio {
        struct iovec *uio_iov; /* scatter/gather list */
        int uio_iovcnt; /* length of scatter/gather list */
        off_t uio_offset; /* offset in target object */
        int uio_resid; /* remaining bytes to process */
        enum uio_seg uio_segflg; /* address space */
        enum uio_rw uio_rw; /* operation */
};

/* __Userspace__ */

/*
 * Kernel structure per socket.
 * Contains send and receive buffer queues,
 * handle on protocol and pointer to protocol
 * private data and error information.
 */
#if defined(__Userspace_os_Windows)
#define AF_ROUTE 17
typedef __int32 pid_t;
typedef unsigned __int32 uid_t;
enum sigType {
        SIGNAL = 0,
        BROADCAST = 1,
        MAX_EVENTS = 2
};
#endif

/*-
 * Locking key to struct socket:
 * (a) constant after allocation, no locking required.
 * (b) locked by SOCK_LOCK(so).
 * (c) locked by SOCKBUF_LOCK(&so->so_rcv).
 * (d) locked by SOCKBUF_LOCK(&so->so_snd).
 * (e) locked by ACCEPT_LOCK().
 * (f) not locked since integer reads/writes are atomic.
 * (g) used only as a sleep/wakeup address, no value.
 * (h) locked by global mutex so_global_mtx.
 */
struct socket {
        int so_count; /* (b) reference count */
        short so_type; /* (a) generic type, see socket.h */
        short so_options; /* from socket call, see socket.h */
        short so_linger; /* time to linger while closing */
        short so_state; /* (b) internal state flags SS_* */
        int so_qstate; /* (e) internal state flags SQ_* */
        void *so_pcb; /* protocol control block */
        int so_dom;
        /*
         * Variables for connection queuing.
         * Socket where accepts occur is so_head in all subsidiary sockets.
         * If so_head is 0, socket is not related to an accept.
         * For head socket so_incomp queues partially completed connections,
         * while so_comp is a queue of connections ready to be accepted.
         * If a connection is aborted and it has so_head set, then
         * it has to be pulled out of either so_incomp or so_comp.
         * We allow connections to queue up based on current queue lengths
         * and limit on number of queued connections for this socket.
         */
        struct socket *so_head; /* (e) back pointer to listen socket */
        TAILQ_HEAD(, socket) so_incomp; /* (e) queue of partial unaccepted connections */
        TAILQ_HEAD(, socket) so_comp; /* (e) queue of complete unaccepted connections */
        TAILQ_ENTRY(socket) so_list; /* (e) list of unaccepted connections */
        u_short so_qlen; /* (e) number of unaccepted connections */
        u_short so_incqlen; /* (e) number of unaccepted incomplete
                               connections */
        u_short so_qlimit; /* (e) max number queued connections */
        short so_timeo; /* (g) connection timeout */
        userland_cond_t timeo_cond; /* timeo_cond condition variable being used in wakeup */

        u_short so_error; /* (f) error affecting connection */
        struct sigio *so_sigio; /* [sg] information for async I/O or
                                   out of band data (SIGURG) */
        u_long so_oobmark; /* (c) chars to oob mark */
        TAILQ_HEAD(, aiocblist) so_aiojobq; /* AIO ops waiting on socket */
        /*
         * Variables for socket buffering.
         */
        struct sockbuf {
                /* __Userspace__ Many of these fields may
                 * not be required for the sctp stack.
                 * Commenting out the following.
                 * Including pthread mutex and condition variable to be
                 * used by sbwait, sorwakeup and sowwakeup.
                 */
                /* struct selinfo sb_sel;*/ /* process selecting read/write */
                /* struct mtx sb_mtx;*/ /* sockbuf lock */
                /* struct sx sb_sx;*/ /* prevent I/O interlacing */
                userland_cond_t sb_cond; /* sockbuf condition variable */
                userland_mutex_t sb_mtx; /* sockbuf lock associated with sb_cond */
                short sb_state; /* (c/d) socket state on sockbuf */
#define sb_startzero sb_mb
                struct mbuf *sb_mb; /* (c/d) the mbuf chain */
                struct mbuf *sb_mbtail; /* (c/d) the last mbuf in the chain */
                struct mbuf *sb_lastrecord; /* (c/d) first mbuf of last
                                             * record in socket buffer */
                struct mbuf *sb_sndptr; /* (c/d) pointer into mbuf chain */
                u_int sb_sndptroff; /* (c/d) byte offset of ptr into chain */
                u_int sb_cc; /* (c/d) actual chars in buffer */
                u_int sb_hiwat; /* (c/d) max actual char count */
                u_int sb_mbcnt; /* (c/d) chars of mbufs used */
                u_int sb_mbmax; /* (c/d) max chars of mbufs to use */
                u_int sb_ctl; /* (c/d) non-data chars in buffer */
                int sb_lowat; /* (c/d) low water mark */
                int sb_timeo; /* (c/d) timeout for read/write */
                short sb_flags; /* (c/d) flags, see below */
        } so_rcv, so_snd;
/*
 * Constants for sb_flags field of struct sockbuf.
 */
#define SB_MAX (256*1024) /* default for max chars in sockbuf */
#define SB_RAW (64*1024*2) /* aligning so->so_rcv.sb_hiwat with the receive buffer size of a raw socket */
/*
 * Constants for sb_flags field of struct sockbuf.
 */
#define SB_WAIT 0x04 /* someone is waiting for data/space */
#define SB_SEL 0x08 /* someone is selecting */
#define SB_ASYNC 0x10 /* ASYNC I/O, need signals */
#define SB_UPCALL 0x20 /* someone wants an upcall */
#define SB_NOINTR 0x40 /* operations not interruptible */
#define SB_AIO 0x80 /* AIO operations queued */
#define SB_KNOTE 0x100 /* kernel note attached */
#define SB_AUTOSIZE 0x800 /* automatically size socket buffer */

        void (*so_upcall)(struct socket *, void *, int);
        void *so_upcallarg;
        struct ucred *so_cred; /* (a) user credentials */
        struct label *so_label; /* (b) MAC label for socket */
        struct label *so_peerlabel; /* (b) cached MAC label for peer */
        /* NB: generation count must not be first. */
        uint32_t so_gencnt; /* (h) generation count */
        void *so_emuldata; /* (b) private data for emulators */
        struct so_accf {
                struct accept_filter *so_accept_filter;
                void *so_accept_filter_arg; /* saved filter args */
                char *so_accept_filter_str; /* saved user args */
        } *so_accf;
};
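
/*
 * __Userspace__ editorial sketch (kept under #if 0, never compiled),
 * illustrating the locking key above: a (c) field is touched under the
 * receive sockbuf lock, a (b) field under SOCK_LOCK().  The helper name
 * example_mark_cantrcv() is made up; SOCK_LOCK()/SOCKBUF_LOCK() come from
 * netinet/sctp_process_lock.h and the state bits are defined further below.
 */
#if 0
static void
example_mark_cantrcv(struct socket *so)
{
        SOCKBUF_LOCK(&so->so_rcv);
        so->so_rcv.sb_state |= SBS_CANTRCVMORE;  /* (c) field */
        SOCKBUF_UNLOCK(&so->so_rcv);

        SOCK_LOCK(so);
        so->so_state |= SS_ISDISCONNECTED;       /* (b) field */
        SOCK_UNLOCK(so);
}
#endif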

#define SB_EMPTY_FIXUP(sb) do { \
        if ((sb)->sb_mb == NULL) { \
                (sb)->sb_mbtail = NULL; \
                (sb)->sb_lastrecord = NULL; \
        } \
} while (/*CONSTCOND*/0)

/*
 * Global accept mutex to serialize access to accept queues and
 * fields associated with multiple sockets.  This allows us to
 * avoid defining a lock order between listen and accept sockets
 * until such time as it proves to be a good idea.
 */
#if defined(__Userspace_os_Windows)
extern userland_mutex_t accept_mtx;
extern userland_cond_t accept_cond;
#define ACCEPT_LOCK_ASSERT()
#define ACCEPT_LOCK() do { \
        EnterCriticalSection(&accept_mtx); \
} while (0)
#define ACCEPT_UNLOCK() do { \
        LeaveCriticalSection(&accept_mtx); \
} while (0)
#define ACCEPT_UNLOCK_ASSERT()
#else
extern userland_mutex_t accept_mtx;
extern userland_cond_t accept_cond;
#define ACCEPT_LOCK_ASSERT() KASSERT(pthread_mutex_trylock(&accept_mtx) == EBUSY, ("%s: accept_mtx not locked", __func__))
#define ACCEPT_LOCK() (void)pthread_mutex_lock(&accept_mtx)
#define ACCEPT_UNLOCK() (void)pthread_mutex_unlock(&accept_mtx)
#define ACCEPT_UNLOCK_ASSERT() do { \
        KASSERT(pthread_mutex_trylock(&accept_mtx) == 0, ("%s: accept_mtx locked", __func__)); \
        (void)pthread_mutex_unlock(&accept_mtx); \
} while (0)
#endif
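
/*
 * __Userspace__ editorial sketch (kept under #if 0, never compiled) of how
 * the global accept mutex serializes accept-queue work: so_comp, so_qlen
 * and friends are (e) data, so they are only touched with ACCEPT_LOCK()
 * held.  The helper example_dequeue_completed() is hypothetical; SQ_COMP
 * is defined further below in this header.
 */
#if 0
static struct socket *
example_dequeue_completed(struct socket *head)
{
        struct socket *so;

        ACCEPT_LOCK();
        so = TAILQ_FIRST(&head->so_comp);
        if (so != NULL) {
                TAILQ_REMOVE(&head->so_comp, so, so_list);
                head->so_qlen--;
                so->so_qstate &= ~SQ_COMP;
                so->so_head = NULL;
        }
        ACCEPT_UNLOCK();
        return (so);
}
#endif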

/*
 * Per-socket buffer mutex used to protect most fields in the socket
 * buffer.
 */
#define SOCKBUF_MTX(_sb) (&(_sb)->sb_mtx)
#if defined(__Userspace_os_Windows)
#define SOCKBUF_LOCK_INIT(_sb, _name) \
        InitializeCriticalSection(SOCKBUF_MTX(_sb))
#define SOCKBUF_LOCK_DESTROY(_sb) DeleteCriticalSection(SOCKBUF_MTX(_sb))
#define SOCKBUF_COND_INIT(_sb) InitializeConditionVariable((&(_sb)->sb_cond))
#define SOCKBUF_COND_DESTROY(_sb) DeleteConditionVariable((&(_sb)->sb_cond))
#define SOCK_COND_INIT(_so) InitializeConditionVariable((&(_so)->timeo_cond))
#define SOCK_COND_DESTROY(_so) DeleteConditionVariable((&(_so)->timeo_cond))
#define SOCK_COND(_so) (&(_so)->timeo_cond)
#else
#define SOCKBUF_LOCK_INIT(_sb, _name) \
        pthread_mutex_init(SOCKBUF_MTX(_sb), NULL)
#define SOCKBUF_LOCK_DESTROY(_sb) pthread_mutex_destroy(SOCKBUF_MTX(_sb))
#define SOCKBUF_COND_INIT(_sb) pthread_cond_init((&(_sb)->sb_cond), NULL)
#define SOCKBUF_COND_DESTROY(_sb) pthread_cond_destroy((&(_sb)->sb_cond))
#define SOCK_COND_INIT(_so) pthread_cond_init((&(_so)->timeo_cond), NULL)
#define SOCK_COND_DESTROY(_so) pthread_cond_destroy((&(_so)->timeo_cond))
#define SOCK_COND(_so) (&(_so)->timeo_cond)
#endif
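
/*
 * __Userspace__ editorial sketch (kept under #if 0, never compiled):
 * roughly the per-socket setup a new socket needs before sbwait() and
 * the wakeup macros can be used, built from the *_INIT macros above.
 * Teardown mirrors this with the corresponding *_DESTROY macros.  The
 * helper name example_init_sync() is made up for illustration.
 */
#if 0
static void
example_init_sync(struct socket *so)
{
        SOCKBUF_LOCK_INIT(&so->so_rcv, "so_rcv");
        SOCKBUF_LOCK_INIT(&so->so_snd, "so_snd");
        SOCKBUF_COND_INIT(&so->so_rcv);
        SOCKBUF_COND_INIT(&so->so_snd);
        SOCK_COND_INIT(so);
}
#endif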

/*__Userspace__ SOCKBUF_LOCK(_sb) is now defined in netinet/sctp_process_lock.h */

/* #define SOCKBUF_OWNED(_sb) mtx_owned(SOCKBUF_MTX(_sb)) unused */
/*__Userspace__ SOCKBUF_UNLOCK(_sb) is now defined in netinet/sctp_process_lock.h */

/*__Userspace__ SOCKBUF_LOCK_ASSERT(_sb) is now defined in netinet/sctp_process_lock.h */

/* #define SOCKBUF_UNLOCK_ASSERT(_sb) mtx_assert(SOCKBUF_MTX(_sb), MA_NOTOWNED) unused */

/*
 * Per-socket mutex: we reuse the receive socket buffer mutex for space
 * efficiency.  This decision should probably be revisited as we optimize
 * locking for the socket code.
 */
#define SOCK_MTX(_so) SOCKBUF_MTX(&(_so)->so_rcv)
/*__Userspace__ SOCK_LOCK(_so) is now defined in netinet/sctp_process_lock.h */

/* #define SOCK_OWNED(_so) SOCKBUF_OWNED(&(_so)->so_rcv) unused */
/*__Userspace__ SOCK_UNLOCK(_so) is now defined in netinet/sctp_process_lock.h */

#define SOCK_LOCK_ASSERT(_so) SOCKBUF_LOCK_ASSERT(&(_so)->so_rcv)

/*
 * Socket state bits.
 *
 * Historically, these bits were all kept in the so_state field.  For
 * locking reasons, they are now in multiple fields, as they are
 * locked differently.  so_state maintains basic socket state protected
 * by the socket lock.  so_qstate holds information about the socket
 * accept queues.  Each socket buffer also has a state field holding
 * information relevant to that socket buffer (can't send, rcv).  Many
 * fields will be read without locks to improve performance and avoid
 * lock order issues.  However, this approach must be used with caution.
 */
#define SS_NOFDREF 0x0001 /* no file table ref any more */
#define SS_ISCONNECTED 0x0002 /* socket connected to a peer */
#define SS_ISCONNECTING 0x0004 /* in process of connecting to peer */
#define SS_ISDISCONNECTING 0x0008 /* in process of disconnecting */
#define SS_NBIO 0x0100 /* non-blocking ops */
#define SS_ASYNC 0x0200 /* async i/o notify */
#define SS_ISCONFIRMING 0x0400 /* deciding to accept connection req */
#define SS_ISDISCONNECTED 0x2000 /* socket disconnected from peer */
/*
 * Protocols can mark a socket as SS_PROTOREF to indicate that, following
 * pru_detach, they still want the socket to persist, and will free it
 * themselves when they are done.  Protocols should only ever call sofree()
 * following setting this flag in pru_detach(), and never otherwise, as
 * sofree() bypasses socket reference counting.
 */
#define SS_PROTOREF 0x4000 /* strong protocol reference */

/*
 * Socket state bits now stored in the socket buffer state field.
 */
#define SBS_CANTSENDMORE 0x0010 /* can't send more data to peer */
#define SBS_CANTRCVMORE 0x0020 /* can't receive more data from peer */
#define SBS_RCVATMARK 0x0040 /* at mark on input */

/*
 * Socket state bits stored in so_qstate.
 */
#define SQ_INCOMP 0x0800 /* unaccepted, incomplete connection */
#define SQ_COMP 0x1000 /* unaccepted, complete connection */
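
/*
 * __Userspace__ editorial sketch (kept under #if 0, never compiled):
 * so_state ((b), SOCK_LOCK) and the per-sockbuf sb_state ((c)/(d),
 * SOCKBUF_LOCK) live under different locks, so a caller checks them
 * separately.  The helper example_can_still_send() is hypothetical.
 */
#if 0
static int
example_can_still_send(struct socket *so)
{
        int ok;

        SOCK_LOCK(so);
        ok = (so->so_state & SS_ISCONNECTED) != 0;
        SOCK_UNLOCK(so);

        SOCKBUF_LOCK(&so->so_snd);
        if (so->so_snd.sb_state & SBS_CANTSENDMORE)
                ok = 0;
        SOCKBUF_UNLOCK(&so->so_snd);
        return (ok);
}
#endif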

/*
 * Externalized form of struct socket used by the sysctl(3) interface.
 */
struct xsocket {
        size_t xso_len; /* length of this structure */
        struct socket *xso_so; /* makes a convenient handle sometimes */
        short so_type;
        short so_options;
        short so_linger;
        short so_state;
        caddr_t so_pcb; /* another convenient handle */
        int xso_protocol;
        int xso_family;
        u_short so_qlen;
        u_short so_incqlen;
        u_short so_qlimit;
        short so_timeo;
        u_short so_error;
        pid_t so_pgid;
        u_long so_oobmark;
        struct xsockbuf {
                u_int sb_cc;
                u_int sb_hiwat;
                u_int sb_mbcnt;
                u_int sb_mbmax;
                int sb_lowat;
                int sb_timeo;
                short sb_flags;
        } so_rcv, so_snd;
        uid_t so_uid; /* XXX */
};
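
/*
 * __Userspace__ editorial sketch (kept under #if 0, never compiled):
 * sotoxsocket() and sbtoxsockbuf(), declared in the _KERNEL block below,
 * copy a live socket into this fixed-layout snapshot so monitoring code
 * never has to look at the locked socket itself.  The helper name
 * example_export() is made up for illustration.
 */
#if 0
static void
example_export(struct socket *so)
{
        struct xsocket xso;

        sotoxsocket(so, &xso);
        /* xso can now be handed to a sysctl-style consumer. */
}
#endif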

#if defined(_KERNEL)

/*
 * Macros for sockets and socket buffering.
 */

/*
 * Do we need to notify the other side when I/O is possible?
 */
#define sb_notify(sb) (((sb)->sb_flags & (SB_WAIT | SB_SEL | SB_ASYNC | \
    SB_UPCALL | SB_AIO | SB_KNOTE)) != 0)

/*
 * How much space is there in a socket buffer (so->so_snd or so->so_rcv)?
 * This is problematical if the fields are unsigned, as the space might
 * still be negative (cc > hiwat or mbcnt > mbmax).  Should detect
 * overflow and return 0.  Should use "lmin" but it doesn't exist now.
 */
#define sbspace(sb) \
    ((long) imin((int)((sb)->sb_hiwat - (sb)->sb_cc), \
    (int)((sb)->sb_mbmax - (sb)->sb_mbcnt)))
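
/*
 * __Userspace__ editorial sketch (kept under #if 0, never compiled):
 * sbspace() is the smaller of the byte headroom (sb_hiwat - sb_cc) and
 * the mbuf-accounting headroom (sb_mbmax - sb_mbcnt).  For example, with
 * sb_hiwat = 256*1024, sb_cc = 1000, sb_mbmax = 512*1024 and
 * sb_mbcnt = 2048, sbspace() returns 262144 - 1000 = 261144 bytes.  As
 * the comment above warns, the result can go negative, so a careful
 * caller clamps it; the helper example_headroom() is hypothetical.
 */
#if 0
static long
example_headroom(struct socket *so)
{
        long space;

        SOCKBUF_LOCK(&so->so_snd);
        space = sbspace(&so->so_snd);
        SOCKBUF_UNLOCK(&so->so_snd);
        return (space > 0 ? space : 0);
}
#endif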

/* do we have to send all at once on a socket? */
#define sosendallatonce(so) \
    ((so)->so_proto->pr_flags & PR_ATOMIC)

/* can we read something from so? */
#define soreadable(so) \
    ((so)->so_rcv.sb_cc >= (so)->so_rcv.sb_lowat || \
    ((so)->so_rcv.sb_state & SBS_CANTRCVMORE) || \
    !TAILQ_EMPTY(&(so)->so_comp) || (so)->so_error)

/* can we write something to so? */
#define sowriteable(so) \
    ((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \
    (((so)->so_state&SS_ISCONNECTED) || \
    ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) || \
    ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \
    (so)->so_error)
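
/*
 * __Userspace__ editorial sketch (kept under #if 0, never compiled) of a
 * poll-style readiness check built on the soreadable()/sowriteable()
 * predicates above; a caller would normally hold the relevant socket
 * buffer locks while evaluating them.  POLLIN/POLLOUT come from the
 * system <poll.h>, and example_poll_events() is a made-up helper name.
 */
#if 0
static int
example_poll_events(struct socket *so)
{
        int revents = 0;

        SOCKBUF_LOCK(&so->so_rcv);
        if (soreadable(so))
                revents |= POLLIN;
        SOCKBUF_UNLOCK(&so->so_rcv);

        SOCKBUF_LOCK(&so->so_snd);
        if (sowriteable(so))
                revents |= POLLOUT;
        SOCKBUF_UNLOCK(&so->so_snd);
        return (revents);
}
#endif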

/* adjust counters in sb reflecting allocation of m */
#define sballoc(sb, m) { \
        (sb)->sb_cc += (m)->m_len; \
        if ((m)->m_type != MT_DATA && (m)->m_type != MT_OOBDATA) \
                (sb)->sb_ctl += (m)->m_len; \
        (sb)->sb_mbcnt += MSIZE; \
        if ((m)->m_flags & M_EXT) \
                (sb)->sb_mbcnt += (m)->m_ext.ext_size; \
}

/* adjust counters in sb reflecting freeing of m */
#define sbfree(sb, m) { \
        (sb)->sb_cc -= (m)->m_len; \
        if ((m)->m_type != MT_DATA && (m)->m_type != MT_OOBDATA) \
                (sb)->sb_ctl -= (m)->m_len; \
        (sb)->sb_mbcnt -= MSIZE; \
        if ((m)->m_flags & M_EXT) \
                (sb)->sb_mbcnt -= (m)->m_ext.ext_size; \
        if ((sb)->sb_sndptr == (m)) { \
                (sb)->sb_sndptr = NULL; \
                (sb)->sb_sndptroff = 0; \
        } \
        if ((sb)->sb_sndptroff != 0) \
                (sb)->sb_sndptroff -= (m)->m_len; \
}
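
/*
 * __Userspace__ editorial sketch (kept under #if 0, never compiled) of the
 * accounting that brackets an mbuf's stay in a socket buffer: sballoc()
 * grows sb_cc/sb_mbcnt when the mbuf is queued, sbfree() reverses that
 * when it is released, and SB_EMPTY_FIXUP() clears the tail pointers once
 * the chain is empty.  example_account() is a made-up helper that only
 * exists to show the pairing.
 */
#if 0
static void
example_account(struct sockbuf *sb, struct mbuf *m)
{
        SOCKBUF_LOCK_ASSERT(sb);

        sballoc(sb, m);         /* counters grow by m's data and storage */
        /* ... m sits on the sb_mb chain while it is buffered ... */
        sbfree(sb, m);          /* accounting is reversed on release */
        if (sb->sb_mb == NULL)
                SB_EMPTY_FIXUP(sb);
}
#endif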

/*
 * soref()/sorele() ref-count the socket structure.  Note that you must
 * still explicitly close the socket, but the last ref count will free
 * the structure.
 */
#define soref(so) do { \
        SOCK_LOCK_ASSERT(so); \
        ++(so)->so_count; \
} while (0)

#define sorele(so) do { \
        ACCEPT_LOCK_ASSERT(); \
        SOCK_LOCK_ASSERT(so); \
        KASSERT((so)->so_count > 0, ("sorele")); \
        if (--(so)->so_count == 0) \
                sofree(so); \
        else { \
                SOCK_UNLOCK(so); \
                ACCEPT_UNLOCK(); \
        } \
} while (0)

#define sotryfree(so) do { \
        ACCEPT_LOCK_ASSERT(); \
        SOCK_LOCK_ASSERT(so); \
        if ((so)->so_count == 0) \
                sofree(so); \
        else { \
                SOCK_UNLOCK(so); \
                ACCEPT_UNLOCK(); \
        } \
} while (0)
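
/*
 * __Userspace__ editorial sketch (kept under #if 0, never compiled) of the
 * reference pattern described above: take a reference under SOCK_LOCK()
 * before working on the socket, and let the final sorele() call sofree().
 * Note that sorele() expects both the accept lock and the socket lock to
 * be held and drops them on the way out.  example_use_socket() is a
 * hypothetical caller.
 */
#if 0
static void
example_use_socket(struct socket *so)
{
        SOCK_LOCK(so);
        soref(so);
        SOCK_UNLOCK(so);

        /* ... work with the socket ... */

        ACCEPT_LOCK();
        SOCK_LOCK(so);
        sorele(so);             /* frees the socket if this was the last ref */
}
#endif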

/*
 * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to
 * avoid a non-atomic test-and-wakeup.  However, sowakeup is
 * responsible for releasing the lock if it is called.  We unlock only
 * if we don't call into sowakeup.  If any code is introduced that
 * directly invokes the underlying sowakeup() primitives, it must
 * maintain the same semantics.
 */
#define sorwakeup_locked(so) do { \
        SOCKBUF_LOCK_ASSERT(&(so)->so_rcv); \
        if (sb_notify(&(so)->so_rcv)) \
                sowakeup((so), &(so)->so_rcv); \
        else \
                SOCKBUF_UNLOCK(&(so)->so_rcv); \
} while (0)

#define sorwakeup(so) do { \
        SOCKBUF_LOCK(&(so)->so_rcv); \
        sorwakeup_locked(so); \
} while (0)

#define sowwakeup_locked(so) do { \
        SOCKBUF_LOCK_ASSERT(&(so)->so_snd); \
        if (sb_notify(&(so)->so_snd)) \
                sowakeup((so), &(so)->so_snd); \
        else \
                SOCKBUF_UNLOCK(&(so)->so_snd); \
} while (0)

#define sowwakeup(so) do { \
        SOCKBUF_LOCK(&(so)->so_snd); \
        sowwakeup_locked(so); \
} while (0)
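
/*
 * __Userspace__ editorial sketch (kept under #if 0, never compiled) of the
 * locked append-and-wakeup pattern the comment above describes: take the
 * receive sockbuf lock once, append, and let sorwakeup_locked() either
 * hand the lock to sowakeup() or release it itself.  sbappend_locked() is
 * declared later in this header; example_deliver() is a made-up name.
 */
#if 0
static void
example_deliver(struct socket *so, struct mbuf *m)
{
        SOCKBUF_LOCK(&so->so_rcv);
        sbappend_locked(&so->so_rcv, m);
        sorwakeup_locked(so);           /* lock is released either way */
}
#endif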

/*
 * Argument structure for sosetopt et seq.  This is in the KERNEL
 * section because it will never be visible to user code.
 */
enum sopt_dir { SOPT_GET, SOPT_SET };
struct sockopt {
        enum sopt_dir sopt_dir; /* is this a get or a set? */
        int sopt_level; /* second arg of [gs]etsockopt */
        int sopt_name; /* third arg of [gs]etsockopt */
        void *sopt_val; /* fourth arg of [gs]etsockopt */
        size_t sopt_valsize; /* (almost) fifth arg of [gs]etsockopt */
        struct thread *sopt_td; /* calling thread or null if kernel */
};
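
/*
 * __Userspace__ editorial sketch (kept under #if 0, never compiled) of how
 * a socket option could be packaged in a struct sockopt for sosetopt(),
 * which is declared below; so_setsockopt() is meant to wrap this kind of
 * level/name/value setup.  SOL_SOCKET and SO_REUSEADDR come from the
 * system socket headers, and example_set_reuse() is a made-up helper.
 */
#if 0
static int
example_set_reuse(struct socket *so)
{
        struct sockopt sopt;
        int on = 1;

        sopt.sopt_dir = SOPT_SET;
        sopt.sopt_level = SOL_SOCKET;
        sopt.sopt_name = SO_REUSEADDR;
        sopt.sopt_val = &on;
        sopt.sopt_valsize = sizeof(on);
        sopt.sopt_td = NULL;            /* internal (kernel-style) request */
        return (sosetopt(so, &sopt));
}
#endif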

struct accept_filter {
        char accf_name[16];
        void (*accf_callback)
            (struct socket *so, void *arg, int waitflag);
        void * (*accf_create)
            (struct socket *so, char *arg);
        void (*accf_destroy)
            (struct socket *so);
        SLIST_ENTRY(accept_filter) accf_next;
};

extern int maxsockets;
extern u_long sb_max;
extern struct uma_zone *socket_zone;
extern so_gen_t so_gencnt;

struct mbuf;
struct sockaddr;
struct ucred;
struct uio;

/*
 * From uipc_socket and friends
 */
int do_getopt_accept_filter(struct socket *so, struct sockopt *sopt);
int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
int so_setsockopt(struct socket *so, int level, int optname,
    void *optval, size_t optlen);
int sockargs(struct mbuf **mp, caddr_t buf, int buflen, int type);
int getsockaddr(struct sockaddr **namp, caddr_t uaddr, size_t len);
void sbappend(struct sockbuf *sb, struct mbuf *m);
void sbappend_locked(struct sockbuf *sb, struct mbuf *m);
void sbappendstream(struct sockbuf *sb, struct mbuf *m);
void sbappendstream_locked(struct sockbuf *sb, struct mbuf *m);
int sbappendaddr(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control);
int sbappendaddr_locked(struct sockbuf *sb, const struct sockaddr *asa,
    struct mbuf *m0, struct mbuf *control);
int sbappendcontrol(struct sockbuf *sb, struct mbuf *m0,
    struct mbuf *control);
int sbappendcontrol_locked(struct sockbuf *sb, struct mbuf *m0,
    struct mbuf *control);
void sbappendrecord(struct sockbuf *sb, struct mbuf *m0);
void sbappendrecord_locked(struct sockbuf *sb, struct mbuf *m0);
void sbcheck(struct sockbuf *sb);
void sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n);
struct mbuf *
    sbcreatecontrol(caddr_t p, int size, int type, int level);
void sbdestroy(struct sockbuf *sb, struct socket *so);
void sbdrop(struct sockbuf *sb, int len);
void sbdrop_locked(struct sockbuf *sb, int len);
void sbdroprecord(struct sockbuf *sb);
void sbdroprecord_locked(struct sockbuf *sb);
void sbflush(struct sockbuf *sb);
void sbflush_locked(struct sockbuf *sb);
void sbrelease(struct sockbuf *sb, struct socket *so);
void sbrelease_locked(struct sockbuf *sb, struct socket *so);
int sbreserve(struct sockbuf *sb, u_long cc, struct socket *so,
    struct thread *td);
int sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so,
    struct thread *td);
struct mbuf *
    sbsndptr(struct sockbuf *sb, u_int off, u_int len, u_int *moff);
void sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb);
int sbwait(struct sockbuf *sb);
int sblock(struct sockbuf *sb, int flags);
void sbunlock(struct sockbuf *sb);
void soabort(struct socket *so);
int soaccept(struct socket *so, struct sockaddr **nam);
int socheckuid(struct socket *so, uid_t uid);
int sobind(struct socket *so, struct sockaddr *nam, struct thread *td);
void socantrcvmore(struct socket *so);
void socantrcvmore_locked(struct socket *so);
void socantsendmore(struct socket *so);
void socantsendmore_locked(struct socket *so);
int soclose(struct socket *so);
int soconnect(struct socket *so, struct sockaddr *nam, struct thread *td);
int soconnect2(struct socket *so1, struct socket *so2);
int socow_setup(struct mbuf *m0, struct uio *uio);
int socreate(int dom, struct socket **aso, int type, int proto,
    struct ucred *cred, struct thread *td);
int sodisconnect(struct socket *so);
struct sockaddr *sodupsockaddr(const struct sockaddr *sa, int mflags);
void sofree(struct socket *so);
int sogetopt(struct socket *so, struct sockopt *sopt);
void sohasoutofband(struct socket *so);
void soisconnected(struct socket *so);
void soisconnecting(struct socket *so);
void soisdisconnected(struct socket *so);
void soisdisconnecting(struct socket *so);
int solisten(struct socket *so, int backlog, struct thread *td);
void solisten_proto(struct socket *so, int backlog);
int solisten_proto_check(struct socket *so);
struct socket *
    sonewconn(struct socket *head, int connstatus);
int sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen);
int sooptcopyout(struct sockopt *sopt, const void *buf, size_t len);

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int soopt_getm(struct sockopt *sopt, struct mbuf **mp);
int soopt_mcopyin(struct sockopt *sopt, struct mbuf *m);
int soopt_mcopyout(struct sockopt *sopt, struct mbuf *m);

int sopoll(struct socket *so, int events, struct ucred *active_cred,
    struct thread *td);
int sopoll_generic(struct socket *so, int events,
    struct ucred *active_cred, struct thread *td);
int soreceive(struct socket *so, struct sockaddr **paddr, struct uio *uio,
    struct mbuf **mp0, struct mbuf **controlp, int *flagsp);
int soreceive_generic(struct socket *so, struct sockaddr **paddr,
    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
    int *flagsp);
int soreserve(struct socket *so, u_long sndcc, u_long rcvcc);
void sorflush(struct socket *so);
int sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags,
    struct thread *td);
int sosend_dgram(struct socket *so, struct sockaddr *addr,
    struct uio *uio, struct mbuf *top, struct mbuf *control,
    int flags, struct thread *td);
int sosend_generic(struct socket *so, struct sockaddr *addr,
    struct uio *uio, struct mbuf *top, struct mbuf *control,
    int flags, struct thread *td);
int sosetopt(struct socket *so, struct sockopt *sopt);
int soshutdown(struct socket *so, int how);
void sotoxsocket(struct socket *so, struct xsocket *xso);
void sowakeup(struct socket *so, struct sockbuf *sb);
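
/*
 * __Userspace__ editorial sketch (kept under #if 0, never compiled) tying
 * several of the prototypes above together: a minimal create/bind/listen/
 * close sequence as the kernel-style API expresses it.  SOCK_STREAM,
 * IPPROTO_SCTP and the td->td_ucred field come from the system headers,
 * not from this file, and example_listener() is a made-up helper; error
 * handling is reduced to early returns.
 */
#if 0
static int
example_listener(struct sockaddr *sa, struct thread *td)
{
        struct socket *so;
        int error;

        error = socreate(sa->sa_family, &so, SOCK_STREAM, IPPROTO_SCTP,
            td->td_ucred, td);
        if (error)
                return (error);
        error = sobind(so, sa, td);
        if (error == 0)
                error = solisten(so, 10, td);
        if (error)
                (void)soclose(so);
        return (error);
}
#endif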

#ifdef SOCKBUF_DEBUG
void sblastrecordchk(struct sockbuf *, const char *, int);
#define SBLASTRECORDCHK(sb) sblastrecordchk((sb), __FILE__, __LINE__)

void sblastmbufchk(struct sockbuf *, const char *, int);
#define SBLASTMBUFCHK(sb) sblastmbufchk((sb), __FILE__, __LINE__)
#else
#define SBLASTRECORDCHK(sb) /* nothing */
#define SBLASTMBUFCHK(sb) /* nothing */
#endif /* SOCKBUF_DEBUG */

/*
 * Accept filter functions (duh).
 */
int accept_filt_add(struct accept_filter *filt);
int accept_filt_del(char *name);
struct accept_filter *accept_filt_get(char *name);
#ifdef ACCEPT_FILTER_MOD
#ifdef SYSCTL_DECL
SYSCTL_DECL(_net_inet_accf);
#endif
int accept_filt_generic_mod_event(module_t mod, int event, void *data);
#endif

#endif /* _KERNEL */

/*-------------------------------------------------------------*/
/*-------------------------------------------------------------*/
/* __Userspace__ */
/*-------------------------------------------------------------*/
/*-------------------------------------------------------------*/
/* This new __Userspace__ section holds portions copied from the _KERNEL
 * block above, so the whole thing does not have to be ported at once.
 * For function prototypes, the full bodies are in user_socket.c.
 */
#if defined(__Userspace__)

/* ---------------------------------------------------------- */
/* --- function prototypes (implemented in user_socket.c) --- */
/* ---------------------------------------------------------- */
void soisconnecting(struct socket *so);
void soisdisconnecting(struct socket *so);
void soisconnected(struct socket *so);
struct socket *sonewconn(struct socket *head, int connstatus);
void socantrcvmore(struct socket *so);
void socantsendmore(struct socket *so);

/* -------------- */
/* --- macros --- */
/* -------------- */

#define soref(so) do { \
        SOCK_LOCK_ASSERT(so); \
        ++(so)->so_count; \
} while (0)

#define sorele(so) do { \
        ACCEPT_LOCK_ASSERT(); \
        SOCK_LOCK_ASSERT(so); \
        KASSERT((so)->so_count > 0, ("sorele")); \
        if (--(so)->so_count == 0) \
                sofree(so); \
        else { \
                SOCK_UNLOCK(so); \
                ACCEPT_UNLOCK(); \
        } \
} while (0)

/* replacing imin with min (user_environment.h) */
#define sbspace(sb) \
    ((long) min((int)((sb)->sb_hiwat - (sb)->sb_cc), \
    (int)((sb)->sb_mbmax - (sb)->sb_mbcnt)))

/* do we have to send all at once on a socket? */
#define sosendallatonce(so) \
    ((so)->so_proto->pr_flags & PR_ATOMIC)

/* can we read something from so? */
#define soreadable(so) \
    ((int)((so)->so_rcv.sb_cc) >= (so)->so_rcv.sb_lowat || \
    ((so)->so_rcv.sb_state & SBS_CANTRCVMORE) || \
    !TAILQ_EMPTY(&(so)->so_comp) || (so)->so_error)

#if 0 /* original */
#define PR_CONNREQUIRED 0x04 /* from sys/protosw.h "needed" for sowriteable */
#define sowriteable(so) \
    ((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \
    (((so)->so_state&SS_ISCONNECTED) || \
    ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) || \
    ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \
    (so)->so_error)
#else /* line with PR_CONNREQUIRED removed */
/* can we write something to so? */
#define sowriteable(so) \
    ((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \
    (((so)->so_state&SS_ISCONNECTED))) || \
    ((so)->so_snd.sb_state & SBS_CANTSENDMORE) || \
    (so)->so_error)
#endif

extern void solisten_proto(struct socket *so, int backlog);
extern int solisten_proto_check(struct socket *so);
extern int sctp_listen(struct socket *so, int backlog, struct proc *p);
extern void socantrcvmore_locked(struct socket *so);
extern int sctp_bind(struct socket *so, struct sockaddr *addr);
extern int sctp6_bind(struct socket *so, struct sockaddr *addr, void *proc);
#if defined(__Userspace__)
extern int sctpconn_bind(struct socket *so, struct sockaddr *addr);
#endif
extern int sctp_accept(struct socket *so, struct sockaddr **addr);
extern int sctp_attach(struct socket *so, int proto, uint32_t vrf_id);
extern int sctp6_attach(struct socket *so, int proto, uint32_t vrf_id);
extern int sctp_abort(struct socket *so);
extern int sctp6_abort(struct socket *so);
extern void sctp_close(struct socket *so);
extern int soaccept(struct socket *so, struct sockaddr **nam);
extern int solisten(struct socket *so, int backlog);
extern int soreserve(struct socket *so, u_long sndcc, u_long rcvcc);
extern void sowakeup(struct socket *so, struct sockbuf *sb);
extern void wakeup(void *ident, struct socket *so); /*__Userspace__ */
extern int uiomove(void *cp, int n, struct uio *uio);
extern int sbwait(struct sockbuf *sb);
extern int sodisconnect(struct socket *so);
extern int soconnect(struct socket *so, struct sockaddr *nam);
extern int sctp_disconnect(struct socket *so);
extern int sctp_connect(struct socket *so, struct sockaddr *addr);
extern int sctp6_connect(struct socket *so, struct sockaddr *addr);
#if defined(__Userspace__)
extern int sctpconn_connect(struct socket *so, struct sockaddr *addr);
#endif
extern void sctp_finish(void);
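
/*
 * __Userspace__ editorial sketch (kept under #if 0, never compiled,
 * non-Windows only) of how a blocking caller could park on the per-socket
 * timeo_cond/SOCK_COND() pair until the SS_* bits flip; the real waiting
 * logic lives in user_socket.c.  SOCK_LOCK()/SOCK_UNLOCK() are defined in
 * netinet/sctp_process_lock.h, and example_wait_connected() is a made-up
 * helper name.
 */
#if 0
static int
example_wait_connected(struct socket *so)
{
        int error = 0;

        SOCK_LOCK(so);
        while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0)
                pthread_cond_wait(SOCK_COND(so), SOCK_MTX(so));
        if (so->so_error) {
                error = so->so_error;
                so->so_error = 0;
        }
        SOCK_UNLOCK(so);
        return (error);
}
#endif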

/* ------------------------------------------------ */
/* ----- macros copied from above ---- */
/* ------------------------------------------------ */

/*
 * Do we need to notify the other side when I/O is possible?
 */
#define sb_notify(sb) (((sb)->sb_flags & (SB_WAIT | SB_SEL | SB_ASYNC | \
    SB_UPCALL | SB_AIO | SB_KNOTE)) != 0)

/*
 * In sorwakeup() and sowwakeup(), acquire the socket buffer lock to
 * avoid a non-atomic test-and-wakeup.  However, sowakeup is
 * responsible for releasing the lock if it is called.  We unlock only
 * if we don't call into sowakeup.  If any code is introduced that
 * directly invokes the underlying sowakeup() primitives, it must
 * maintain the same semantics.
 */
#define sorwakeup_locked(so) do { \
        SOCKBUF_LOCK_ASSERT(&(so)->so_rcv); \
        if (sb_notify(&(so)->so_rcv)) \
                sowakeup((so), &(so)->so_rcv); \
        else \
                SOCKBUF_UNLOCK(&(so)->so_rcv); \
} while (0)

#define sorwakeup(so) do { \
        SOCKBUF_LOCK(&(so)->so_rcv); \
        sorwakeup_locked(so); \
} while (0)

#define sowwakeup_locked(so) do { \
        SOCKBUF_LOCK_ASSERT(&(so)->so_snd); \
        if (sb_notify(&(so)->so_snd)) \
                sowakeup((so), &(so)->so_snd); \
        else \
                SOCKBUF_UNLOCK(&(so)->so_snd); \
} while (0)

#define sowwakeup(so) do { \
        SOCKBUF_LOCK(&(so)->so_snd); \
        sowwakeup_locked(so); \
} while (0)

#endif /* __Userspace__ */

#endif /* !_USER_SOCKETVAR_H_ */