2 * Copyright (c) 1982, 1986, 1988, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94
39 #include <sys/param.h>
40 #include <sys/systm.h>
43 #include <sys/malloc.h>
45 #include <sys/domain.h>
46 #include <sys/kernel.h>
47 #include <sys/protosw.h>
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/resourcevar.h>
51 #include <sys/signalvar.h>
/*
 * Maximum length of the pending-connection queue accepted by solisten().
 * Exported to userland via sysctl.
 */
int	somaxconn = SOMAXCONN;
60 * Socket operation routines.
61 * These routines are called by the routines in
62 * sys_socket.c or from a system process, and
63 * implement the semantics of socket operations by
64 * switching out to the protocol specific routines.
68 socreate(dom
, aso
, type
, proto
)
75 struct proc
*p
= curproc
; /* XXX */
77 register struct protosw
*prp
;
78 register struct socket
*so
;
82 prp
= pffindproto(dom
, proto
, type
);
84 prp
= pffindtype(dom
, type
);
85 if (prp
== 0 || prp
->pr_usrreq
== 0)
86 return (EPROTONOSUPPORT
);
87 if (prp
->pr_type
!= type
)
89 MALLOC(so
, struct socket
*, sizeof(*so
), M_SOCKET
, M_WAIT
);
90 bzero((caddr_t
)so
, sizeof(*so
));
93 if (p
->p_ucred
->cr_uid
== 0)
94 so
->so_state
= SS_PRIV
;
98 (*prp
->pr_usrreq
)(so
, PRU_ATTACH
,
99 (struct mbuf
*)0, (struct mbuf
*)proto
, (struct mbuf
*)0);
101 so
->so_state
|= SS_NOFDREF
;
118 (*so
->so_proto
->pr_usrreq
)(so
, PRU_BIND
,
119 (struct mbuf
*)0, nam
, (struct mbuf
*)0);
125 solisten(so
, backlog
)
126 register struct socket
*so
;
129 int s
= splnet(), error
;
132 (*so
->so_proto
->pr_usrreq
)(so
, PRU_LISTEN
,
133 (struct mbuf
*)0, (struct mbuf
*)0, (struct mbuf
*)0);
139 so
->so_options
|= SO_ACCEPTCONN
;
140 if (backlog
< 0 || backlog
> somaxconn
)
142 so
->so_qlimit
= backlog
;
149 register struct socket
*so
;
152 if (so
->so_pcb
|| (so
->so_state
& SS_NOFDREF
) == 0)
155 if (!soqremque(so
, 0) && !soqremque(so
, 1))
159 sbrelease(&so
->so_snd
);
165 * Close a socket on last file table reference removal.
166 * Initiate disconnect if connected.
167 * Free socket when disconnect complete.
171 register struct socket
*so
;
173 int s
= splnet(); /* conservative */
176 if (so
->so_options
& SO_ACCEPTCONN
) {
178 (void) soabort(so
->so_q0
);
180 (void) soabort(so
->so_q
);
184 if (so
->so_state
& SS_ISCONNECTED
) {
185 if ((so
->so_state
& SS_ISDISCONNECTING
) == 0) {
186 error
= sodisconnect(so
);
190 if (so
->so_options
& SO_LINGER
) {
191 if ((so
->so_state
& SS_ISDISCONNECTING
) &&
192 (so
->so_state
& SS_NBIO
))
194 while (so
->so_state
& SS_ISCONNECTED
) {
195 error
= tsleep((caddr_t
)&so
->so_timeo
,
196 PSOCK
| PCATCH
, netcls
, so
->so_linger
);
205 (*so
->so_proto
->pr_usrreq
)(so
, PRU_DETACH
,
206 (struct mbuf
*)0, (struct mbuf
*)0, (struct mbuf
*)0);
211 if (so
->so_state
& SS_NOFDREF
)
212 panic("soclose: NOFDREF");
213 so
->so_state
|= SS_NOFDREF
;
220 * Must be called at splnet...
228 (*so
->so_proto
->pr_usrreq
)(so
, PRU_ABORT
,
229 (struct mbuf
*)0, (struct mbuf
*)0, (struct mbuf
*)0));
234 register struct socket
*so
;
240 if ((so
->so_state
& SS_NOFDREF
) == 0)
241 panic("soaccept: !NOFDREF");
242 so
->so_state
&= ~SS_NOFDREF
;
243 error
= (*so
->so_proto
->pr_usrreq
)(so
, PRU_ACCEPT
,
244 (struct mbuf
*)0, nam
, (struct mbuf
*)0);
251 register struct socket
*so
;
257 if (so
->so_options
& SO_ACCEPTCONN
)
261 * If protocol is connection-based, can only connect once.
262 * Otherwise, if connected, try to disconnect first.
263 * This allows user to disconnect by connecting to, e.g.,
266 if (so
->so_state
& (SS_ISCONNECTED
|SS_ISCONNECTING
) &&
267 ((so
->so_proto
->pr_flags
& PR_CONNREQUIRED
) ||
268 (error
= sodisconnect(so
))))
271 error
= (*so
->so_proto
->pr_usrreq
)(so
, PRU_CONNECT
,
272 (struct mbuf
*)0, nam
, (struct mbuf
*)0);
279 register struct socket
*so1
;
285 error
= (*so1
->so_proto
->pr_usrreq
)(so1
, PRU_CONNECT2
,
286 (struct mbuf
*)0, (struct mbuf
*)so2
, (struct mbuf
*)0);
293 register struct socket
*so
;
298 if ((so
->so_state
& SS_ISCONNECTED
) == 0) {
302 if (so
->so_state
& SS_ISDISCONNECTING
) {
306 error
= (*so
->so_proto
->pr_usrreq
)(so
, PRU_DISCONNECT
,
307 (struct mbuf
*)0, (struct mbuf
*)0, (struct mbuf
*)0);
/*
 * sblock() wait policy: never sleep for the sockbuf lock when the
 * caller passed MSG_DONTWAIT.
 */
#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
316 * If send must go all at once and message is larger than
317 * send buffering, then hard error.
318 * Lock against other senders.
319 * If must go all at once and not enough room now, then
320 * inform user that this would block and do nothing.
321 * Otherwise, if nonblocking, send as much as possible.
322 * The data to be sent is described by "uio" if nonzero,
323 * otherwise by the mbuf chain "top" (which must be null
324 * if uio is not). Data provided in mbuf chain must be small
325 * enough to send all at once.
327 * Returns nonzero on error, timeout or signal; callers
328 * must check for short counts if EINTR/ERESTART are returned.
329 * Data and control buffers are freed on return.
332 sosend(so
, addr
, uio
, top
, control
, flags
)
333 register struct socket
*so
;
337 struct mbuf
*control
;
341 struct proc
*p
= curproc
; /* XXX */
344 register struct mbuf
*m
;
345 register long space
, len
, resid
;
346 int clen
= 0, error
, s
, dontroute
, mlen
;
347 int atomic
= sosendallatonce(so
) || top
;
350 resid
= uio
->uio_resid
;
352 resid
= top
->m_pkthdr
.len
;
354 * In theory resid should be unsigned.
355 * However, space must be signed, as it might be less than 0
356 * if we over-committed, and we must use a signed comparison
357 * of space and resid. On the other hand, a negative resid
358 * causes us to loop sending 0-length segments to the protocol.
363 (flags
& MSG_DONTROUTE
) && (so
->so_options
& SO_DONTROUTE
) == 0 &&
364 (so
->so_proto
->pr_flags
& PR_ATOMIC
);
366 p
->p_stats
->p_ru
.ru_msgsnd
++;
369 clen
= control
->m_len
;
370 #define snderr(errno) { error = errno; splx(s); goto release; }
373 error
= sblock(&so
->so_snd
, SBLOCKWAIT(flags
));
378 if (so
->so_state
& SS_CANTSENDMORE
)
381 snderr(so
->so_error
);
382 if ((so
->so_state
& SS_ISCONNECTED
) == 0) {
384 * `sendto' and `sendmsg' is allowed on a connection-
385 * based socket if it supports implied connect.
386 * Return ENOTCONN if not connected and no address is
389 if ((so
->so_proto
->pr_flags
& PR_CONNREQUIRED
) &&
390 (so
->so_proto
->pr_flags
& PR_IMPLOPCL
) == 0) {
391 if ((so
->so_state
& SS_ISCONFIRMING
) == 0 &&
392 !(resid
== 0 && clen
!= 0))
394 } else if (addr
== 0)
395 snderr(so
->so_proto
->pr_flags
& PR_CONNREQUIRED
?
396 ENOTCONN
: EDESTADDRREQ
);
398 space
= sbspace(&so
->so_snd
);
401 if ((atomic
&& resid
> so
->so_snd
.sb_hiwat
) ||
402 clen
> so
->so_snd
.sb_hiwat
)
404 if (space
< resid
+ clen
&& uio
&&
405 (atomic
|| space
< so
->so_snd
.sb_lowat
|| space
< clen
)) {
406 if (so
->so_state
& SS_NBIO
)
408 sbunlock(so
, &so
->so_snd
);
409 error
= sbwait(&so
->so_snd
);
421 * Data is prepackaged in "top".
425 top
->m_flags
|= M_EOR
;
428 MGETHDR(m
, M_WAIT
, MT_DATA
);
431 m
->m_pkthdr
.rcvif
= (struct ifnet
*)0;
433 MGET(m
, M_WAIT
, MT_DATA
);
436 if (resid
>= MINCLSIZE
) {
438 if ((m
->m_flags
& M_EXT
) == 0)
441 len
= min(min(mlen
, resid
), space
);
444 len
= min(min(mlen
, resid
), space
);
446 * For datagram protocols, leave room
447 * for protocol headers in first mbuf.
449 if (atomic
&& top
== 0 && len
< mlen
)
453 error
= uiomove(mtod(m
, caddr_t
), (int)len
, uio
);
454 resid
= uio
->uio_resid
;
457 top
->m_pkthdr
.len
+= len
;
463 top
->m_flags
|= M_EOR
;
466 } while (space
> 0 && atomic
);
468 so
->so_options
|= SO_DONTROUTE
;
469 s
= splnet(); /* XXX */
470 error
= (*so
->so_proto
->pr_usrreq
)(so
,
471 (flags
& MSG_OOB
) ? PRU_SENDOOB
:
473 * If the user set MSG_EOF, the protocol
474 * understands this flag and nothing left to
475 * send then use PRU_SEND_EOF instead of PRU_SEND.
477 ((flags
& MSG_EOF
) &&
478 (so
->so_proto
->pr_flags
& PR_IMPLOPCL
) &&
480 PRU_SEND_EOF
: PRU_SEND
,
484 so
->so_options
&= ~SO_DONTROUTE
;
491 } while (resid
&& space
> 0);
495 sbunlock(so
, &so
->so_snd
);
505 * Implement receive operations on a socket.
506 * We depend on the way that records are added to the sockbuf
507 * by sbappend*. In particular, each record (mbufs linked through m_next)
508 * must begin with an address if the protocol so specifies,
509 * followed by an optional mbuf or mbufs containing ancillary data,
510 * and then zero or more mbufs of data.
511 * In order to avoid blocking network interrupts for the entire time here,
512 * we splx() while doing the actual copy to user space.
513 * Although the sockbuf is locked, new data may still be appended,
514 * and thus we must maintain consistency of the sockbuf during that time.
516 * The caller may receive the data as a single mbuf chain by supplying
517 * an mbuf **mp0 for use in returning the chain. The uio is then used
518 * only for the count in uio_resid.
521 soreceive(so
, paddr
, uio
, mp0
, controlp
, flagsp
)
522 register struct socket
*so
;
526 struct mbuf
**controlp
;
529 register struct mbuf
*m
, **mp
;
530 register int flags
, len
, error
, s
, offset
;
531 struct protosw
*pr
= so
->so_proto
;
532 struct mbuf
*nextrecord
;
534 int orig_resid
= uio
->uio_resid
;
542 flags
= *flagsp
&~ MSG_EOR
;
545 if (flags
& MSG_OOB
) {
546 m
= m_get(M_WAIT
, MT_DATA
);
547 error
= (*pr
->pr_usrreq
)(so
, PRU_RCVOOB
,
548 m
, (struct mbuf
*)(flags
& MSG_PEEK
), (struct mbuf
*)0);
552 error
= uiomove(mtod(m
, caddr_t
),
553 (int) min(uio
->uio_resid
, m
->m_len
), uio
);
555 } while (uio
->uio_resid
&& error
== 0 && m
);
562 *mp
= (struct mbuf
*)0;
563 if (so
->so_state
& SS_ISCONFIRMING
&& uio
->uio_resid
)
564 (*pr
->pr_usrreq
)(so
, PRU_RCVD
, (struct mbuf
*)0,
565 (struct mbuf
*)0, (struct mbuf
*)0);
568 error
= sblock(&so
->so_rcv
, SBLOCKWAIT(flags
));
573 m
= so
->so_rcv
.sb_mb
;
575 * If we have less data than requested, block awaiting more
576 * (subject to any timeout) if:
577 * 1. the current count is less than the low water mark, or
578 * 2. MSG_WAITALL is set, and it is possible to do the entire
579 * receive operation at once if we block (resid <= hiwat).
580 * 3. MSG_DONTWAIT is not set
581 * If MSG_WAITALL is set but resid is larger than the receive buffer,
582 * we have to do the receive in sections, and thus risk returning
583 * a short count if a timeout or signal occurs after we start.
585 if (m
== 0 || (((flags
& MSG_DONTWAIT
) == 0 &&
586 so
->so_rcv
.sb_cc
< uio
->uio_resid
) &&
587 (so
->so_rcv
.sb_cc
< so
->so_rcv
.sb_lowat
||
588 ((flags
& MSG_WAITALL
) && uio
->uio_resid
<= so
->so_rcv
.sb_hiwat
)) &&
589 m
->m_nextpkt
== 0 && (pr
->pr_flags
& PR_ATOMIC
) == 0)) {
591 if (m
== 0 && so
->so_rcv
.sb_cc
)
597 error
= so
->so_error
;
598 if ((flags
& MSG_PEEK
) == 0)
602 if (so
->so_state
& SS_CANTRCVMORE
) {
608 for (; m
; m
= m
->m_next
)
609 if (m
->m_type
== MT_OOBDATA
|| (m
->m_flags
& M_EOR
)) {
610 m
= so
->so_rcv
.sb_mb
;
613 if ((so
->so_state
& (SS_ISCONNECTED
|SS_ISCONNECTING
)) == 0 &&
614 (so
->so_proto
->pr_flags
& PR_CONNREQUIRED
)) {
615 printf("so: %x\n", so
);
620 if (uio
->uio_resid
== 0)
622 if ((so
->so_state
& SS_NBIO
) || (flags
& MSG_DONTWAIT
)) {
626 sbunlock(so
, &so
->so_rcv
);
627 error
= sbwait(&so
->so_rcv
);
635 uio
->uio_procp
->p_stats
->p_ru
.ru_msgrcv
++;
636 nextrecord
= m
->m_nextpkt
;
637 if (pr
->pr_flags
& PR_ADDR
) {
639 if (m
->m_type
!= MT_SONAME
)
643 if (flags
& MSG_PEEK
) {
645 *paddr
= m_copy(m
, 0, m
->m_len
);
648 sbfree(&so
->so_rcv
, m
);
651 so
->so_rcv
.sb_mb
= m
->m_next
;
653 m
= so
->so_rcv
.sb_mb
;
655 MFREE(m
, so
->so_rcv
.sb_mb
);
656 m
= so
->so_rcv
.sb_mb
;
660 while (m
&& m
->m_type
== MT_CONTROL
&& error
== 0) {
661 if (flags
& MSG_PEEK
) {
663 *controlp
= m_copy(m
, 0, m
->m_len
);
666 sbfree(&so
->so_rcv
, m
);
669 if (pr
->pr_domain
->dom_externalize
&&
670 mtod(m
, struct cmsghdr
*)->cmsg_type
==
672 error
= (*pr
->pr_domain
->dom_externalize
)(m
);
674 so
->so_rcv
.sb_mb
= m
->m_next
;
676 m
= so
->so_rcv
.sb_mb
;
680 MFREE(m
, so
->so_rcv
.sb_mb
);
681 m
= so
->so_rcv
.sb_mb
;
686 controlp
= &(*controlp
)->m_next
;
690 if ((flags
& MSG_PEEK
) == 0)
691 m
->m_nextpkt
= nextrecord
;
693 if (type
== MT_OOBDATA
)
698 while (m
&& uio
->uio_resid
> 0 && error
== 0) {
699 if (m
->m_type
== MT_OOBDATA
) {
700 if (type
!= MT_OOBDATA
)
702 } else if (type
== MT_OOBDATA
)
705 else if (m
->m_type
!= MT_DATA
&& m
->m_type
!= MT_HEADER
)
708 so
->so_state
&= ~SS_RCVATMARK
;
709 len
= uio
->uio_resid
;
710 if (so
->so_oobmark
&& len
> so
->so_oobmark
- offset
)
711 len
= so
->so_oobmark
- offset
;
712 if (len
> m
->m_len
- moff
)
713 len
= m
->m_len
- moff
;
715 * If mp is set, just pass back the mbufs.
716 * Otherwise copy them out via the uio, then free.
717 * Sockbuf must be consistent here (points to current mbuf,
718 * it points to next record) when we drop priority;
719 * we must note any additions to the sockbuf when we
720 * block interrupts again.
724 error
= uiomove(mtod(m
, caddr_t
) + moff
, (int)len
, uio
);
727 uio
->uio_resid
-= len
;
728 if (len
== m
->m_len
- moff
) {
729 if (m
->m_flags
& M_EOR
)
731 if (flags
& MSG_PEEK
) {
735 nextrecord
= m
->m_nextpkt
;
736 sbfree(&so
->so_rcv
, m
);
740 so
->so_rcv
.sb_mb
= m
= m
->m_next
;
741 *mp
= (struct mbuf
*)0;
743 MFREE(m
, so
->so_rcv
.sb_mb
);
744 m
= so
->so_rcv
.sb_mb
;
747 m
->m_nextpkt
= nextrecord
;
750 if (flags
& MSG_PEEK
)
754 *mp
= m_copym(m
, 0, len
, M_WAIT
);
757 so
->so_rcv
.sb_cc
-= len
;
760 if (so
->so_oobmark
) {
761 if ((flags
& MSG_PEEK
) == 0) {
762 so
->so_oobmark
-= len
;
763 if (so
->so_oobmark
== 0) {
764 so
->so_state
|= SS_RCVATMARK
;
769 if (offset
== so
->so_oobmark
)
776 * If the MSG_WAITALL flag is set (for non-atomic socket),
777 * we must not quit until "uio->uio_resid == 0" or an error
778 * termination. If a signal/timeout occurs, return
779 * with a short count but without error.
780 * Keep sockbuf locked against other readers.
782 while (flags
& MSG_WAITALL
&& m
== 0 && uio
->uio_resid
> 0 &&
783 !sosendallatonce(so
) && !nextrecord
) {
784 if (so
->so_error
|| so
->so_state
& SS_CANTRCVMORE
)
786 error
= sbwait(&so
->so_rcv
);
788 sbunlock(so
, &so
->so_rcv
);
792 m
= so
->so_rcv
.sb_mb
;
794 nextrecord
= m
->m_nextpkt
;
798 if (m
&& pr
->pr_flags
& PR_ATOMIC
) {
800 if ((flags
& MSG_PEEK
) == 0)
801 (void) sbdroprecord(&so
->so_rcv
);
803 if ((flags
& MSG_PEEK
) == 0) {
805 so
->so_rcv
.sb_mb
= nextrecord
;
806 if (pr
->pr_flags
& PR_WANTRCVD
&& so
->so_pcb
)
807 (*pr
->pr_usrreq
)(so
, PRU_RCVD
, (struct mbuf
*)0,
808 (struct mbuf
*)flags
, (struct mbuf
*)0);
810 if (orig_resid
== uio
->uio_resid
&& orig_resid
&&
811 (flags
& MSG_EOR
) == 0 && (so
->so_state
& SS_CANTRCVMORE
) == 0) {
812 sbunlock(so
, &so
->so_rcv
);
820 sbunlock(so
, &so
->so_rcv
);
827 register struct socket
*so
;
830 register struct protosw
*pr
= so
->so_proto
;
836 return ((*pr
->pr_usrreq
)(so
, PRU_SHUTDOWN
,
837 (struct mbuf
*)0, (struct mbuf
*)0, (struct mbuf
*)0));
843 register struct socket
*so
;
845 register struct sockbuf
*sb
= &so
->so_rcv
;
846 register struct protosw
*pr
= so
->so_proto
;
850 sb
->sb_flags
|= SB_NOINTR
;
851 (void) sblock(sb
, M_WAITOK
);
856 bzero((caddr_t
)sb
, sizeof (*sb
));
858 if (pr
->pr_flags
& PR_RIGHTS
&& pr
->pr_domain
->dom_dispose
)
859 (*pr
->pr_domain
->dom_dispose
)(asb
.sb_mb
);
864 sosetopt(so
, level
, optname
, m0
)
865 register struct socket
*so
;
870 register struct mbuf
*m
= m0
;
872 if (level
!= SOL_SOCKET
) {
873 if (so
->so_proto
&& so
->so_proto
->pr_ctloutput
)
874 return ((*so
->so_proto
->pr_ctloutput
)
875 (PRCO_SETOPT
, so
, level
, optname
, &m0
));
881 if (m
== NULL
|| m
->m_len
!= sizeof (struct linger
)) {
885 so
->so_linger
= mtod(m
, struct linger
*)->l_linger
;
896 if (m
== NULL
|| m
->m_len
< sizeof (int)) {
901 so
->so_options
|= optname
;
903 so
->so_options
&= ~optname
;
910 if (m
== NULL
|| m
->m_len
< sizeof (int)) {
918 if (sbreserve(optname
== SO_SNDBUF
?
919 &so
->so_snd
: &so
->so_rcv
,
920 (u_long
) *mtod(m
, int *)) == 0) {
927 so
->so_snd
.sb_lowat
= *mtod(m
, int *);
930 so
->so_rcv
.sb_lowat
= *mtod(m
, int *);
941 if (m
== NULL
|| m
->m_len
< sizeof (*tv
)) {
945 tv
= mtod(m
, struct timeval
*);
946 if (tv
->tv_sec
> SHRT_MAX
/ hz
- hz
) {
950 val
= tv
->tv_sec
* hz
+ tv
->tv_usec
/ tick
;
955 so
->so_snd
.sb_timeo
= val
;
958 so
->so_rcv
.sb_timeo
= val
;
968 if (error
== 0 && so
->so_proto
&& so
->so_proto
->pr_ctloutput
) {
969 (void) ((*so
->so_proto
->pr_ctloutput
)
970 (PRCO_SETOPT
, so
, level
, optname
, &m0
));
971 m
= NULL
; /* freed by protocol */
981 sogetopt(so
, level
, optname
, mp
)
982 register struct socket
*so
;
986 register struct mbuf
*m
;
988 if (level
!= SOL_SOCKET
) {
989 if (so
->so_proto
&& so
->so_proto
->pr_ctloutput
) {
990 return ((*so
->so_proto
->pr_ctloutput
)
991 (PRCO_GETOPT
, so
, level
, optname
, mp
));
993 return (ENOPROTOOPT
);
995 m
= m_get(M_WAIT
, MT_SOOPTS
);
996 m
->m_len
= sizeof (int);
1001 m
->m_len
= sizeof (struct linger
);
1002 mtod(m
, struct linger
*)->l_onoff
=
1003 so
->so_options
& SO_LINGER
;
1004 mtod(m
, struct linger
*)->l_linger
= so
->so_linger
;
1007 case SO_USELOOPBACK
:
1015 *mtod(m
, int *) = so
->so_options
& optname
;
1019 *mtod(m
, int *) = so
->so_type
;
1023 *mtod(m
, int *) = so
->so_error
;
1028 *mtod(m
, int *) = so
->so_snd
.sb_hiwat
;
1032 *mtod(m
, int *) = so
->so_rcv
.sb_hiwat
;
1036 *mtod(m
, int *) = so
->so_snd
.sb_lowat
;
1040 *mtod(m
, int *) = so
->so_rcv
.sb_lowat
;
1046 int val
= (optname
== SO_SNDTIMEO
?
1047 so
->so_snd
.sb_timeo
: so
->so_rcv
.sb_timeo
);
1049 m
->m_len
= sizeof(struct timeval
);
1050 mtod(m
, struct timeval
*)->tv_sec
= val
/ hz
;
1051 mtod(m
, struct timeval
*)->tv_usec
=
1058 return (ENOPROTOOPT
);
1067 register struct socket
*so
;
1071 if (so
->so_pgid
< 0)
1072 gsignal(-so
->so_pgid
, SIGURG
);
1073 else if (so
->so_pgid
> 0 && (p
= pfind(so
->so_pgid
)) != 0)
1076 selwakeup(so
, &so
->so_rcv
.sb_sel
);