/* reactos/drivers/lib/oskittcp/oskittcp/uipc_socket.c */
1 /*
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94
34 */
35
36 #ifndef _MSC_VER
37 #include <roscfg.h>
38 #endif/*_MSC_VER*/
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/proc.h>
42 #include <sys/file.h>
43 #include <sys/malloc.h>
44 #include <sys/mbuf.h>
45 #include <sys/domain.h>
46 #include <sys/kernel.h>
47 #include <sys/protosw.h>
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/resourcevar.h>
51 #include <sys/signalvar.h>
52 #include <oskittcp.h>
53
/*
 * Exported to userland via sysctl
 */
/* Upper bound on the listen(2) backlog accepted by solisten(). */
int somaxconn = SOMAXCONN;
59 /*
60 * Socket operation routines.
61 * These routines are called by the routines in
62 * sys_socket.c or from a system process, and
63 * implement the semantics of socket operations by
64 * switching out to the protocol specific routines.
65 */
66 /*ARGSUSED*/
67 int
68 socreate(dom, aso, type, proto)
69 int dom;
70 struct socket **aso;
71 register int type;
72 int proto;
73 {
74 #ifndef __REACTOS__
75 struct proc *p = curproc; /* XXX */
76 #endif
77 register struct protosw *prp;
78 register struct socket *so;
79 register int error;
80
81 if (proto)
82 prp = pffindproto(dom, proto, type);
83 else
84 prp = pffindtype(dom, type);
85 if (prp == 0 || prp->pr_usrreq == 0)
86 return (EPROTONOSUPPORT);
87 if (prp->pr_type != type)
88 return (EPROTOTYPE);
89 MALLOC(so, struct socket *, sizeof(*so), M_SOCKET, M_WAIT);
90 bzero((caddr_t)so, sizeof(*so));
91 so->so_type = type;
92 #ifndef __REACTOS__
93 if (p->p_ucred->cr_uid == 0)
94 so->so_state = SS_PRIV;
95 #endif
96 so->so_proto = prp;
97 error =
98 (*prp->pr_usrreq)(so, PRU_ATTACH,
99 (struct mbuf *)0, (struct mbuf *)proto, (struct mbuf *)0);
100 if (error) {
101 so->so_state |= SS_NOFDREF;
102 sofree(so);
103 return (error);
104 }
105 *aso = so;
106 return (0);
107 }
108
109 int
110 sobind(so, nam)
111 struct socket *so;
112 struct mbuf *nam;
113 {
114 int s = splnet();
115 int error;
116
117 error =
118 (*so->so_proto->pr_usrreq)(so, PRU_BIND,
119 (struct mbuf *)0, nam, (struct mbuf *)0);
120 splx(s);
121 return (error);
122 }
123
124 int
125 solisten(so, backlog)
126 register struct socket *so;
127 int backlog;
128 {
129 int s = splnet(), error;
130
131 error =
132 (*so->so_proto->pr_usrreq)(so, PRU_LISTEN,
133 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
134 if (error) {
135 splx(s);
136 return (error);
137 }
138 if (so->so_q == 0)
139 so->so_options |= SO_ACCEPTCONN;
140 if (backlog < 0 || backlog > somaxconn)
141 backlog = somaxconn;
142 so->so_qlimit = backlog;
143 splx(s);
144 return (0);
145 }
146
147 void
148 sofree(so)
149 register struct socket *so;
150 {
151
152 if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
153 return;
154 if (so->so_head) {
155 if (!soqremque(so, 0) && !soqremque(so, 1))
156 panic("sofree dq");
157 so->so_head = 0;
158 }
159 sbrelease(&so->so_snd);
160 sorflush(so);
161 FREE(so, M_SOCKET);
162 }
163
164 /*
165 * Close a socket on last file table reference removal.
166 * Initiate disconnect if connected.
167 * Free socket when disconnect complete.
168 */
169 int
170 soclose(so)
171 register struct socket *so;
172 {
173 int s = splnet(); /* conservative */
174 int error = 0;
175
176 if (so->so_options & SO_ACCEPTCONN) {
177 while (so->so_q0)
178 (void) soabort(so->so_q0);
179 while (so->so_q)
180 (void) soabort(so->so_q);
181 }
182 if (so->so_pcb == 0)
183 goto discard;
184 if (so->so_state & SS_ISCONNECTED) {
185 if ((so->so_state & SS_ISDISCONNECTING) == 0) {
186 error = sodisconnect(so);
187 if (error)
188 goto drop;
189 }
190 if (so->so_options & SO_LINGER) {
191 if ((so->so_state & SS_ISDISCONNECTING) &&
192 (so->so_state & SS_NBIO))
193 goto drop;
194 while (so->so_state & SS_ISCONNECTED) {
195 error = tsleep((caddr_t)&so->so_timeo,
196 PSOCK | PCATCH, netcls, so->so_linger);
197 if (error)
198 break;
199 }
200 }
201 }
202 drop:
203 if (so->so_pcb) {
204 int error2 =
205 (*so->so_proto->pr_usrreq)(so, PRU_DETACH,
206 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
207 if (error == 0)
208 error = error2;
209 }
210 discard:
211 if (so->so_state & SS_NOFDREF)
212 panic("soclose: NOFDREF");
213 so->so_state |= SS_NOFDREF;
214 sofree(so);
215 splx(s);
216 return (error);
217 }
218
219 /*
220 * Must be called at splnet...
221 */
222 int
223 soabort(so)
224 struct socket *so;
225 {
226
227 return (
228 (*so->so_proto->pr_usrreq)(so, PRU_ABORT,
229 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
230 }
231
232 int
233 soaccept(so, nam)
234 register struct socket *so;
235 struct mbuf *nam;
236 {
237 int s = splnet();
238 int error;
239
240 if ((so->so_state & SS_NOFDREF) == 0)
241 panic("soaccept: !NOFDREF");
242 so->so_state &= ~SS_NOFDREF;
243 error = (*so->so_proto->pr_usrreq)(so, PRU_ACCEPT,
244 (struct mbuf *)0, nam, (struct mbuf *)0);
245 splx(s);
246 return (error);
247 }
248
249 int
250 soconnect(so, nam)
251 register struct socket *so;
252 struct mbuf *nam;
253 {
254 int s;
255 int error;
256
257 if (so->so_options & SO_ACCEPTCONN)
258 return (EOPNOTSUPP);
259 s = splnet();
260 /*
261 * If protocol is connection-based, can only connect once.
262 * Otherwise, if connected, try to disconnect first.
263 * This allows user to disconnect by connecting to, e.g.,
264 * a null address.
265 */
266 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
267 ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
268 (error = sodisconnect(so))))
269 error = EISCONN;
270 else
271 error = (*so->so_proto->pr_usrreq)(so, PRU_CONNECT,
272 (struct mbuf *)0, nam, (struct mbuf *)0);
273 splx(s);
274 return (error);
275 }
276
277 int
278 soconnect2(so1, so2)
279 register struct socket *so1;
280 struct socket *so2;
281 {
282 int s = splnet();
283 int error;
284
285 error = (*so1->so_proto->pr_usrreq)(so1, PRU_CONNECT2,
286 (struct mbuf *)0, (struct mbuf *)so2, (struct mbuf *)0);
287 splx(s);
288 return (error);
289 }
290
291 int
292 sodisconnect(so)
293 register struct socket *so;
294 {
295 int s = splnet();
296 int error;
297
298 if ((so->so_state & SS_ISCONNECTED) == 0) {
299 error = ENOTCONN;
300 goto bad;
301 }
302 if (so->so_state & SS_ISDISCONNECTING) {
303 error = EALREADY;
304 goto bad;
305 }
306 error = (*so->so_proto->pr_usrreq)(so, PRU_DISCONNECT,
307 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0);
308 bad:
309 splx(s);
310 return (error);
311 }
312
/* sblock() wait flag: don't sleep for the lock when MSG_DONTWAIT is set. */
#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(so, addr, uio, top, control, flags)
	register struct socket *so;
	struct mbuf *addr;
	struct uio *uio;
	struct mbuf *top;
	struct mbuf *control;
	int flags;
{
#ifndef __REACTOS__
	struct proc *p = curproc;		/* XXX */
#endif
	struct mbuf **mp;
	register struct mbuf *m;
	register long space, len, resid;
	int clen = 0, error, s, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;

	/* Byte count to send comes from the uio, or from a prebuilt chain. */
	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 */
	if (resid < 0)
		return (EINVAL);
	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
#ifndef __REACTOS__
	p->p_stats->p_ru.ru_msgsnd++;
#endif
	if (control)
		clen = control->m_len;
	/* Error exit used while at splnet with the send buffer locked. */
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error)
			snderr(so->so_error);
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' is allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == 0)
				snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
				    ENOTCONN : EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		/* OOB data may overcommit the buffer by a small amount. */
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat)
			snderr(EMSGSIZE);
		/* Not enough room now: block, or fail for non-blocking sockets. */
		if (space < resid + clen && uio &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			sbunlock(so, &so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		space -= clen;
		do {
		    if (uio == NULL) {
			/*
			 * Data is prepackaged in "top".
			 */
			resid = 0;
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
		    } else do {
			/* First mbuf of each packet gets a packet header. */
			if (top == 0) {
				MGETHDR(m, M_WAIT, MT_DATA);
				mlen = MHLEN;
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = (struct ifnet *)0;
			} else {
				MGET(m, M_WAIT, MT_DATA);
				mlen = MLEN;
			}
			if (resid >= MINCLSIZE) {
				/* Large remainder: prefer a cluster mbuf. */
				MCLGET(m, M_WAIT);
				if ((m->m_flags & M_EXT) == 0)
					goto nopages;
				mlen = MCLBYTES;
				len = min(min(mlen, resid), space);
			} else {
nopages:
				len = min(min(mlen, resid), space);
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && top == 0 && len < mlen)
					MH_ALIGN(m, len);
			}
			space -= len;
			error = uiomove(mtod(m, caddr_t), (int)len, uio);
			resid = uio->uio_resid;
			m->m_len = len;
			*mp = m;
			top->m_pkthdr.len += len;
			if (error)
				goto release;
			mp = &m->m_next;
			if (resid <= 0) {
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
				break;
			}
		    } while (space > 0 && atomic);
		    if (dontroute)
			    so->so_options |= SO_DONTROUTE;
		    s = splnet();				/* XXX */
		    error = (*so->so_proto->pr_usrreq)(so,
			(flags & MSG_OOB) ? PRU_SENDOOB :
			/*
			 * If the user set MSG_EOF, the protocol
			 * understands this flag and nothing left to
			 * send then use PRU_SEND_EOF instead of PRU_SEND.
			 */
			((flags & MSG_EOF) &&
			 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			 (resid <= 0)) ?
				PRU_SEND_EOF : PRU_SEND,
			top, addr, control);
		    splx(s);
		    if (dontroute)
			    so->so_options &= ~SO_DONTROUTE;
		    /* Ownership of top and control passed to the protocol. */
		    clen = 0;
		    control = 0;
		    top = 0;
		    mp = &top;
		    if (error)
			goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(so, &so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
503
504 /*
505 * Implement receive operations on a socket.
506 * We depend on the way that records are added to the sockbuf
507 * by sbappend*. In particular, each record (mbufs linked through m_next)
508 * must begin with an address if the protocol so specifies,
509 * followed by an optional mbuf or mbufs containing ancillary data,
510 * and then zero or more mbufs of data.
511 * In order to avoid blocking network interrupts for the entire time here,
512 * we splx() while doing the actual copy to user space.
513 * Although the sockbuf is locked, new data may still be appended,
514 * and thus we must maintain consistency of the sockbuf during that time.
515 *
516 * The caller may receive the data as a single mbuf chain by supplying
517 * an mbuf **mp0 for use in returning the chain. The uio is then used
518 * only for the count in uio_resid.
519 */
520 int
521 soreceive(so, paddr, uio, mp0, controlp, flagsp)
522 register struct socket *so;
523 struct mbuf **paddr;
524 struct uio *uio;
525 struct mbuf **mp0;
526 struct mbuf **controlp;
527 int *flagsp;
528 {
529 register struct mbuf *m, **mp;
530 register int flags, len, error, s, offset;
531 struct protosw *pr = so->so_proto;
532 struct mbuf *nextrecord;
533 int moff, type = 0;
534 int orig_resid = uio->uio_resid;
535
536 mp = mp0;
537 if (paddr)
538 *paddr = 0;
539 if (controlp)
540 *controlp = 0;
541 if (flagsp)
542 flags = *flagsp &~ MSG_EOR;
543 else
544 flags = 0;
545 if (flags & MSG_OOB) {
546 m = m_get(M_WAIT, MT_DATA);
547 error = (*pr->pr_usrreq)(so, PRU_RCVOOB,
548 m, (struct mbuf *)(flags & MSG_PEEK), (struct mbuf *)0);
549 if (error)
550 goto bad;
551 do {
552 error = uiomove(mtod(m, caddr_t),
553 (int) min(uio->uio_resid, m->m_len), uio);
554 m = m_free(m);
555 } while (uio->uio_resid && error == 0 && m);
556 bad:
557 if (m)
558 m_freem(m);
559 return (error);
560 }
561 if (mp)
562 *mp = (struct mbuf *)0;
563 if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
564 (*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
565 (struct mbuf *)0, (struct mbuf *)0);
566
567 restart:
568 error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
569 if (error)
570 return (error);
571 s = splnet();
572
573 m = so->so_rcv.sb_mb;
574 /*
575 * If we have less data than requested, block awaiting more
576 * (subject to any timeout) if:
577 * 1. the current count is less than the low water mark, or
578 * 2. MSG_WAITALL is set, and it is possible to do the entire
579 * receive operation at once if we block (resid <= hiwat).
580 * 3. MSG_DONTWAIT is not set
581 * If MSG_WAITALL is set but resid is larger than the receive buffer,
582 * we have to do the receive in sections, and thus risk returning
583 * a short count if a timeout or signal occurs after we start.
584 */
585 if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
586 so->so_rcv.sb_cc < uio->uio_resid) &&
587 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
588 ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
589 m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
590 #ifdef DIAGNOSTIC
591 if (m == 0 && so->so_rcv.sb_cc)
592 panic("receive 1");
593 #endif
594 if (so->so_error) {
595 if (m)
596 goto dontblock;
597 error = so->so_error;
598 if ((flags & MSG_PEEK) == 0)
599 so->so_error = 0;
600 goto release;
601 }
602 if (so->so_state & SS_CANTRCVMORE) {
603 if (m)
604 goto dontblock;
605 else
606 goto release;
607 }
608 for (; m; m = m->m_next)
609 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
610 m = so->so_rcv.sb_mb;
611 goto dontblock;
612 }
613 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
614 (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
615 printf("so: %x\n", so);
616 __asm__("int3");
617 error = ENOTCONN;
618 goto release;
619 }
620 if (uio->uio_resid == 0)
621 goto release;
622 if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
623 error = EWOULDBLOCK;
624 goto release;
625 }
626 sbunlock(so, &so->so_rcv);
627 error = sbwait(&so->so_rcv);
628 splx(s);
629 if (error)
630 return (error);
631 goto restart;
632 }
633 dontblock:
634 if (uio->uio_procp)
635 uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
636 nextrecord = m->m_nextpkt;
637 if (pr->pr_flags & PR_ADDR) {
638 #ifdef DIAGNOSTIC
639 if (m->m_type != MT_SONAME)
640 panic("receive 1a");
641 #endif
642 orig_resid = 0;
643 if (flags & MSG_PEEK) {
644 if (paddr)
645 *paddr = m_copy(m, 0, m->m_len);
646 m = m->m_next;
647 } else {
648 sbfree(&so->so_rcv, m);
649 if (paddr) {
650 *paddr = m;
651 so->so_rcv.sb_mb = m->m_next;
652 m->m_next = 0;
653 m = so->so_rcv.sb_mb;
654 } else {
655 MFREE(m, so->so_rcv.sb_mb);
656 m = so->so_rcv.sb_mb;
657 }
658 }
659 }
660 while (m && m->m_type == MT_CONTROL && error == 0) {
661 if (flags & MSG_PEEK) {
662 if (controlp)
663 *controlp = m_copy(m, 0, m->m_len);
664 m = m->m_next;
665 } else {
666 sbfree(&so->so_rcv, m);
667 #ifndef __REACTOS__
668 if (controlp) {
669 if (pr->pr_domain->dom_externalize &&
670 mtod(m, struct cmsghdr *)->cmsg_type ==
671 SCM_RIGHTS)
672 error = (*pr->pr_domain->dom_externalize)(m);
673 *controlp = m;
674 so->so_rcv.sb_mb = m->m_next;
675 m->m_next = 0;
676 m = so->so_rcv.sb_mb;
677 } else
678 #endif
679 {
680 MFREE(m, so->so_rcv.sb_mb);
681 m = so->so_rcv.sb_mb;
682 }
683 }
684 if (controlp) {
685 orig_resid = 0;
686 controlp = &(*controlp)->m_next;
687 }
688 }
689 if (m) {
690 if ((flags & MSG_PEEK) == 0)
691 m->m_nextpkt = nextrecord;
692 type = m->m_type;
693 if (type == MT_OOBDATA)
694 flags |= MSG_OOB;
695 }
696 moff = 0;
697 offset = 0;
698 while (m && uio->uio_resid > 0 && error == 0) {
699 if (m->m_type == MT_OOBDATA) {
700 if (type != MT_OOBDATA)
701 break;
702 } else if (type == MT_OOBDATA)
703 break;
704 #ifdef DIAGNOSTIC
705 else if (m->m_type != MT_DATA && m->m_type != MT_HEADER)
706 panic("receive 3");
707 #endif
708 so->so_state &= ~SS_RCVATMARK;
709 len = uio->uio_resid;
710 if (so->so_oobmark && len > so->so_oobmark - offset)
711 len = so->so_oobmark - offset;
712 if (len > m->m_len - moff)
713 len = m->m_len - moff;
714 /*
715 * If mp is set, just pass back the mbufs.
716 * Otherwise copy them out via the uio, then free.
717 * Sockbuf must be consistent here (points to current mbuf,
718 * it points to next record) when we drop priority;
719 * we must note any additions to the sockbuf when we
720 * block interrupts again.
721 */
722 if (mp == 0) {
723 splx(s);
724 error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
725 s = splnet();
726 } else
727 uio->uio_resid -= len;
728 if (len == m->m_len - moff) {
729 if (m->m_flags & M_EOR)
730 flags |= MSG_EOR;
731 if (flags & MSG_PEEK) {
732 m = m->m_next;
733 moff = 0;
734 } else {
735 nextrecord = m->m_nextpkt;
736 sbfree(&so->so_rcv, m);
737 if (mp) {
738 *mp = m;
739 mp = &m->m_next;
740 so->so_rcv.sb_mb = m = m->m_next;
741 *mp = (struct mbuf *)0;
742 } else {
743 MFREE(m, so->so_rcv.sb_mb);
744 m = so->so_rcv.sb_mb;
745 }
746 if (m)
747 m->m_nextpkt = nextrecord;
748 }
749 } else {
750 if (flags & MSG_PEEK)
751 moff += len;
752 else {
753 if (mp)
754 *mp = m_copym(m, 0, len, M_WAIT);
755 m->m_data += len;
756 m->m_len -= len;
757 so->so_rcv.sb_cc -= len;
758 }
759 }
760 if (so->so_oobmark) {
761 if ((flags & MSG_PEEK) == 0) {
762 so->so_oobmark -= len;
763 if (so->so_oobmark == 0) {
764 so->so_state |= SS_RCVATMARK;
765 break;
766 }
767 } else {
768 offset += len;
769 if (offset == so->so_oobmark)
770 break;
771 }
772 }
773 if (flags & MSG_EOR)
774 break;
775 /*
776 * If the MSG_WAITALL flag is set (for non-atomic socket),
777 * we must not quit until "uio->uio_resid == 0" or an error
778 * termination. If a signal/timeout occurs, return
779 * with a short count but without error.
780 * Keep sockbuf locked against other readers.
781 */
782 while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
783 !sosendallatonce(so) && !nextrecord) {
784 if (so->so_error || so->so_state & SS_CANTRCVMORE)
785 break;
786 error = sbwait(&so->so_rcv);
787 if (error) {
788 sbunlock(so, &so->so_rcv);
789 splx(s);
790 return (0);
791 }
792 m = so->so_rcv.sb_mb;
793 if (m)
794 nextrecord = m->m_nextpkt;
795 }
796 }
797
798 if (m && pr->pr_flags & PR_ATOMIC) {
799 flags |= MSG_TRUNC;
800 if ((flags & MSG_PEEK) == 0)
801 (void) sbdroprecord(&so->so_rcv);
802 }
803 if ((flags & MSG_PEEK) == 0) {
804 if (m == 0)
805 so->so_rcv.sb_mb = nextrecord;
806 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
807 (*pr->pr_usrreq)(so, PRU_RCVD, (struct mbuf *)0,
808 (struct mbuf *)flags, (struct mbuf *)0);
809 }
810 if (orig_resid == uio->uio_resid && orig_resid &&
811 (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
812 sbunlock(so, &so->so_rcv);
813 splx(s);
814 goto restart;
815 }
816
817 if (flagsp)
818 *flagsp |= flags;
819 release:
820 sbunlock(so, &so->so_rcv);
821 splx(s);
822 return (error);
823 }
824
825 int
826 soshutdown(so, how)
827 register struct socket *so;
828 register int how;
829 {
830 register struct protosw *pr = so->so_proto;
831
832 how++;
833 if (how & FREAD)
834 sorflush(so);
835 if (how & FWRITE)
836 return ((*pr->pr_usrreq)(so, PRU_SHUTDOWN,
837 (struct mbuf *)0, (struct mbuf *)0, (struct mbuf *)0));
838 return (0);
839 }
840
841 void
842 sorflush(so)
843 register struct socket *so;
844 {
845 register struct sockbuf *sb = &so->so_rcv;
846 register struct protosw *pr = so->so_proto;
847 register int s;
848 struct sockbuf asb;
849
850 sb->sb_flags |= SB_NOINTR;
851 (void) sblock(sb, M_WAITOK);
852 s = splimp();
853 socantrcvmore(so);
854 sbunlock(so, sb);
855 asb = *sb;
856 bzero((caddr_t)sb, sizeof (*sb));
857 splx(s);
858 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
859 (*pr->pr_domain->dom_dispose)(asb.sb_mb);
860 sbrelease(&asb);
861 }
862
/*
 * sosetopt - handle setsockopt(2).  SOL_SOCKET options are interpreted
 * here; any other level is passed to the protocol's ctloutput hook.
 * The option mbuf m0 is consumed (freed) before returning.
 */
int
sosetopt(so, level, optname, m0)
	register struct socket *so;
	int level, optname;
	struct mbuf *m0;
{
	int error = 0;
	register struct mbuf *m = m0;

	if (level != SOL_SOCKET) {
		/* Non-socket level: the protocol takes ownership of m0. */
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
		error = ENOPROTOOPT;
	} else {
		switch (optname) {

		case SO_LINGER:
			if (m == NULL || m->m_len != sizeof (struct linger)) {
				error = EINVAL;
				goto bad;
			}
			so->so_linger = mtod(m, struct linger *)->l_linger;
			/* fall thru... */

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			/* Boolean options map directly onto so_options bits. */
			if (*mtod(m, int *))
				so->so_options |= optname;
			else
				so->so_options &= ~optname;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			if (m == NULL || m->m_len < sizeof (int)) {
				error = EINVAL;
				goto bad;
			}
			switch (optname) {

			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(optname == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv,
				    (u_long) *mtod(m, int *)) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			case SO_SNDLOWAT:
				so->so_snd.sb_lowat = *mtod(m, int *);
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat = *mtod(m, int *);
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			struct timeval *tv;
			short val;

			if (m == NULL || m->m_len < sizeof (*tv)) {
				error = EINVAL;
				goto bad;
			}
			tv = mtod(m, struct timeval *);
			/* Reject timeouts that would overflow the short tick count. */
			if (tv->tv_sec > SHRT_MAX / hz - hz) {
				error = EDOM;
				goto bad;
			}
			val = tv->tv_sec * hz + tv->tv_usec / tick;

			switch (optname) {

			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;
		    }

		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			/* Let the protocol see socket-level options too. */
			(void) ((*so->so_proto->pr_ctloutput)
				  (PRCO_SETOPT, so, level, optname, &m0));
			m = NULL;	/* freed by protocol */
		}
	}
bad:
	if (m)
		(void) m_free(m);
	return (error);
}
979
/*
 * sogetopt - handle getsockopt(2).  SOL_SOCKET options are answered here
 * in a freshly allocated mbuf returned through *mp; any other level is
 * forwarded to the protocol's ctloutput hook.
 */
int
sogetopt(so, level, optname, mp)
	register struct socket *so;
	int level, optname;
	struct mbuf **mp;
{
	register struct mbuf *m;

	if (level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				  (PRCO_GETOPT, so, level, optname, mp));
		} else
			return (ENOPROTOOPT);
	} else {
		m = m_get(M_WAIT, MT_SOOPTS);
		m->m_len = sizeof (int);	/* most answers are int-sized */

		switch (optname) {

		case SO_LINGER:
			m->m_len = sizeof (struct linger);
			mtod(m, struct linger *)->l_onoff =
				so->so_options & SO_LINGER;
			mtod(m, struct linger *)->l_linger = so->so_linger;
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
			/* Boolean options are stored as bits in so_options. */
			*mtod(m, int *) = so->so_options & optname;
			break;

		case SO_TYPE:
			*mtod(m, int *) = so->so_type;
			break;

		case SO_ERROR:
			/* Reading the pending error also clears it. */
			*mtod(m, int *) = so->so_error;
			so->so_error = 0;
			break;

		case SO_SNDBUF:
			*mtod(m, int *) = so->so_snd.sb_hiwat;
			break;

		case SO_RCVBUF:
			*mtod(m, int *) = so->so_rcv.sb_hiwat;
			break;

		case SO_SNDLOWAT:
			*mtod(m, int *) = so->so_snd.sb_lowat;
			break;

		case SO_RCVLOWAT:
			*mtod(m, int *) = so->so_rcv.sb_lowat;
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
		    {
			int val = (optname == SO_SNDTIMEO ?
			     so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			/* Convert the stored tick count back into a timeval. */
			m->m_len = sizeof(struct timeval);
			mtod(m, struct timeval *)->tv_sec = val / hz;
			mtod(m, struct timeval *)->tv_usec =
			    (val % hz) * tick;
			break;
		    }

		default:
			(void)m_free(m);
			return (ENOPROTOOPT);
		}
		*mp = m;
		return (0);
	}
}
1064
1065 void
1066 sohasoutofband(so)
1067 register struct socket *so;
1068 {
1069 struct proc *p;
1070
1071 if (so->so_pgid < 0)
1072 gsignal(-so->so_pgid, SIGURG);
1073 else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
1074 psignal(p, SIGURG);
1075 #ifndef __REACTOS__
1076 selwakeup(so, &so->so_rcv.sb_sel);
1077 #else
1078 sorwakeup(so);
1079 #endif
1080 }