2 * Copyright (c) 1997-1998 University of Utah and the Flux Group.
5 * This file is part of the Flux OSKit. The OSKit is free software, also known
6 * as "open source;" you can redistribute it and/or modify it under the terms
7 * of the GNU General Public License (GPL), version 2, as published by the Free
8 * Software Foundation (FSF). To explore alternate licensing terms, contact
9 * the University of Utah at csl-dist@cs.utah.edu or +1-801-585-3271.
11 * The OSKit is distributed in the hope that it will be useful, but WITHOUT ANY
12 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
13 * FOR A PARTICULAR PURPOSE. See the GPL for more details. You should have
14 * received a copy of the GPL along with the OSKit; see the file COPYING. If
15 * not, write to the FSF, 59 Temple Place #330, Boston, MA 02111-1307, USA.
18 * Copyright (c) 1982, 1986, 1988, 1991, 1993
19 * The Regents of the University of California. All rights reserved.
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions
24 * 1. Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * 2. Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in the
28 * documentation and/or other materials provided with the distribution.
29 * 3. All advertising materials mentioning features or use of this software
30 * must display the following acknowledgement:
31 * This product includes software developed by the University of
32 * California, Berkeley and its contributors.
33 * 4. Neither the name of the University nor the names of its contributors
34 * may be used to endorse or promote products derived from this software
35 * without specific prior written permission.
37 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
38 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
39 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
40 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
41 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
42 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
43 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
44 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
45 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
46 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
49 * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
52 #include <sys/param.h>
53 #include <sys/systm.h>
55 #include <sys/malloc.h>
58 #include <sys/kernel.h>
59 #include <sys/syslog.h>
60 #include <sys/domain.h>
61 #include <sys/protosw.h>
67 extern vm_map_t mb_map
;
/* NOTE(review): this chunk is a fragmentary extraction -- interior source
 * lines are elided throughout (the embedded original line numbers jump).
 * Only comments are added; code fragments are preserved byte-for-byte. */
/* NCL_INIT: cluster count pre-allocated at startup -- 4096 bytes' worth,
 * expressed in CLBYTES-sized units. */
78 #define NCL_INIT (4096/CLBYTES)
/* Fragment, presumably from mbinit(): seed the cluster free list via
 * m_clalloc(); the function header and failure path are elided here. */
84 if (m_clalloc(NCL_INIT
, M_DONTWAIT
) == 0)
/* m_clalloc(ncl, nowait): allocate mbuf clusters from mb_map and thread
 * them onto the mclfree list; per the original comment, must be called at
 * splimp.  NOTE(review): locals, error checks, and return paths are elided
 * in this extraction. */
94 * Allocate some number of mbuf clusters
95 * and place on cluster free list.
96 * Must be called at splimp.
100 m_clalloc(ncl
, nowait
)
112 * Once we run out of map space, it will be impossible
113 * to get any more (nothing is ever freed back to the
121 //printf("kmem_malloc(%d)\n", npg);
/* Grab ctob(npg) bytes of kernel memory, honoring the caller's choice of
 * blocking (M_WAITOK) versus non-blocking (M_NOWAIT). */
123 p
= (caddr_t
)kmem_malloc(mb_map
, ctob(npg
),
124 nowait
? M_NOWAIT
: M_WAITOK
);
126 //printf("kmem_malloc done\n");
129 * Either the map is now full, or this is nowait and there
/* Convert the count from CLBYTES-sized units to MCLBYTES-sized clusters. */
135 ncl
= ncl
* CLBYTES
/ MCLBYTES
;
/* Push each new cluster onto the head of the mclfree list. */
136 for (i
= 0; i
< ncl
; i
++) {
137 ((union mcluster
*)p
)->mcl_next
= mclfree
;
138 //printf( "Freeing %x onto the free list\n", p);
139 mclfree
= (union mcluster
*)p
;
/* Account for the new clusters in the global mbuf statistics. */
143 mbstat
.m_clusters
+= ncl
;
144 //printf( "done with m_clalloc\n");
/* m_retry(i, t): slow path taken when MGET fails -- per the original
 * comment, protocols are asked to release memory, then the allocation is
 * retried once via the non-recursing MGET_DONT_RECURSE macro.
 * NOTE(review): interior lines are elided in this extraction. */
150 * When MGET fails, ask protocols to free space when short of memory,
151 * then re-attempt to allocate an mbuf.
157 register struct mbuf
*m
;
161 * I'm getting rid of the utterly ugly redefinition of m_retry
162 * - same for m_retryhdr below
167 MGET_DONT_RECURSE(m
, i
, t
);
/* m_retryhdr(i, t): as m_retry above, but retries a packet-header mbuf
 * allocation via MGETHDR_DONT_RECURSE.  NOTE(review): interior lines are
 * elided in this extraction. */
177 * As above; retry an MGETHDR.
183 register struct mbuf
*m
;
189 MGETHDR_DONT_RECURSE(m
, i
, t
);
/* Fragment, presumably of m_reclaim(): iterate every configured domain and
 * each of its protocols; the elided loop body would invoke the protocols'
 * memory-release hooks -- TODO(review): confirm against the full source,
 * the function header is not visible here. */
201 register struct domain
*dp
;
202 register struct protosw
*pr
;
/* Outer loop: every domain on the global `domains' list. */
205 for (dp
= domains
; dp
; dp
= dp
->dom_next
) {
/* Inner loop: each protosw entry from dom_protosw up to (but excluding)
 * dom_protoswNPROTOSW, one past the last entry. */
206 for (pr
= dp
->dom_protosw
; pr
< dp
->dom_protoswNPROTOSW
; pr
++) {
217 * Space allocation routines.
218 * These are also available as macros
219 * for critical paths.
/* Fragment of m_get(nowait, type): allocate a single mbuf of the given
 * type via the MGET macro.  NOTE(review): function header and return are
 * elided in this extraction. */
225 register struct mbuf
*m
;
227 MGET(m
, nowait
, type
);
/* m_gethdr(nowait, type): allocate a single packet-header mbuf via the
 * MGETHDR macro.  NOTE(review): interior lines (return type, return
 * statement) are elided in this extraction. */
232 m_gethdr(nowait
, type
)
235 register struct mbuf
*m
;
237 MGETHDR(m
, nowait
, type
);
/* m_getclr(nowait, type): allocate an mbuf and zero its interior data area
 * (MLEN bytes) with bzero.  NOTE(review): interior lines are elided in
 * this extraction. */
242 m_getclr(nowait
, type
)
245 register struct mbuf
*m
;
247 MGET(m
, nowait
, type
);
250 bzero(mtod(m
, caddr_t
), MLEN
);
/* Fragment, presumably of m_free(): only this local declaration survives
 * the extraction -- n presumably receives the successor mbuf to return.
 * TODO(review): confirm against the full source. */
258 register struct mbuf
*n
;
/* Fragment, presumably of m_freem(): locals for walking an mbuf chain;
 * the freeing loop itself is elided in this extraction. */
266 register struct mbuf
*m
;
268 register struct mbuf
*n
;
279 * Mbuffer utility routines.
/* m_prepend(m, len, how): allocate a new mbuf and prepend it to chain m
 * (the lesser-used path behind the M_PREPEND macro).  The visible failure
 * path returns NULL; if m carried a packet header, it is migrated to the
 * new head via M_COPY_PKTHDR and the M_PKTHDR flag is cleared on m.
 * NOTE(review): interior lines are elided in this extraction. */
283 * Lesser-used path for M_PREPEND:
284 * allocate new mbuf to prepend to chain,
288 m_prepend(m
, len
, how
)
289 register struct mbuf
*m
;
294 MGET(mn
, how
, m
->m_type
);
295 if (mn
== (struct mbuf
*)NULL
) {
297 return ((struct mbuf
*)NULL
);
/* Move the packet header from the old head to the new one. */
299 if (m
->m_flags
& M_PKTHDR
) {
300 M_COPY_PKTHDR(mn
, m
);
301 m
->m_flags
&= ~M_PKTHDR
;
/* m_copym(m, off0, len, wait): build a new chain covering len bytes of m
 * starting off0 bytes in; len == M_COPYALL copies to the end of the chain.
 * External (M_EXT) data is shared by reference rather than copied.
 * NOTE(review): interior lines are elided in this extraction. */
312 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
313 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
314 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
319 m_copym(m
, off0
, len
, wait
)
320 register struct mbuf
*m
;
324 register struct mbuf
*n
, **np
;
325 register int off
= off0
;
/* A negative offset or length is a caller bug: panic. */
329 if (off
< 0 || len
< 0)
330 panic("m_copym: off %d, len %d", off
, len
);
331 if (off
== 0 && m
->m_flags
& M_PKTHDR
)
335 panic("m_copym: %d", off
);
345 if (len
!= M_COPYALL
)
349 MGET(n
, wait
, m
->m_type
);
/* Fix up the copied packet-header length for the bytes kept/skipped. */
355 if (len
== M_COPYALL
)
356 n
->m_pkthdr
.len
-= off0
;
358 n
->m_pkthdr
.len
= len
;
361 n
->m_len
= min(len
, m
->m_len
- off
);
/* External storage: alias the source data and take an extra reference --
 * bufio reference vs. mclrefcnt; the #ifdef lines selecting between the
 * two are elided here. */
363 if (m
->m_flags
& M_EXT
) {
364 n
->m_data
= m
->m_data
+ off
;
366 oskit_bufio_addref(m
->m_ext
.ext_bufio
);
368 mclrefcnt
[mtocl(m
->m_ext
.ext_buf
)]++;
/* Plain mbuf: copy the bytes into the new mbuf's own storage. */
373 bcopy(mtod(m
, caddr_t
)+off
, mtod(n
, caddr_t
),
375 if (len
!= M_COPYALL
)
/* m_copydata(m, off, len, cp): copy len bytes, starting off bytes into
 * chain m, out to the flat buffer cp.  NOTE(review): the chain-walking
 * loop structure and argument-check bodies are elided in this
 * extraction. */
391 * Copy data from an mbuf chain starting "off" bytes from the beginning,
392 * continuing for "len" bytes, into the indicated buffer.
395 m_copydata(m
, off
, len
, cp
)
396 register struct mbuf
*m
;
401 register unsigned count
;
403 if (off
< 0 || len
< 0)
/* Per-mbuf step: copy as much as this mbuf holds past off. */
416 count
= min(m
->m_len
- off
, len
);
417 OS_DbgPrint(OSK_MID_TRACE
,("count %d len %d\n", count
, len
));
418 bcopy(mtod(m
, caddr_t
) + off
, cp
, count
);
/* m_cat(m, n): append chain n to chain m, compacting n's data into m's
 * interior storage when it fits, otherwise simply linking the chains.
 * Per the original comment, no packet header is updated.
 * NOTE(review): interior lines are elided in this extraction. */
427 * Concatenate mbuf chain n to m.
428 * Both chains must be of the same type (e.g. MT_DATA).
429 * Any m_pkthdr is not updated.
433 register struct mbuf
*m
, *n
;
/* Cannot compact into m if it uses external storage or lacks room. */
438 if (m
->m_flags
& M_EXT
||
439 m
->m_data
+ m
->m_len
+ n
->m_len
>= &m
->m_dat
[MLEN
]) {
440 /* just join the two chains */
444 /* splat the data from one into the other */
445 bcopy(mtod(n
, caddr_t
), mtod(m
, caddr_t
) + m
->m_len
,
447 m
->m_len
+= n
->m_len
;
/* Fragment of m_adj(mp, req_len): trim req_len bytes from an mbuf chain.
 * The visible code trims from the head; the "Trim from tail" section below
 * handles the other direction (presumably selected by the sign of req_len
 * -- TODO(review): confirm, the dispatch is elided).  m_pkthdr.len is kept
 * in sync when the chain has a packet header.  NOTE(review): the function
 * header and many interior lines are elided in this extraction. */
457 register int len
= req_len
;
458 register struct mbuf
*m
;
461 if ((m
= mp
) == NULL
)
/* Trim from head: consume whole mbufs while their data fits in len. */
467 while (m
!= NULL
&& len
> 0) {
468 if (m
->m_len
<= len
) {
479 if (mp
->m_flags
& M_PKTHDR
)
480 m
->m_pkthdr
.len
-= (req_len
- len
);
483 * Trim from tail. Scan the mbuf chain,
484 * calculating its length and finding the last mbuf.
485 * If the adjustment only affects this mbuf, then just
486 * adjust and return. Otherwise, rescan and truncate
487 * after the remaining size.
493 if (m
->m_next
== (struct mbuf
*)0)
497 if (m
->m_len
>= len
) {
499 if (mp
->m_flags
& M_PKTHDR
)
500 mp
->m_pkthdr
.len
-= len
;
507 * Correct length for chain is "count".
508 * Find the mbuf with last data, adjust its length,
509 * and toss data from remaining mbufs on chain.
512 if (m
->m_flags
& M_PKTHDR
)
513 m
->m_pkthdr
.len
= count
;
514 for (; m
; m
= m
->m_next
) {
515 if (m
->m_len
>= count
) {
522 (m
= m
->m_next
) ->m_len
= 0;
/* m_pullup(n, len): make the first len bytes of a chain contiguous in one
 * mbuf so that mtod()/dtom() work on a structure of that size; per the
 * original comment, the chain is freed and NULL returned on failure.
 * NOTE(review): interior lines are elided in this extraction. */
527 * Rearrange an mbuf chain so that len bytes are contiguous
528 * and in the data area of an mbuf (so that mtod and dtom
529 * will work for a structure of size len). Returns the resulting
530 * mbuf chain on success, frees it and returns null on failure.
531 * If there is room, it will add up to max_protohdr-len extra bytes to the
532 * contiguous region in an attempt to avoid being called next time.
538 register struct mbuf
*n
;
541 register struct mbuf
*m
;
546 * If first mbuf has no cluster, and has room for len bytes
547 * without shifting current data, pullup into it,
548 * otherwise allocate a new mbuf to prepend to the chain.
550 if ((n
->m_flags
& M_EXT
) == 0 &&
551 n
->m_data
+ len
< &n
->m_dat
[MLEN
] && n
->m_next
) {
560 MGET(m
, M_DONTWAIT
, n
->m_type
);
564 if (n
->m_flags
& M_PKTHDR
) {
566 n
->m_flags
&= ~M_PKTHDR
;
/* Room left in the destination mbuf's interior data area. */
569 space
= &m
->m_dat
[MLEN
] - (m
->m_data
+ m
->m_len
);
/* Copy at most: the request (padded toward max_protohdr), what fits in
 * the destination, and what the source mbuf actually holds. */
571 count
= min(min(max(len
, max_protohdr
), space
), n
->m_len
);
572 bcopy(mtod(n
, caddr_t
), mtod(m
, caddr_t
) + m
->m_len
,
582 } while (len
> 0 && n
);
/* m_split(m0, len0, wait): split a chain in two, returning the tail (all
 * but the first len0 bytes); per the original comment, on failure NULL is
 * returned and the chain is restored.  NOTE(review): interior lines are
 * elided in this extraction. */
596 * Partition an mbuf chain in two pieces, returning the tail --
597 * all but the first len0 bytes. In case of failure, it returns NULL and
598 * attempts to restore the chain to its original state.
601 m_split(m0
, len0
, wait
)
602 register struct mbuf
*m0
;
605 register struct mbuf
*m
, *n
;
606 unsigned len
= len0
, remain
;
/* Walk to the mbuf containing the split point; remain is the byte count
 * left in it past the split. */
608 for (m
= m0
; m
&& len
> m
->m_len
; m
= m
->m_next
)
612 remain
= m
->m_len
- len
;
/* Packet-header chains: give the tail its own header, carrying over rcvif
 * and dividing the total length between the two halves. */
613 if (m0
->m_flags
& M_PKTHDR
) {
614 MGETHDR(n
, wait
, m0
->m_type
);
617 n
->m_pkthdr
.rcvif
= m0
->m_pkthdr
.rcvif
;
618 n
->m_pkthdr
.len
= m0
->m_pkthdr
.len
- len0
;
619 m0
->m_pkthdr
.len
= len0
;
620 if (m
->m_flags
& M_EXT
)
622 if (remain
> MHLEN
) {
623 /* m can't be the lead packet */
625 n
->m_next
= m_split(m
, len
, wait
);
626 if (n
->m_next
== 0) {
633 } else if (remain
== 0) {
638 MGET(n
, wait
, m
->m_type
);
/* External storage: share the buffer between the halves and take an extra
 * reference (bufio vs. mclrefcnt; the selecting #ifdef is elided). */
644 if (m
->m_flags
& M_EXT
) {
648 oskit_bufio_addref(m
->m_ext
.ext_bufio
);
650 mclrefcnt
[mtocl(m
->m_ext
.ext_buf
)]++;
652 m
->m_ext
.ext_size
= 0; /* For Accounting XXXXXX danger */
653 n
->m_data
= m
->m_data
+ len
;
655 bcopy(mtod(m
, caddr_t
) + len
, mtod(n
, caddr_t
), remain
);
659 n
->m_next
= m
->m_next
;
/* m_devget(buf, totlen, off0, ifp, copy): build a packet-header mbuf chain
 * from device-local memory, using the caller-supplied `copy' routine when
 * given, else bcopy.  Compiled only when !OSKIT or under __REACTOS__.
 * NOTE(review): interior lines are elided in this extraction. */
664 #if !defined(OSKIT) || defined(__REACTOS__)
665 /* currently not OS Kit approved, and shouldn't be needed in the first place */
668 * Routine to copy from device local memory into mbufs.
671 m_devget(buf
, totlen
, off0
, ifp
, copy
)
677 register struct mbuf
*m
;
678 struct mbuf
*top
= 0, **mp
= &top
;
679 register int off
= off0
, len
;
/* Skip the offset plus two u_shorts of the source region (presumably a
 * trailer-format adjustment -- TODO(review): confirm, context elided). */
686 cp
+= off
+ 2 * sizeof(u_short
);
687 totlen
-= 2 * sizeof(u_short
);
/* First mbuf gets a packet header recording rcvif and total length. */
689 MGETHDR(m
, M_DONTWAIT
, MT_DATA
);
693 m
->m_pkthdr
.rcvif
= ifp
;
695 m
->m_pkthdr
.rcvif
= 0;
697 m
->m_pkthdr
.len
= totlen
;
702 MGET(m
, M_DONTWAIT
, MT_DATA
);
/* Use a cluster when the remaining piece is large enough to warrant it. */
709 len
= min(totlen
, epkt
- cp
);
710 if (len
>= MINCLSIZE
) {
711 MCLGET(m
, M_DONTWAIT
);
712 if (m
->m_flags
& M_EXT
)
713 m
->m_len
= len
= min(len
, MCLBYTES
);
718 * Place initial small packet/header at end of mbuf.
720 if (len
< m
->m_len
) {
721 if (top
== 0 && len
+ max_linkhdr
<= m
->m_len
)
722 m
->m_data
+= max_linkhdr
;
728 copy(cp
, mtod(m
, caddr_t
), (unsigned)len
);
730 bcopy(cp
, mtod(m
, caddr_t
), (unsigned)len
);
/* m_copyback(m0, off, len, cp): copy len bytes from buffer cp into chain
 * m0 starting off bytes in, growing the chain with fresh mbufs (zeroed via
 * m_getclr for the gap, plain m_get for the tail) as needed; finally the
 * packet-header length is raised to cover the written region.
 * NOTE(review): interior lines are elided in this extraction. */
744 * Copy data from a buffer back into the indicated mbuf chain,
745 * starting "off" bytes from the beginning, extending the mbuf
746 * chain if necessary.
749 m_copyback(m0
, off
, len
, cp
)
756 register struct mbuf
*m
= m0
, *n
;
/* Advance to the mbuf containing the target offset, extending the chain
 * with zero-filled mbufs when it ends before off. */
761 while (off
> (mlen
= m
->m_len
)) {
764 if (m
->m_next
== 0) {
765 n
= m_getclr(M_DONTWAIT
, m
->m_type
);
768 n
->m_len
= min(MLEN
, len
+ off
);
/* Overwrite in place, one mbuf's worth per iteration. */
774 mlen
= min (m
->m_len
- off
, len
);
775 bcopy(cp
, off
+ mtod(m
, caddr_t
), (unsigned)mlen
);
/* Still bytes left but chain exhausted: append a fresh mbuf. */
783 if (m
->m_next
== 0) {
784 n
= m_get(M_DONTWAIT
, m
->m_type
);
787 n
->m_len
= min(MLEN
, len
);
/* Grow the recorded packet length if the write extended the data. */
792 out
: if (((m
= m0
)->m_flags
& M_PKTHDR
) && (m
->m_pkthdr
.len
< totlen
))
793 m
->m_pkthdr
.len
= totlen
;