/* Container for POSIX file-mode bits, exported to user space. */
typedef unsigned short umode_t;
/*
 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
 * header files exported to user space.
 */
typedef __signed__ char __s8;
typedef unsigned char __u8;

typedef __signed__ short __s16;
typedef unsigned short __u16;

typedef __signed__ int __s32;
typedef unsigned int __u32;

/* 64-bit types need "long long", which strict ANSI C89 lacks. */
#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
typedef __signed__ long long __s64;
typedef unsigned long long __u64;
#endif
/*
 * These aren't exported outside the kernel to avoid name space clashes.
 */
typedef signed char s8;
typedef unsigned char u8;

typedef signed short s16;
typedef unsigned short u16;

typedef signed int s32;
typedef unsigned int u32;

typedef signed long long s64;
typedef unsigned long long u64;

/* i386: native word is 32 bits. */
#define BITS_PER_LONG 32
66 /* DMA addresses come in generic and 64-bit flavours. */
68 #ifdef CONFIG_HIGHMEM64G
69 typedef u64 dma_addr_t
;
71 typedef u32 dma_addr_t
;
73 typedef u64 dma64_addr_t
;
/*
 * This allows for 1024 file descriptors: if NR_OPEN is ever grown
 * beyond that you'll have to change this too. But 1024 fd's seem to be
 * enough even for such "real" unices like OSF/1, so hopefully this is
 * one limit that doesn't have to be changed [again].
 *
 * Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in
 * <sys/time.h> (and thus <linux/time.h>) - but this is a more logical
 * place for them. Solved by having dummy defines in <sys/time.h>.
 *
 * Those macros may have been defined in <gnu/types.h>. But we always
 * use the ones here.
 */
#undef __NFDBITS
#define __NFDBITS	(8 * sizeof(unsigned long))

#undef __FD_SETSIZE
#define __FD_SETSIZE	1024

#undef __FDSET_LONGS
#define __FDSET_LONGS	(__FD_SETSIZE/__NFDBITS)

#undef __FDELT
#define	__FDELT(d)	((d) / __NFDBITS)

#undef __FDMASK
#define	__FDMASK(d)	(1UL << ((d) % __NFDBITS))

typedef struct {
	unsigned long fds_bits [__FDSET_LONGS];
} __kernel_fd_set;
/* Type of a signal handler.  */
typedef void (*__kernel_sighandler_t)(int);

/* Type of a SYSV IPC key.  */
typedef int __kernel_key_t;
/*
 * This file is generally used by user-level software, so you need to
 * be a little careful about namespace pollution etc. Also, we cannot
 * assume GCC is being used.
 */
typedef unsigned short	__kernel_dev_t;
typedef unsigned long	__kernel_ino_t;
typedef unsigned short	__kernel_mode_t;
typedef unsigned short	__kernel_nlink_t;
typedef long		__kernel_off_t;
typedef int		__kernel_pid_t;
typedef unsigned short	__kernel_ipc_pid_t;
typedef unsigned short	__kernel_uid_t;
typedef unsigned short	__kernel_gid_t;
typedef unsigned int	__kernel_size_t;
typedef int		__kernel_ssize_t;
typedef int		__kernel_ptrdiff_t;
typedef long		__kernel_time_t;
typedef long		__kernel_suseconds_t;
typedef long		__kernel_clock_t;
typedef int		__kernel_daddr_t;
typedef char *		__kernel_caddr_t;
typedef unsigned short	__kernel_uid16_t;
typedef unsigned short	__kernel_gid16_t;
typedef unsigned int	__kernel_uid32_t;
typedef unsigned int	__kernel_gid32_t;

/* Pre-uid32 ABI sizes, kept for backwards compatibility. */
typedef unsigned short	__kernel_old_uid_t;
typedef unsigned short	__kernel_old_gid_t;

typedef long long	__kernel_loff_t;
/*
 * NOTE(review): extraction fragment — the bodies of both branches of this
 * conditional (original lines 155 and 157) are missing here; cannot be
 * reconstructed safely from what is visible.
 */
154 #if defined(__KERNEL__) || defined(__USE_ALL)
156 #else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
158 #endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)

/* Set bit `fd` in the fd set, using the x86 bit-test-and-set insn. */
#define __FD_SET(fd,fdsetp) \
		__asm__ __volatile__("btsl %1,%0": \
			"=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))

/* Clear bit `fd` in the fd set (bit-test-and-reset). */
#define __FD_CLR(fd,fdsetp) \
		__asm__ __volatile__("btrl %1,%0": \
			"=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))

/* Test bit `fd`: btl sets CF, setb copies CF into the result byte. */
#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
		unsigned char __result; \
		__asm__ __volatile__("btl %1,%2 ; setb %0" \
			:"=q" (__result) :"r" ((int) (fd)), \
			"m" (*(__kernel_fd_set *) (fdsetp))); \
		__result; }))

/* Zero the whole set with rep stosl; __d0/__d1 absorb the clobbered
 * ecx/edi so gcc knows they are dead afterwards. */
#define __FD_ZERO(fdsetp) \
do { \
	int __d0, __d1; \
	__asm__ __volatile__("cld ; rep ; stosl" \
			:"=m" (*(__kernel_fd_set *) (fdsetp)), \
			  "=&c" (__d0), "=&D" (__d1) \
			:"a" (0), "1" (__FDSET_LONGS), \
			  "2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \
} while (0)

#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
195 #ifndef __KERNEL_STRICT_NAMES
197 typedef __kernel_fd_set fd_set
;
198 typedef __kernel_dev_t dev_t
;
199 typedef __kernel_ino_t ino_t
;
200 typedef __kernel_mode_t mode_t
;
201 typedef __kernel_nlink_t nlink_t
;
202 typedef __kernel_off_t off_t
;
203 typedef __kernel_pid_t pid_t
;
204 typedef __kernel_daddr_t daddr_t
;
205 typedef __kernel_key_t key_t
;
206 typedef __kernel_suseconds_t suseconds_t
;
209 typedef __kernel_uid32_t uid_t
;
210 typedef __kernel_gid32_t gid_t
;
211 typedef __kernel_uid16_t uid16_t
;
212 typedef __kernel_gid16_t gid16_t
;
215 /* This is defined by include/asm-{arch}/posix_types.h */
216 typedef __kernel_old_uid_t old_uid_t
;
217 typedef __kernel_old_gid_t old_gid_t
;
218 #endif /* CONFIG_UID16 */
220 /* libc5 includes this file to define uid_t, thus uid_t can never change
221 * when it is included by non-kernel code
224 typedef __kernel_uid_t uid_t
;
225 typedef __kernel_gid_t gid_t
;
226 #endif /* __KERNEL__ */
228 #if defined(__GNUC__)
229 typedef __kernel_loff_t loff_t
;
233 * The following typedefs are also protected by individual ifdefs for
234 * historical reasons:
238 typedef __kernel_size_t
size_t;
243 typedef __kernel_ssize_t ssize_t
;
248 typedef __kernel_ptrdiff_t
ptrdiff_t;
253 typedef __kernel_time_t
time_t;
258 typedef __kernel_clock_t
clock_t;
263 typedef __kernel_caddr_t caddr_t
;
267 typedef unsigned char u_char
;
268 typedef unsigned short u_short
;
269 typedef unsigned int u_int
;
270 typedef unsigned long u_long
;
273 typedef unsigned char unchar
;
274 typedef unsigned short ushort
;
275 typedef unsigned int uint
;
276 typedef unsigned long ulong
;
278 #ifndef __BIT_TYPES_DEFINED__
279 #define __BIT_TYPES_DEFINED__
281 typedef __u8 u_int8_t
;
283 typedef __u16 u_int16_t
;
284 typedef __s16
int16_t;
285 typedef __u32 u_int32_t
;
286 typedef __s32
int32_t;
288 #endif /* !(__BIT_TYPES_DEFINED__) */
290 typedef __u8
uint8_t;
291 typedef __u16
uint16_t;
292 typedef __u32
uint32_t;
294 #if defined(__GNUC__) && !defined(__STRICT_ANSI__)
295 typedef __u64
uint64_t;
296 typedef __u64 u_int64_t
;
297 typedef __s64
int64_t;
300 #endif /* __KERNEL_STRICT_NAMES */
303 * Below are truly Linux-specific types that should never collide with
304 * any application/library that wants linux/types.h.
308 __kernel_daddr_t f_tfree
;
309 __kernel_ino_t f_tinode
;
/* Byte-order tag values, compatible with glibc's <endian.h>. */
#ifndef __LITTLE_ENDIAN
#define __LITTLE_ENDIAN 1234
#endif
#ifndef __LITTLE_ENDIAN_BITFIELD
#define __LITTLE_ENDIAN_BITFIELD
#endif
337 * linux/byteorder/swab.h
338 * Byte-swapping, independently from CPU endianness
341 * Francois-Rene Rideau <fare@tunes.org> 19971205
342 * separated swab functions from cpu_to_XX,
343 * to clean up support for bizarre-endian architectures.
345 * See asm-i386/byteorder.h and suches for examples of how to provide
346 * architecture-dependent optimized versions
350 /* casts are necessary for constants, because we never know for sure
351 * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
/* Byte-swap a 16-bit value; evaluates x exactly once. */
#define ___swab16(x) \
({ \
	__u16 __x = (x); \
	((__u16)( \
		(((__u16)(__x) & (__u16)0x00ffU) << 8) | \
		(((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
})

/* Byte-swap the low 24 bits; the top byte must be zero. */
#define ___swab24(x) \
({ \
	__u32 __x = (x); \
	((__u32)( \
		((__x & (__u32)0x000000ffUL) << 16) | \
		(__x & (__u32)0x0000ff00UL) | \
		((__x & (__u32)0x00ff0000UL) >> 16) )); \
})

/* Byte-swap a 32-bit value. */
#define ___swab32(x) \
({ \
	__u32 __x = (x); \
	((__u32)( \
		(((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
		(((__u32)(__x) & (__u32)0x0000ff00UL) <<  8) | \
		(((__u32)(__x) & (__u32)0x00ff0000UL) >>  8) | \
		(((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
})

/* Byte-swap a 64-bit value. */
#define ___swab64(x) \
({ \
	__u64 __x = (x); \
	((__u64)( \
		(__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
		(__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
		(__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
		(__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) <<  8) | \
		(__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >>  8) | \
		(__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
		(__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
		(__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
})
/*
 * Compile-time-constant variants of the ___swabXX macros.
 * Fixed: ___constant_swab24's middle term carried an extra unbalanced
 * '(' — it now mirrors ___swab24's balanced form.
 */
#define ___constant_swab16(x) \
	((__u16)( \
		(((__u16)(x) & (__u16)0x00ffU) << 8) | \
		(((__u16)(x) & (__u16)0xff00U) >> 8) ))
#define ___constant_swab24(x) \
	((__u32)( \
		(((__u32)(x) & (__u32)0x000000ffU) << 16) | \
		((__u32)(x) & (__u32)0x0000ff00U) | \
		(((__u32)(x) & (__u32)0x00ff0000U) >> 16) ))
#define ___constant_swab32(x) \
	((__u32)( \
		(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
		(((__u32)(x) & (__u32)0x0000ff00UL) <<  8) | \
		(((__u32)(x) & (__u32)0x00ff0000UL) >>  8) | \
		(((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
#define ___constant_swab64(x) \
	((__u64)( \
		(__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
		(__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
		(__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) <<  8) | \
		(__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >>  8) | \
		(__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
		(__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
/*
 * provide defaults when no architecture-specific optimization is detected
 */
#ifndef __arch__swab16
#  define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
#endif
#ifndef __arch__swab24
#  define __arch__swab24(x) ({ __u32 __tmp = (x) ; ___swab24(__tmp); })
#endif
#ifndef __arch__swab32
#  define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
#endif
#ifndef __arch__swab64
#  define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
#endif

/* Pointer variants: swab the value *x points at. */
#ifndef __arch__swab16p
#  define __arch__swab16p(x) __arch__swab16(*(x))
#endif
#ifndef __arch__swab24p
#  define __arch__swab24p(x) __arch__swab24(*(x))
#endif
#ifndef __arch__swab32p
#  define __arch__swab32p(x) __arch__swab32(*(x))
#endif
#ifndef __arch__swab64p
#  define __arch__swab64p(x) __arch__swab64(*(x))
#endif

/* In-place variants: swab the value at *x. */
#ifndef __arch__swab16s
#  define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
#endif
#ifndef __arch__swab24s
#  define __arch__swab24s(x) do { *(x) = __arch__swab24p((x)); } while (0)
#endif
#ifndef __arch__swab32s
#  define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
#endif
#ifndef __arch__swab64s
#  define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
#endif
/*
 * Allow constant folding: route compile-time constants to the
 * ___swabXX constant macros, everything else to the inline functions.
 */
#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
#  define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
 ___swab16((x)) : \
 __fswab16((x)))
#  define __swab24(x) \
(__builtin_constant_p((__u32)(x)) ? \
 ___swab24((x)) : \
 __fswab24((x)))
#  define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
 ___swab32((x)) : \
 __fswab32((x)))
#  define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
 ___swab64((x)) : \
 __fswab64((x)))
#else
#  define __swab16(x) __fswab16(x)
#  define __swab24(x) __fswab24(x)
#  define __swab32(x) __fswab32(x)
#  define __swab64(x) __fswab64(x)
#endif /* OPTIMIZE */
491 static __inline__ __const__ __u16
__fswab16(__u16 x
)
493 return __arch__swab16(x
);
495 static __inline__ __u16
__swab16p(__u16
*x
)
497 return __arch__swab16p(x
);
499 static __inline__
void __swab16s(__u16
*addr
)
501 __arch__swab16s(addr
);
504 static __inline__ __const__ __u32
__fswab24(__u32 x
)
506 return __arch__swab24(x
);
508 static __inline__ __u32
__swab24p(__u32
*x
)
510 return __arch__swab24p(x
);
512 static __inline__
void __swab24s(__u32
*addr
)
514 __arch__swab24s(addr
);
517 static __inline__ __const__ __u32
__fswab32(__u32 x
)
519 return __arch__swab32(x
);
521 static __inline__ __u32
__swab32p(__u32
*x
)
523 return __arch__swab32p(x
);
525 static __inline__
void __swab32s(__u32
*addr
)
527 __arch__swab32s(addr
);
530 #ifdef __BYTEORDER_HAS_U64__
531 static __inline__ __const__ __u64
__fswab64(__u64 x
)
533 # ifdef __SWAB_64_THRU_32__
535 __u32 l
= x
& ((1ULL<<32)-1);
536 return (((__u64
)__swab32(l
)) << 32) | ((__u64
)(__swab32(h
)));
538 return __arch__swab64(x
);
541 static __inline__ __u64
__swab64p(__u64
*x
)
543 return __arch__swab64p(x
);
545 static __inline__
void __swab64s(__u64
*addr
)
547 __arch__swab64s(addr
);
549 #endif /* __BYTEORDER_HAS_U64__ */
/* In-kernel short names for the byte-swap helpers. */
#if defined(__KERNEL__)
#define swab16 __swab16
#define swab24 __swab24
#define swab32 __swab32
#define swab64 __swab64
#define swab16p __swab16p
#define swab24p __swab24p
#define swab32p __swab32p
#define swab64p __swab64p
#define swab16s __swab16s
#define swab24s __swab24s
#define swab32s __swab32s
#define swab64s __swab64s
#endif
573 * linux/byteorder_generic.h
574 * Generic Byte-reordering support
576 * Francois-Rene Rideau <fare@tunes.org> 19970707
577 * gathered all the good ideas from all asm-foo/byteorder.h into one file,
579 * I hope it is compliant with non-GCC compilers.
580 * I decided to put __BYTEORDER_HAS_U64__ in byteorder.h,
581 * because I wasn't sure it would be ok to put it in types.h
582 * Upgraded it to 2.1.43
583 * Francois-Rene Rideau <fare@tunes.org> 19971012
584 * Upgraded it to 2.1.57
585 * to please Linus T., replaced huge #ifdef's between little/big endian
586 * by nestedly #include'd files.
587 * Francois-Rene Rideau <fare@tunes.org> 19971205
588 * Made it to 2.1.71; now a facelift:
589 * Put files under include/linux/byteorder/
590 * Split swab from generic support.
593 * = Regular kernel maintainers could also replace all these manual
594 * byteswap macros that remain, disseminated among drivers,
595 * after some grep or the sources...
596 * = Linus might want to rename all these macros and files to fit his taste,
597 * to fit his personal naming scheme.
598 * = it seems that a few drivers would also appreciate
599 * nybble swapping support...
600 * = every architecture could add their byteswap macro in asm/byteorder.h
601 * see how some architectures already do (i386, alpha, ppc, etc)
602 * = cpu_to_beXX and beXX_to_cpu might some day need to be well
603 * distinguished throughout the kernel. This is not the case currently,
604 * since little endian, big endian, and pdp endian machines needn't it.
605 * But this might be the case for, say, a port of Linux to 20/21 bit
606 * architectures (any F21 Linux addict around?).
610 * The following macros are to be defined by <asm/byteorder.h>:
612 * Conversion of long and short int between network and host format
617 * It seems that some programs (which? where? or perhaps a standard? POSIX?)
618 * might like the above to be functions, not macros (why?).
619 * if that's true, then detect them, and take measures.
620 * Anyway, the measure is: define only ___ntohl as a macro instead,
621 * and in a separate file, have
622 * unsigned long inline ntohl(x){return ___ntohl(x);}
624 * The same for constant arguments
625 * __constant_ntohl(__u32 x)
626 * __constant_ntohs(__u16 x)
627 * __constant_htonl(__u32 x)
628 * __constant_htons(__u16 x)
630 * Conversion of XX-bit integers (16- 32- or 64-)
631 * between native CPU format and little/big endian format
632 * 64-bit stuff only defined for proper architectures
633 * cpu_to_[bl]eXX(__uXX x)
634 * [bl]eXX_to_cpu(__uXX x)
636 * The same, but takes a pointer to the value to convert
637 * cpu_to_[bl]eXXp(__uXX x)
638 * [bl]eXX_to_cpup(__uXX x)
640 * The same, but change in situ
641 * cpu_to_[bl]eXXs(__uXX x)
642 * [bl]eXX_to_cpus(__uXX x)
644 * See asm-foo/byteorder.h for examples of how to provide
645 * architecture-optimized versions
#if defined(__KERNEL__)
/*
 * inside the kernel, we can use nicknames;
 * outside of it, we must avoid POSIX namespace pollution...
 */
#define cpu_to_le64 __cpu_to_le64
#define le64_to_cpu __le64_to_cpu
#define cpu_to_le32 __cpu_to_le32
#define le32_to_cpu __le32_to_cpu
#define cpu_to_le16 __cpu_to_le16
#define le16_to_cpu __le16_to_cpu
#define cpu_to_be64 __cpu_to_be64
#define be64_to_cpu __be64_to_cpu
#define cpu_to_be32 __cpu_to_be32
#define be32_to_cpu __be32_to_cpu
#define cpu_to_be16 __cpu_to_be16
#define be16_to_cpu __be16_to_cpu
#define cpu_to_le64p __cpu_to_le64p
#define le64_to_cpup __le64_to_cpup
#define cpu_to_le32p __cpu_to_le32p
#define le32_to_cpup __le32_to_cpup
#define cpu_to_le16p __cpu_to_le16p
#define le16_to_cpup __le16_to_cpup
#define cpu_to_be64p __cpu_to_be64p
#define be64_to_cpup __be64_to_cpup
#define cpu_to_be32p __cpu_to_be32p
#define be32_to_cpup __be32_to_cpup
#define cpu_to_be16p __cpu_to_be16p
#define be16_to_cpup __be16_to_cpup
#define cpu_to_le64s __cpu_to_le64s
#define le64_to_cpus __le64_to_cpus
#define cpu_to_le32s __cpu_to_le32s
#define le32_to_cpus __le32_to_cpus
#define cpu_to_le16s __cpu_to_le16s
#define le16_to_cpus __le16_to_cpus
#define cpu_to_be64s __cpu_to_be64s
#define be64_to_cpus __be64_to_cpus
#define cpu_to_be32s __cpu_to_be32s
#define be32_to_cpus __be32_to_cpus
#define cpu_to_be16s __cpu_to_be16s
#define be16_to_cpus __be16_to_cpus
#endif /* __KERNEL__ */
/*
 * Handle ntohl and suches. These have various compatibility
 * issues - like we want to give the prototype even though we
 * also have a macro for them in case some strange program
 * wants to take the address of the thing or something..
 *
 * Note that these used to return a "long" in libc5, even though
 * long is often 64-bit these days.. Thus the casts.
 *
 * They have to be macros in order to do the constant folding
 * correctly - if the argument passed into a inline function
 * it is no longer constant according to gcc..
 */

/*
 * Do the prototypes. Somebody might want to take the
 * address or some such sick thing..
 */
#if defined(__KERNEL__) || (defined (__GLIBC__) && __GLIBC__ >= 2)
extern __u32			ntohl(__u32);
extern __u32			htonl(__u32);
#else
extern unsigned long int	ntohl(unsigned long int);
extern unsigned long int	htonl(unsigned long int);
#endif
extern unsigned short int	ntohs(unsigned short int);
extern unsigned short int	htons(unsigned short int);

#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__) && !defined(__STRICT_ANSI__)

#define ___htonl(x) __cpu_to_be32(x)
#define ___htons(x) __cpu_to_be16(x)
#define ___ntohl(x) __be32_to_cpu(x)
#define ___ntohs(x) __be16_to_cpu(x)

#if defined(__KERNEL__) || (defined (__GLIBC__) && __GLIBC__ >= 2)
#define htonl(x) ___htonl(x)
#define ntohl(x) ___ntohl(x)
#else
/* libc5 ABI returned long — keep the cast for compatibility. */
#define htonl(x) ((unsigned long)___htonl(x))
#define ntohl(x) ((unsigned long)___ntohl(x))
#endif
#define htons(x) ___htons(x)
#define ntohs(x) ___ntohs(x)

#endif /* OPTIMIZE */
/*
 * Little-endian CPU: le conversions are identity casts,
 * be conversions byte-swap, and the in-situ le variants are no-ops.
 */
#define __constant_htonl(x) ___constant_swab32((x))
#define __constant_ntohl(x) ___constant_swab32((x))
#define __constant_htons(x) ___constant_swab16((x))
#define __constant_ntohs(x) ___constant_swab16((x))
#define __constant_cpu_to_le64(x) ((__u64)(x))
#define __constant_le64_to_cpu(x) ((__u64)(x))
#define __constant_cpu_to_le32(x) ((__u32)(x))
#define __constant_le32_to_cpu(x) ((__u32)(x))
#define __constant_cpu_to_le24(x) ((__u32)(x))
#define __constant_le24_to_cpu(x) ((__u32)(x))
#define __constant_cpu_to_le16(x) ((__u16)(x))
#define __constant_le16_to_cpu(x) ((__u16)(x))
#define __constant_cpu_to_be64(x) ___constant_swab64((x))
#define __constant_be64_to_cpu(x) ___constant_swab64((x))
#define __constant_cpu_to_be32(x) ___constant_swab32((x))
#define __constant_be32_to_cpu(x) ___constant_swab32((x))
#define __constant_cpu_to_be24(x) ___constant_swab24((x))
#define __constant_be24_to_cpu(x) ___constant_swab24((x))
#define __constant_cpu_to_be16(x) ___constant_swab16((x))
#define __constant_be16_to_cpu(x) ___constant_swab16((x))
#define __cpu_to_le64(x) ((__u64)(x))
#define __le64_to_cpu(x) ((__u64)(x))
#define __cpu_to_le32(x) ((__u32)(x))
#define __le32_to_cpu(x) ((__u32)(x))
#define __cpu_to_le24(x) ((__u32)(x))
#define __le24_to_cpu(x) ((__u32)(x))
#define __cpu_to_le16(x) ((__u16)(x))
#define __le16_to_cpu(x) ((__u16)(x))
#define __cpu_to_be64(x) __swab64((x))
#define __be64_to_cpu(x) __swab64((x))
#define __cpu_to_be32(x) __swab32((x))
#define __be32_to_cpu(x) __swab32((x))
#define __cpu_to_be24(x) __swab24((x))
#define __be24_to_cpu(x) __swab24((x))
#define __cpu_to_be16(x) __swab16((x))
#define __be16_to_cpu(x) __swab16((x))
#define __cpu_to_le64p(x) (*(__u64*)(x))
#define __le64_to_cpup(x) (*(__u64*)(x))
#define __cpu_to_le32p(x) (*(__u32*)(x))
#define __le32_to_cpup(x) (*(__u32*)(x))
#define __cpu_to_le24p(x) (*(__u32*)(x))
#define __le24_to_cpup(x) (*(__u32*)(x))
#define __cpu_to_le16p(x) (*(__u16*)(x))
#define __le16_to_cpup(x) (*(__u16*)(x))
#define __cpu_to_be64p(x) __swab64p((x))
#define __be64_to_cpup(x) __swab64p((x))
#define __cpu_to_be32p(x) __swab32p((x))
#define __be32_to_cpup(x) __swab32p((x))
#define __cpu_to_be24p(x) __swab24p((x))
#define __be24_to_cpup(x) __swab24p((x))
#define __cpu_to_be16p(x) __swab16p((x))
#define __be16_to_cpup(x) __swab16p((x))
#define __cpu_to_le64s(x) do {} while (0)
#define __le64_to_cpus(x) do {} while (0)
#define __cpu_to_le32s(x) do {} while (0)
#define __le32_to_cpus(x) do {} while (0)
#define __cpu_to_le24s(x) do {} while (0)
#define __le24_to_cpus(x) do {} while (0)
#define __cpu_to_le16s(x) do {} while (0)
#define __le16_to_cpus(x) do {} while (0)
#define __cpu_to_be64s(x) __swab64s((x))
#define __be64_to_cpus(x) __swab64s((x))
#define __cpu_to_be32s(x) __swab32s((x))
#define __be32_to_cpus(x) __swab32s((x))
#define __cpu_to_be24s(x) __swab24s((x))
#define __be24_to_cpus(x) __swab24s((x))
#define __cpu_to_be16s(x) __swab16s((x))
#define __be16_to_cpus(x) __swab16s((x))
/*
 * NOTE(review): extraction fragments — an empty ____cacheline_aligned
 * (uniprocessor build, presumably) and two `volatile unsigned int lock`
 * members of lock structures whose surrounding definitions (original
 * lines 831-848) are missing. Not reconstructable from what is visible.
 */
830 #define ____cacheline_aligned
834 volatile unsigned int lock
;
838 volatile unsigned int lock
;
/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 */

/* The lock prefix is only needed when another CPU can race us. */
#ifdef CONFIG_SMP
#define LOCK "lock ; "
#else
#define LOCK ""
#endif

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
#define atomic_set(v,i)		(((v)->counter) = (i))
889 * atomic_add - add integer to atomic variable
890 * @i: integer value to add
891 * @v: pointer of type atomic_t
893 * Atomically adds @i to @v. Note that the guaranteed useful range
894 * of an atomic_t is only 24 bits.
896 static __inline__
void atomic_add(int i
, atomic_t
*v
)
899 __asm__
__volatile__(
902 :"ir" (i
), "m" (v
->counter
));
907 * atomic_sub - subtract the atomic variable
908 * @i: integer value to subtract
909 * @v: pointer of type atomic_t
911 * Atomically subtracts @i from @v. Note that the guaranteed
912 * useful range of an atomic_t is only 24 bits.
914 static __inline__
void atomic_sub(int i
, atomic_t
*v
)
917 __asm__
__volatile__(
920 :"ir" (i
), "m" (v
->counter
));
925 * atomic_sub_and_test - subtract value from variable and test result
926 * @i: integer value to subtract
927 * @v: pointer of type atomic_t
929 * Atomically subtracts @i from @v and returns
930 * true if the result is zero, or false for all
931 * other cases. Note that the guaranteed
932 * useful range of an atomic_t is only 24 bits.
934 static __inline__
int atomic_sub_and_test(int i
, atomic_t
*v
)
939 __asm__
__volatile__(
940 LOCK
"subl %2,%0; sete %1"
941 :"=m" (v
->counter
), "=qm" (c
)
942 :"ir" (i
), "m" (v
->counter
) : "memory");
948 * atomic_inc - increment atomic variable
949 * @v: pointer of type atomic_t
951 * Atomically increments @v by 1. Note that the guaranteed
952 * useful range of an atomic_t is only 24 bits.
954 static __inline__
void atomic_inc(atomic_t
*v
)
957 __asm__
__volatile__(
965 * atomic_dec - decrement atomic variable
966 * @v: pointer of type atomic_t
968 * Atomically decrements @v by 1. Note that the guaranteed
969 * useful range of an atomic_t is only 24 bits.
971 static __inline__
void atomic_dec(atomic_t
*v
)
974 __asm__
__volatile__(
982 * atomic_dec_and_test - decrement and test
983 * @v: pointer of type atomic_t
985 * Atomically decrements @v by 1 and
986 * returns true if the result is 0, or false for all other
987 * cases. Note that the guaranteed
988 * useful range of an atomic_t is only 24 bits.
990 static __inline__
int atomic_dec_and_test(atomic_t
*v
)
995 __asm__
__volatile__(
996 LOCK
"decl %0; sete %1"
997 :"=m" (v
->counter
), "=qm" (c
)
998 :"m" (v
->counter
) : "memory");
1006 * atomic_inc_and_test - increment and test
1007 * @v: pointer of type atomic_t
1009 * Atomically increments @v by 1
1010 * and returns true if the result is zero, or false for all
1011 * other cases. Note that the guaranteed
1012 * useful range of an atomic_t is only 24 bits.
1014 static __inline__
int atomic_inc_and_test(atomic_t
*v
)
1019 __asm__
__volatile__(
1020 LOCK
"incl %0; sete %1"
1021 :"=m" (v
->counter
), "=qm" (c
)
1022 :"m" (v
->counter
) : "memory");
1030 * atomic_add_negative - add and test if negative
1031 * @v: pointer of type atomic_t
1032 * @i: integer value to add
1034 * Atomically adds @i to @v and returns true
1035 * if the result is negative, or false when
1036 * result is greater than or equal to zero. Note that the guaranteed
1037 * useful range of an atomic_t is only 24 bits.
1039 static __inline__
int atomic_add_negative(int i
, atomic_t
*v
)
1044 __asm__
__volatile__(
1045 LOCK
"addl %2,%0; sets %1"
1046 :"=m" (v
->counter
), "=qm" (c
)
1047 :"ir" (i
), "m" (v
->counter
) : "memory");
1054 /* These are x86-specific, used by some header files */
1055 #define atomic_clear_mask(mask, addr)
1057 __asm__
__volatile__(LOCK
"andl %0,%1" \
1058 : : "r" (~(mask
)),"m" (*addr
) : "memory")
1061 #define atomic_set_mask(mask, addr)
1063 __asm__
__volatile__(LOCK
"orl %0,%1" \
1064 : : "r" (mask
),"m" (*addr
) : "memory")
1067 /* Atomic operations are already serializing on x86 */
1068 #define smp_mb__before_atomic_dec()
1069 #define smp_mb__after_atomic_dec()
1070 #define smp_mb__before_atomic_inc()
1071 #define smp_mb__after_atomic_inc()
/* Circular doubly-linked list; the head is a sentinel node. */
struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)

#define INIT_LIST_HEAD(ptr) do { \
	(ptr)->next = (ptr); (ptr)->prev = (ptr); \
} while (0)

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}

/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head *prev, struct list_head *next)
{
	next->prev = prev;
	prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty on entry does not return true after this, the entry
 * is in an undefined state (next/prev are poisoned with NULL).
 */
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->next = (void *) 0;
	entry->prev = (void *) 0;
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add_tail(list, head);
}

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(struct list_head *head)
{
	return head->next == head;
}

/* Splice @list's entries between @head and head->next; caller must
 * guarantee @list is non-empty. */
static inline void __list_splice(struct list_head *list,
				 struct list_head *head)
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;
	struct list_head *at = head->next;

	first->prev = head;
	head->next = first;

	last->next = at;
	at->prev = last;
}

/**
 * list_splice - join two lists
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised.
 */
static inline void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head);
		INIT_LIST_HEAD(list);
	}
}
1267 * list_entry - get the struct for this entry
1268 * @ptr: the &struct list_head pointer.
1269 * @type: the type of the struct this is embedded in.
1270 * @member: the name of the list_struct within the struct.
1272 #define list_entry(ptr, type, member)
1274 ((type
*)((char *)(ptr
)-(unsigned long)(&((type
*)0)->member
)))
1278 * list_for_each - iterate over a list
1279 * @pos: the &struct list_head to use as a loop counter.
1280 * @head: the head for your list.
1282 #define list_for_each(pos, head)
1284 for (pos
= (head
)->next
, prefetch(pos
->next
); pos
!= (head
); \
1285 pos
= pos
->next
, prefetch(pos
->next
))
1289 * list_for_each_prev - iterate over a list backwards
1290 * @pos: the &struct list_head to use as a loop counter.
1291 * @head: the head for your list.
1293 #define list_for_each_prev(pos, head)
1295 for (pos
= (head
)->prev
, prefetch(pos
->prev
); pos
!= (head
); \
1296 pos
= pos
->prev
, prefetch(pos
->prev
))
/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * @n caches pos->next before the loop body runs, so the body may
 * unlink (or free) @pos without derailing the iteration.  No prefetch
 * here: reading @n already touches the next node.
 */
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
		pos = n, n = pos->next)
/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Like list_for_each() but @pos is the containing structure itself,
 * recovered on every step with list_entry().  Relies on the GCC
 * typeof() extension to name @pos's type inside the expansion.
 */
#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
		     prefetch(pos->member.next);			\
	     &pos->member != (head); 					\
	     pos = list_entry(pos->member.next, typeof(*pos), member),	\
		     prefetch(pos->member.next))
/* Option bits for the waitpid()/wait4() family of calls. */
#define WNOHANG		0x00000001	/* return immediately if no child has exited */
#define WUNTRACED	0x00000002	/* also report stopped (untraced) children */

#define __WNOTHREAD	0x20000000	/* Don't wait on children of other threads in this group */
#define __WALL		0x40000000	/* Wait on all children, regardless of type */
#define __WCLONE	0x80000000	/* Wait only on non-SIGCHLD children */
1342 #include <linux/kernel.h>
1343 #include <linux/list.h>
1344 #include <linux/stddef.h>
1345 #include <linux/spinlock.h>
1346 #include <linux/config.h>
1348 #include <asm/page.h>
1349 #include <asm/processor.h>
/*
 * Debug control. Slow but useful.
 *
 * NOTE(review): the #else/#endif of this conditional were lost in
 * extraction, leaving both values defined unconditionally (a
 * redefinition).  The original conditional structure is restored here.
 */
#if defined(CONFIG_DEBUG_WAITQ)
#define WAITQUEUE_DEBUG 1
#else
#define WAITQUEUE_DEBUG 0
#endif
1361 struct __wait_queue
{
1363 #define WQ_FLAG_EXCLUSIVE 0x01
1364 struct task_struct
* task
;
1365 struct list_head task_list
;
1371 typedef struct __wait_queue wait_queue_t
;
/*
 * 'dual' spinlock architecture. Can be switched between spinlock_t and
 * rwlock_t locks via changing this define. Since waitqueues are quite
 * decoupled in the new architecture, lightweight 'simple' spinlocks give
 * us slightly better latencies and smaller waitqueue structure size.
 *
 * NOTE(review): the #else/#endif of this conditional were lost in
 * extraction, which made the spinlock branch redefine every macro of
 * the rwlock branch; the conditional structure is restored here.
 */
#define USE_RW_WAIT_QUEUE_SPINLOCK 0

#if USE_RW_WAIT_QUEUE_SPINLOCK
# define wq_lock_t rwlock_t
# define WAITQUEUE_RW_LOCK_UNLOCKED RW_LOCK_UNLOCKED

# define wq_read_lock read_lock
# define wq_read_lock_irqsave read_lock_irqsave
# define wq_read_unlock_irqrestore read_unlock_irqrestore
# define wq_read_unlock read_unlock
# define wq_write_lock_irq write_lock_irq
# define wq_write_lock_irqsave write_lock_irqsave
# define wq_write_unlock_irqrestore write_unlock_irqrestore
# define wq_write_unlock write_unlock
#else
# define wq_lock_t spinlock_t
# define WAITQUEUE_RW_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED

# define wq_read_lock spin_lock
# define wq_read_lock_irqsave spin_lock_irqsave
# define wq_read_unlock spin_unlock
# define wq_read_unlock_irqrestore spin_unlock_irqrestore
# define wq_write_lock_irq spin_lock_irq
# define wq_write_lock_irqsave spin_lock_irqsave
# define wq_write_unlock_irqrestore spin_unlock_irqrestore
# define wq_write_unlock spin_unlock
#endif
1407 struct __wait_queue_head
{
1409 struct list_head task_list
;
1415 typedef struct __wait_queue_head wait_queue_head_t
;
/*
 * Debugging macros. We eschew `do { } while (0)' because gcc can generate
 * (NOTE(review): the remainder of this comment was lost in extraction.)
 *
 * NOTE(review): extraction also dropped lines from every macro body
 * below -- the WQ_BUG() calls, closing braces, and the
 * #if WAITQUEUE_DEBUG / #else / #endif structure that selects between
 * the checking and the empty definitions.  Restore from the pristine
 * header before building with WAITQUEUE_DEBUG enabled.
 */
#define WQ_BUG() BUG()

/* A live magic word holds its own address; anything else means the
 * object was never initialised or has been overwritten. */
#define CHECK_MAGIC(x) \
	if ((x) != (long)&(x)) { \
		printk("bad magic %lx (should be %lx), ", \
			(long)x, (long)&(x)); \

/* Same validity check for a wait_queue_head_t via its __magic field;
 * also reports the recorded creator address. */
#define CHECK_MAGIC_WQHEAD(x) \
	if ((x)->__magic != (long)&((x)->__magic)) { \
		printk("bad magic %lx (should be %lx, creator %lx), ", \
			(x)->__magic, (long)&((x)->__magic), (x)->__creator); \

/* A queued list node must have both links populated. */
#define WQ_CHECK_LIST_HEAD(list) \
	if (!(list)->next || !(list)->prev) \

/* Record the waker's return address for post-mortem debugging. */
#define WQ_NOTE_WAKER(tsk) \
	(tsk)->__waker = (long)__builtin_return_address(0); \

/* Non-debug build: all checks compile away to nothing. */
#define CHECK_MAGIC(x)
#define CHECK_MAGIC_WQHEAD(x)
#define WQ_CHECK_LIST_HEAD(list)
#define WQ_NOTE_WAKER(tsk)
/*
 * Macros for declaration and initialisation of the datatypes
 *
 * Under WAITQUEUE_DEBUG these supply the extra aggregate initialisers
 * for the debug fields; otherwise they expand to nothing.
 *
 * NOTE(review): a corrupted "//" had commented out the debug
 * expansions (making the debug-build initialisers incomplete), and the
 * #if/#else/#endif selecting between the two variants was lost in
 * extraction; both are restored here.
 */
#if WAITQUEUE_DEBUG
# define __WAITQUEUE_DEBUG_INIT(name) \
		(long)&(name).__magic, 0
# define __WAITQUEUE_HEAD_DEBUG_INIT(name) \
		(long)&(name).__magic, (long)&(name).__magic
#else
# define __WAITQUEUE_DEBUG_INIT(name)
# define __WAITQUEUE_HEAD_DEBUG_INIT(name)
#endif
/*
 * Static initialiser for a wait_queue_t.
 * NOTE(review): extraction dropped the opening of this aggregate
 * (the pristine version begins "{ task: tsk," before the task_list
 * member); verify against the original header.  Uses the GCC
 * "field:" designated-initialiser extension.
 */
#define __WAITQUEUE_INITIALIZER(name, tsk) \
	task_list:	{ NULL, NULL }, \
		__WAITQUEUE_DEBUG_INIT(name)}

/* Declare and statically initialise a wait queue entry for task tsk. */
#define DECLARE_WAITQUEUE(name, tsk) \
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

/*
 * Static initialiser for a wait_queue_head_t: unlocked lock and a
 * task_list whose links point at itself (the empty circular list).
 * NOTE(review): the opening "{" of this aggregate was lost in
 * extraction; verify against the original header.
 */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) \
	lock:		WAITQUEUE_RW_LOCK_UNLOCKED, \
	task_list:	{ &(name).task_list, &(name).task_list }, \
		__WAITQUEUE_HEAD_DEBUG_INIT(name)}

/* Declare and statically initialise a wait queue head. */
#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
1506 static inline void init_waitqueue_head(wait_queue_head_t
*q
)
1513 q
->lock
= WAITQUEUE_RW_LOCK_UNLOCKED
;
1514 INIT_LIST_HEAD(&q
->task_list
);
1516 q
->__magic
= (long)&q
->__magic
;
1517 q
->__creator
= (long)current_text_addr();
/*
 * Initialise a wait_queue_t to refer to task @p.
 *
 * NOTE(review): extraction dropped this function's braces and its
 * main body (the pristine version clears the flags word and sets
 * q->task = p before the debug stamping below); only the
 * WAITQUEUE_DEBUG magic-word line survives here.  Restore from the
 * pristine header before use.
 */
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
	q->__magic = (long)&q->__magic;
1537 static inline int waitqueue_active(wait_queue_head_t
*q
)
1543 CHECK_MAGIC_WQHEAD(q
);
1546 return !list_empty(&q
->task_list
);
1550 static inline void __add_wait_queue(wait_queue_head_t
*head
, wait_queue_t
*new)
1556 CHECK_MAGIC_WQHEAD(head
);
1557 CHECK_MAGIC(new->__magic
);
1558 if (!head
->task_list
.next
|| !head
->task_list
.prev
)
1561 list_add(&new->task_list
, &head
->task_list
);
1566 * Used for wake-one threads:
1568 static inline void __add_wait_queue_tail(wait_queue_head_t
*head
,
1575 CHECK_MAGIC_WQHEAD(head
);
1576 CHECK_MAGIC(new->__magic
);
1577 if (!head
->task_list
.next
|| !head
->task_list
.prev
)
1580 list_add_tail(&new->task_list
, &head
->task_list
);
1584 static inline void __remove_wait_queue(wait_queue_head_t
*head
,
1591 CHECK_MAGIC(old
->__magic
);
1593 list_del(&old
->task_list
);
/* Opaque handle to a slab cache; struct kmem_cache_s is defined
 * elsewhere (not visible in this file). */
typedef struct kmem_cache_s kmem_cache_t;
1618 #include <linux/mm.h>
1619 #include <linux/cache.h>
/* flags for kmem_cache_alloc() -- each aliases a GFP allocation class */
#define	SLAB_NOFS		GFP_NOFS
#define	SLAB_NOIO		GFP_NOIO
#define	SLAB_NOHIGHIO		GFP_NOHIGHIO
#define	SLAB_ATOMIC		GFP_ATOMIC
#define	SLAB_USER		GFP_USER
#define	SLAB_KERNEL		GFP_KERNEL
#define	SLAB_NFS		GFP_NFS
#define	SLAB_DMA		GFP_DMA

#define SLAB_LEVEL_MASK		(__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_HIGHIO|__GFP_FS)
#define	SLAB_NO_GROW		0x00001000UL	/* don't grow a cache */

/* flags to pass to kmem_cache_create().
 * The first 3 are only valid when the allocator has been built with
 * SLAB_DEBUG_SUPPORT.
 */
#define	SLAB_DEBUG_FREE		0x00000100UL	/* Perform (expensive) checks on free */
#define	SLAB_DEBUG_INITIAL	0x00000200UL	/* Call constructor (as verifier) */
#define	SLAB_RED_ZONE		0x00000400UL	/* Red zone objs in a cache */
#define	SLAB_POISON		0x00000800UL	/* Poison objects */
#define	SLAB_NO_REAP		0x00001000UL	/* never reap from the cache */
#define	SLAB_HWCACHE_ALIGN	0x00002000UL	/* align objs on a h/w cache lines */
#define	SLAB_CACHE_DMA		0x00004000UL	/* use GFP_DMA memory */
#define	SLAB_MUST_HWCACHE_ALIGN	0x00008000UL	/* force alignment */

/* flags passed to a constructor func */
#define	SLAB_CTOR_CONSTRUCTOR	0x001UL		/* if not set, then deconstructor */
#define	SLAB_CTOR_ATOMIC	0x002UL		/* tell constructor it can't sleep */
#define	SLAB_CTOR_VERIFY	0x004UL		/* tell constructor it's a verify call */
/* Slab allocator bootstrap entry points (called during boot). */
extern void kmem_cache_init(void);
extern void kmem_cache_sizes_init(void);

/* Locate a general-purpose cache suitable for the given size/gfp flags
 * -- presumably the smallest fitting kmalloc cache; confirm in slab.c. */
extern kmem_cache_t *kmem_find_general_cachep(size_t, int gfpflags);
/* Create a named cache.  The two function pointers are the object
 * constructor and destructor callbacks (object, cache, flags). */
extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
				       void (*)(void *, kmem_cache_t *, unsigned long),
				       void (*)(void *, kmem_cache_t *, unsigned long));
extern int kmem_cache_destroy(kmem_cache_t *);
extern int kmem_cache_shrink(kmem_cache_t *);
extern void *kmem_cache_alloc(kmem_cache_t *, int);
extern void kmem_cache_free(kmem_cache_t *, void *);
extern unsigned int kmem_cache_size(kmem_cache_t *);

/* General-purpose kernel heap allocation (int arg is the GFP/SLAB_* level). */
extern void *kmalloc(size_t, int);
extern void kfree(const void *);

//extern int FASTCALL(kmem_cache_reap(int));
/* System wide caches -- one dedicated slab cache per commonly allocated
 * kernel object type (names suggest the owning subsystem; defined in
 * their respective .c files). */
extern kmem_cache_t	*vm_area_cachep;
extern kmem_cache_t	*mm_cachep;
extern kmem_cache_t	*names_cachep;
extern kmem_cache_t	*files_cachep;
extern kmem_cache_t	*filp_cachep;
extern kmem_cache_t	*dquot_cachep;
extern kmem_cache_t	*bh_cachep;
extern kmem_cache_t	*fs_cachep;
extern kmem_cache_t	*sigact_cachep;
1688 * Berkeley style UIO structures - Alan Cox 1994.
1690 * This program is free software; you can redistribute it and/or
1691 * modify it under the terms of the GNU General Public License
1692 * as published by the Free Software Foundation; either version
1693 * 2 of the License, or (at your option) any later version.
1697 /* A word of warning: Our uio structure will clash with the C library one (which is now obsolete). Remove the C
1698 library one from sys/uio.h if you have a very old library set */
1702 void *iov_base
; /* BSD uses caddr_t (1003.1g requires void *) */
1703 __kernel_size_t iov_len
; /* Must be size_t (1003.1g) */
/*
 * UIO_MAXIOV shall be at least 16 1003.1g (5.4.1.1)
 *
 * NOTE(review): the #if/#else/#endif selecting between the two
 * UIO_MAXIOV values was lost in extraction; as written the second
 * definition redefines the first.  Restore the conditional from the
 * pristine header.
 */
#define UIO_FASTIOV	8
#define UIO_MAXIOV	1024
#define UIO_MAXIOV	16	/* Maximum iovec's in one operation */
/* Beg pardon: BSD has 1024 --ANK */
/*
 * In Linux 2.4, static timers have been removed from the kernel.
 * Timers may be dynamically created and destroyed, and should be initialized
 * by a call to init_timer() upon creation.
 *
 * The "data" field enables use of a common timeout function for several
 * timeouts. You can use this field to distinguish between the different
 * timers.
 *
 * NOTE(review): the "struct timer_list {" opener, the "unsigned long
 * data;" member (referenced by the comment above) and the closing "};"
 * were lost in extraction.  Headerless member fragment follows:
 */
	struct list_head list;		/* link on the pending-timer list */
	unsigned long expires;		/* expiry time -- presumably in jiffies; confirm */
	void (*function)(unsigned long);	/* timeout handler */

/*
 * NOTE(review): a headerless fragment of struct timeval follows; its
 * "struct timeval {" opener and "};" were lost in extraction.
 */
	unsigned long tv_sec;
	unsigned long tv_usec;
// time_t tv_sec; /* seconds */
// suseconds_t tv_usec; /* microseconds */
/* Forward declaration only; the struct body is not visible in this file. */
struct poll_table_page;

/*
 * NOTE(review): the closing "} poll_table;" of this typedef (and
 * possibly further members) was lost in extraction -- verify against
 * the pristine header.
 */
typedef struct poll_table_struct {
	struct poll_table_page *table;

/* Registers a wait on @wait_address in @p on behalf of @filp;
 * implemented elsewhere (fs/select.c, presumably -- confirm). */
extern void __pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p);
1764 static inline void poll_wait(struct file
* filp
, wait_queue_head_t
* wait_address
, poll_table
*p
)
1766 if (p
&& wait_address
)
1767 __pollwait(filp
, wait_address
, p
);
/*
 * NOTE(review): poll_initwait()'s body and braces were lost in
 * extraction; only the signature survives.  Restore from the pristine
 * source before use.
 */
static inline void poll_initwait(poll_table *pt)

/* Releases resources accumulated in @pt; implemented elsewhere. */
extern void poll_freewait(poll_table *pt);
/*
 * Scaleable version of the fd_set.
 *
 * Six bitmap pointers: the three input sets and the three result sets.
 *
 * NOTE(review): the "typedef struct {" opener and the closing
 * "} fd_set_bits;" were lost in extraction and have been restored;
 * the type name is grounded by its use in the do_select() prototype
 * below.
 */
typedef struct {
	unsigned long *in, *out, *ex;
	unsigned long *res_in, *res_out, *res_ex;
} fd_set_bits;
/*
 * How many longwords for "nr" bits?
 */
#define FDS_BITPERLONG	(8*sizeof(long))	/* bits held by one long */
#define FDS_LONGS(nr)	(((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)	/* round up */
#define FDS_BYTES(nr)	(FDS_LONGS(nr)*sizeof(long))	/* storage bytes for nr bits */
/*
 * We do a VERIFY_WRITE here even though we are only reading this time:
 * we'll write to it eventually..
 *
 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
 *
 * Copies a user-space fd bitmap into @fdset, or zeroes @fdset when no
 * user pointer is supplied (the memset path below).
 *
 * NOTE(review): extraction lost parts of this function -- the braces,
 * the conversion of nr from bits to bytes, the error-return paths and
 * the return statements.  Restore from the pristine source before use.
 */
int get_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
	error = verify_area(VERIFY_WRITE, ufdset, nr);
	if (!error && __copy_from_user(fdset, ufdset, nr))
	memset(fdset, 0, nr);
/*
 * Copies a kernel fd bitmap back out to user space.
 *
 * NOTE(review): the function braces -- and, presumably, a guard on
 * ufdset being non-NULL -- were lost in extraction; verify against the
 * pristine source.
 */
void set_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
	__copy_to_user(ufdset, fdset, FDS_BYTES(nr));
/*
 * Clears the first @nr bits' worth of storage in a kernel fd bitmap.
 *
 * NOTE(review): the function braces were lost in extraction and have
 * been restored; the single-statement body is fully visible in the
 * source.
 */
void zero_fd_set(unsigned long nr, unsigned long *fdset)
{
	memset(fdset, 0, FDS_BYTES(nr));
}
/* Core select() engine -- presumably fills the res_* bitmaps in @fds;
 * implemented elsewhere (confirm in fs/select.c). */
extern int do_select(int n, fd_set_bits *fds, long *timeout);

/*
 * NOTE(review): only the closing line of this typedef survived
 * extraction; the struct body of read_descriptor_t was lost.
 */
} read_descriptor_t;
/* These are specified by iBCS2 */
#define POLLIN		0x0001	/* data available to read */
#define POLLPRI		0x0002	/* urgent/priority data readable */
#define POLLOUT		0x0004	/* writing will not block */
#define POLLERR		0x0008	/* error condition (revents only) */
#define POLLHUP		0x0010	/* hang up (revents only) */
#define POLLNVAL	0x0020	/* invalid fd (revents only) */

/* The rest seem to be more-or-less nonstandard. Check them! */
#define POLLRDNORM	0x0040
#define POLLRDBAND	0x0080
#define POLLWRNORM	0x0100
#define POLLWRBAND	0x0200
#define POLLMSG		0x0400
1876 #endif /* _LINUX_TYPES_H */