/* reactos/drivers/net/tcpip/include/linux.h */
1 #ifndef _LINUX_TYPES_H
2 #define _LINUX_TYPES_H
3
4 #include <ddk/ntddk.h>
5
6 #ifndef NULL
#define NULL ((void*)0)
8 #endif
9
10 typedef struct page {
11 int x;
12 } mem_map_t;
13
14
15
16
17
18
19
20
21
22
23
24
25
26 /* i386 */
27
28 typedef unsigned short umode_t;
29
30 /*
31 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
32 * header files exported to user space
33 */
34
35 typedef __signed__ char __s8;
36 typedef unsigned char __u8;
37
38 typedef __signed__ short __s16;
39 typedef unsigned short __u16;
40
41 typedef __signed__ int __s32;
42 typedef unsigned int __u32;
43
44 #if defined(__GNUC__) && !defined(__STRICT_ANSI__)
45 typedef __signed__ long long __s64;
46 typedef unsigned long long __u64;
47 #endif
48
49 /*
50 * These aren't exported outside the kernel to avoid name space clashes
51 */
52 typedef signed char s8;
53 typedef unsigned char u8;
54
55 typedef signed short s16;
56 typedef unsigned short u16;
57
58 typedef signed int s32;
59 typedef unsigned int u32;
60
61 typedef signed long long s64;
62 typedef unsigned long long u64;
63
64 #define BITS_PER_LONG 32
65
66 /* DMA addresses come in generic and 64-bit flavours. */
67
68 #ifdef CONFIG_HIGHMEM64G
69 typedef u64 dma_addr_t;
70 #else
71 typedef u32 dma_addr_t;
72 #endif
73 typedef u64 dma64_addr_t;
74
75
76
77 /*
78 * This allows for 1024 file descriptors: if NR_OPEN is ever grown
79 * beyond that you'll have to change this too. But 1024 fd's seem to be
80 * enough even for such "real" unices like OSF/1, so hopefully this is
81 * one limit that doesn't have to be changed [again].
82 *
83 * Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in
84 * <sys/time.h> (and thus <linux/time.h>) - but this is a more logical
85 * place for them. Solved by having dummy defines in <sys/time.h>.
86 */
87
88 /*
89 * Those macros may have been defined in <gnu/types.h>. But we always
90 * use the ones here.
91 */
92 #undef __NFDBITS
93 #define __NFDBITS (8 * sizeof(unsigned long))
94
95 #undef __FD_SETSIZE
96 #define __FD_SETSIZE 1024
97
98 #undef __FDSET_LONGS
99 #define __FDSET_LONGS (__FD_SETSIZE/__NFDBITS)
100
101 #undef __FDELT
102 #define __FDELT(d) ((d) / __NFDBITS)
103
104 #undef __FDMASK
105 #define __FDMASK(d) (1UL << ((d) % __NFDBITS))
106
107 typedef struct {
108 unsigned long fds_bits [__FDSET_LONGS];
109 } __kernel_fd_set;
110
111 /* Type of a signal handler. */
112 typedef void (*__kernel_sighandler_t)(int);
113
114 /* Type of a SYSV IPC key. */
115 typedef int __kernel_key_t;
116
117
118 /*
119 * This file is generally used by user-level software, so you need to
120 * be a little careful about namespace pollution etc. Also, we cannot
121 * assume GCC is being used.
122 */
123
124 typedef unsigned short __kernel_dev_t;
125 typedef unsigned long __kernel_ino_t;
126 typedef unsigned short __kernel_mode_t;
127 typedef unsigned short __kernel_nlink_t;
128 typedef long __kernel_off_t;
129 typedef int __kernel_pid_t;
130 typedef unsigned short __kernel_ipc_pid_t;
131 typedef unsigned short __kernel_uid_t;
132 typedef unsigned short __kernel_gid_t;
133 typedef unsigned int __kernel_size_t;
134 typedef int __kernel_ssize_t;
135 typedef int __kernel_ptrdiff_t;
136 typedef long __kernel_time_t;
137 typedef long __kernel_suseconds_t;
138 typedef long __kernel_clock_t;
139 typedef int __kernel_daddr_t;
140 typedef char * __kernel_caddr_t;
141 typedef unsigned short __kernel_uid16_t;
142 typedef unsigned short __kernel_gid16_t;
143 typedef unsigned int __kernel_uid32_t;
144 typedef unsigned int __kernel_gid32_t;
145
146 typedef unsigned short __kernel_old_uid_t;
147 typedef unsigned short __kernel_old_gid_t;
148
149 #ifdef __GNUC__
150 typedef long long __kernel_loff_t;
151 #endif
152
153 typedef struct {
154 #if defined(__KERNEL__) || defined(__USE_ALL)
155 int val[2];
156 #else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
157 int __val[2];
158 #endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
159 } __kernel_fsid_t;
160
161 #if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
162
163 #undef __FD_SET
164 #define __FD_SET(fd,fdsetp) \
165 __asm__ __volatile__("btsl %1,%0": \
166 "=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
167
168 #undef __FD_CLR
169 #define __FD_CLR(fd,fdsetp) \
170 __asm__ __volatile__("btrl %1,%0": \
171 "=m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd)))
172
173 #undef __FD_ISSET
174 #define __FD_ISSET(fd,fdsetp) (__extension__ ({ \
175 unsigned char __result; \
176 __asm__ __volatile__("btl %1,%2 ; setb %0" \
177 :"=q" (__result) :"r" ((int) (fd)), \
178 "m" (*(__kernel_fd_set *) (fdsetp))); \
179 __result; }))
180
181 #undef __FD_ZERO
182 #define __FD_ZERO(fdsetp) \
183 do { \
184 int __d0, __d1; \
185 __asm__ __volatile__("cld ; rep ; stosl" \
186 :"=m" (*(__kernel_fd_set *) (fdsetp)), \
187 "=&c" (__d0), "=&D" (__d1) \
188 :"a" (0), "1" (__FDSET_LONGS), \
189 "2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \
190 } while (0)
191
192 #endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
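/*
 * Usage sketch (not part of the original header): how the __FD_* helpers
 * above are meant to be combined on a __kernel_fd_set. The descriptor
 * value 5 is an arbitrary illustration.
 */
#if 0
static void example_fd_set_usage(void)
{
	__kernel_fd_set fds;

	__FD_ZERO(&fds);		/* clear all __FD_SETSIZE bits */
	__FD_SET(5, &fds);		/* mark descriptor 5 as watched */
	if (__FD_ISSET(5, &fds))	/* test the bit we just set */
		__FD_CLR(5, &fds);	/* and clear it again */
}
#endif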
193
194
195 #ifndef __KERNEL_STRICT_NAMES
196
197 typedef __kernel_fd_set fd_set;
198 typedef __kernel_dev_t dev_t;
199 typedef __kernel_ino_t ino_t;
200 typedef __kernel_mode_t mode_t;
201 typedef __kernel_nlink_t nlink_t;
202 typedef __kernel_off_t off_t;
203 typedef __kernel_pid_t pid_t;
204 typedef __kernel_daddr_t daddr_t;
205 typedef __kernel_key_t key_t;
206 typedef __kernel_suseconds_t suseconds_t;
207
208 #ifdef __KERNEL__
209 typedef __kernel_uid32_t uid_t;
210 typedef __kernel_gid32_t gid_t;
211 typedef __kernel_uid16_t uid16_t;
212 typedef __kernel_gid16_t gid16_t;
213
214 #ifdef CONFIG_UID16
215 /* This is defined by include/asm-{arch}/posix_types.h */
216 typedef __kernel_old_uid_t old_uid_t;
217 typedef __kernel_old_gid_t old_gid_t;
218 #endif /* CONFIG_UID16 */
219
220 /* libc5 includes this file to define uid_t, thus uid_t can never change
221 * when it is included by non-kernel code
222 */
223 #else
224 typedef __kernel_uid_t uid_t;
225 typedef __kernel_gid_t gid_t;
226 #endif /* __KERNEL__ */
227
228 #if defined(__GNUC__)
229 typedef __kernel_loff_t loff_t;
230 #endif
231
232 /*
233 * The following typedefs are also protected by individual ifdefs for
234 * historical reasons:
235 */
236 #ifndef _SIZE_T
237 #define _SIZE_T
238 typedef __kernel_size_t size_t;
239 #endif
240
241 #ifndef _SSIZE_T
242 #define _SSIZE_T
243 typedef __kernel_ssize_t ssize_t;
244 #endif
245
246 #ifndef _PTRDIFF_T
247 #define _PTRDIFF_T
248 typedef __kernel_ptrdiff_t ptrdiff_t;
249 #endif
250
251 #ifndef _TIME_T
252 #define _TIME_T
253 typedef __kernel_time_t time_t;
254 #endif
255
256 #ifndef _CLOCK_T
257 #define _CLOCK_T
258 typedef __kernel_clock_t clock_t;
259 #endif
260
261 #ifndef _CADDR_T
262 #define _CADDR_T
263 typedef __kernel_caddr_t caddr_t;
264 #endif
265
266 /* bsd */
267 typedef unsigned char u_char;
268 typedef unsigned short u_short;
269 typedef unsigned int u_int;
270 typedef unsigned long u_long;
271
272 /* sysv */
273 typedef unsigned char unchar;
274 typedef unsigned short ushort;
275 typedef unsigned int uint;
276 typedef unsigned long ulong;
277
278 #ifndef __BIT_TYPES_DEFINED__
279 #define __BIT_TYPES_DEFINED__
280
281 typedef __u8 u_int8_t;
282 typedef __s8 int8_t;
283 typedef __u16 u_int16_t;
284 typedef __s16 int16_t;
285 typedef __u32 u_int32_t;
286 typedef __s32 int32_t;
287
288 #endif /* !(__BIT_TYPES_DEFINED__) */
289
290 typedef __u8 uint8_t;
291 typedef __u16 uint16_t;
292 typedef __u32 uint32_t;
293
294 #if defined(__GNUC__) && !defined(__STRICT_ANSI__)
295 typedef __u64 uint64_t;
296 typedef __u64 u_int64_t;
297 typedef __s64 int64_t;
298 #endif
299
300 #endif /* __KERNEL_STRICT_NAMES */
301
302 /*
303 * Below are truly Linux-specific types that should never collide with
304 * any application/library that wants linux/types.h.
305 */
306
307 struct ustat {
308 __kernel_daddr_t f_tfree;
309 __kernel_ino_t f_tinode;
310 char f_fname[6];
311 char f_fpack[6];
312 };
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327 #ifndef __LITTLE_ENDIAN
328 #define __LITTLE_ENDIAN 1234
329 #endif
330 #ifndef __LITTLE_ENDIAN_BITFIELD
331 #define __LITTLE_ENDIAN_BITFIELD
332 #endif
333
334 #if 1 /* swab */
335
336 /*
337 * linux/byteorder/swab.h
338 * Byte-swapping, independently from CPU endianness
339 * swabXX[ps]?(foo)
340 *
341 * Francois-Rene Rideau <fare@tunes.org> 19971205
342 * separated swab functions from cpu_to_XX,
343 * to clean up support for bizarre-endian architectures.
344 *
 * See asm-i386/byteorder.h and the like for examples of how to provide
346 * architecture-dependent optimized versions
347 *
348 */
349
/* casts are necessary for constants, because we never know for sure
 * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
352 */
353 #define ___swab16(x) \
354 ({ \
355 __u16 __x = (x); \
356 ((__u16)( \
357 (((__u16)(__x) & (__u16)0x00ffU) << 8) | \
358 (((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
359 })
360
361 #define ___swab24(x) \
362 ({ \
363 __u32 __x = (x); \
364 ((__u32)( \
365 ((__x & (__u32)0x000000ffUL) << 16) | \
366 (__x & (__u32)0x0000ff00UL) | \
367 ((__x & (__u32)0x00ff0000UL) >> 16) )); \
368 })
369
370 #define ___swab32(x) \
371 ({ \
372 __u32 __x = (x); \
373 ((__u32)( \
374 (((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
375 (((__u32)(__x) & (__u32)0x0000ff00UL) << 8) | \
376 (((__u32)(__x) & (__u32)0x00ff0000UL) >> 8) | \
377 (((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
378 })
379
380 #define ___swab64(x) \
381 ({ \
382 __u64 __x = (x); \
383 ((__u64)( \
384 (__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
385 (__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
386 (__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
387 (__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) << 8) | \
388 (__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >> 8) | \
389 (__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
390 (__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
391 (__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
392 })
393
394 #define ___constant_swab16(x) \
395 ((__u16)( \
396 (((__u16)(x) & (__u16)0x00ffU) << 8) | \
397 (((__u16)(x) & (__u16)0xff00U) >> 8) ))
#define ___constant_swab24(x) \
	((__u32)( \
		(((__u32)(x) & (__u32)0x000000ffU) << 16) | \
		((__u32)(x) & (__u32)0x0000ff00U) | \
		(((__u32)(x) & (__u32)0x00ff0000U) >> 16) ))
403 #define ___constant_swab32(x) \
404 ((__u32)( \
405 (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
406 (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
407 (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
408 (((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
409 #define ___constant_swab64(x) \
410 ((__u64)( \
411 (__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
412 (__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
413 (__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
414 (__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
415 (__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
416 (__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
417 (__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
418 (__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
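/*
 * Quick illustration (not part of the original header) of what the swab
 * macros above compute; the numeric values are only examples.
 */
#if 0
static void example_swab_usage(void)
{
	__u16 a = ___swab16(0x1234);		/* 0x3412 */
	__u32 b = ___swab24(0x00123456);	/* 0x00563412, middle byte stays in place */
	__u32 c = ___swab32(0x12345678);	/* 0x78563412 */
	__u16 d = ___constant_swab16(0x1234);	/* same value, usable in a constant context */
}
#endif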
419
420 /*
421 * provide defaults when no architecture-specific optimization is detected
422 */
423 #ifndef __arch__swab16
424 # define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
425 #endif
426 #ifndef __arch__swab24
427 # define __arch__swab24(x) ({ __u32 __tmp = (x) ; ___swab24(__tmp); })
428 #endif
429 #ifndef __arch__swab32
430 # define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
431 #endif
432 #ifndef __arch__swab64
433 # define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
434 #endif
435
436 #ifndef __arch__swab16p
437 # define __arch__swab16p(x) __arch__swab16(*(x))
438 #endif
439 #ifndef __arch__swab24p
440 # define __arch__swab24p(x) __arch__swab24(*(x))
441 #endif
442 #ifndef __arch__swab32p
443 # define __arch__swab32p(x) __arch__swab32(*(x))
444 #endif
445 #ifndef __arch__swab64p
446 # define __arch__swab64p(x) __arch__swab64(*(x))
447 #endif
448
449 #ifndef __arch__swab16s
450 # define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
451 #endif
452 #ifndef __arch__swab24s
453 # define __arch__swab24s(x) do { *(x) = __arch__swab24p((x)); } while (0)
454 #endif
455 #ifndef __arch__swab32s
456 # define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
457 #endif
458 #ifndef __arch__swab64s
459 # define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
460 #endif
461
462
463 /*
464 * Allow constant folding
465 */
466 #if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
467 # define __swab16(x) \
468 (__builtin_constant_p((__u16)(x)) ? \
469 ___swab16((x)) : \
470 __fswab16((x)))
471 # define __swab24(x) \
472 (__builtin_constant_p((__u32)(x)) ? \
473 ___swab24((x)) : \
474 __fswab24((x)))
475 # define __swab32(x) \
476 (__builtin_constant_p((__u32)(x)) ? \
477 ___swab32((x)) : \
478 __fswab32((x)))
479 # define __swab64(x) \
480 (__builtin_constant_p((__u64)(x)) ? \
481 ___swab64((x)) : \
482 __fswab64((x)))
483 #else
484 # define __swab16(x) __fswab16(x)
485 # define __swab24(x) __fswab24(x)
486 # define __swab32(x) __fswab32(x)
487 # define __swab64(x) __fswab64(x)
488 #endif /* OPTIMIZE */
489
490
491 static __inline__ __const__ __u16 __fswab16(__u16 x)
492 {
493 return __arch__swab16(x);
494 }
495 static __inline__ __u16 __swab16p(__u16 *x)
496 {
497 return __arch__swab16p(x);
498 }
499 static __inline__ void __swab16s(__u16 *addr)
500 {
501 __arch__swab16s(addr);
502 }
503
504 static __inline__ __const__ __u32 __fswab24(__u32 x)
505 {
506 return __arch__swab24(x);
507 }
508 static __inline__ __u32 __swab24p(__u32 *x)
509 {
510 return __arch__swab24p(x);
511 }
512 static __inline__ void __swab24s(__u32 *addr)
513 {
514 __arch__swab24s(addr);
515 }
516
517 static __inline__ __const__ __u32 __fswab32(__u32 x)
518 {
519 return __arch__swab32(x);
520 }
521 static __inline__ __u32 __swab32p(__u32 *x)
522 {
523 return __arch__swab32p(x);
524 }
525 static __inline__ void __swab32s(__u32 *addr)
526 {
527 __arch__swab32s(addr);
528 }
529
530 #ifdef __BYTEORDER_HAS_U64__
531 static __inline__ __const__ __u64 __fswab64(__u64 x)
532 {
533 # ifdef __SWAB_64_THRU_32__
534 __u32 h = x >> 32;
535 __u32 l = x & ((1ULL<<32)-1);
536 return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
537 # else
538 return __arch__swab64(x);
539 # endif
540 }
541 static __inline__ __u64 __swab64p(__u64 *x)
542 {
543 return __arch__swab64p(x);
544 }
545 static __inline__ void __swab64s(__u64 *addr)
546 {
547 __arch__swab64s(addr);
548 }
549 #endif /* __BYTEORDER_HAS_U64__ */
550
551 #if defined(__KERNEL__)
552 #define swab16 __swab16
553 #define swab24 __swab24
554 #define swab32 __swab32
555 #define swab64 __swab64
556 #define swab16p __swab16p
557 #define swab24p __swab24p
558 #define swab32p __swab32p
559 #define swab64p __swab64p
560 #define swab16s __swab16s
561 #define swab24s __swab24s
562 #define swab32s __swab32s
563 #define swab64s __swab64s
564 #endif
565
566 #endif /* swab */
567
568
569
570 #if 1 /* generic */
571
572 /*
573 * linux/byteorder_generic.h
574 * Generic Byte-reordering support
575 *
576 * Francois-Rene Rideau <fare@tunes.org> 19970707
577 * gathered all the good ideas from all asm-foo/byteorder.h into one file,
578 * cleaned them up.
579 * I hope it is compliant with non-GCC compilers.
580 * I decided to put __BYTEORDER_HAS_U64__ in byteorder.h,
581 * because I wasn't sure it would be ok to put it in types.h
582 * Upgraded it to 2.1.43
583 * Francois-Rene Rideau <fare@tunes.org> 19971012
584 * Upgraded it to 2.1.57
585 * to please Linus T., replaced huge #ifdef's between little/big endian
 * by nested #include'd files.
587 * Francois-Rene Rideau <fare@tunes.org> 19971205
588 * Made it to 2.1.71; now a facelift:
589 * Put files under include/linux/byteorder/
590 * Split swab from generic support.
591 *
592 * TODO:
593 * = Regular kernel maintainers could also replace all these manual
594 * byteswap macros that remain, disseminated among drivers,
 * after some grep of the sources...
596 * = Linus might want to rename all these macros and files to fit his taste,
597 * to fit his personal naming scheme.
598 * = it seems that a few drivers would also appreciate
599 * nybble swapping support...
600 * = every architecture could add their byteswap macro in asm/byteorder.h
601 * see how some architectures already do (i386, alpha, ppc, etc)
602 * = cpu_to_beXX and beXX_to_cpu might some day need to be well
603 * distinguished throughout the kernel. This is not the case currently,
 * since little endian, big endian, and pdp endian machines don't need it.
605 * But this might be the case for, say, a port of Linux to 20/21 bit
 * architectures (any F21 Linux addicts around?).
607 */
608
609 /*
610 * The following macros are to be defined by <asm/byteorder.h>:
611 *
612 * Conversion of long and short int between network and host format
613 * ntohl(__u32 x)
614 * ntohs(__u16 x)
615 * htonl(__u32 x)
616 * htons(__u16 x)
617 * It seems that some programs (which? where? or perhaps a standard? POSIX?)
618 * might like the above to be functions, not macros (why?).
619 * if that's true, then detect them, and take measures.
620 * Anyway, the measure is: define only ___ntohl as a macro instead,
621 * and in a separate file, have
622 * unsigned long inline ntohl(x){return ___ntohl(x);}
623 *
624 * The same for constant arguments
625 * __constant_ntohl(__u32 x)
626 * __constant_ntohs(__u16 x)
627 * __constant_htonl(__u32 x)
628 * __constant_htons(__u16 x)
629 *
630 * Conversion of XX-bit integers (16- 32- or 64-)
631 * between native CPU format and little/big endian format
632 * 64-bit stuff only defined for proper architectures
633 * cpu_to_[bl]eXX(__uXX x)
634 * [bl]eXX_to_cpu(__uXX x)
635 *
636 * The same, but takes a pointer to the value to convert
637 * cpu_to_[bl]eXXp(__uXX x)
638 * [bl]eXX_to_cpup(__uXX x)
639 *
640 * The same, but change in situ
641 * cpu_to_[bl]eXXs(__uXX x)
642 * [bl]eXX_to_cpus(__uXX x)
643 *
644 * See asm-foo/byteorder.h for examples of how to provide
645 * architecture-optimized versions
646 *
647 */
648
649
650 #if defined(__KERNEL__)
651 /*
652 * inside the kernel, we can use nicknames;
653 * outside of it, we must avoid POSIX namespace pollution...
654 */
655 #define cpu_to_le64 __cpu_to_le64
656 #define le64_to_cpu __le64_to_cpu
657 #define cpu_to_le32 __cpu_to_le32
658 #define le32_to_cpu __le32_to_cpu
659 #define cpu_to_le16 __cpu_to_le16
660 #define le16_to_cpu __le16_to_cpu
661 #define cpu_to_be64 __cpu_to_be64
662 #define be64_to_cpu __be64_to_cpu
663 #define cpu_to_be32 __cpu_to_be32
664 #define be32_to_cpu __be32_to_cpu
665 #define cpu_to_be16 __cpu_to_be16
666 #define be16_to_cpu __be16_to_cpu
667 #define cpu_to_le64p __cpu_to_le64p
668 #define le64_to_cpup __le64_to_cpup
669 #define cpu_to_le32p __cpu_to_le32p
670 #define le32_to_cpup __le32_to_cpup
671 #define cpu_to_le16p __cpu_to_le16p
672 #define le16_to_cpup __le16_to_cpup
673 #define cpu_to_be64p __cpu_to_be64p
674 #define be64_to_cpup __be64_to_cpup
675 #define cpu_to_be32p __cpu_to_be32p
676 #define be32_to_cpup __be32_to_cpup
677 #define cpu_to_be16p __cpu_to_be16p
678 #define be16_to_cpup __be16_to_cpup
679 #define cpu_to_le64s __cpu_to_le64s
680 #define le64_to_cpus __le64_to_cpus
681 #define cpu_to_le32s __cpu_to_le32s
682 #define le32_to_cpus __le32_to_cpus
683 #define cpu_to_le16s __cpu_to_le16s
684 #define le16_to_cpus __le16_to_cpus
685 #define cpu_to_be64s __cpu_to_be64s
686 #define be64_to_cpus __be64_to_cpus
687 #define cpu_to_be32s __cpu_to_be32s
688 #define be32_to_cpus __be32_to_cpus
689 #define cpu_to_be16s __cpu_to_be16s
690 #define be16_to_cpus __be16_to_cpus
691 #endif
692
693
694 /*
 * Handle ntohl and the like. These have various compatibility
696 * issues - like we want to give the prototype even though we
697 * also have a macro for them in case some strange program
698 * wants to take the address of the thing or something..
699 *
700 * Note that these used to return a "long" in libc5, even though
701 * long is often 64-bit these days.. Thus the casts.
702 *
703 * They have to be macros in order to do the constant folding
 * correctly - if the argument is passed into an inline function
 * it is no longer constant according to gcc.
706 */
707
708 #undef ntohl
709 #undef ntohs
710 #undef htonl
711 #undef htons
712
713 /*
714 * Do the prototypes. Somebody might want to take the
715 * address or some such sick thing..
716 */
717 #if defined(__KERNEL__) || (defined (__GLIBC__) && __GLIBC__ >= 2)
718 extern __u32 ntohl(__u32);
719 extern __u32 htonl(__u32);
720 #else
721 extern unsigned long int ntohl(unsigned long int);
722 extern unsigned long int htonl(unsigned long int);
723 #endif
724 extern unsigned short int ntohs(unsigned short int);
725 extern unsigned short int htons(unsigned short int);
726
727
728 #if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__) && !defined(__STRICT_ANSI__)
729
730 #define ___htonl(x) __cpu_to_be32(x)
731 #define ___htons(x) __cpu_to_be16(x)
732 #define ___ntohl(x) __be32_to_cpu(x)
733 #define ___ntohs(x) __be16_to_cpu(x)
734
735 #if defined(__KERNEL__) || (defined (__GLIBC__) && __GLIBC__ >= 2)
736 #define htonl(x) ___htonl(x)
737 #define ntohl(x) ___ntohl(x)
738 #else
739 #define htonl(x) ((unsigned long)___htonl(x))
740 #define ntohl(x) ((unsigned long)___ntohl(x))
741 #endif
742 #define htons(x) ___htons(x)
743 #define ntohs(x) ___ntohs(x)
744
745 #endif /* OPTIMIZE */
746
747 #endif /* generic */
748
749
750 #define __constant_htonl(x) ___constant_swab32((x))
751 #define __constant_ntohl(x) ___constant_swab32((x))
752 #define __constant_htons(x) ___constant_swab16((x))
753 #define __constant_ntohs(x) ___constant_swab16((x))
754 #define __constant_cpu_to_le64(x) ((__u64)(x))
755 #define __constant_le64_to_cpu(x) ((__u64)(x))
756 #define __constant_cpu_to_le32(x) ((__u32)(x))
757 #define __constant_le32_to_cpu(x) ((__u32)(x))
758 #define __constant_cpu_to_le24(x) ((__u32)(x))
759 #define __constant_le24_to_cpu(x) ((__u32)(x))
760 #define __constant_cpu_to_le16(x) ((__u16)(x))
761 #define __constant_le16_to_cpu(x) ((__u16)(x))
762 #define __constant_cpu_to_be64(x) ___constant_swab64((x))
763 #define __constant_be64_to_cpu(x) ___constant_swab64((x))
764 #define __constant_cpu_to_be32(x) ___constant_swab32((x))
765 #define __constant_be32_to_cpu(x) ___constant_swab32((x))
766 #define __constant_cpu_to_be24(x) ___constant_swab24((x))
767 #define __constant_be24_to_cpu(x) ___constant_swab24((x))
768 #define __constant_cpu_to_be16(x) ___constant_swab16((x))
769 #define __constant_be16_to_cpu(x) ___constant_swab16((x))
770 #define __cpu_to_le64(x) ((__u64)(x))
771 #define __le64_to_cpu(x) ((__u64)(x))
772 #define __cpu_to_le32(x) ((__u32)(x))
773 #define __le32_to_cpu(x) ((__u32)(x))
774 #define __cpu_to_le24(x) ((__u32)(x))
775 #define __le24_to_cpu(x) ((__u32)(x))
776 #define __cpu_to_le16(x) ((__u16)(x))
777 #define __le16_to_cpu(x) ((__u16)(x))
778 #define __cpu_to_be64(x) __swab64((x))
779 #define __be64_to_cpu(x) __swab64((x))
780 #define __cpu_to_be32(x) __swab32((x))
781 #define __be32_to_cpu(x) __swab32((x))
782 #define __cpu_to_be24(x) __swab24((x))
783 #define __be24_to_cpu(x) __swab24((x))
784 #define __cpu_to_be16(x) __swab16((x))
785 #define __be16_to_cpu(x) __swab16((x))
786 #define __cpu_to_le64p(x) (*(__u64*)(x))
787 #define __le64_to_cpup(x) (*(__u64*)(x))
788 #define __cpu_to_le32p(x) (*(__u32*)(x))
789 #define __le32_to_cpup(x) (*(__u32*)(x))
790 #define __cpu_to_le24p(x) (*(__u32*)(x))
791 #define __le24_to_cpup(x) (*(__u32*)(x))
792 #define __cpu_to_le16p(x) (*(__u16*)(x))
793 #define __le16_to_cpup(x) (*(__u16*)(x))
794 #define __cpu_to_be64p(x) __swab64p((x))
795 #define __be64_to_cpup(x) __swab64p((x))
796 #define __cpu_to_be32p(x) __swab32p((x))
797 #define __be32_to_cpup(x) __swab32p((x))
798 #define __cpu_to_be24p(x) __swab24p((x))
799 #define __be24_to_cpup(x) __swab24p((x))
800 #define __cpu_to_be16p(x) __swab16p((x))
801 #define __be16_to_cpup(x) __swab16p((x))
802 #define __cpu_to_le64s(x) do {} while (0)
803 #define __le64_to_cpus(x) do {} while (0)
804 #define __cpu_to_le32s(x) do {} while (0)
805 #define __le32_to_cpus(x) do {} while (0)
806 #define __cpu_to_le24s(x) do {} while (0)
807 #define __le24_to_cpus(x) do {} while (0)
808 #define __cpu_to_le16s(x) do {} while (0)
809 #define __le16_to_cpus(x) do {} while (0)
810 #define __cpu_to_be64s(x) __swab64s((x))
811 #define __be64_to_cpus(x) __swab64s((x))
812 #define __cpu_to_be32s(x) __swab32s((x))
813 #define __be32_to_cpus(x) __swab32s((x))
814 #define __cpu_to_be24s(x) __swab24s((x))
815 #define __be24_to_cpus(x) __swab24s((x))
816 #define __cpu_to_be16s(x) __swab16s((x))
817 #define __be16_to_cpus(x) __swab16s((x))
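/*
 * Usage sketch (not part of the original header): converting values between
 * CPU byte order and wire format with the macros defined above. On this
 * little-endian (i386) configuration the le variants are no-ops and the be
 * variants byte-swap. The TCP port number 80 is only an illustration.
 */
#if 0
static void example_byteorder_usage(void)
{
	__u16 port_net  = __cpu_to_be16(80);		/* host to network (big endian) */
	__u16 port_host = __be16_to_cpu(port_net);	/* back to host order: 80 */
	__u32 le_val    = __cpu_to_le32(0x12345678);	/* unchanged on little endian */
	__u32 be_val    = __cpu_to_be32(0x12345678);	/* 0x78563412 */
}
#endif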
818
819
820
821
822
823
824
825
826 #if 1
827
828 /* Dummy types */
829
830 #define ____cacheline_aligned
831
832 typedef struct
833 {
834 volatile unsigned int lock;
835 } rwlock_t;
836
837 typedef struct {
838 volatile unsigned int lock;
839 } spinlock_t;
840
841 struct task_struct;
842
843
844
845
846
847 #if 1 /* atomic */
848
849 /*
850 * Atomic operations that C can't guarantee us. Useful for
851 * resource counting etc..
852 */
853
854 #ifdef CONFIG_SMP
855 #define LOCK "lock ; "
856 #else
857 #define LOCK ""
858 #endif
859
860 /*
861 * Make sure gcc doesn't try to be clever and move things around
862 * on us. We need to use _exactly_ the address the user gave us,
863 * not some alias that contains the same information.
864 */
865 typedef struct { volatile int counter; } atomic_t;
866
867 #define ATOMIC_INIT(i) { (i) }
868
869 /**
870 * atomic_read - read atomic variable
871 * @v: pointer of type atomic_t
872 *
873 * Atomically reads the value of @v. Note that the guaranteed
874 * useful range of an atomic_t is only 24 bits.
875 */
876 #define atomic_read(v) ((v)->counter)
877
878 /**
879 * atomic_set - set atomic variable
880 * @v: pointer of type atomic_t
881 * @i: required value
882 *
883 * Atomically sets the value of @v to @i. Note that the guaranteed
884 * useful range of an atomic_t is only 24 bits.
885 */
886 #define atomic_set(v,i) (((v)->counter) = (i))
887
888 /**
889 * atomic_add - add integer to atomic variable
890 * @i: integer value to add
891 * @v: pointer of type atomic_t
892 *
893 * Atomically adds @i to @v. Note that the guaranteed useful range
894 * of an atomic_t is only 24 bits.
895 */
896 static __inline__ void atomic_add(int i, atomic_t *v)
897 {
898 #if 0
899 __asm__ __volatile__(
900 LOCK "addl %1,%0"
901 :"=m" (v->counter)
902 :"ir" (i), "m" (v->counter));
903 #endif
904 }
905
906 /**
907 * atomic_sub - subtract the atomic variable
908 * @i: integer value to subtract
909 * @v: pointer of type atomic_t
910 *
911 * Atomically subtracts @i from @v. Note that the guaranteed
912 * useful range of an atomic_t is only 24 bits.
913 */
914 static __inline__ void atomic_sub(int i, atomic_t *v)
915 {
916 #if 0
917 __asm__ __volatile__(
918 LOCK "subl %1,%0"
919 :"=m" (v->counter)
920 :"ir" (i), "m" (v->counter));
921 #endif
922 }
923
924 /**
925 * atomic_sub_and_test - subtract value from variable and test result
926 * @i: integer value to subtract
927 * @v: pointer of type atomic_t
928 *
929 * Atomically subtracts @i from @v and returns
930 * true if the result is zero, or false for all
931 * other cases. Note that the guaranteed
932 * useful range of an atomic_t is only 24 bits.
933 */
934 static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
935 {
936 #if 0
937 unsigned char c;
938
939 __asm__ __volatile__(
940 LOCK "subl %2,%0; sete %1"
941 :"=m" (v->counter), "=qm" (c)
942 :"ir" (i), "m" (v->counter) : "memory");
943 return c;
#else
	return 1;	/* stub: the real implementation above is disabled */
#endif
}
946
947 /**
948 * atomic_inc - increment atomic variable
949 * @v: pointer of type atomic_t
950 *
951 * Atomically increments @v by 1. Note that the guaranteed
952 * useful range of an atomic_t is only 24 bits.
953 */
954 static __inline__ void atomic_inc(atomic_t *v)
955 {
956 #if 0
957 __asm__ __volatile__(
958 LOCK "incl %0"
959 :"=m" (v->counter)
960 :"m" (v->counter));
961 #endif
962 }
963
964 /**
965 * atomic_dec - decrement atomic variable
966 * @v: pointer of type atomic_t
967 *
968 * Atomically decrements @v by 1. Note that the guaranteed
969 * useful range of an atomic_t is only 24 bits.
970 */
971 static __inline__ void atomic_dec(atomic_t *v)
972 {
973 #if 0
974 __asm__ __volatile__(
975 LOCK "decl %0"
976 :"=m" (v->counter)
977 :"m" (v->counter));
978 #endif
979 }
980
981 /**
982 * atomic_dec_and_test - decrement and test
983 * @v: pointer of type atomic_t
984 *
985 * Atomically decrements @v by 1 and
986 * returns true if the result is 0, or false for all other
987 * cases. Note that the guaranteed
988 * useful range of an atomic_t is only 24 bits.
989 */
990 static __inline__ int atomic_dec_and_test(atomic_t *v)
991 {
992 #if 0
993 unsigned char c;
994
995 __asm__ __volatile__(
996 LOCK "decl %0; sete %1"
997 :"=m" (v->counter), "=qm" (c)
998 :"m" (v->counter) : "memory");
999 return c != 0;
1000 #else
1001 return 1;
1002 #endif
1003 }
1004
1005 /**
1006 * atomic_inc_and_test - increment and test
1007 * @v: pointer of type atomic_t
1008 *
1009 * Atomically increments @v by 1
1010 * and returns true if the result is zero, or false for all
1011 * other cases. Note that the guaranteed
1012 * useful range of an atomic_t is only 24 bits.
1013 */
1014 static __inline__ int atomic_inc_and_test(atomic_t *v)
1015 {
1016 #if 0
1017 unsigned char c;
1018
1019 __asm__ __volatile__(
1020 LOCK "incl %0; sete %1"
1021 :"=m" (v->counter), "=qm" (c)
1022 :"m" (v->counter) : "memory");
1023 return c != 0;
1024 #else
1025 return 1;
1026 #endif
1027 }
1028
1029 /**
1030 * atomic_add_negative - add and test if negative
1031 * @v: pointer of type atomic_t
1032 * @i: integer value to add
1033 *
1034 * Atomically adds @i to @v and returns true
1035 * if the result is negative, or false when
1036 * result is greater than or equal to zero. Note that the guaranteed
1037 * useful range of an atomic_t is only 24 bits.
1038 */
1039 static __inline__ int atomic_add_negative(int i, atomic_t *v)
1040 {
1041 #if 0
1042 unsigned char c;
1043
1044 __asm__ __volatile__(
1045 LOCK "addl %2,%0; sets %1"
1046 :"=m" (v->counter), "=qm" (c)
1047 :"ir" (i), "m" (v->counter) : "memory");
1048 return c;
1049 #else
1050 return 0;
1051 #endif
1052 }
1053
1054 /* These are x86-specific, used by some header files */
1055 #define atomic_clear_mask(mask, addr)
1056 #if 0
1057 __asm__ __volatile__(LOCK "andl %0,%1" \
1058 : : "r" (~(mask)),"m" (*addr) : "memory")
1059 #endif
1060
1061 #define atomic_set_mask(mask, addr)
1062 #if 0
1063 __asm__ __volatile__(LOCK "orl %0,%1" \
1064 : : "r" (mask),"m" (*addr) : "memory")
1065 #endif
1066
1067 /* Atomic operations are already serializing on x86 */
1068 #define smp_mb__before_atomic_dec()
1069 #define smp_mb__after_atomic_dec()
1070 #define smp_mb__before_atomic_inc()
1071 #define smp_mb__after_atomic_inc()
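/*
 * Usage sketch (not part of the original header): the reference-count
 * pattern the atomic_* helpers above are intended for. Note that in this
 * header the operations are stubbed out, so the sketch only shows intent.
 */
#if 0
static atomic_t example_refcount = ATOMIC_INIT(1);

static void example_atomic_usage(void)
{
	atomic_inc(&example_refcount);			/* take a reference */
	if (atomic_dec_and_test(&example_refcount)) {	/* drop it; true when it hits zero */
		/* last reference gone: release the object here */
	}
	atomic_set(&example_refcount, 0);
	atomic_add(5, &example_refcount);
	(void) atomic_read(&example_refcount);
}
#endif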
1072
1073
1074
1075 #endif /* atomic */
1076
1077
1078
1079
1080
1081 #if 1 /* list */
1082
1083 struct list_head {
1084 struct list_head *next, *prev;
1085 };
1086
1087 #define LIST_HEAD_INIT(name) { &(name), &(name) }
1088
1089 #define LIST_HEAD(name) \
1090 struct list_head name = LIST_HEAD_INIT(name)
1091
1092 #define INIT_LIST_HEAD(ptr) do { \
1093 (ptr)->next = (ptr); (ptr)->prev = (ptr); \
1094 } while (0)
1095
1096 /*
1097 * Insert a new entry between two known consecutive entries.
1098 *
1099 * This is only for internal list manipulation where we know
1100 * the prev/next entries already!
1101 */
1102 static inline void __list_add(struct list_head *new,
1103 struct list_head *prev,
1104 struct list_head *next)
1105 {
1106 #if 0
1107 next->prev = new;
1108 new->next = next;
1109 new->prev = prev;
1110 prev->next = new;
1111 #endif
1112 }
1113
1114 /**
1115 * list_add - add a new entry
1116 * @new: new entry to be added
1117 * @head: list head to add it after
1118 *
1119 * Insert a new entry after the specified head.
1120 * This is good for implementing stacks.
1121 */
1122 static inline void list_add(struct list_head *new, struct list_head *head)
1123 {
1124 #if 0
1125 __list_add(new, head, head->next);
1126 #endif
1127 }
1128
1129 /**
1130 * list_add_tail - add a new entry
1131 * @new: new entry to be added
1132 * @head: list head to add it before
1133 *
1134 * Insert a new entry before the specified head.
1135 * This is useful for implementing queues.
1136 */
1137 static inline void list_add_tail(struct list_head *new, struct list_head *head)
1138 {
1139 #if 0
1140 __list_add(new, head->prev, head);
1141 #endif
1142 }
1143
1144 /*
1145 * Delete a list entry by making the prev/next entries
1146 * point to each other.
1147 *
1148 * This is only for internal list manipulation where we know
1149 * the prev/next entries already!
1150 */
1151 static inline void __list_del(struct list_head *prev, struct list_head *next)
1152 {
1153 next->prev = prev;
1154 prev->next = next;
1155 }
1156
1157 /**
1158 * list_del - deletes entry from list.
1159 * @entry: the element to delete from the list.
1160 * Note: list_empty on entry does not return true after this, the entry is in an undefined state.
1161 */
1162 static inline void list_del(struct list_head *entry)
1163 {
1164 #if 0
1165 __list_del(entry->prev, entry->next);
1166 entry->next = (void *) 0;
1167 entry->prev = (void *) 0;
1168 #endif
1169 }
1170
1171 /**
1172 * list_del_init - deletes entry from list and reinitialize it.
1173 * @entry: the element to delete from the list.
1174 */
1175 static inline void list_del_init(struct list_head *entry)
1176 {
1177 #if 0
1178 __list_del(entry->prev, entry->next);
1179 INIT_LIST_HEAD(entry);
1180 #endif
1181 }
1182
1183 /**
1184 * list_move - delete from one list and add as another's head
1185 * @list: the entry to move
1186 * @head: the head that will precede our entry
1187 */
1188 static inline void list_move(struct list_head *list, struct list_head *head)
1189 {
1190 #if 0
1191 __list_del(list->prev, list->next);
1192 list_add(list, head);
1193 #endif
1194 }
1195
1196 /**
1197 * list_move_tail - delete from one list and add as another's tail
1198 * @list: the entry to move
1199 * @head: the head that will follow our entry
1200 */
1201 static inline void list_move_tail(struct list_head *list,
1202 struct list_head *head)
1203 {
1204 #if 0
1205 __list_del(list->prev, list->next);
1206 list_add_tail(list, head);
1207 #endif
1208 }
1209
1210 /**
1211 * list_empty - tests whether a list is empty
1212 * @head: the list to test.
1213 */
1214 static inline int list_empty(struct list_head *head)
1215 {
1216 return head->next == head;
1217 }
1218
1219 static inline void __list_splice(struct list_head *list,
1220 struct list_head *head)
1221 {
1222 #if 0
1223 struct list_head *first = list->next;
1224 struct list_head *last = list->prev;
1225 struct list_head *at = head->next;
1226
1227 first->prev = head;
1228 head->next = first;
1229
1230 last->next = at;
1231 at->prev = last;
1232 #endif
1233 }
1234
1235 /**
1236 * list_splice - join two lists
1237 * @list: the new list to add.
1238 * @head: the place to add it in the first list.
1239 */
1240 static inline void list_splice(struct list_head *list, struct list_head *head)
1241 {
1242 #if 0
1243 if (!list_empty(list))
1244 __list_splice(list, head);
1245 #endif
1246 }
1247
1248 /**
1249 * list_splice_init - join two lists and reinitialise the emptied list.
1250 * @list: the new list to add.
1251 * @head: the place to add it in the first list.
1252 *
1253 * The list at @list is reinitialised
1254 */
1255 static inline void list_splice_init(struct list_head *list,
1256 struct list_head *head)
1257 {
1258 #if 0
1259 if (!list_empty(list)) {
1260 __list_splice(list, head);
1261 INIT_LIST_HEAD(list);
1262 }
1263 #endif
1264 }
1265
1266 /**
1267 * list_entry - get the struct for this entry
1268 * @ptr: the &struct list_head pointer.
1269 * @type: the type of the struct this is embedded in.
1270 * @member: the name of the list_struct within the struct.
1271 */
1272 #define list_entry(ptr, type, member)
1273 #if 0
1274 ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
1275 #endif
1276
1277 /**
1278 * list_for_each - iterate over a list
1279 * @pos: the &struct list_head to use as a loop counter.
1280 * @head: the head for your list.
1281 */
1282 #define list_for_each(pos, head)
1283 #if 0
1284 for (pos = (head)->next, prefetch(pos->next); pos != (head); \
1285 pos = pos->next, prefetch(pos->next))
1286 #endif
1287
1288 /**
1289 * list_for_each_prev - iterate over a list backwards
1290 * @pos: the &struct list_head to use as a loop counter.
1291 * @head: the head for your list.
1292 */
1293 #define list_for_each_prev(pos, head)
1294 #if 0
1295 for (pos = (head)->prev, prefetch(pos->prev); pos != (head); \
1296 pos = pos->prev, prefetch(pos->prev))
1297 #endif
1298
1299 /**
1300 * list_for_each_safe - iterate over a list safe against removal of list entry
1301 * @pos: the &struct list_head to use as a loop counter.
1302 * @n: another &struct list_head to use as temporary storage
1303 * @head: the head for your list.
1304 */
1305 #define list_for_each_safe(pos, n, head)
1306 #if 0
1307 for (pos = (head)->next, n = pos->next; pos != (head); \
1308 pos = n, n = pos->next)
1309 #endif
1310
1311 /**
1312 * list_for_each_entry - iterate over list of given type
1313 * @pos: the type * to use as a loop counter.
1314 * @head: the head for your list.
1315 * @member: the name of the list_struct within the struct.
1316 */
1317 #define list_for_each_entry(pos, head, member)
1318 #if 0
1319 for (pos = list_entry((head)->next, typeof(*pos), member), \
1320 prefetch(pos->member.next); \
1321 &pos->member != (head); \
1322 pos = list_entry(pos->member.next, typeof(*pos), member), \
1323 prefetch(pos->member.next))
1324 #endif
1325
1326 #endif /* list */
1327
1328
1329
1330
1331
1332 #if 1 /* wait */
1333
1334 #define WNOHANG 0x00000001
1335 #define WUNTRACED 0x00000002
1336
1337 #define __WNOTHREAD 0x20000000 /* Don't wait on children of other threads in this group */
1338 #define __WALL 0x40000000 /* Wait on all children, regardless of type */
1339 #define __WCLONE 0x80000000 /* Wait only on non-SIGCHLD children */
1340
1341 #if 0
1342 #include <linux/kernel.h>
1343 #include <linux/list.h>
1344 #include <linux/stddef.h>
1345 #include <linux/spinlock.h>
1346 #include <linux/config.h>
1347
1348 #include <asm/page.h>
1349 #include <asm/processor.h>
1350 #endif
1351
1352 /*
1353 * Debug control. Slow but useful.
1354 */
1355 #if defined(CONFIG_DEBUG_WAITQ)
1356 #define WAITQUEUE_DEBUG 1
1357 #else
1358 #define WAITQUEUE_DEBUG 0
1359 #endif
1360
1361 struct __wait_queue {
1362 unsigned int flags;
1363 #define WQ_FLAG_EXCLUSIVE 0x01
1364 struct task_struct * task;
1365 struct list_head task_list;
1366 #if WAITQUEUE_DEBUG
1367 long __magic;
1368 long __waker;
1369 #endif
1370 };
1371 typedef struct __wait_queue wait_queue_t;
1372
1373 /*
1374 * 'dual' spinlock architecture. Can be switched between spinlock_t and
1375 * rwlock_t locks via changing this define. Since waitqueues are quite
1376 * decoupled in the new architecture, lightweight 'simple' spinlocks give
1377 * us slightly better latencies and smaller waitqueue structure size.
1378 */
1379 #define USE_RW_WAIT_QUEUE_SPINLOCK 0
1380
1381 #if USE_RW_WAIT_QUEUE_SPINLOCK
1382 # define wq_lock_t rwlock_t
1383 # define WAITQUEUE_RW_LOCK_UNLOCKED RW_LOCK_UNLOCKED
1384
1385 # define wq_read_lock read_lock
1386 # define wq_read_lock_irqsave read_lock_irqsave
1387 # define wq_read_unlock_irqrestore read_unlock_irqrestore
1388 # define wq_read_unlock read_unlock
1389 # define wq_write_lock_irq write_lock_irq
1390 # define wq_write_lock_irqsave write_lock_irqsave
1391 # define wq_write_unlock_irqrestore write_unlock_irqrestore
1392 # define wq_write_unlock write_unlock
1393 #else
1394 # define wq_lock_t spinlock_t
1395 # define WAITQUEUE_RW_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED
1396
1397 # define wq_read_lock spin_lock
1398 # define wq_read_lock_irqsave spin_lock_irqsave
1399 # define wq_read_unlock spin_unlock
1400 # define wq_read_unlock_irqrestore spin_unlock_irqrestore
1401 # define wq_write_lock_irq spin_lock_irq
1402 # define wq_write_lock_irqsave spin_lock_irqsave
1403 # define wq_write_unlock_irqrestore spin_unlock_irqrestore
1404 # define wq_write_unlock spin_unlock
1405 #endif
1406
1407 struct __wait_queue_head {
1408 wq_lock_t lock;
1409 struct list_head task_list;
1410 #if WAITQUEUE_DEBUG
1411 long __magic;
1412 long __creator;
1413 #endif
1414 };
1415 typedef struct __wait_queue_head wait_queue_head_t;
1416
1417
1418 /*
1419 * Debugging macros. We eschew `do { } while (0)' because gcc can generate
1420 * spurious .aligns.
1421 */
1422 #if WAITQUEUE_DEBUG
1423 #define WQ_BUG() BUG()
1424 #define CHECK_MAGIC(x)
1425 #if 0
1426 do { \
1427 if ((x) != (long)&(x)) { \
1428 printk("bad magic %lx (should be %lx), ", \
1429 (long)x, (long)&(x)); \
1430 WQ_BUG(); \
1431 } \
1432 } while (0)
1433 #endif
1434
1435 #define CHECK_MAGIC_WQHEAD(x)
1436 #if 0
1437 do { \
1438 if ((x)->__magic != (long)&((x)->__magic)) { \
1439 printk("bad magic %lx (should be %lx, creator %lx), ", \
1440 (x)->__magic, (long)&((x)->__magic), (x)->__creator); \
1441 WQ_BUG(); \
1442 } \
1443 } while (0)
1444 #endif
1445
1446 #define WQ_CHECK_LIST_HEAD(list)
1447 #if 0
1448 do { \
1449 if (!(list)->next || !(list)->prev) \
1450 WQ_BUG(); \
1451 } while(0)
1452 #endif
1453
1454 #define WQ_NOTE_WAKER(tsk)
1455 #if 0
1456 do { \
1457 (tsk)->__waker = (long)__builtin_return_address(0); \
1458 } while (0)
1459 #endif
1460 #else
1461 #define WQ_BUG()
1462 #define CHECK_MAGIC(x)
1463 #define CHECK_MAGIC_WQHEAD(x)
1464 #define WQ_CHECK_LIST_HEAD(list)
1465 #define WQ_NOTE_WAKER(tsk)
1466 #endif
1467
1468 /*
 * Macros for declaration and initialisation of the datatypes
1470 */
1471
1472 #if WAITQUEUE_DEBUG
1473 # define __WAITQUEUE_DEBUG_INIT(name) //(long)&(name).__magic, 0
1474 # define __WAITQUEUE_HEAD_DEBUG_INIT(name) //(long)&(name).__magic, (long)&(name).__magic
1475 #else
1476 # define __WAITQUEUE_DEBUG_INIT(name)
1477 # define __WAITQUEUE_HEAD_DEBUG_INIT(name)
1478 #endif
1479
1480 #define __WAITQUEUE_INITIALIZER(name, tsk)
1481 #if 0
1482 {
1483 task: tsk, \
1484 task_list: { NULL, NULL }, \
1485 __WAITQUEUE_DEBUG_INIT(name)}
1486 #endif
1487
1488 #define DECLARE_WAITQUEUE(name, tsk)
1489 #if 0
1490 wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
1491 #endif
1492
1493 #define __WAIT_QUEUE_HEAD_INITIALIZER(name)
1494 #if 0
1495 {
1496 lock: WAITQUEUE_RW_LOCK_UNLOCKED, \
1497 task_list: { &(name).task_list, &(name).task_list }, \
1498 __WAITQUEUE_HEAD_DEBUG_INIT(name)}
1499 #endif
1500
1501 #define DECLARE_WAIT_QUEUE_HEAD(name)
1502 #if 0
1503 wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
1504 #endif
1505
1506 static inline void init_waitqueue_head(wait_queue_head_t *q)
1507 {
1508 #if 0
1509 #if WAITQUEUE_DEBUG
1510 if (!q)
1511 WQ_BUG();
1512 #endif
1513 q->lock = WAITQUEUE_RW_LOCK_UNLOCKED;
1514 INIT_LIST_HEAD(&q->task_list);
1515 #if WAITQUEUE_DEBUG
1516 q->__magic = (long)&q->__magic;
1517 q->__creator = (long)current_text_addr();
1518 #endif
1519 #endif
1520 }
1521
1522 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
1523 {
1524 #if 0
1525 #if WAITQUEUE_DEBUG
1526 if (!q || !p)
1527 WQ_BUG();
1528 #endif
1529 q->flags = 0;
1530 q->task = p;
1531 #if WAITQUEUE_DEBUG
1532 q->__magic = (long)&q->__magic;
1533 #endif
1534 #endif
1535 }
1536
1537 static inline int waitqueue_active(wait_queue_head_t *q)
1538 {
1539 #if 0
1540 #if WAITQUEUE_DEBUG
1541 if (!q)
1542 WQ_BUG();
1543 CHECK_MAGIC_WQHEAD(q);
1544 #endif
1545
1546 return !list_empty(&q->task_list);
#else
	return 0;	/* stub: the real check above is disabled */
#endif
}
1549
1550 static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
1551 {
1552 #if 0
1553 #if WAITQUEUE_DEBUG
1554 if (!head || !new)
1555 WQ_BUG();
1556 CHECK_MAGIC_WQHEAD(head);
1557 CHECK_MAGIC(new->__magic);
1558 if (!head->task_list.next || !head->task_list.prev)
1559 WQ_BUG();
1560 #endif
1561 list_add(&new->task_list, &head->task_list);
1562 #endif
1563 }
1564
1565 /*
1566 * Used for wake-one threads:
1567 */
1568 static inline void __add_wait_queue_tail(wait_queue_head_t *head,
1569 wait_queue_t *new)
1570 {
1571 #if 0
1572 #if WAITQUEUE_DEBUG
1573 if (!head || !new)
1574 WQ_BUG();
1575 CHECK_MAGIC_WQHEAD(head);
1576 CHECK_MAGIC(new->__magic);
1577 if (!head->task_list.next || !head->task_list.prev)
1578 WQ_BUG();
1579 #endif
1580 list_add_tail(&new->task_list, &head->task_list);
1581 #endif
1582 }
1583
1584 static inline void __remove_wait_queue(wait_queue_head_t *head,
1585 wait_queue_t *old)
1586 {
1587 #if 0
1588 #if WAITQUEUE_DEBUG
1589 if (!old)
1590 WQ_BUG();
1591 CHECK_MAGIC(old->__magic);
1592 #endif
1593 list_del(&old->task_list);
1594 #endif
1595 }
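/*
 * Usage sketch (not part of the original header): initialising a wait queue
 * head and queueing one waiter on it. current_task is a hypothetical
 * struct task_struct pointer; the helpers are stubbed in this header.
 */
#if 0
static void example_waitqueue_usage(struct task_struct *current_task)
{
	wait_queue_head_t head;
	wait_queue_t entry;

	init_waitqueue_head(&head);
	init_waitqueue_entry(&entry, current_task);
	__add_wait_queue(&head, &entry);	/* register the waiter */
	if (waitqueue_active(&head))
		__remove_wait_queue(&head, &entry);
}
#endif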
1596
1597
1598
1599
1600 #endif /* wait */
1601
1602
1603 #endif
1604
1605
1606
1607
1608 #if 1 /* slab */
1609
typedef struct kmem_cache_s
1611 {
1612 int x;
1613 } kmem_cache_s;
1614
1615 typedef struct kmem_cache_s kmem_cache_t;
1616
1617 #if 0
1618 #include <linux/mm.h>
1619 #include <linux/cache.h>
1620 #endif
1621
1622 /* flags for kmem_cache_alloc() */
1623 #define SLAB_NOFS GFP_NOFS
1624 #define SLAB_NOIO GFP_NOIO
1625 #define SLAB_NOHIGHIO GFP_NOHIGHIO
1626 #define SLAB_ATOMIC GFP_ATOMIC
1627 #define SLAB_USER GFP_USER
1628 #define SLAB_KERNEL GFP_KERNEL
1629 #define SLAB_NFS GFP_NFS
1630 #define SLAB_DMA GFP_DMA
1631
1632 #define SLAB_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_HIGHIO|__GFP_FS)
1633 #define SLAB_NO_GROW 0x00001000UL /* don't grow a cache */
1634
1635 /* flags to pass to kmem_cache_create().
 * The first 3 are only valid when the allocator has been built
 * with SLAB_DEBUG_SUPPORT.
1638 */
#define	SLAB_DEBUG_FREE		0x00000100UL	/* Perform (expensive) checks on free */
1640 #define SLAB_DEBUG_INITIAL 0x00000200UL /* Call constructor (as verifier) */
1641 #define SLAB_RED_ZONE 0x00000400UL /* Red zone objs in a cache */
1642 #define SLAB_POISON 0x00000800UL /* Poison objects */
1643 #define SLAB_NO_REAP 0x00001000UL /* never reap from the cache */
#define	SLAB_HWCACHE_ALIGN	0x00002000UL	/* align objs on h/w cache lines */
1645 #define SLAB_CACHE_DMA 0x00004000UL /* use GFP_DMA memory */
1646 #define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* force alignment */
1647
1648 /* flags passed to a constructor func */
#define	SLAB_CTOR_CONSTRUCTOR	0x001UL		/* if not set, then destructor */
1650 #define SLAB_CTOR_ATOMIC 0x002UL /* tell constructor it can't sleep */
1651 #define SLAB_CTOR_VERIFY 0x004UL /* tell constructor it's a verify call */
1652
1653 /* prototypes */
1654 extern void kmem_cache_init(void);
1655 extern void kmem_cache_sizes_init(void);
1656
1657 extern kmem_cache_t *kmem_find_general_cachep(size_t, int gfpflags);
1658 extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
1659 void (*)(void *, kmem_cache_t *, unsigned long),
1660 void (*)(void *, kmem_cache_t *, unsigned long));
1661 extern int kmem_cache_destroy(kmem_cache_t *);
1662 extern int kmem_cache_shrink(kmem_cache_t *);
1663 extern void *kmem_cache_alloc(kmem_cache_t *, int);
1664 extern void kmem_cache_free(kmem_cache_t *, void *);
1665 extern unsigned int kmem_cache_size(kmem_cache_t *);
1666
1667 extern void *kmalloc(size_t, int);
1668 extern void kfree(const void *);
1669
1670 //extern int FASTCALL(kmem_cache_reap(int));
1671
1672 /* System wide caches */
1673 extern kmem_cache_t *vm_area_cachep;
1674 extern kmem_cache_t *mm_cachep;
1675 extern kmem_cache_t *names_cachep;
1676 extern kmem_cache_t *files_cachep;
1677 extern kmem_cache_t *filp_cachep;
1678 extern kmem_cache_t *dquot_cachep;
1679 extern kmem_cache_t *bh_cachep;
1680 extern kmem_cache_t *fs_cachep;
1681 extern kmem_cache_t *sigact_cachep;
1682
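/*
 * Usage sketch (not part of the original header): the typical
 * create/alloc/free/destroy life cycle of a slab cache using the prototypes
 * above. struct example_obj and the cache name are hypothetical.
 */
#if 0
struct example_obj {
	int value;
};

static void example_slab_usage(void)
{
	kmem_cache_t *cache;
	struct example_obj *obj;

	cache = kmem_cache_create("example_obj", sizeof(struct example_obj),
				  0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!cache)
		return;

	obj = kmem_cache_alloc(cache, SLAB_KERNEL);	/* may sleep */
	if (obj) {
		obj->value = 0;
		kmem_cache_free(cache, obj);
	}
	kmem_cache_destroy(cache);
}
#endif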
1683 #endif /* slab */
1684
1685
1686
1687 /*
1688 * Berkeley style UIO structures - Alan Cox 1994.
1689 *
1690 * This program is free software; you can redistribute it and/or
1691 * modify it under the terms of the GNU General Public License
1692 * as published by the Free Software Foundation; either version
1693 * 2 of the License, or (at your option) any later version.
1694 */
1695
1696
1697 /* A word of warning: Our uio structure will clash with the C library one (which is now obsolete). Remove the C
1698 library one from sys/uio.h if you have a very old library set */
1699
1700 struct iovec
1701 {
1702 void *iov_base; /* BSD uses caddr_t (1003.1g requires void *) */
1703 __kernel_size_t iov_len; /* Must be size_t (1003.1g) */
1704 };
1705
1706 /*
 * UIO_MAXIOV shall be at least 16, per 1003.1g (5.4.1.1)
1708 */
1709
1710 #define UIO_FASTIOV 8
1711 #define UIO_MAXIOV 1024
1712 #if 0
1713 #define UIO_MAXIOV 16 /* Maximum iovec's in one operation
1714 16 matches BSD */
1715 /* Beg pardon: BSD has 1024 --ANK */
1716 #endif
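/*
 * Usage sketch (not part of the original header): describing two separate
 * buffers with an iovec array for a scatter/gather style transfer. The
 * buffers and sizes are arbitrary illustrations.
 */
#if 0
static void example_iovec_usage(void)
{
	static char header[16];
	static char payload[128];
	struct iovec iov[2];

	iov[0].iov_base = header;
	iov[0].iov_len  = sizeof(header);
	iov[1].iov_base = payload;
	iov[1].iov_len  = sizeof(payload);
	/* iov[] (2 entries, well below UIO_MAXIOV) would then be handed to
	 * a readv/writev style routine. */
}
#endif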
1717
1718
1719
1720 /*
1721 * In Linux 2.4, static timers have been removed from the kernel.
1722 * Timers may be dynamically created and destroyed, and should be initialized
1723 * by a call to init_timer() upon creation.
1724 *
1725 * The "data" field enables use of a common timeout function for several
1726 * timeouts. You can use this field to distinguish between the different
1727 * invocations.
1728 */
1729 struct timer_list {
1730 struct list_head list;
1731 unsigned long expires;
1732 unsigned long data;
1733 void (*function)(unsigned long);
1734 };
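/*
 * Usage sketch (not part of the original header): filling in a timer_list as
 * the comment above describes. example_timeout and the 100-tick delay are
 * hypothetical; init_timer()/add_timer() themselves are not declared here.
 */
#if 0
static void example_timeout(unsigned long data)
{
	/* 'data' distinguishes which object timed out */
}

static void example_timer_usage(unsigned long current_jiffies)
{
	struct timer_list timer;

	timer.expires  = current_jiffies + 100;	/* fire 100 ticks from now */
	timer.data     = 0;			/* cookie passed to the handler */
	timer.function = example_timeout;
}
#endif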
1735
1736
1737
1738 struct timeval {
1739 unsigned long tv_sec;
1740 unsigned long tv_usec;
1741 // time_t tv_sec; /* seconds */
1742 // suseconds_t tv_usec; /* microseconds */
1743 };
1744
1745
1746
1747
1748
1749
1750
1751 #if 1 /* poll */
1752
1753 struct file;
1754
1755 struct poll_table_page;
1756
1757 typedef struct poll_table_struct {
1758 int error;
1759 struct poll_table_page * table;
1760 } poll_table;
1761
1762 extern void __pollwait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p);
1763
1764 static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
1765 {
1766 if (p && wait_address)
1767 __pollwait(filp, wait_address, p);
1768 }
1769
1770 static inline void poll_initwait(poll_table* pt)
1771 {
1772 pt->error = 0;
1773 pt->table = NULL;
1774 }
1775 extern void poll_freewait(poll_table* pt);
1776
1777
1778 /*
1779 * Scaleable version of the fd_set.
1780 */
1781
1782 typedef struct {
1783 unsigned long *in, *out, *ex;
1784 unsigned long *res_in, *res_out, *res_ex;
1785 } fd_set_bits;
1786
1787 /*
1788 * How many longwords for "nr" bits?
1789 */
1790 #define FDS_BITPERLONG (8*sizeof(long))
1791 #define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
1792 #define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long))
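/*
 * Quick illustration (not part of the original header) of the FDS_* helpers
 * above, assuming 32-bit longs.
 */
#if 0
static void example_fds_sizes(void)
{
	unsigned long longs = FDS_LONGS(1024);	/* (1024 + 31) / 32 == 32 */
	unsigned long bytes = FDS_BYTES(1024);	/* 32 * sizeof(long) == 128 */
}
#endif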
1793
1794 /*
1795 * We do a VERIFY_WRITE here even though we are only reading this time:
1796 * we'll write to it eventually..
1797 *
1798 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
1799 */
1800 static inline
1801 int get_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
1802 {
1803 #if 0
1804 nr = FDS_BYTES(nr);
1805 if (ufdset) {
1806 int error;
1807 error = verify_area(VERIFY_WRITE, ufdset, nr);
1808 if (!error && __copy_from_user(fdset, ufdset, nr))
1809 error = -EFAULT;
1810 return error;
1811 }
1812 memset(fdset, 0, nr);
1813 return 0;
1814 #else
1815 return 0;
1816 #endif
1817 }
1818
1819 static inline
1820 void set_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
1821 {
1822 #if 0
1823 if (ufdset)
1824 __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
1825 #endif
1826 }
1827
1828 static inline
1829 void zero_fd_set(unsigned long nr, unsigned long *fdset)
1830 {
1831 #if 0
1832 memset(fdset, 0, FDS_BYTES(nr));
1833 #endif
1834 }
1835
1836 extern int do_select(int n, fd_set_bits *fds, long *timeout);
1837
1838 #endif /* poll */
1839
1840
1841
1842 typedef struct
1843 {
1844 int x;
1845 } read_descriptor_t;
1846
1847
1848
1849
1850
1851 #if 1 /* poll */
1852
1853 /* These are specified by iBCS2 */
1854 #define POLLIN 0x0001
1855 #define POLLPRI 0x0002
1856 #define POLLOUT 0x0004
1857 #define POLLERR 0x0008
1858 #define POLLHUP 0x0010
1859 #define POLLNVAL 0x0020
1860
1861 /* The rest seem to be more-or-less nonstandard. Check them! */
1862 #define POLLRDNORM 0x0040
1863 #define POLLRDBAND 0x0080
1864 #define POLLWRNORM 0x0100
1865 #define POLLWRBAND 0x0200
1866 #define POLLMSG 0x0400
1867
1868 struct pollfd {
1869 int fd;
1870 short events;
1871 short revents;
1872 };
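/*
 * Usage sketch (not part of the original header): the shape of a poll
 * callback built on poll_wait() and the POLL* masks above. example_waitq and
 * example_data_ready are hypothetical driver state.
 */
#if 0
static wait_queue_head_t example_waitq;
static int example_data_ready;

static unsigned int example_poll(struct file *filp, poll_table *wait)
{
	poll_wait(filp, &example_waitq, wait);	/* register for wakeups */
	if (example_data_ready)
		return POLLIN | POLLRDNORM;	/* readable without blocking */
	return 0;
}
#endif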
1873
1874 #endif /* poll */
1875
1876 #endif /* _LINUX_TYPES_H */