reactos/drivers/filesystems/ext2_new/inc/linux/module.h
1 /*
2 * COPYRIGHT: See COPYRIGHT.TXT
3 * PROJECT: Ext2 File System Driver for WinNT/2K/XP
4 * FILE: module.h
5 * PURPOSE: Header file: nls structures & linux kernel ...
6 * PROGRAMMER: Matt Wu <mattwu@163.com>
7 * HOMEPAGE: http://ext2.yeah.net
8 * UPDATE HISTORY:
9 */
10
11 #ifndef _EXT2_MODULE_HEADER_
12 #define _EXT2_MODULE_HEADER_
13
14 /* INCLUDES *************************************************************/
15
16 #include <linux/types.h>
17 #include <linux/errno.h>
18 #include <linux/fs.h>
19 #include <linux/log2.h>
20 #include <linux/rbtree.h>
21
22 #if _WIN32_WINNT <= 0x500
23 #define _WIN2K_TARGET_ 1
24 #endif
25
26 /* STRUCTS ******************************************************/
27
28 #ifndef offsetof
29 # define offsetof(type, member) ((ULONG_PTR)&(((type *)0)->member))
30 #endif
31
32 #ifndef container_of
33 #define container_of(ptr, type, member) \
34 ((type *)((char *)(ptr) - offsetof(type, member)))
35 #endif
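/*
 * Illustrative sketch (not part of the original driver): container_of
 * recovers the enclosing structure from a pointer to one of its members.
 * The helper name below is hypothetical; buffer_head and b_rb_node are
 * defined further down in this header.
 */
#if 0
static struct buffer_head *bh_from_rb_node(struct rb_node *node)
{
    return container_of(node, struct buffer_head, b_rb_node);
}
#endif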
36
37 //
38 // Byte order swapping routines
39 //
40
41 /* use the runtime routine or compiler's implementation */
42 #if (defined(_M_IX86) && (_MSC_FULL_VER > 13009037)) || \
43 ((defined(_M_AMD64) || defined(_M_IA64)) && \
44 (_MSC_FULL_VER > 13009175))
45 #ifdef __cplusplus
46 extern "C" {
47 #endif
48 unsigned short __cdecl _byteswap_ushort(unsigned short);
49 unsigned long __cdecl _byteswap_ulong (unsigned long);
50 unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);
51 #ifdef __cplusplus
52 }
53 #endif
54 #pragma intrinsic(_byteswap_ushort)
55 #pragma intrinsic(_byteswap_ulong)
56 #pragma intrinsic(_byteswap_uint64)
57
58 #define RtlUshortByteSwap(_x) _byteswap_ushort((USHORT)(_x))
59 #define RtlUlongByteSwap(_x) _byteswap_ulong((_x))
60 #define RtlUlonglongByteSwap(_x) _byteswap_uint64((_x))
61
62 #elif !defined(__REACTOS__)
63
64 USHORT
65 FASTCALL
66 RtlUshortByteSwap(
67 IN USHORT Source
68 );
69
70 ULONG
71 FASTCALL
72 RtlUlongByteSwap(
73 IN ULONG Source
74 );
75
76 ULONGLONG
77 FASTCALL
78 RtlUlonglongByteSwap(
79 IN ULONGLONG Source
80 );
81 #endif
82
83 #define __swab16(x) RtlUshortByteSwap(x)
84 #define __swab32(x) RtlUlongByteSwap(x)
85 #define __swab64(x) RtlUlonglongByteSwap(x)
86
87 #define __constant_swab16 __swab16
88 #define __constant_swab32 __swab32
89 #define __constant_swab64 __swab64
90 #define __constant_htonl(x) __constant_swab32((x))
91 #define __constant_ntohl(x) __constant_swab32((x))
92 #define __constant_htons(x) __constant_swab16((x))
93 #define __constant_ntohs(x) __constant_swab16((x))
94 #define __constant_cpu_to_le64(x) ((__u64)(x))
95 #define __constant_le64_to_cpu(x) ((__u64)(x))
96 #define __constant_cpu_to_le32(x) ((__u32)(x))
97 #define __constant_le32_to_cpu(x) ((__u32)(x))
98 #define __constant_cpu_to_le16(x) ((__u16)(x))
99 #define __constant_le16_to_cpu(x) ((__u16)(x))
100 #define __constant_cpu_to_be64(x) __constant_swab64((x))
101 #define __constant_be64_to_cpu(x) __constant_swab64((x))
102 #define __constant_cpu_to_be32(x) __constant_swab32((x))
103 #define __constant_be32_to_cpu(x) __constant_swab32((x))
104 #define __constant_cpu_to_be16(x) __constant_swab16((x))
105 #define __constant_be16_to_cpu(x) __constant_swab16((x))
106 #define __cpu_to_le64(x) ((__u64)(x))
107 #define __le64_to_cpu(x) ((__u64)(x))
108 #define __cpu_to_le32(x) ((__u32)(x))
109 #define __le32_to_cpu(x) ((__u32)(x))
110 #define __cpu_to_le16(x) ((__u16)(x))
111 #define __le16_to_cpu(x) ((__u16)(x))
112 #define __cpu_to_be64(x) __swab64((x))
113 #define __be64_to_cpu(x) __swab64((x))
114 #define __cpu_to_be32(x) __swab32((x))
115 #define __be32_to_cpu(x) __swab32((x))
116 #define __cpu_to_be16(x) __swab16((x))
117 #define __be16_to_cpu(x) __swab16((x))
118 #define __cpu_to_le64p(x) (*(__u64*)(x))
119 #define __le64_to_cpup(x) (*(__u64*)(x))
120 #define __cpu_to_le32p(x) (*(__u32*)(x))
121 #define __le32_to_cpup(x) (*(__u32*)(x))
122 #define __cpu_to_le16p(x) (*(__u16*)(x))
123 #define __le16_to_cpup(x) (*(__u16*)(x))
124 #define __cpu_to_be64p(x) __swab64p((x))
125 #define __be64_to_cpup(x) __swab64p((x))
126 #define __cpu_to_be32p(x) __swab32p((x))
127 #define __be32_to_cpup(x) __swab32p((x))
128 #define __cpu_to_be16p(x) __swab16p((x))
129 #define __be16_to_cpup(x) __swab16p((x))
130 #define __cpu_to_le64s(x) ((__s64)(x))
131 #define __le64_to_cpus(x) ((__s64)(x))
132 #define __cpu_to_le32s(x) ((__s32)(x))
133 #define __le32_to_cpus(x) ((__s32)(x))
134 #define __cpu_to_le16s(x) ((__s16)(x))
135 #define __le16_to_cpus(x) ((__s16)(x))
136 #define __cpu_to_be64s(x) __swab64s((x))
137 #define __be64_to_cpus(x) __swab64s((x))
138 #define __cpu_to_be32s(x) __swab32s((x))
139 #define __be32_to_cpus(x) __swab32s((x))
140 #define __cpu_to_be16s(x) __swab16s((x))
141 #define __be16_to_cpus(x) __swab16s((x))
142
143 #ifndef cpu_to_le64
144 #define cpu_to_le64 __cpu_to_le64
145 #define le64_to_cpu __le64_to_cpu
146 #define cpu_to_le32 __cpu_to_le32
147 #define le32_to_cpu __le32_to_cpu
148 #define cpu_to_le16 __cpu_to_le16
149 #define le16_to_cpu __le16_to_cpu
150 #endif
151
152 #define cpu_to_be64 __cpu_to_be64
153 #define be64_to_cpu __be64_to_cpu
154 #define cpu_to_be32 __cpu_to_be32
155 #define be32_to_cpu __be32_to_cpu
156 #define cpu_to_be16 __cpu_to_be16
157 #define be16_to_cpu __be16_to_cpu
158 #define cpu_to_le64p __cpu_to_le64p
159 #define le64_to_cpup __le64_to_cpup
160 #define cpu_to_le32p __cpu_to_le32p
161 #define le32_to_cpup __le32_to_cpup
162 #define cpu_to_le16p __cpu_to_le16p
163 #define le16_to_cpup __le16_to_cpup
164 #define cpu_to_be64p __cpu_to_be64p
165 #define be64_to_cpup __be64_to_cpup
166 #define cpu_to_be32p __cpu_to_be32p
167 #define be32_to_cpup __be32_to_cpup
168 #define cpu_to_be16p __cpu_to_be16p
169 #define be16_to_cpup __be16_to_cpup
170 #define cpu_to_le64s __cpu_to_le64s
171 #define le64_to_cpus __le64_to_cpus
172 #define cpu_to_le32s __cpu_to_le32s
173 #define le32_to_cpus __le32_to_cpus
174 #define cpu_to_le16s __cpu_to_le16s
175 #define le16_to_cpus __le16_to_cpus
176 #define cpu_to_be64s __cpu_to_be64s
177 #define be64_to_cpus __be64_to_cpus
178 #define cpu_to_be32s __cpu_to_be32s
179 #define be32_to_cpus __be32_to_cpus
180 #define cpu_to_be16s __cpu_to_be16s
181 #define be16_to_cpus __be16_to_cpus
182
183
184 static inline void le16_add_cpu(__le16 *var, u16 val)
185 {
186 *var = cpu_to_le16(le16_to_cpu(*var) + val);
187 }
188
189 static inline void le32_add_cpu(__le32 *var, u32 val)
190 {
191 *var = cpu_to_le32(le32_to_cpu(*var) + val);
192 }
193
194 static inline void le64_add_cpu(__le64 *var, u64 val)
195 {
196 *var = cpu_to_le64(le64_to_cpu(*var) + val);
197 }
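/*
 * Illustrative sketch: on this little-endian target the cpu_to_le*
 * conversions are plain casts while the be* variants byte-swap, so an
 * on-disk little-endian counter (the values below are hypothetical) can
 * be handled like this:
 */
#if 0
__le32 disk_count = cpu_to_le32(16);           /* identity cast on x86 */
__u32  host_count = le32_to_cpu(disk_count);   /* == 16 */
le32_add_cpu(&disk_count, 1);                  /* stored value is now 17 */
#endif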
198
199 //
200 // Network to host byte swap functions
201 //
202
203 #define ntohl(x) ( ( ( ( x ) & 0x000000ff ) << 24 ) | \
204 ( ( ( x ) & 0x0000ff00 ) << 8 ) | \
205 ( ( ( x ) & 0x00ff0000 ) >> 8 ) | \
206 ( ( ( x ) & 0xff000000 ) >> 24 ) )
207
208 #define ntohs(x) ( ( ( ( x ) & 0xff00 ) >> 8 ) | \
209 ( ( ( x ) & 0x00ff ) << 8 ) )
210
211
212 #define htonl(x) ntohl(x)
213 #define htons(x) ntohs(x)
214
215
216 //
217 // kernel printk flags
218 //
219
220 #define KERN_EMERG "<0>" /* system is unusable */
221 #define KERN_ALERT "<1>" /* action must be taken immediately */
222 #define KERN_CRIT "<2>" /* critical conditions */
223 #define KERN_ERR "<3>" /* error conditions */
224 #define KERN_WARNING "<4>" /* warning conditions */
225 #define KERN_NOTICE "<5>" /* normal but significant condition */
226 #define KERN_INFO "<6>" /* informational */
227 #define KERN_DEBUG "<7>" /* debug-level messages */
228
229 #define printk DbgPrint
230
231 /*
232 * error pointer
233 */
234 #define MAX_ERRNO 4095
235 #define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)
236
237 static inline void *ERR_PTR(long error)
238 {
239 return (void *)(long_ptr_t) error;
240 }
241
242 static inline long PTR_ERR(const void *ptr)
243 {
244 return (long)(long_ptr_t) ptr;
245 }
246
247 static inline long IS_ERR(const void *ptr)
248 {
249 return IS_ERR_VALUE((unsigned long)(long_ptr_t)ptr);
250 }
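/*
 * Illustrative sketch of the error-pointer convention: a single pointer
 * return carries either a valid object or a negative errno. The helper
 * below is hypothetical; kmalloc/GFP_NOFS are defined later in this file
 * and ENOMEM comes from linux/errno.h.
 */
#if 0
static void *alloc_or_err(size_t size)
{
    void *p = kmalloc(size, GFP_NOFS);
    if (!p)
        return ERR_PTR(-ENOMEM);
    return p;
}
/* caller: obj = alloc_or_err(64);
 *         if (IS_ERR(obj)) return PTR_ERR(obj);   -- yields -ENOMEM */
#endif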
251
252
253 #define BUG_ON(c) assert(!(c))
254
255 #define WARN_ON(c) BUG_ON(c)
256
257 //
258 // Linux module definitions
259 //
260
261 #define likely
262 #define unlikely
263
264 #define __init
265 #define __exit
266
267 #define THIS_MODULE NULL
268 #define MODULE_LICENSE(x)
269 #define MODULE_ALIAS_NLS(x)
270 #define EXPORT_SYMBOL(x)
271
272
273 #define try_module_get(x) (TRUE)
274 #define module_put(x)
275
276 #define module_init(X) int __init module_##X() {return X();}
277 #define module_exit(X) void __exit module_##X() {X();}
278
279 #define DECLARE_INIT(X) int __init module_##X(void)
280 #define DECLARE_EXIT(X) void __exit module_##X(void)
281
282 #define LOAD_MODULE(X) do { \
283 rc = module_##X(); \
284 } while(0)
285
286 #define UNLOAD_MODULE(X) do { \
287 module_##X(); \
288 } while(0)
289
290 #define LOAD_NLS LOAD_MODULE
291 #define UNLOAD_NLS UNLOAD_MODULE
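/*
 * Illustrative sketch of how the module macros are meant to be used for
 * an NLS codepage. The init/exit names are hypothetical;
 * register_nls/unregister_nls are declared near the end of this header.
 */
#if 0
DECLARE_INIT(init_nls_cp437);      /* int  module_init_nls_cp437(void); */
DECLARE_EXIT(exit_nls_cp437);      /* void module_exit_nls_cp437(void); */

/* in driver start-up code, with a local 'int rc': */
LOAD_NLS(init_nls_cp437);          /* rc = module_init_nls_cp437();     */
UNLOAD_NLS(exit_nls_cp437);        /* module_exit_nls_cp437();          */
#endif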
292
293 //
294 // spinlocks .....
295 //
296
297 typedef struct _spinlock_t {
298
299 KSPIN_LOCK lock;
300 KIRQL irql;
301 } spinlock_t;
302
303 #define spin_lock_init(sl) KeInitializeSpinLock(&((sl)->lock))
304 #define spin_lock(sl) KeAcquireSpinLock(&((sl)->lock), &((sl)->irql))
305 #define spin_unlock(sl) KeReleaseSpinLock(&((sl)->lock), (sl)->irql)
306 #define spin_lock_irqsave(sl, flags) do {spin_lock(sl); flags=(sl)->irql;} while(0)
307 #define spin_unlock_irqrestore(sl, flags) do {ASSERT((KIRQL)(flags)==(sl)->irql); spin_unlock(sl);} while(0)
308
309 #define assert_spin_locked(x) do {} while(0)
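/*
 * Illustrative sketch (hypothetical variables): the wrappers above map
 * straight onto a KSPIN_LOCK, and 'flags' simply carries the previous
 * IRQL that KeAcquireSpinLock recorded.
 */
#if 0
spinlock_t    lk;
unsigned long flags;

spin_lock_init(&lk);
spin_lock_irqsave(&lk, flags);      /* raises to DISPATCH_LEVEL   */
/* ... touch data shared with another thread or DPC ... */
spin_unlock_irqrestore(&lk, flags); /* drops back to saved IRQL   */
#endif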
310
311 /*
312 * Does a critical section need to be broken due to another
313 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
314 * but a general need for low latency)
315 */
316 static inline int spin_needbreak(spinlock_t *lock)
317 {
318 #ifdef CONFIG_PREEMPT
319 return spin_is_contended(lock);
320 #else
321 return 0;
322 #endif
323 }
324
325 //
326 // bit operations
327 //
328
329 /**
330  * set_bit - Atomically set a bit in memory
331  * @nr: the bit to set
332  * @addr: the address to start counting from
333  *
334  * Unlike the Linux __set_bit(), this port's set_bit() is built on
335  * InterlockedOr, so the update itself is atomic and the previous
336  * value of the bit is returned.
337  */
338 static inline int set_bit(int nr, volatile unsigned long *addr)
339 {
340 addr += (nr >> ORDER_PER_LONG);
341 nr &= (BITS_PER_LONG - 1);
342
343 return !!(InterlockedOr(addr, (1 << nr)) & (1 << nr));
344 }
345
346
347 /**
348  * clear_bit - Clears a bit in memory
349  * @nr: Bit to clear
350  * @addr: Address to start counting from
351  *
352  * clear_bit() is atomic and may not be reordered. In this port it is
353  * built on InterlockedAnd and, unlike the Linux original, it returns
354  * the previous value of the bit (which is what test_and_clear_bit()
355  * below relies on).
356  */
357 static inline int clear_bit(int nr, volatile unsigned long *addr)
358 {
359 addr += (nr >> ORDER_PER_LONG);
360 nr &= (BITS_PER_LONG - 1);
361
362 return !!(InterlockedAnd(addr, ~(1 << nr)) & (1 << nr));
363 }
364
365 /**
366 * test_and_clear_bit - Clear a bit and return its old value
367 * @nr: Bit to clear
368 * @addr: Address to count from
369 *
370 * This operation is atomic and cannot be reordered.
371 * It also implies a memory barrier.
372 */
373 static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
374 {
375 return clear_bit(nr, addr);
376 }
377
378 /*
379  * test_bit - determine whether a bit is set (plain, non-atomic read)
380  */
381 static int test_bit(int nr, volatile const unsigned long *addr)
382 {
383 return !!((1 << (nr & (BITS_PER_LONG - 1))) &
384 (addr[nr >> ORDER_PER_LONG]));
385 }
386
387 /**
388 * test_and_set_bit - Set a bit and return its old value
389 * @nr: Bit to set
390 * @addr: Address to count from
391 *
392 * This operation is atomic and cannot be reordered.
393 * It also implies a memory barrier.
394 */
395 static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
396 {
397 return set_bit(nr, addr);
398 }
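/*
 * Illustrative sketch of how the bit helpers address memory: the word
 * index comes from nr >> ORDER_PER_LONG and the bit index from
 * nr & (BITS_PER_LONG - 1). With 32-bit longs, bit 37 lives in word 1,
 * bit 5. The array below is hypothetical.
 */
#if 0
unsigned long map[2] = { 0, 0 };

set_bit(37, map);                      /* map[1] |= 1 << 5, atomically     */
if (test_bit(37, map)) { /* true */ }
test_and_clear_bit(37, map);           /* returns the old value, clears it */
#endif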
399
400 //
401 // list definition ...
402 //
403
404 #include <linux/list.h>
405
406
407 /*********************************************
408 * linux scheduler related structures *
409 *********************************************/
410
411 //
412 // task structure
413 //
414
415 #define TASK_INTERRUPTIBLE 1
416 #define TASK_UNINTERRUPTIBLE 2
417
418 struct task_struct {
419 pid_t pid;
420 pid_t tid;
421 char comm[32];
422 void * journal_info;
423 };
424
425 extern struct task_struct *current;
426
427 //
428 // scheduler routines
429 //
430
431
432 static inline int cond_resched() {
433 return FALSE;
434 }
435 static inline int need_resched() {
436 return FALSE;
437 }
438
439 #define yield() do {} while(0)
440 #define might_sleep() do {} while(0)
441
442 //
443 // mutex
444 //
445
446 typedef struct mutex {
447 FAST_MUTEX lock;
448 } mutex_t;
449
450 #define mutex_init(x) ExInitializeFastMutex(&((x)->lock))
451 #define mutex_lock(x) ExAcquireFastMutex(&((x)->lock))
452 #define mutex_unlock(x) ExReleaseFastMutex(&((x)->lock))
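/*
 * Illustrative sketch (hypothetical variable): a Linux-style mutex backed
 * by a FAST_MUTEX. Note that ExAcquireFastMutex raises the caller to
 * APC_LEVEL for the duration of the critical section.
 */
#if 0
struct mutex m;

mutex_init(&m);
mutex_lock(&m);
/* ... critical section ... */
mutex_unlock(&m);
#endif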
453
454
455 //
456 // wait_queue
457 //
458
459
460 typedef PVOID wait_queue_t;
461
462 #define WQ_FLAG_EXCLUSIVE 0x01
463 #define WQ_FLAG_AUTO_REMOVAL 0x02
464
465 struct __wait_queue {
466 unsigned int flags;
467 void * private;
468 KEVENT event;
469 struct list_head task_list;
470 };
471
472
473 #define DEFINE_WAIT(name) \
474 wait_queue_t name = (PVOID)wait_queue_create();
475
476 /*
477 struct wait_bit_key {
478 void *flags;
479 int bit_nr;
480 };
481
482 struct wait_bit_queue {
483 struct wait_bit_key key;
484 wait_queue_t wait;
485 };
486 */
487
488 struct __wait_queue_head {
489 spinlock_t lock;
490 struct list_head task_list;
491 };
492 typedef struct __wait_queue_head wait_queue_head_t;
493
494 #define is_sync_wait(wait) (TRUE)
495 #define set_current_state(state) do {} while(0)
496 #define __set_current_state(state) do {} while(0)
497
498 void init_waitqueue_head(wait_queue_head_t *q);
499 int wake_up(wait_queue_head_t *queue);
500
501
502 /*
503 * Waitqueues which are removed from the waitqueue_head at wakeup time
504 */
505 struct __wait_queue * wait_queue_create();
506 void wait_queue_destroy(struct __wait_queue *);
507
508 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
509 void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
510 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
511 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
512 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
513
514
515 //
516 // timer structure
517 //
518
519 struct timer_list {
520 struct list_head entry;
521 unsigned long expires;
522
523 void (*function)(unsigned long);
524 unsigned long data;
525
526 #ifdef CONFIG_TIMER_STATS
527 void *start_site;
528 char start_comm[16];
529 int start_pid;
530 #endif
531 };
532
533
534 typedef struct kmem_cache kmem_cache_t;
535
536 struct block_device {
537
538 unsigned long bd_flags; /* flags */
539 atomic_t bd_count; /* reference count */
540 PDEVICE_OBJECT bd_dev; /* device object */
541 ANSI_STRING bd_name; /* name in ansi string */
542 DISK_GEOMETRY bd_geo; /* disk geometry */
543 PARTITION_INFORMATION bd_part; /* partition information */
544 void * bd_priv; /* pointer to EXT2_VCB,
545 NULL if it's a journal dev */
546 PFILE_OBJECT bd_volume; /* streaming object file */
547 LARGE_MCB bd_extents; /* dirty extents */
548
549 spinlock_t bd_bh_lock; /**/
550 kmem_cache_t * bd_bh_cache; /* memory cache for buffer_head */
551 struct rb_root bd_bh_root; /* buffer_head red-black tree root */
552 };
553
554 //
555 // page information
556 //
557
558 // We handle pages as follows: when allocating a page structure we allocate
559 // an extra PAGE_SIZE bytes at the end of the structure - that is where all
560 // the buffer heads will live,
561 // so -> page_address(page) = page + sizeof(page)
562 #define page_address(_page) ((char*)_page + sizeof(struct page))
563
564 typedef struct page {
565 void *addr;
566 void *mapping;
567 void *private;
568 atomic_t count;
569 __u32 index;
570 __u32 flags;
571 } mem_map_t;
572
573 #define get_page(p) atomic_inc(&(p)->count)
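/*
 * Illustrative sketch of the layout described above: one allocation holds
 * the struct page header immediately followed by PAGE_SIZE bytes of data,
 * so page_address() simply skips over the header. The variable names are
 * hypothetical; kmalloc is defined later in this file.
 */
#if 0
struct page *pg   = kmalloc(sizeof(struct page) + PAGE_SIZE, GFP_NOFS);
char        *data = page_address(pg);  /* == (char *)pg + sizeof(struct page) */
#endif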
574
575 #define PG_locked 0 /* Page is locked. Don't touch. */
576 #define PG_error 1
577 #define PG_referenced 2
578 #define PG_uptodate 3
579 #define PG_dirty 4
580 #define PG_unused 5
581 #define PG_lru 6
582 #define PG_active 7
583 #define PG_slab 8
584 #define PG_skip 10
585 #define PG_highmem 11
586 #define PG_checked 12 /* kill me in 2.5.<early>. */
587 #define PG_arch_1 13
588 #define PG_reserved 14
589 #define PG_launder 15 /* written out by VM pressure.. */
590 #define PG_fs_1 16 /* Filesystem specific */
591
592 #ifndef arch_set_page_uptodate
593 #define arch_set_page_uptodate(page)
594 #endif
595
596 /* Make it prettier to test the above... */
597 #define UnlockPage(page) unlock_page(page)
598 #define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
599 #define SetPageUptodate(page) \
600 do { \
601 arch_set_page_uptodate(page); \
602 set_bit(PG_uptodate, &(page)->flags); \
603 } while (0)
604 #define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
605 #define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
606 #define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags)
607 #define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->flags)
608 #define PageLocked(page) test_bit(PG_locked, &(page)->flags)
609 #define LockPage(page) set_bit(PG_locked, &(page)->flags)
610 #define TryLockPage(page) test_and_set_bit(PG_locked, &(page)->flags)
611 #define PageChecked(page) test_bit(PG_checked, &(page)->flags)
612 #define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
613 #define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
614 #define PageLaunder(page) test_bit(PG_launder, &(page)->flags)
615 #define SetPageLaunder(page) set_bit(PG_launder, &(page)->flags)
616 #define ClearPageLaunder(page) clear_bit(PG_launder, &(page)->flags)
617 #define ClearPageArch1(page) clear_bit(PG_arch_1, &(page)->flags)
618
619 #define PageError(page) test_bit(PG_error, &(page)->flags)
620 #define SetPageError(page) set_bit(PG_error, &(page)->flags)
621 #define ClearPageError(page) clear_bit(PG_error, &(page)->flags)
622 #define PageReferenced(page) test_bit(PG_referenced, &(page)->flags)
623 #define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
624 #define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
625
626 #define PageActive(page) test_bit(PG_active, &(page)->flags)
627 #define SetPageActive(page) set_bit(PG_active, &(page)->flags)
628 #define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
629
630
631 extern unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order);
632 #define __get_free_page(gfp_mask) \
633 __get_free_pages((gfp_mask),0)
634
635 extern void __free_pages(struct page *page, unsigned int order);
636 extern void free_pages(unsigned long addr, unsigned int order);
637
638 #define __free_page(page) __free_pages((page), 0)
639 #define free_page(addr) free_pages((addr),0)
640
641 #ifndef __REACTOS__
642 extern void truncate_inode_pages(struct address_space *, loff_t);
643 #endif
644
645 #define __GFP_HIGHMEM 0x02
646
647 #define __GFP_WAIT 0x10 /* Can wait and reschedule? */
648 #define __GFP_HIGH 0x20 /* Should access emergency pools? */
649 #define __GFP_IO 0x40 /* Can start low memory physical IO? */
650 #define __GFP_HIGHIO 0x80 /* Can start high mem physical IO? */
651 #define __GFP_FS 0x100 /* Can call down to low-level FS? */
652
653 #define GFP_ATOMIC (__GFP_HIGH)
654 #define GFP_USER ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
655 #define GFP_HIGHUSER ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
656 #define GFP_KERNEL (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
657 #define GFP_NOFS 0
658 #define __GFP_NOFAIL 0
659
660
661 #define KM_USER0 0
662
663 //
664 // buffer head definitions
665 //
666
667 enum bh_state_bits {
668 BH_Uptodate, /* Contains valid data */
669 BH_Dirty, /* Is dirty */
670 BH_Verified, /* Is verified */
671 BH_Lock, /* Is locked */
672 BH_Req, /* Has been submitted for I/O */
673 BH_Uptodate_Lock, /* Used by the first bh in a page, to serialise
674 * IO completion of other buffers in the page
675 */
676
677 BH_Mapped, /* Has a disk mapping */
678 BH_New, /* Disk mapping was newly created by get_block */
679 BH_Async_Read, /* Is under end_buffer_async_read I/O */
680 BH_Async_Write, /* Is under end_buffer_async_write I/O */
681 BH_Delay, /* Buffer is not yet allocated on disk */
682 BH_Boundary, /* Block is followed by a discontiguity */
683 BH_Write_EIO, /* I/O error on write */
684 BH_Ordered, /* ordered write */
685 BH_Eopnotsupp, /* operation not supported (barrier) */
686 BH_Unwritten, /* Buffer is allocated on disk but not written */
687
688 BH_PrivateStart, /* not a state bit, but the first bit available
689 * for private allocation by other entities
690 */
691 };
692
693 #define PAGE_CACHE_SIZE (PAGE_SIZE)
694 #define PAGE_CACHE_SHIFT (12)
695 #define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
696
697 #ifdef __REACTOS__
698 struct buffer_head;
699 #endif
700 typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
701
702 /*
703 * Historically, a buffer_head was used to map a single block
704 * within a page, and of course as the unit of I/O through the
705 * filesystem and block layers. Nowadays the basic I/O unit
706 * is the bio, and buffer_heads are used for extracting block
707 * mappings (via a get_block_t call), for tracking state within
708 * a page (via a page_mapping) and for wrapping bio submission
709 * for backward compatibility reasons (e.g. submit_bh).
710 */
711 struct buffer_head {
712 unsigned long b_state; /* buffer state bitmap (see above) */
713 struct page *b_page; /* the page this bh is mapped to */
714 PMDL b_mdl; /* MDL of the locked buffer */
715 void *b_bcb; /* BCB of the buffer */
716
717 // kdev_t b_dev; /* device (B_FREE = free) */
718 struct block_device *b_bdev; /* block device object */
719
720 blkcnt_t b_blocknr; /* start block number */
721 size_t b_size; /* size of mapping */
722 char * b_data; /* pointer to data within the page */
723 bh_end_io_t *b_end_io; /* I/O completion */
724 void *b_private; /* reserved for b_end_io */
725 // struct list_head b_assoc_buffers; /* associated with another mapping */
726 // struct address_space *b_assoc_map; /* mapping this buffer is associated with */
727 atomic_t b_count; /* users using this buffer_head */
728 struct rb_node b_rb_node; /* Red-black tree node entry */
729 };
730
731
732 /*
733 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
734 * and buffer_foo() functions.
735 */
736 #define BUFFER_FNS(bit, name) \
737 static inline void set_buffer_##name(struct buffer_head *bh) \
738 { \
739 set_bit(BH_##bit, &(bh)->b_state); \
740 } \
741 static inline void clear_buffer_##name(struct buffer_head *bh) \
742 { \
743 clear_bit(BH_##bit, &(bh)->b_state); \
744 } \
745 static inline int buffer_##name(const struct buffer_head *bh) \
746 { \
747 return test_bit(BH_##bit, &(bh)->b_state); \
748 }
749
750 /*
751 * test_set_buffer_foo() and test_clear_buffer_foo()
752 */
753 #define TAS_BUFFER_FNS(bit, name) \
754 static inline int test_set_buffer_##name(struct buffer_head *bh) \
755 { \
756 return test_and_set_bit(BH_##bit, &(bh)->b_state); \
757 } \
758 static inline int test_clear_buffer_##name(struct buffer_head *bh) \
759 { \
760 return test_and_clear_bit(BH_##bit, &(bh)->b_state); \
761 } \
762
763 /*
764 * Emit the buffer bitops functions. Note that there are also functions
765 * of the form "mark_buffer_foo()". These are higher-level functions which
766 * do something in addition to setting a b_state bit.
767 */
768 BUFFER_FNS(Uptodate, uptodate)
769 BUFFER_FNS(Dirty, dirty)
770 TAS_BUFFER_FNS(Dirty, dirty)
771 BUFFER_FNS(Verified, verified)
772 BUFFER_FNS(Lock, locked)
773 TAS_BUFFER_FNS(Lock, locked)
774 BUFFER_FNS(Req, req)
775 TAS_BUFFER_FNS(Req, req)
776 BUFFER_FNS(Mapped, mapped)
777 BUFFER_FNS(New, new)
778 BUFFER_FNS(Async_Read, async_read)
779 BUFFER_FNS(Async_Write, async_write)
780 BUFFER_FNS(Delay, delay)
781 BUFFER_FNS(Boundary, boundary)
782 BUFFER_FNS(Write_EIO, write_io_error)
783 BUFFER_FNS(Ordered, ordered)
784 BUFFER_FNS(Eopnotsupp, eopnotsupp)
785 BUFFER_FNS(Unwritten, unwritten)
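/*
 * For reference, BUFFER_FNS(Uptodate, uptodate) above expands to roughly
 * the following three inlines (the other invocations follow the same
 * pattern, and TAS_BUFFER_FNS adds the test_set/test_clear pair):
 */
#if 0
static inline void set_buffer_uptodate(struct buffer_head *bh)
{ set_bit(BH_Uptodate, &(bh)->b_state); }
static inline void clear_buffer_uptodate(struct buffer_head *bh)
{ clear_bit(BH_Uptodate, &(bh)->b_state); }
static inline int buffer_uptodate(const struct buffer_head *bh)
{ return test_bit(BH_Uptodate, &(bh)->b_state); }
#endif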
786
787 #define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
788 #define touch_buffer(bh) mark_page_accessed(bh->b_page)
789
790 /* If we *know* page->private refers to buffer_heads */
791
792 #define page_buffers(page) \
793 ( \
794 BUG_ON(!PagePrivate(page)), \
795 ((struct buffer_head *)page_private(page)) \
796 )
797 #define page_has_buffers(page) PagePrivate(page)
798
799
800 /*
801 * Declarations
802 */
803
804 void mark_buffer_dirty(struct buffer_head *bh);
805 void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
806 void set_bh_page(struct buffer_head *bh,
807 struct page *page, unsigned long offset);
808 int try_to_free_buffers(struct page *);
809 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
810 int retry);
811 void create_empty_buffers(struct page *, unsigned long,
812 unsigned long b_state);
813
814 /* Things to do with buffers at mapping->private_list */
815 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
816 int inode_has_buffers(struct inode *);
817 void invalidate_inode_buffers(struct inode *);
818 int remove_inode_buffers(struct inode *inode);
819 #ifndef __REACTOS__
820 int sync_mapping_buffers(struct address_space *mapping);
821 #endif
822 void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
823
824 void mark_buffer_async_write(struct buffer_head *bh);
825 void invalidate_bdev(struct block_device *);
826 int sync_blockdev(struct block_device *bdev);
827 void __wait_on_buffer(struct buffer_head *);
828 wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
829 int fsync_bdev(struct block_device *);
830 struct super_block *freeze_bdev(struct block_device *);
831 void thaw_bdev(struct block_device *, struct super_block *);
832 int fsync_super(struct super_block *);
833 int fsync_no_super(struct block_device *);
834 struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
835 unsigned long size);
836 struct buffer_head *get_block_bh(struct block_device *bdev, sector_t block,
837 unsigned long size, int zero);
838 struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
839 unsigned long size);
840 void __brelse(struct buffer_head *);
841 void __bforget(struct buffer_head *);
842 void __breadahead(struct block_device *, sector_t block, unsigned int size);
843 struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
844 void invalidate_bh_lrus(void);
845 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
846 void free_buffer_head(struct buffer_head * bh);
847 void unlock_buffer(struct buffer_head *bh);
848 void __lock_buffer(struct buffer_head *bh);
849 void ll_rw_block(int, int, struct buffer_head * bh[]);
850 int sync_dirty_buffer(struct buffer_head *bh);
851 int submit_bh(int, struct buffer_head *);
852 void write_boundary_block(struct block_device *bdev,
853 sector_t bblock, unsigned blocksize);
854 int bh_uptodate_or_lock(struct buffer_head *bh);
855 int bh_submit_read(struct buffer_head *bh);
856 /* They are separately managed */
857 struct buffer_head *extents_bread(struct super_block *sb, sector_t block);
858 struct buffer_head *extents_bwrite(struct super_block *sb, sector_t block);
859 void extents_mark_buffer_dirty(struct buffer_head *bh);
860 void extents_brelse(struct buffer_head *bh);
861
862 extern int buffer_heads_over_limit;
863
864 /*
865 * Generic address_space_operations implementations for buffer_head-backed
866 * address_spaces.
867 */
868
869 #if 0
870
871 int block_write_full_page(struct page *page, get_block_t *get_block,
872 struct writeback_control *wbc);
873 int block_read_full_page(struct page*, get_block_t*);
874 int block_write_begin(struct file *, struct address_space *,
875 loff_t, unsigned, unsigned,
876 struct page **, void **, get_block_t*);
877 int block_write_end(struct file *, struct address_space *,
878 loff_t, unsigned, unsigned,
879 struct page *, void *);
880 int generic_write_end(struct file *, struct address_space *,
881 loff_t, unsigned, unsigned,
882 struct page *, void *);
883
884 int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
885 int cont_write_begin(struct file *, struct address_space *, loff_t,
886 unsigned, unsigned, struct page **, void **,
887 get_block_t *, loff_t *);
888 int block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
889 get_block_t get_block);
890 sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
891 int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
892 int block_truncate_page(struct address_space *, loff_t, get_block_t *);
893 int file_fsync(struct file *, struct dentry *, int);
894 int nobh_write_begin(struct file *, struct address_space *,
895 loff_t, unsigned, unsigned,
896 struct page **, void **, get_block_t*);
897 int nobh_write_end(struct file *, struct address_space *,
898 loff_t, unsigned, unsigned,
899 struct page *, void *);
900 int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
901 int nobh_writepage(struct page *page, get_block_t *get_block,
902 struct writeback_control *wbc);
903 int generic_cont_expand_simple(struct inode *inode, loff_t size);
904 #endif
905
906 void block_invalidatepage(struct page *page, unsigned long offset);
907 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
908 int block_commit_write(struct page *page, unsigned from, unsigned to);
909 void block_sync_page(struct page *);
910
911 void buffer_init(void);
912
913 /*
914 * inline definitions
915 */
916 #if 0
917 static inline void attach_page_buffers(struct page *page,
918 struct buffer_head *head)
919 {
920 page_cache_get(page);
921 SetPagePrivate(page);
922 set_page_private(page, (unsigned long)head);
923 }
924 #endif
925
926 static inline void get_bh(struct buffer_head *bh)
927 {
928 atomic_inc(&bh->b_count);
929 }
930
931 static inline void put_bh(struct buffer_head *bh)
932 {
933 if (bh)
934 __brelse(bh);
935 }
936
937 static inline void brelse(struct buffer_head *bh)
938 {
939 if (bh)
940 __brelse(bh);
941 }
942
943 static inline void bforget(struct buffer_head *bh)
944 {
945 if (bh)
946 __bforget(bh);
947 }
948
949 static inline struct buffer_head *
950 sb_getblk(struct super_block *sb, sector_t block)
951 {
952 return get_block_bh(sb->s_bdev, block, sb->s_blocksize, 0);
953 }
954
955 static inline struct buffer_head *
956 sb_getblk_zero(struct super_block *sb, sector_t block)
957 {
958 return get_block_bh(sb->s_bdev, block, sb->s_blocksize, 1);
959 }
960
961 static inline struct buffer_head *
962 sb_bread(struct super_block *sb, sector_t block)
963 {
964 struct buffer_head *bh = __getblk(sb->s_bdev, block, sb->s_blocksize);
965 if (!bh)
966 return NULL;
967 if (!buffer_uptodate(bh) && (bh_submit_read(bh) < 0)) {
968 brelse(bh);
969 return NULL;
970 }
971 return bh;
972 }
973
974 static inline struct buffer_head *
975 sb_find_get_block(struct super_block *sb, sector_t block)
976 {
977 return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
978 }
979
980 static inline void
981 map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
982 {
983 set_buffer_mapped(bh);
984 bh->b_bdev = sb->s_bdev;
985 bh->b_blocknr = block;
986 bh->b_size = sb->s_blocksize;
987 }
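/*
 * Illustrative sketch of a typical metadata read with the helpers above
 * ('sb' and 'block_nr' are hypothetical): sb_bread returns an up-to-date
 * buffer or NULL, and brelse drops the reference when done.
 */
#if 0
struct buffer_head *bh = sb_bread(sb, block_nr);
if (!bh)
    return -EIO;
/* ... inspect bh->b_data, sb->s_blocksize bytes ... */
brelse(bh);
#endif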
988
989 /*
990 * Calling wait_on_buffer() for a zero-ref buffer is illegal, so we call into
991 * __wait_on_buffer() just to trip a debug check. Because debug code in inline
992 * functions is bloaty.
993 */
994
995 static inline void wait_on_buffer(struct buffer_head *bh)
996 {
997 might_sleep();
998 if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0)
999 __wait_on_buffer(bh);
1000 }
1001
1002 static inline void lock_buffer(struct buffer_head *bh)
1003 {
1004 might_sleep();
1005 if (test_set_buffer_locked(bh))
1006 __lock_buffer(bh);
1007 }
1008
1009 extern int __set_page_dirty_buffers(struct page *page);
1010
1011 //
1012 // unicode character
1013 //
1014
1015 struct nls_table {
1016 char *charset;
1017 char *alias;
1018 int (*uni2char) (wchar_t uni, unsigned char *out, int boundlen);
1019 int (*char2uni) (const unsigned char *rawstring, int boundlen,
1020 wchar_t *uni);
1021 unsigned char *charset2lower;
1022 unsigned char *charset2upper;
1023 struct module *owner;
1024 struct nls_table *next;
1025 };
1026
1027 /* this value holds the maximum number of octets one charset character may take */
1028 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
1029
1030 /* nls.c */
1031 extern int register_nls(struct nls_table *);
1032 extern int unregister_nls(struct nls_table *);
1033 extern struct nls_table *load_nls(char *);
1034 extern void unload_nls(struct nls_table *);
1035 extern struct nls_table *load_nls_default(void);
1036
1037 extern int utf8_mbtowc(wchar_t *, const __u8 *, int);
1038 extern int utf8_mbstowcs(wchar_t *, const __u8 *, int);
1039 extern int utf8_wctomb(__u8 *, wchar_t, int);
1040 extern int utf8_wcstombs(__u8 *, const wchar_t *, int);
1041
1042 //
1043 // kernel jiffies
1044 //
1045
1046 #define HZ (100)
1047
1048 static inline __u32 JIFFIES()
1049 {
1050 LARGE_INTEGER Tick;
1051
1052 KeQueryTickCount(&Tick);
1053 Tick.QuadPart *= KeQueryTimeIncrement();
1054 Tick.QuadPart /= (10000000 / HZ);
1055
1056 return Tick.LowPart;
1057 }
1058
1059 #define jiffies JIFFIES()
1060
1061 //
1062 // memory routines
1063 //
1064
1065 #ifdef _WIN2K_TARGET_
1066
1067 typedef GUID UUID;
1068 NTKERNELAPI
1069 NTSTATUS
1070 ExUuidCreate(
1071 OUT UUID *Uuid
1072 );
1073
1074 NTKERNELAPI
1075 PVOID
1076 NTAPI
1077 ExAllocatePoolWithTag(
1078 IN POOL_TYPE PoolType,
1079 IN SIZE_T NumberOfBytes,
1080 IN ULONG Tag
1081 );
1082
1083 #define ExFreePoolWithTag(_P, _T) ExFreePool(_P)
1084 #endif
1085
1086 PVOID Ext2AllocatePool(
1087 IN POOL_TYPE PoolType,
1088 IN SIZE_T NumberOfBytes,
1089 IN ULONG Tag
1090 );
1091
1092 VOID
1093 Ext2FreePool(
1094 IN PVOID P,
1095 IN ULONG Tag
1096 );
1097
1098 void *kzalloc(int size, int flags);
1099 #define kmalloc(size, gfp) Ext2AllocatePool(NonPagedPool, size, 'JBDM')
1100 #define kfree(p) Ext2FreePool(p, 'JBDM')
1101
1102
1103 /* memory slab */
1104
1105 #define SLAB_HWCACHE_ALIGN 0x00002000U /* align objs on h/w cache lines */
1106 #define SLAB_KERNEL 0x00000001U
1107 #define SLAB_TEMPORARY 0x00000002U
1108
1109 typedef void (*kmem_cache_cb_t)(void*, kmem_cache_t *, unsigned long);
1110
1111 struct kmem_cache {
1112 CHAR name[32];
1113 ULONG flags;
1114 ULONG size;
1115 atomic_t count;
1116 atomic_t acount;
1117 NPAGED_LOOKASIDE_LIST la;
1118 kmem_cache_cb_t constructor;
1119 };
1120
1121
1122 kmem_cache_t *
1123 kmem_cache_create(
1124 const char *name,
1125 size_t size,
1126 size_t offset,
1127 unsigned long flags,
1128 kmem_cache_cb_t ctor
1129 );
1130
1131 void* kmem_cache_alloc(kmem_cache_t *kc, int flags);
1132 void kmem_cache_free(kmem_cache_t *kc, void *p);
1133 int kmem_cache_destroy(kmem_cache_t *kc);
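/*
 * Illustrative sketch (hypothetical cache): each kmem_cache sits on an
 * NPAGED_LOOKASIDE_LIST, so this mirrors the Linux slab calls the driver
 * uses for its buffer_head objects.
 */
#if 0
kmem_cache_t *bh_cache = kmem_cache_create("buffer_head",
                                           sizeof(struct buffer_head),
                                           0, SLAB_TEMPORARY, NULL);
struct buffer_head *bh = kmem_cache_alloc(bh_cache, GFP_NOFS);
/* ... */
kmem_cache_free(bh_cache, bh);
kmem_cache_destroy(bh_cache);
#endif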
1134
1135
1136 //
1137 // block device
1138 //
1139
1140 #define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
1141
1142 //
1143 // ll_rw_block ....
1144 //
1145
1146
1147 #define RW_MASK 1
1148 #define RWA_MASK 2
1149 #define READ 0
1150 #define WRITE 1
1151 #define READA 2 /* read-ahead - don't block if no resources */
1152 #define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */
1153 #define READ_SYNC (READ | (1 << BIO_RW_SYNC))
1154 #define READ_META (READ | (1 << BIO_RW_META))
1155 #define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC))
1156 #define WRITE_BARRIER ((1 << BIO_RW) | (1 << BIO_RW_BARRIER))
1157
1158 //
1159 // timer routines
1160 //
1161
1162 /*
1163 * These inlines deal with timer wrapping correctly. You are
1164 * strongly encouraged to use them
1165 * 1. Because people otherwise forget
1166 * 2. Because if the timer wrap changes in future you won't have to
1167 * alter your driver code.
1168 *
1169 * time_after(a,b) returns true if the time a is after time b.
1170 *
1171 * Do this with "<0" and ">=0" to only test the sign of the result. A
1172 * good compiler would generate better code (and a really good compiler
1173 * wouldn't care). Gcc is currently neither.
1174 */
1175 #define typecheck(x, y) (TRUE)
1176
1177 #define time_after(a,b) \
1178 (typecheck(unsigned long, a) && \
1179 typecheck(unsigned long, b) && \
1180 ((long)(b) - (long)(a) < 0))
1181 #define time_before(a,b) time_after(b,a)
1182
1183 #define time_after_eq(a,b) \
1184 (typecheck(unsigned long, a) && \
1185 typecheck(unsigned long, b) && \
1186 ((long)(a) - (long)(b) >= 0))
1187 #define time_before_eq(a,b) time_after_eq(b,a)
1188
1189 #define time_in_range(a,b,c) \
1190 (time_after_eq(a,b) && \
1191 time_before_eq(a,c))
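/*
 * Illustrative sketch (hypothetical deadline): with HZ == 100 a second is
 * 100 ticks, and the signed subtraction above keeps the comparison
 * correct even when the tick counter wraps.
 */
#if 0
__u32 deadline = jiffies + 5 * HZ;     /* five seconds from now */

if (time_after(jiffies, deadline)) {
    /* the deadline has passed */
}
#endif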
1192
1193 #define smp_rmb() do {}while(0)
1194
1195
1196 static inline __u32 do_div64 (__u64 * n, __u64 b)
1197 {
1198 __u64 mod;
1199
1200 mod = *n % b;
1201 *n = *n / b;
1202 return (__u32) mod;
1203 }
1204 #define do_div(n, b) do_div64(&(n), (__u64)b)
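/*
 * Illustrative sketch: do_div divides in place and hands back the
 * remainder, e.g. splitting a byte offset (hypothetical values) into a
 * block number and an offset within the block.
 */
#if 0
__u64 offset = 10000;
__u32 rem    = do_div(offset, 4096);   /* offset == 2, rem == 1808 */
#endif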
1205
1206 #endif // _EXT2_MODULE_HEADER_