/*
 * COPYRIGHT:        See COPYRIGHT.TXT
 * PROJECT:          Ext2 File System Driver for WinNT/2K/XP
 * PURPOSE:          Header file: nls structures & linux kernel ...
 * PROGRAMMER:       Matt Wu <mattwu@163.com>
 * HOMEPAGE:         http://www.ext2fsd.com
 */
#ifndef _EXT2_MODULE_HEADER_
#define _EXT2_MODULE_HEADER_

/* INCLUDES *************************************************************/

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/rbtree.h>
#include <linux/log2.h>

#if _WIN32_WINNT <= 0x500
#define _WIN2K_TARGET_  1
#endif
/* STRUCTS ******************************************************/

#ifndef offsetof
# define offsetof(type, member) ((ULONG_PTR)&(((type *)0)->member))
#endif

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
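/*
 * Usage sketch (illustrative only, not part of the original header):
 * container_of() recovers the enclosing structure from a pointer to one
 * of its embedded members. The struct and function below are hypothetical.
 */
#if 0
struct demo_entry {
    int                 value;
    struct list_head    link;   /* embedded member */
};

static struct demo_entry *demo_entry_from_link(struct list_head *link)
{
    /* step back from &entry->link to the start of the demo_entry */
    return container_of(link, struct demo_entry, link);
}
#endif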
// Byte order swapping routines

/* use the runtime routine or compiler's implementation */
#if (defined(_M_IX86) && (_MSC_FULL_VER > 13009037)) || \
    ((defined(_M_AMD64) || defined(_M_IA64)) && \
     (_MSC_FULL_VER > 13009175))

unsigned short   __cdecl _byteswap_ushort(unsigned short);
unsigned long    __cdecl _byteswap_ulong (unsigned long);
unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64);

#pragma intrinsic(_byteswap_ushort)
#pragma intrinsic(_byteswap_ulong)
#pragma intrinsic(_byteswap_uint64)

#define RtlUshortByteSwap(_x)    _byteswap_ushort((USHORT)(_x))
#define RtlUlongByteSwap(_x)     _byteswap_ulong((_x))
#define RtlUlonglongByteSwap(_x) _byteswap_uint64((_x))

#elif !defined(__REACTOS__)
#define __swab16(x) RtlUshortByteSwap(x)
#define __swab32(x) RtlUlongByteSwap(x)
#define __swab64(x) RtlUlonglongByteSwap(x)

#define __constant_swab16  __swab16
#define __constant_swab32  __swab32
#define __constant_swab64  __swab64

#define __constant_htonl(x) __constant_swab32((x))
#define __constant_ntohl(x) __constant_swab32((x))
#define __constant_htons(x) __constant_swab16((x))
#define __constant_ntohs(x) __constant_swab16((x))
#define __constant_cpu_to_le64(x) ((__u64)(x))
#define __constant_le64_to_cpu(x) ((__u64)(x))
#define __constant_cpu_to_le32(x) ((__u32)(x))
#define __constant_le32_to_cpu(x) ((__u32)(x))
#define __constant_cpu_to_le16(x) ((__u16)(x))
#define __constant_le16_to_cpu(x) ((__u16)(x))
#define __constant_cpu_to_be64(x) __constant_swab64((x))
#define __constant_be64_to_cpu(x) __constant_swab64((x))
#define __constant_cpu_to_be32(x) __constant_swab32((x))
#define __constant_be32_to_cpu(x) __constant_swab32((x))
#define __constant_cpu_to_be16(x) __constant_swab16((x))
#define __constant_be16_to_cpu(x) __constant_swab16((x))
#define __cpu_to_le64(x) ((__u64)(x))
#define __le64_to_cpu(x) ((__u64)(x))
#define __cpu_to_le32(x) ((__u32)(x))
#define __le32_to_cpu(x) ((__u32)(x))
#define __cpu_to_le16(x) ((__u16)(x))
#define __le16_to_cpu(x) ((__u16)(x))
#define __cpu_to_be64(x) __swab64((x))
#define __be64_to_cpu(x) __swab64((x))
#define __cpu_to_be32(x) __swab32((x))
#define __be32_to_cpu(x) __swab32((x))
#define __cpu_to_be16(x) __swab16((x))
#define __be16_to_cpu(x) __swab16((x))
#define __cpu_to_le64p(x) (*(__u64*)(x))
#define __le64_to_cpup(x) (*(__u64*)(x))
#define __cpu_to_le32p(x) (*(__u32*)(x))
#define __le32_to_cpup(x) (*(__u32*)(x))
#define __cpu_to_le16p(x) (*(__u16*)(x))
#define __le16_to_cpup(x) (*(__u16*)(x))
#define __cpu_to_be64p(x) __swab64p((x))
#define __be64_to_cpup(x) __swab64p((x))
#define __cpu_to_be32p(x) __swab32p((x))
#define __be32_to_cpup(x) __swab32p((x))
#define __cpu_to_be16p(x) __swab16p((x))
#define __be16_to_cpup(x) __swab16p((x))
#define __cpu_to_le64s(x) ((__s64)(x))
#define __le64_to_cpus(x) ((__s64)(x))
#define __cpu_to_le32s(x) ((__s32)(x))
#define __le32_to_cpus(x) ((__s32)(x))
#define __cpu_to_le16s(x) ((__s16)(x))
#define __le16_to_cpus(x) ((__s16)(x))
#define __cpu_to_be64s(x) __swab64s((x))
#define __be64_to_cpus(x) __swab64s((x))
#define __cpu_to_be32s(x) __swab32s((x))
#define __be32_to_cpus(x) __swab32s((x))
#define __cpu_to_be16s(x) __swab16s((x))
#define __be16_to_cpus(x) __swab16s((x))

#define cpu_to_le64 __cpu_to_le64
#define le64_to_cpu __le64_to_cpu
#define cpu_to_le32 __cpu_to_le32
#define le32_to_cpu __le32_to_cpu
#define cpu_to_le16 __cpu_to_le16
#define le16_to_cpu __le16_to_cpu

#define cpu_to_be64 __cpu_to_be64
#define be64_to_cpu __be64_to_cpu
#define cpu_to_be32 __cpu_to_be32
#define be32_to_cpu __be32_to_cpu
#define cpu_to_be16 __cpu_to_be16
#define be16_to_cpu __be16_to_cpu
#define cpu_to_le64p __cpu_to_le64p
#define le64_to_cpup __le64_to_cpup
#define cpu_to_le32p __cpu_to_le32p
#define le32_to_cpup __le32_to_cpup
#define cpu_to_le16p __cpu_to_le16p
#define le16_to_cpup __le16_to_cpup
#define cpu_to_be64p __cpu_to_be64p
#define be64_to_cpup __be64_to_cpup
#define cpu_to_be32p __cpu_to_be32p
#define be32_to_cpup __be32_to_cpup
#define cpu_to_be16p __cpu_to_be16p
#define be16_to_cpup __be16_to_cpup
#define cpu_to_le64s __cpu_to_le64s
#define le64_to_cpus __le64_to_cpus
#define cpu_to_le32s __cpu_to_le32s
#define le32_to_cpus __le32_to_cpus
#define cpu_to_le16s __cpu_to_le16s
#define le16_to_cpus __le16_to_cpus
#define cpu_to_be64s __cpu_to_be64s
#define be64_to_cpus __be64_to_cpus
#define cpu_to_be32s __cpu_to_be32s
#define be32_to_cpus __be32_to_cpus
#define cpu_to_be16s __cpu_to_be16s
#define be16_to_cpus __be16_to_cpus
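/*
 * Behavior sketch (illustrative only): on the little-endian CPUs this
 * driver targets, the cpu_to_le*()/le*_to_cpu() conversions are defined
 * as no-ops above, and only the big-endian conversions actually swap.
 */
#if 0
static void demo_endian(void)
{
    __u32 v = 0x11223344;

    ASSERT(cpu_to_le32(v) == 0x11223344);   /* identity on an LE host */
    ASSERT(cpu_to_be32(v) == 0x44332211);   /* swapped via RtlUlongByteSwap */
}
#endif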
static inline void le16_add_cpu(__le16 *var, u16 val)
{
    *var = cpu_to_le16(le16_to_cpu(*var) + val);
}

static inline void le32_add_cpu(__le32 *var, u32 val)
{
    *var = cpu_to_le32(le32_to_cpu(*var) + val);
}

static inline void le64_add_cpu(__le64 *var, u64 val)
{
    *var = cpu_to_le64(le64_to_cpu(*var) + val);
}
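/*
 * Usage sketch (illustrative only): the le*_add_cpu() helpers update an
 * on-disk little-endian counter in place, e.g. an ext2 free-blocks count.
 * The field name below is hypothetical.
 */
#if 0
static void demo_inc_free_blocks(__le16 *free_blocks_le)
{
    /* decode LE -> CPU, add one, re-encode CPU -> LE */
    le16_add_cpu(free_blocks_le, 1);
}
#endif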
// Network to host byte swap functions

#define ntohl(x) ( ( ( ( x ) & 0x000000ff ) << 24 ) | \
                   ( ( ( x ) & 0x0000ff00 ) << 8  ) | \
                   ( ( ( x ) & 0x00ff0000 ) >> 8  ) | \
                   ( ( ( x ) & 0xff000000 ) >> 24 )   )

#define ntohs(x) ( ( ( ( x ) & 0xff00 ) >> 8 ) | \
                   ( ( ( x ) & 0x00ff ) << 8 ) )

#define htonl(x) ntohl(x)
#define htons(x) ntohs(x)
// kernel printk flags

#define KERN_EMERG      "<0>"   /* system is unusable                   */
#define KERN_ALERT      "<1>"   /* action must be taken immediately     */
#define KERN_CRIT       "<2>"   /* critical conditions                  */
#define KERN_ERR        "<3>"   /* error conditions                     */
#define KERN_WARNING    "<4>"   /* warning conditions                   */
#define KERN_NOTICE     "<5>"   /* normal but significant condition     */
#define KERN_INFO       "<6>"   /* informational                        */
#define KERN_DEBUG      "<7>"   /* debug-level messages                 */

#define printk  DbgPrint
#define MAX_ERRNO       4095
#define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)
{
    return (void *)(long_ptr_t) error;
}

static inline long PTR_ERR(const void *ptr)
{
    return (long)(long_ptr_t) ptr;
}

static inline long IS_ERR(const void *ptr)
{
    return IS_ERR_VALUE((unsigned long)(long_ptr_t)ptr);
}

#define BUG_ON(c)   assert(!(c))

#define WARN_ON(c)  BUG_ON(c)
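/*
 * Usage sketch (illustrative only): the ERR_PTR()/IS_ERR()/PTR_ERR()
 * pattern folds an errno code into a pointer return value, using the top
 * MAX_ERRNO addresses that are never valid pointers. The functions below
 * are hypothetical.
 */
#if 0
static void *demo_lookup(int id)
{
    if (id < 0)
        return ERR_PTR(-EINVAL);    /* encode the error in the pointer */
    return NULL;                    /* real lookup elided */
}

static int demo_caller(void)
{
    void *obj = demo_lookup(-1);
    if (IS_ERR(obj))
        return (int)PTR_ERR(obj);   /* recovers -EINVAL */
    return 0;
}
#endif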
// Linux module definitions

#define THIS_MODULE NULL
#define MODULE_LICENSE(x)
#define MODULE_ALIAS_NLS(x)
#define EXPORT_SYMBOL(x)

#define try_module_get(x)   (TRUE)
#define module_put(x)

#define module_init(X) int  __init module_##X() {return X();}
#define module_exit(X) void __exit module_##X() {X();}

#define DECLARE_INIT(X) int  __init module_##X(void)
#define DECLARE_EXIT(X) void __exit module_##X(void)

#define LOAD_MODULE(X) do {                             \
            rc = module_##X();                          \
        } while (0)

#define UNLOAD_MODULE(X) do {                           \
            module_##X();                               \
        } while (0)

#define LOAD_NLS    LOAD_MODULE
#define UNLOAD_NLS  UNLOAD_MODULE
typedef struct _spinlock_t {
    KSPIN_LOCK  lock;
    KIRQL       irql;
} spinlock_t;

#define spin_lock_init(sl)  KeInitializeSpinLock(&((sl)->lock))
#define spin_lock(sl)       KeAcquireSpinLock(&((sl)->lock), &((sl)->irql))
#define spin_unlock(sl)     KeReleaseSpinLock(&((sl)->lock), (sl)->irql)
#define spin_lock_irqsave(sl, flags) do {spin_lock(sl); flags = (sl)->irql;} while(0)
#define spin_unlock_irqrestore(sl, flags) do {ASSERT((KIRQL)(flags) == (sl)->irql); spin_unlock(sl);} while(0)

#define assert_spin_locked(x)   do {} while(0)
/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
    return spin_is_contended(lock);
#else
    return 0;
#endif
}
/**
 * set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike the Linux __set_bit() this wraps, the implementation below is
 * atomic: it relies on InterlockedOr, so concurrent callers on the same
 * word do not lose updates.
 */
static inline int set_bit(int nr, volatile unsigned long *addr)
{
    addr += (nr >> ORDER_PER_LONG);
    nr &= (BITS_PER_LONG - 1);

    return !!(InterlockedOr(addr, (1 << nr)) & (1 << nr));
}
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline int clear_bit(int nr, volatile unsigned long *addr)
{
    addr += (nr >> ORDER_PER_LONG);
    nr &= (BITS_PER_LONG - 1);

    return !!(InterlockedAnd(addr, ~(1 << nr)) & (1 << nr));
}
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
    return clear_bit(nr, addr);
}
/*
 * test_bit - non-atomically read a bit's current value
 */
static int test_bit(int nr, volatile const unsigned long *addr)
{
    return !!((1 << (nr & (BITS_PER_LONG - 1))) &
              (addr[nr >> ORDER_PER_LONG]));
}
/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
    return set_bit(nr, addr);
}
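/*
 * Usage sketch (illustrative only): the bit helpers address a multi-word
 * bitmap the way the ext2 block/inode bitmaps are used. With 32-bit longs,
 * bit 37 lands in word 1, bit 5.
 */
#if 0
static void demo_bitmap(void)
{
    unsigned long map[2] = { 0, 0 };

    set_bit(37, map);                       /* word 1, bit 5 */
    ASSERT(test_bit(37, map));
    ASSERT(test_and_clear_bit(37, map));    /* returns the old value: set */
    ASSERT(!test_bit(37, map));
}
#endif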
// list definition ...

#include <linux/list.h>

/*********************************************
 *  linux scheduler related structures      *
 *********************************************/
#define TASK_INTERRUPTIBLE      1
#define TASK_UNINTERRUPTIBLE    2

extern struct task_struct *current;

// scheduler routines

static inline int cond_resched() {
    return FALSE;
}

static inline int need_resched() {
    return FALSE;
}

#define yield()         do {} while(0)
#define might_sleep()   do {} while(0)
typedef struct mutex {
    FAST_MUTEX  lock;
} mutex_t;

#define mutex_init(x)   ExInitializeFastMutex(&((x)->lock))
#define mutex_lock(x)   ExAcquireFastMutex(&((x)->lock))
#define mutex_unlock(x) ExReleaseFastMutex(&((x)->lock))
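/*
 * Usage sketch (illustrative only): the mutex shim maps straight onto an
 * NT fast mutex, so the usual init/lock/unlock bracketing applies. The
 * names below are hypothetical.
 */
#if 0
static struct mutex demo_lock;      /* mutex_init(&demo_lock) once at setup */

static void demo_locked_update(int *counter)
{
    mutex_lock(&demo_lock);         /* ExAcquireFastMutex */
    (*counter)++;
    mutex_unlock(&demo_lock);       /* ExReleaseFastMutex */
}
#endif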
typedef PVOID wait_queue_t;

#define WQ_FLAG_EXCLUSIVE       0x01
#define WQ_FLAG_AUTO_REMOVAL    0x02

struct __wait_queue {
    /* ... */
    struct list_head task_list;
};

#define DEFINE_WAIT(name) \
        wait_queue_t name = (PVOID)wait_queue_create();

struct wait_bit_key {
    /* ... */
};

struct wait_bit_queue {
    struct wait_bit_key key;
    /* ... */
};

struct __wait_queue_head {
    /* ... */
    struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

#define is_sync_wait(wait)          (TRUE)
#define set_current_state(state)    do {} while(0)
#define __set_current_state(state)  do {} while(0)

void init_waitqueue_head(wait_queue_head_t *q);
int wake_up(wait_queue_head_t *queue);
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
struct __wait_queue *wait_queue_create();
void wait_queue_destroy(struct __wait_queue *);

void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
struct timer_list {
    struct list_head entry;
    unsigned long expires;

    void (*function)(unsigned long);
    /* ... */

#ifdef CONFIG_TIMER_STATS
    /* ... */
#endif
};
typedef struct kmem_cache kmem_cache_t;

struct block_device {
    unsigned long           bd_flags;   /* flags */
    atomic_t                bd_count;   /* reference count */
    PDEVICE_OBJECT          bd_dev;     /* device object */
    ANSI_STRING             bd_name;    /* name in ansi string */
    DISK_GEOMETRY           bd_geo;     /* disk geometry */
    PARTITION_INFORMATION   bd_part;    /* partition information */
    void                   *bd_priv;    /* pointer to EXT2_VCB,
                                           NULL if it's a journal dev */
    PFILE_OBJECT            bd_volume;  /* streaming object file */
    LARGE_MCB               bd_extents; /* dirty extents */

    kmem_cache_t           *bd_bh_cache;  /* memory cache for buffer_head */
    ERESOURCE               bd_bh_lock;   /* lock for bh tree and reaper list */
    struct rb_root          bd_bh_root;   /* buffer_head red-black tree root */
    LIST_ENTRY              bd_bh_free;   /* reaper list */
    KEVENT                  bd_bh_notify; /* notification event for cleanup */
};
// Pages are handled as follows:
// when allocating a page structure we allocate an extra PAGE_SIZE bytes at
// the end of the structure - that is where all the buffer heads will live,
// hence -> page_address(page) = page + sizeof(page)
#define page_address(_page) ((char*)_page + sizeof(struct page))
typedef struct page {
    atomic_t        count;      /* reference count, used by get_page() */
    unsigned long   flags;      /* PG_* state bits, tested and set below */
    /* ... */
};
#define get_page(p) atomic_inc(&(p)->count)

#define PG_locked        0  /* Page is locked. Don't touch. */
#define PG_error         1
#define PG_referenced    2
#define PG_uptodate      3
#define PG_dirty         4
#define PG_active        7
#define PG_highmem      11
#define PG_checked      12  /* kill me in 2.5.<early>. */
#define PG_arch_1       13
#define PG_reserved     14
#define PG_launder      15  /* written out by VM pressure.. */
#define PG_fs_1         16  /* Filesystem specific */

#ifndef arch_set_page_uptodate
#define arch_set_page_uptodate(page)
#endif
/* Make it prettier to test the above... */
#define UnlockPage(page)        unlock_page(page)
#define Page_Uptodate(page)     test_bit(PG_uptodate, &(page)->flags)
#define SetPageUptodate(page)                           \
    do {                                                \
        arch_set_page_uptodate(page);                   \
        set_bit(PG_uptodate, &(page)->flags);           \
    } while (0)
#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
#define PageDirty(page)         test_bit(PG_dirty, &(page)->flags)
#define SetPageDirty(page)      set_bit(PG_dirty, &(page)->flags)
#define ClearPageDirty(page)    clear_bit(PG_dirty, &(page)->flags)
#define PageLocked(page)        test_bit(PG_locked, &(page)->flags)
#define LockPage(page)          set_bit(PG_locked, &(page)->flags)
#define TryLockPage(page)       test_and_set_bit(PG_locked, &(page)->flags)
#define PageChecked(page)       test_bit(PG_checked, &(page)->flags)
#define SetPageChecked(page)    set_bit(PG_checked, &(page)->flags)
#define ClearPageChecked(page)  clear_bit(PG_checked, &(page)->flags)
#define PageLaunder(page)       test_bit(PG_launder, &(page)->flags)
#define SetPageLaunder(page)    set_bit(PG_launder, &(page)->flags)
#define ClearPageLaunder(page)  clear_bit(PG_launder, &(page)->flags)
#define ClearPageArch1(page)    clear_bit(PG_arch_1, &(page)->flags)

#define PageError(page)         test_bit(PG_error, &(page)->flags)
#define SetPageError(page)      set_bit(PG_error, &(page)->flags)
#define ClearPageError(page)    clear_bit(PG_error, &(page)->flags)
#define PageReferenced(page)    test_bit(PG_referenced, &(page)->flags)
#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)

#define PageActive(page)        test_bit(PG_active, &(page)->flags)
#define SetPageActive(page)     set_bit(PG_active, &(page)->flags)
#define ClearPageActive(page)   clear_bit(PG_active, &(page)->flags)
extern unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order);
#define __get_free_page(gfp_mask) \
        __get_free_pages((gfp_mask), 0)

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr)   free_pages((addr), 0)

extern void truncate_inode_pages(struct address_space *, loff_t);

#define __GFP_HIGHMEM   0x02

#define __GFP_WAIT      0x10    /* Can wait and reschedule? */
#define __GFP_HIGH      0x20    /* Should access emergency pools? */
#define __GFP_IO        0x40    /* Can start low memory physical IO? */
#define __GFP_HIGHIO    0x80    /* Can start high mem physical IO? */
#define __GFP_FS        0x100   /* Can call down to low-level FS? */

#define GFP_ATOMIC      (__GFP_HIGH)
#define GFP_USER        (__GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_HIGHUSER    (__GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
#define GFP_KERNEL      (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)

#define __GFP_NOFAIL    0
// buffer head definitions

enum bh_state_bits {
    BH_Uptodate,        /* Contains valid data */
    BH_Dirty,           /* Is dirty */
    BH_Verified,        /* Is verified */
    BH_Lock,            /* Is locked */
    BH_Req,             /* Has been submitted for I/O */
    BH_Uptodate_Lock,   /* Used by the first bh in a page, to serialise
                         * IO completion of other buffers in the page */

    BH_Mapped,          /* Has a disk mapping */
    BH_New,             /* Disk mapping was newly created by get_block */
    BH_Async_Read,      /* Is under end_buffer_async_read I/O */
    BH_Async_Write,     /* Is under end_buffer_async_write I/O */
    BH_Delay,           /* Buffer is not yet allocated on disk */
    BH_Boundary,        /* Block is followed by a discontiguity */
    BH_Write_EIO,       /* I/O error on write */
    BH_Ordered,         /* ordered write */
    BH_Eopnotsupp,      /* operation not supported (barrier) */
    BH_Unwritten,       /* Buffer is allocated on disk but not written */

    BH_PrivateStart,    /* not a state bit, but the first bit available
                         * for private allocation by other entities */
};

#define PAGE_CACHE_SIZE     (PAGE_SIZE)
#define PAGE_CACHE_SHIFT    (12)
#define MAX_BUF_PER_PAGE    (PAGE_CACHE_SIZE / 512)
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
    LIST_ENTRY            b_link;       /* to be added to reaper list */
    unsigned long         b_state;      /* buffer state bitmap (see above) */
    struct page          *b_page;       /* the page this bh is mapped to */
    PMDL                  b_mdl;        /* MDL of the locked buffer */
    void                 *b_bcb;        /* BCB of the buffer */

    // kdev_t             b_dev;        /* device (B_FREE = free) */
    struct block_device  *b_bdev;       /* block device object */

    blkcnt_t              b_blocknr;    /* start block number */
    size_t                b_size;       /* size of mapping */
    char                 *b_data;       /* pointer to data within the page */
    bh_end_io_t          *b_end_io;     /* I/O completion */
    void                 *b_private;    /* reserved for b_end_io */
    // struct list_head      b_assoc_buffers; /* associated with another mapping */
    // struct address_space *b_assoc_map;     /* mapping this buffer is associated with */
    atomic_t              b_count;      /* users using this buffer_head */
    struct rb_node        b_rb_node;    /* Red-black tree node entry */

    LARGE_INTEGER         b_ts_creat;   /* creation time */
    LARGE_INTEGER         b_ts_drop;    /* drop time (to be released) */
};
/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 */
#define BUFFER_FNS(bit, name)                                       \
static inline void set_buffer_##name(struct buffer_head *bh)       \
{                                                                   \
    set_bit(BH_##bit, &(bh)->b_state);                              \
}                                                                   \
static inline void clear_buffer_##name(struct buffer_head *bh)     \
{                                                                   \
    clear_bit(BH_##bit, &(bh)->b_state);                            \
}                                                                   \
static inline int buffer_##name(const struct buffer_head *bh)      \
{                                                                   \
    return test_bit(BH_##bit, &(bh)->b_state);                      \
}

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)                                   \
static inline int test_set_buffer_##name(struct buffer_head *bh)   \
{                                                                   \
    return test_and_set_bit(BH_##bit, &(bh)->b_state);              \
}                                                                   \
static inline int test_clear_buffer_##name(struct buffer_head *bh) \
{                                                                   \
    return test_and_clear_bit(BH_##bit, &(bh)->b_state);            \
}
/*
 * Emit the buffer bitops functions.   Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Verified, verified)
BUFFER_FNS(Lock, locked)
TAS_BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Ordered, ordered)
BUFFER_FNS(Eopnotsupp, eopnotsupp)
BUFFER_FNS(Unwritten, unwritten)
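/*
 * Expansion sketch (illustrative only): BUFFER_FNS(Dirty, dirty) generates
 * the three accessors below, so callers can write set_buffer_dirty(bh)
 * instead of open-coding set_bit(BH_Dirty, &bh->b_state).
 */
#if 0
static inline void set_buffer_dirty(struct buffer_head *bh)
{
    set_bit(BH_Dirty, &(bh)->b_state);
}
static inline void clear_buffer_dirty(struct buffer_head *bh)
{
    clear_bit(BH_Dirty, &(bh)->b_state);
}
static inline int buffer_dirty(const struct buffer_head *bh)
{
    return test_bit(BH_Dirty, &(bh)->b_state);
}
#endif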
#define bh_offset(bh)       ((unsigned long)(bh)->b_data & ~PAGE_MASK)
#define touch_buffer(bh)    mark_page_accessed(bh->b_page)

/* If we *know* page->private refers to buffer_heads */

#define page_buffers(page)                              \
        (                                               \
            BUG_ON(!PagePrivate(page)),                 \
            ((struct buffer_head *)page_private(page))  \
        )
#define page_has_buffers(page)  PagePrivate(page)
void mark_buffer_dirty(struct buffer_head *bh);
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void set_bh_page(struct buffer_head *bh,
                 struct page *page, unsigned long offset);
int  try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                                       int retry);
void create_empty_buffers(struct page *, unsigned long,
                          unsigned long b_state);
/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int  inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int  remove_inode_buffers(struct inode *inode);
int  sync_mapping_buffers(struct address_space *mapping);
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);

void mark_buffer_async_write(struct buffer_head *bh);
void invalidate_bdev(struct block_device *);
int  sync_blockdev(struct block_device *bdev);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
int  fsync_bdev(struct block_device *);
struct super_block *freeze_bdev(struct block_device *);
void thaw_bdev(struct block_device *, struct super_block *);
int  fsync_super(struct super_block *);
int  fsync_no_super(struct block_device *);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
                                     unsigned long size);
struct buffer_head *get_block_bh(struct block_device *bdev, sector_t block,
                                 unsigned long size, int zero);
struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
                             unsigned long size);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
void invalidate_bh_lrus(void);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, struct buffer_head *bh[]);
int  sync_dirty_buffer(struct buffer_head *bh);
int  submit_bh(int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
                          sector_t bblock, unsigned blocksize);
int  bh_uptodate_or_lock(struct buffer_head *bh);
int  bh_submit_read(struct buffer_head *bh);
/* They are separately managed */
struct buffer_head *extents_bread(struct super_block *sb, sector_t block);
struct buffer_head *extents_bwrite(struct super_block *sb, sector_t block);
void extents_mark_buffer_dirty(struct buffer_head *bh);
void extents_brelse(struct buffer_head *bh);

extern int buffer_heads_over_limit;
/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces
 */
int block_write_full_page(struct page *page, get_block_t *get_block,
                          struct writeback_control *wbc);
int block_read_full_page(struct page *, get_block_t *);
int block_write_begin(struct file *, struct address_space *,
                      loff_t, unsigned, unsigned,
                      struct page **, void **, get_block_t *);
int block_write_end(struct file *, struct address_space *,
                    loff_t, unsigned, unsigned,
                    struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
                      loff_t, unsigned, unsigned,
                      struct page *, void *);

int block_prepare_write(struct page *, unsigned, unsigned, get_block_t *);
int cont_write_begin(struct file *, struct address_space *, loff_t,
                     unsigned, unsigned, struct page **, void **,
                     get_block_t *, loff_t *);
int block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
                       get_block_t get_block);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int file_fsync(struct file *, struct dentry *, int);
int nobh_write_begin(struct file *, struct address_space *,
                     loff_t, unsigned, unsigned,
                     struct page **, void **, get_block_t *);
int nobh_write_end(struct file *, struct address_space *,
                   loff_t, unsigned, unsigned,
                   struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
                   struct writeback_control *wbc);
int generic_cont_expand_simple(struct inode *inode, loff_t size);

void block_invalidatepage(struct page *page, unsigned long offset);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
int  block_commit_write(struct page *page, unsigned from, unsigned to);
void block_sync_page(struct page *);

void buffer_init(void);
static inline void attach_page_buffers(struct page *page,
                                       struct buffer_head *head)
{
    page_cache_get(page);
    SetPagePrivate(page);
    set_page_private(page, (unsigned long)head);
}
static inline void get_bh(struct buffer_head *bh)
{
    atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
    /* ... */
}

static inline void brelse(struct buffer_head *bh)
{
    /* ... */
}

static inline void fini_bh(struct buffer_head **bh)
{
    /* ... */
}

static inline void bforget(struct buffer_head *bh)
{
    /* ... */
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
    return get_block_bh(sb->s_bdev, block, sb->s_blocksize, 0);
}
static inline struct buffer_head *
sb_getblk_zero(struct super_block *sb, sector_t block)
{
    return get_block_bh(sb->s_bdev, block, sb->s_blocksize, 1);
}
static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
    struct buffer_head *bh = __getblk(sb->s_bdev, block, sb->s_blocksize);
    if (!bh)
        return NULL;
    if (!buffer_uptodate(bh) && (bh_submit_read(bh) < 0)) {
        brelse(bh);
        return NULL;
    }
    return bh;
}
static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
    return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}
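/*
 * Usage sketch (illustrative only): reading one metadata block through the
 * sb_* helpers and dropping the reference afterwards. The caller and block
 * number are hypothetical.
 */
#if 0
static int demo_read_block(struct super_block *sb, sector_t blk)
{
    struct buffer_head *bh = sb_bread(sb, blk);
    if (!bh)
        return -EIO;    /* read failed or buffer unavailable */

    /* inspect bh->b_data here, sb->s_blocksize bytes long */

    brelse(bh);         /* drop the b_count reference */
    return 0;
}
#endif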
static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
    set_buffer_mapped(bh);
    bh->b_bdev = sb->s_bdev;
    bh->b_blocknr = block;
    bh->b_size = sb->s_blocksize;
}
/*
 * Calling wait_on_buffer() for a zero-ref buffer is illegal, so we call into
 * __wait_on_buffer() just to trip a debug check.  Because debug code in inline
 * functions is bloaty.
 */
static inline void wait_on_buffer(struct buffer_head *bh)
{
    might_sleep();
    if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0)
        __wait_on_buffer(bh);
}
static inline void lock_buffer(struct buffer_head *bh)
{
    might_sleep();
    if (test_set_buffer_locked(bh))
        __lock_buffer(bh);
}

extern int __set_page_dirty_buffers(struct page *page);
// unicode character

struct nls_table {
    /* ... */
    int (*uni2char) (wchar_t uni, unsigned char *out, int boundlen);
    int (*char2uni) (const unsigned char *rawstring, int boundlen,
                     wchar_t *uni);
    unsigned char *charset2lower;
    unsigned char *charset2upper;
    struct module *owner;
    struct nls_table *next;
};

/* this value holds the maximum octet count of a charset */
#define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
extern int register_nls(struct nls_table *);
extern int unregister_nls(struct nls_table *);
extern struct nls_table *load_nls(char *);
extern void unload_nls(struct nls_table *);
extern struct nls_table *load_nls_default(void);

extern int utf8_mbtowc(wchar_t *, const __u8 *, int);
extern int utf8_mbstowcs(wchar_t *, const __u8 *, int);
extern int utf8_wctomb(__u8 *, wchar_t, int);
extern int utf8_wcstombs(__u8 *, const wchar_t *, int);
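/*
 * Usage sketch (illustrative only, semantics assumed from the Linux NLS
 * helpers this shim mirrors): utf8_mbtowc() decodes one UTF-8 sequence of
 * at most NLS_MAX_CHARSET_SIZE octets into a wide character, returning
 * the octet count consumed, or a negative value on a malformed sequence.
 */
#if 0
static int demo_decode_utf8(const __u8 *s, int len, wchar_t *wc)
{
    int n = utf8_mbtowc(wc, s, len);
    if (n <= 0)
        return -EINVAL;     /* invalid or empty sequence */
    return n;               /* octets consumed */
}
#endif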
static inline __u32 JIFFIES()
{
    LARGE_INTEGER Tick;

    KeQueryTickCount(&Tick);
    Tick.QuadPart *= KeQueryTimeIncrement();
    Tick.QuadPart /= (10000000 / HZ);

    return Tick.LowPart;
}

#define jiffies JIFFIES()
#ifdef _WIN2K_TARGET_

/* ... */
ExAllocatePoolWithTag(
    IN POOL_TYPE PoolType,
    IN SIZE_T NumberOfBytes,
    IN ULONG Tag
);

#define ExFreePoolWithTag(_P, _T) ExFreePool(_P)

#endif /* _WIN2K_TARGET_ */
PVOID
Ext2AllocatePool(
    IN POOL_TYPE PoolType,
    IN SIZE_T NumberOfBytes,
    IN ULONG Tag
);

/* ... */

void *kzalloc(int size, int flags);
#define kmalloc(size, gfp)  Ext2AllocatePool(NonPagedPool, size, 'JBDM')
#define kfree(p)            Ext2FreePool(p, 'JBDM')
#define SLAB_HWCACHE_ALIGN  0x00002000U /* align objs on a h/w cache lines */
#define SLAB_KERNEL         0x00000001U
#define SLAB_TEMPORARY      0x00000002U

typedef void (*kmem_cache_cb_t)(void*, kmem_cache_t *, unsigned long);

struct kmem_cache {
    /* ... */
    NPAGED_LOOKASIDE_LIST   la;             /* lookaside list backing the cache */
    kmem_cache_cb_t         constructor;    /* object constructor callback */
};

kmem_cache_t *kmem_cache_create(const char *name, size_t size, size_t offset,
                                unsigned long flags,
                                kmem_cache_cb_t ctor);
void *kmem_cache_alloc(kmem_cache_t *kc, int flags);
void  kmem_cache_free(kmem_cache_t *kc, void *p);
int   kmem_cache_destroy(kmem_cache_t *kc);
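/*
 * Usage sketch (illustrative only; the kmem_cache_create() parameter list
 * above is partly reconstructed, so treat this as an assumption): the slab
 * shim is backed by an NT non-paged lookaside list, and create/alloc/free/
 * destroy follow the Linux calling pattern. Names below are hypothetical.
 */
#if 0
static kmem_cache_t *demo_cache;

static int demo_cache_setup(void)
{
    demo_cache = kmem_cache_create("demo", sizeof(struct buffer_head),
                                   0, SLAB_TEMPORARY, NULL);
    if (!demo_cache)
        return -ENOMEM;

    /* objects come from the lookaside list behind the cache */
    {
        void *obj = kmem_cache_alloc(demo_cache, GFP_KERNEL);
        if (obj)
            kmem_cache_free(demo_cache, obj);
    }
    return kmem_cache_destroy(demo_cache);
}
#endif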
#define BDEVNAME_SIZE   32  /* Largest string for a blockdev identifier */

#define READA           2   /* read-ahead - don't block if no resources */
#define SWRITE          3   /* for ll_rw_block() - wait for buffer lock */
#define READ_SYNC       (READ | (1 << BIO_RW_SYNC))
#define READ_META       (READ | (1 << BIO_RW_META))
#define WRITE_SYNC      (WRITE | (1 << BIO_RW_SYNC))
#define WRITE_BARRIER   ((1 << BIO_RW) | (1 << BIO_RW_BARRIER))
/*
 * These inlines deal with timer wrapping correctly. You are
 * strongly encouraged to use them
 *  1. Because people otherwise forget
 *  2. Because if the timer wrap changes in future you won't have to
 *     alter your driver code.
 *
 * time_after(a,b) returns true if the time a is after time b.
 *
 * Do this with "<0" and ">=0" to only test the sign of the result. A
 * good compiler would generate better code (and a really good compiler
 * wouldn't care). Gcc is currently neither.
 */
#define typecheck(x, y) (TRUE)

#define time_after(a,b)                 \
        (typecheck(unsigned long, a) && \
         typecheck(unsigned long, b) && \
         ((long)(b) - (long)(a) < 0))
#define time_before(a,b)    time_after(b,a)

#define time_after_eq(a,b)              \
        (typecheck(unsigned long, a) && \
         typecheck(unsigned long, b) && \
         ((long)(a) - (long)(b) >= 0))
#define time_before_eq(a,b) time_after_eq(b,a)

#define time_in_range(a,b,c)    \
        (time_after_eq(a,b) &&  \
         time_before_eq(a,c))
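/*
 * Usage sketch (illustrative only): time_after() compares jiffies values
 * with wrap-around safety by testing the sign of a signed difference.
 */
#if 0
static int demo_expired(unsigned long deadline)
{
    /* true once the current tick count has passed the deadline */
    return time_after(jiffies, deadline);
}
#endif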
#define smp_rmb()   do {} while(0)

static inline __u32 do_div64(__u64 *n, __u64 b)
{
    __u64 rem = *n % b;
    *n /= b;
    return (__u32) rem;
}
#define do_div(n, b) do_div64(&(n), (__u64)b)
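/*
 * Usage sketch (illustrative only): do_div() divides a 64-bit value in
 * place and hands back the 32-bit remainder, mirroring the Linux macro's
 * contract.
 */
#if 0
static void demo_split_offset(__u64 offset, unsigned long blocksize,
                              __u64 *block, __u32 *off_in_block)
{
    __u64 n = offset;
    *off_in_block = do_div(n, blocksize);   /* n becomes offset / blocksize */
    *block = n;
}
#endif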
#endif // _EXT2_MODULE_HEADER_