/*
 * COPYRIGHT:        See COPYRIGHT.TXT
 * PROJECT:          Ext2 File System Driver for WinNT/2K/XP
 * FILE:             linux.c
 * PROGRAMMER:       Matt Wu <mattwu@163.com>
 * HOMEPAGE:         http://www.ext2fsd.com
 * UPDATE HISTORY:
 */

/* INCLUDES *****************************************************************/

#include <ext2fs.h>
#include <linux/jbd.h>
#include <linux/errno.h>

/* GLOBALS ***************************************************************/

extern PEXT2_GLOBAL Ext2Global;

/* DEFINITIONS *************************************************************/

#ifdef ALLOC_PRAGMA
#pragma alloc_text(PAGE, kzalloc)
#endif

struct task_struct current_task = {
    /* pid  */ 0,
    /* tid  */ 1,
    /* comm */ "current\0",
    /* journal_info */ NULL
};
struct task_struct *current = &current_task;
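
/*
 * The JBD code ported from Linux references the global `current' task
 * pointer (e.g. current->journal_info).  There is no per-thread task
 * structure on NT, so a single static task_struct stands in for it.
 */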

void *kzalloc(int size, int flags)
{
    void *buffer = kmalloc(size, flags);
    if (buffer) {
        memset(buffer, 0, size);
    }
    return buffer;
}

//
// slab routines
//

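/*
 * The Linux slab interface is emulated on top of NT nonpaged lookaside
 * lists: kmem_cache_create() wraps ExInitializeNPagedLookasideList(),
 * and the alloc/free pair maps to ExAllocateFromNPagedLookasideList()
 * and ExFreeToNPagedLookasideList().
 *
 * A minimal usage sketch (hypothetical cache name and object type,
 * for illustration only):
 *
 *     kmem_cache_t *cache;
 *     struct foo   *obj;
 *
 *     cache = kmem_cache_create("foo_cache", sizeof(struct foo),
 *                               0, SLAB_TEMPORARY, NULL);
 *     obj = kmem_cache_alloc(cache, GFP_NOFS);
 *     ...
 *     kmem_cache_free(cache, obj);
 *     kmem_cache_destroy(cache);
 */
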
kmem_cache_t *
kmem_cache_create(
    const char *    name,
    size_t          size,
    size_t          offset,
    unsigned long   flags,
    kmem_cache_cb_t ctor
)
{
    kmem_cache_t *kc = NULL;

    kc = kmalloc(sizeof(kmem_cache_t), GFP_KERNEL);
    if (kc == NULL) {
        goto errorout;
    }

    memset(kc, 0, sizeof(kmem_cache_t));
    ExInitializeNPagedLookasideList(
        &kc->la,
        NULL,
        NULL,
        0,
        size,
        'JBKC',
        0);

    kc->size = size;
    strncpy(kc->name, name, 31);
    kc->constructor = ctor;

errorout:

    return kc;
}

int kmem_cache_destroy(kmem_cache_t * kc)
{
    ASSERT(kc != NULL);

    ExDeleteNPagedLookasideList(&(kc->la));
    kfree(kc);

    return 0;
}

void* kmem_cache_alloc(kmem_cache_t *kc, int flags)
{
    PVOID ptr = NULL;
    ptr = ExAllocateFromNPagedLookasideList(&(kc->la));
    if (ptr) {
        atomic_inc(&kc->count);
        atomic_inc(&kc->acount);
    }
    return ptr;
}

void kmem_cache_free(kmem_cache_t *kc, void *p)
{
    if (p) {
        atomic_dec(&kc->count);
        ExFreeToNPagedLookasideList(&(kc->la), p);
    }
}

//
// wait queue routines
//

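/*
 * Wait queues are emulated with a spinlock-protected list of
 * __wait_queue entries; each entry carries a KEVENT so that a waiter
 * can be represented by a kernel synchronization event.  wake_up() is
 * currently a no-op stub (see below).
 */
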
void init_waitqueue_head(wait_queue_head_t *q)
{
    spin_lock_init(&q->lock);
    INIT_LIST_HEAD(&q->task_list);
}

struct __wait_queue *
wait_queue_create()
{
    struct __wait_queue * wait = NULL;
    wait = kmalloc(sizeof(struct __wait_queue), GFP_KERNEL);
    if (!wait) {
        return NULL;
    }

    memset(wait, 0, sizeof(struct __wait_queue));
    wait->flags = WQ_FLAG_AUTO_REMOVAL;
    wait->private = (void *)KeGetCurrentThread();
    INIT_LIST_HEAD(&wait->task_list);
    KeInitializeEvent(&(wait->event),
                      SynchronizationEvent,
                      FALSE);

    return wait;
}

void
wait_queue_destroy(struct __wait_queue * wait)
{
    kfree(wait);
}

static inline void __add_wait_queue(wait_queue_head_t *head, struct __wait_queue *new)
{
    list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
                                         struct __wait_queue *new)
{
    list_add_tail(&new->task_list, &head->task_list);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
                                       struct __wait_queue *old)
{
    list_del(&old->task_list);
}

void add_wait_queue(wait_queue_head_t *q, wait_queue_t *waiti)
{
    unsigned long flags;
    struct __wait_queue *wait = *waiti;

    wait->flags &= ~WQ_FLAG_EXCLUSIVE;
    spin_lock_irqsave(&q->lock, flags);
    __add_wait_queue(q, wait);
    spin_unlock_irqrestore(&q->lock, flags);
}

void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *waiti)
{
    unsigned long flags;
    struct __wait_queue *wait = *waiti;

    wait->flags |= WQ_FLAG_EXCLUSIVE;
    spin_lock_irqsave(&q->lock, flags);
    __add_wait_queue_tail(q, wait);
    spin_unlock_irqrestore(&q->lock, flags);
}

void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *waiti)
{
    unsigned long flags;
    struct __wait_queue *wait = *waiti;

    spin_lock_irqsave(&q->lock, flags);
    __remove_wait_queue(q, wait);
    spin_unlock_irqrestore(&q->lock, flags);
}

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *waiti, int state)
{
    unsigned long flags;
    struct __wait_queue *wait = *waiti;

    wait->flags &= ~WQ_FLAG_EXCLUSIVE;
    spin_lock_irqsave(&q->lock, flags);
    if (list_empty(&wait->task_list))
        __add_wait_queue(q, wait);
    /*
     * don't alter the task state if this is just going to
     * queue an async wait queue callback
     */
    if (is_sync_wait(wait))
        set_current_state(state);
    spin_unlock_irqrestore(&q->lock, flags);
}

void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *waiti, int state)
{
    unsigned long flags;
    struct __wait_queue *wait = *waiti;

    wait->flags |= WQ_FLAG_EXCLUSIVE;
    spin_lock_irqsave(&q->lock, flags);
    if (list_empty(&wait->task_list))
        __add_wait_queue_tail(q, wait);
    /*
     * don't alter the task state if this is just going to
     * queue an async wait queue callback
     */
    if (is_sync_wait(wait))
        set_current_state(state);
    spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void finish_wait(wait_queue_head_t *q, wait_queue_t *waiti)
{
    unsigned long flags;
    struct __wait_queue *wait = *waiti;

    __set_current_state(TASK_RUNNING);
    /*
     * We can check for list emptiness outside the lock
     * IFF:
     *  - we use the "careful" check that verifies both
     *    the next and prev pointers, so that there cannot
     *    be any half-pending updates in progress on other
     *    CPU's that we haven't seen yet (and that might
     *    still change the stack area.
     * and
     *  - all other users take the lock (ie we can only
     *    have _one_ other CPU that looks at or modifies
     *    the list).
     */
    if (!list_empty_careful(&wait->task_list)) {
        spin_lock_irqsave(&q->lock, flags);
        list_del_init(&wait->task_list);
        spin_unlock_irqrestore(&q->lock, flags);
    }

    /* free wait */
    wait_queue_destroy(wait);
}

int wake_up(wait_queue_head_t *queue)
{
    return 0; /* KeSetEvent(&wait->event, 0, FALSE); */
}


//
// kernel timer routines
//

//
// buffer head routines
//

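/*
 * Buffer heads are carved from a dedicated slab cache (g_jbh) and
 * tracked per block device in a red-black tree keyed by block number,
 * so repeated requests for the same on-disk block share one
 * buffer_head.  Two backing strategies exist below: an MDL-based one
 * (get_block_bh_mdl/submit_bh_mdl) and a Cache-Manager-Bcb-based one
 * (get_block_bh/submit_bh).
 */
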
struct _EXT2_BUFFER_HEAD {
    kmem_cache_t *  bh_cache;
    atomic_t        bh_count;
    atomic_t        bh_acount;
} g_jbh = {NULL, ATOMIC_INIT(0), ATOMIC_INIT(0)};

int
ext2_init_bh()
{
    g_jbh.bh_count.counter = 0;
    g_jbh.bh_acount.counter = 0;
    g_jbh.bh_cache = kmem_cache_create(
                         "ext2_bh",      /* bh */
                         sizeof(struct buffer_head),
                         0,              /* offset */
                         SLAB_TEMPORARY, /* flags */
                         NULL);          /* ctor */
    if (g_jbh.bh_cache == NULL) {
        printk(KERN_EMERG "JBD: failed to create handle cache\n");
        return -ENOMEM;
    }
    return 0;
}

void
ext2_destroy_bh()
{
    if (g_jbh.bh_cache) {
        kmem_cache_destroy(g_jbh.bh_cache);
        g_jbh.bh_cache = NULL;
    }
}

struct buffer_head *
new_buffer_head()
{
    struct buffer_head * bh = NULL;
    bh = kmem_cache_alloc(g_jbh.bh_cache, GFP_NOFS);
    if (bh) {
        memset(bh, 0, sizeof(struct buffer_head));
        DEBUG(DL_BH, ("bh=%p allocated.\n", bh));
        INC_MEM_COUNT(PS_BUFF_HEAD, bh, sizeof(struct buffer_head));
    }
    return bh;
}

void
free_buffer_head(struct buffer_head * bh)
{
    if (bh) {
        if (bh->b_mdl) {

            DEBUG(DL_BH, ("bh=%p mdl=%p (Flags:%xh VA:%p) released.\n", bh, bh->b_mdl,
                          bh->b_mdl->MdlFlags, bh->b_mdl->MappedSystemVa));
            if (IsFlagOn(bh->b_mdl->MdlFlags, MDL_PAGES_LOCKED)) {
                /* MmUnlockPages will release its VA */
                MmUnlockPages(bh->b_mdl);
            } else if (IsFlagOn(bh->b_mdl->MdlFlags, MDL_MAPPED_TO_SYSTEM_VA)) {
                MmUnmapLockedPages(bh->b_mdl->MappedSystemVa, bh->b_mdl);
            }

            Ext2DestroyMdl(bh->b_mdl);
        }
        if (bh->b_bcb) {
            CcUnpinDataForThread(bh->b_bcb, (ERESOURCE_THREAD)bh | 0x3);
        }

        DEBUG(DL_BH, ("bh=%p freed.\n", bh));
        DEC_MEM_COUNT(PS_BUFF_HEAD, bh, sizeof(struct buffer_head));
        kmem_cache_free(g_jbh.bh_cache, bh);
    }
}

//
// Red-black tree routines for buffer head lookup.
//

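/*
 * Each block device keeps its cached buffer heads in bdev->bd_bh_root,
 * an rb-tree ordered by b_blocknr and protected by bdev->bd_bh_lock.
 */
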
static struct buffer_head *__buffer_head_search(struct rb_root *root,
                                                sector_t blocknr)
{
    struct rb_node *new = root->rb_node;

    /* walk the tree looking for the matching block number */
    while (new) {
        struct buffer_head *bh =
            container_of(new, struct buffer_head, b_rb_node);
        s64 result = blocknr - bh->b_blocknr;

        if (result < 0)
            new = new->rb_left;
        else if (result > 0)
            new = new->rb_right;
        else
            return bh;

    }

    return NULL;
}

static int buffer_head_blocknr_cmp(struct rb_node *a, struct rb_node *b)
{
    struct buffer_head *a_bh, *b_bh;
    s64 result;
    a_bh = container_of(a, struct buffer_head, b_rb_node);
    b_bh = container_of(b, struct buffer_head, b_rb_node);
    result = a_bh->b_blocknr - b_bh->b_blocknr;

    if (result < 0)
        return -1;
    if (result > 0)
        return 1;
    return 0;
}

static struct buffer_head *buffer_head_search(struct block_device *bdev,
                                              sector_t blocknr)
{
    struct rb_root *root;
    root = &bdev->bd_bh_root;
    return __buffer_head_search(root, blocknr);
}

static void buffer_head_insert(struct block_device *bdev, struct buffer_head *bh)
{
    rb_insert(&bdev->bd_bh_root, &bh->b_rb_node, buffer_head_blocknr_cmp);
}

static void buffer_head_remove(struct block_device *bdev, struct buffer_head *bh)
{
    rb_erase(&bh->b_rb_node, &bdev->bd_bh_root);
}

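/*
 * MDL-backed variant: the block is pinned through the cache manager,
 * then described by an MDL and re-mapped non-cached so that journal
 * recovery can touch it at raised IRQL (see the comment below).  If the
 * block is already present in the rb-tree, the existing buffer_head is
 * returned with its reference count bumped.
 */
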
struct buffer_head *
get_block_bh_mdl(
    struct block_device * bdev,
    sector_t              block,
    unsigned long         size,
    int                   zero
)
{
    PEXT2_VCB Vcb = bdev->bd_priv;
    LARGE_INTEGER offset;
    PVOID         bcb = NULL;
    PVOID         ptr = NULL;

    KIRQL irql = 0;
    struct list_head *entry;

    /* allocate buffer_head and initialize it */
    struct buffer_head *bh = NULL, *tbh = NULL;

    /* check whether the block is valid */
    if (block >= TOTAL_BLOCKS) {
        DbgBreak();
        goto errorout;
    }

    /* search the bdev bh list */
    spin_lock_irqsave(&bdev->bd_bh_lock, irql);
    tbh = buffer_head_search(bdev, block);
    if (tbh) {
        bh = tbh;
        get_bh(bh);
        spin_unlock_irqrestore(&bdev->bd_bh_lock, irql);
        goto errorout;
    }
    spin_unlock_irqrestore(&bdev->bd_bh_lock, irql);

    bh = new_buffer_head();
    if (!bh) {
        goto errorout;
    }
    bh->b_bdev = bdev;
    bh->b_blocknr = block;
    bh->b_size = size;
    bh->b_data = NULL;
    atomic_inc(&g_jbh.bh_count);
    atomic_inc(&g_jbh.bh_acount);

again:

    offset.QuadPart = (s64) bh->b_blocknr;
    offset.QuadPart <<= BLOCK_BITS;

    if (zero) {
        if (!CcPreparePinWrite(Vcb->Volume,
                               &offset,
                               bh->b_size,
                               FALSE,
                               PIN_WAIT | PIN_EXCLUSIVE,
                               &bcb,
                               &ptr)) {
            Ext2Sleep(100);
            goto again;
        }
    } else {
        if (!CcPinRead( Vcb->Volume,
                        &offset,
                        bh->b_size,
                        PIN_WAIT,
                        &bcb,
                        &ptr)) {
            Ext2Sleep(100);
            goto again;
        }
        set_buffer_uptodate(bh);
    }

    bh->b_mdl = Ext2CreateMdl(ptr, TRUE, bh->b_size, IoModifyAccess);
    if (bh->b_mdl) {
        /* must map the PTE to the NonCached zone: journal recovery will
           access the PTE under a spinlock at DISPATCH_LEVEL IRQL */
        bh->b_data = MmMapLockedPagesSpecifyCache(
                         bh->b_mdl, KernelMode, MmNonCached,
                         NULL, FALSE, HighPagePriority);
    }
    if (!bh->b_mdl || !bh->b_data) {
        free_buffer_head(bh);
        bh = NULL;
        goto errorout;
    }

    get_bh(bh);

    DEBUG(DL_BH, ("getblk: Vcb=%p bhcount=%u block=%u bh=%p mdl=%p (Flags:%xh VA:%p)\n",
                  Vcb, atomic_read(&g_jbh.bh_count), block, bh, bh->b_mdl, bh->b_mdl->MdlFlags, bh->b_data));

    spin_lock_irqsave(&bdev->bd_bh_lock, irql);

    /* do the search again here */
    tbh = buffer_head_search(bdev, block);
    if (tbh) {
        free_buffer_head(bh);
        bh = tbh;
        get_bh(bh);
        spin_unlock_irqrestore(&bdev->bd_bh_lock, irql);
        goto errorout;
    } else
        buffer_head_insert(bdev, bh);

    spin_unlock_irqrestore(&bdev->bd_bh_lock, irql);

    /* we got it */
errorout:

    if (bcb)
        CcUnpinData(bcb);

    return bh;
}

int submit_bh_mdl(int rw, struct buffer_head *bh)
{
    struct block_device *bdev = bh->b_bdev;
    PEXT2_VCB Vcb = bdev->bd_priv;
    PBCB      Bcb;
    PVOID     Buffer;
    LARGE_INTEGER Offset;

    ASSERT(Vcb->Identifier.Type == EXT2VCB);
    ASSERT(bh->b_data);

    if (rw == WRITE) {

        if (IsVcbReadOnly(Vcb)) {
            goto errorout;
        }

        SetFlag(Vcb->Volume->Flags, FO_FILE_MODIFIED);
        Offset.QuadPart = ((LONGLONG)bh->b_blocknr) << BLOCK_BITS;
        if (CcPreparePinWrite(
                    Vcb->Volume,
                    &Offset,
                    BLOCK_SIZE,
                    FALSE,
                    PIN_WAIT | PIN_EXCLUSIVE,
                    &Bcb,
                    &Buffer )) {
#if 0
            if (memcmp(Buffer, bh->b_data, BLOCK_SIZE) != 0) {
                DbgBreak();
            }
            memmove(Buffer, bh->b_data, BLOCK_SIZE);
#endif
            CcSetDirtyPinnedData(Bcb, NULL);
            Ext2AddBlockExtent( Vcb, NULL,
                                (ULONG)bh->b_blocknr,
                                (ULONG)bh->b_blocknr,
                                (bh->b_size >> BLOCK_BITS));
            CcUnpinData(Bcb);
        } else {

            Ext2AddBlockExtent( Vcb, NULL,
                                (ULONG)bh->b_blocknr,
                                (ULONG)bh->b_blocknr,
                                (bh->b_size >> BLOCK_BITS));
        }

    } else {

        DbgBreak();
    }

errorout:

    unlock_buffer(bh);
    put_bh(bh);
    return 0;
}

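/*
 * Bcb-backed variant: instead of building an MDL, the buffer head
 * keeps the cache manager mapping itself (bh->b_bcb / bh->b_data).
 * The Bcb owner pointer is set to the buffer head, apparently so that
 * the pin can be released from a different thread in
 * free_buffer_head() via CcUnpinDataForThread().
 */
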
struct buffer_head *
get_block_bh(
    struct block_device * bdev,
    sector_t              block,
    unsigned long         size,
    int                   zero
)
{
    PEXT2_VCB Vcb = bdev->bd_priv;
    LARGE_INTEGER offset;

    KIRQL irql = 0;
    struct list_head *entry;

    /* allocate buffer_head and initialize it */
    struct buffer_head *bh = NULL, *tbh = NULL;

    /* check whether the block is valid */
    if (block >= TOTAL_BLOCKS) {
        DbgBreak();
        goto errorout;
    }

    /* search the bdev bh list */
    spin_lock_irqsave(&bdev->bd_bh_lock, irql);
    tbh = buffer_head_search(bdev, block);
    if (tbh) {
        bh = tbh;
        get_bh(bh);
        spin_unlock_irqrestore(&bdev->bd_bh_lock, irql);
        goto errorout;
    }
    spin_unlock_irqrestore(&bdev->bd_bh_lock, irql);

    bh = new_buffer_head();
    if (!bh) {
        goto errorout;
    }
    bh->b_bdev = bdev;
    bh->b_blocknr = block;
    bh->b_size = size;
    bh->b_data = NULL;
    atomic_inc(&g_jbh.bh_count);
    atomic_inc(&g_jbh.bh_acount);

again:

    offset.QuadPart = (s64) bh->b_blocknr;
    offset.QuadPart <<= BLOCK_BITS;

    if (zero) {
        if (!CcPreparePinWrite(Vcb->Volume,
                               &offset,
                               bh->b_size,
                               FALSE,
                               PIN_WAIT | PIN_EXCLUSIVE,
                               &bh->b_bcb,
                               (PVOID *)&bh->b_data)) {
            Ext2Sleep(100);
            goto again;
        }
    } else {
        if (!CcPinRead( Vcb->Volume,
                        &offset,
                        bh->b_size,
                        PIN_WAIT,
                        &bh->b_bcb,
                        (PVOID *)&bh->b_data)) {
            Ext2Sleep(100);
            goto again;
        }
        set_buffer_uptodate(bh);
    }

    if (!bh->b_data) {
        free_buffer_head(bh);
        bh = NULL;
        goto errorout;
    }

    get_bh(bh);
    CcSetBcbOwnerPointer(bh->b_bcb, (PVOID)((ERESOURCE_THREAD)bh | 0x3));

    DEBUG(DL_BH, ("getblk: Vcb=%p bhcount=%u block=%u bh=%p ptr=%p.\n",
                  Vcb, atomic_read(&g_jbh.bh_count), block, bh, bh->b_data));

    spin_lock_irqsave(&bdev->bd_bh_lock, irql);

    /* do the search again here */
    tbh = buffer_head_search(bdev, block);
    if (tbh) {
        get_bh(tbh);
        spin_unlock_irqrestore(&bdev->bd_bh_lock, irql);
        free_buffer_head(bh);
        bh = tbh;
        goto errorout;
    } else {
        buffer_head_insert(bdev, bh);
    }

    spin_unlock_irqrestore(&bdev->bd_bh_lock, irql);

    /* we got it */
errorout:

    return bh;
}

int submit_bh(int rw, struct buffer_head *bh)
{
    struct block_device *bdev = bh->b_bdev;
    PEXT2_VCB Vcb = bdev->bd_priv;
    PVOID Buffer;
    LARGE_INTEGER Offset;

    ASSERT(Vcb->Identifier.Type == EXT2VCB);
    ASSERT(bh->b_data && bh->b_bcb);

    if (rw == WRITE) {

        if (IsVcbReadOnly(Vcb)) {
            goto errorout;
        }

        SetFlag(Vcb->Volume->Flags, FO_FILE_MODIFIED);
        Offset.QuadPart = ((LONGLONG)bh->b_blocknr) << BLOCK_BITS;

        CcSetDirtyPinnedData(bh->b_bcb, NULL);
        Ext2AddBlockExtent( Vcb, NULL,
                            (ULONG)bh->b_blocknr,
                            (ULONG)bh->b_blocknr,
                            (bh->b_size >> BLOCK_BITS));
    } else {
        DbgBreak();
    }

errorout:

    unlock_buffer(bh);
    put_bh(bh);
    return 0;
}

struct buffer_head *
__getblk(
    struct block_device * bdev,
    sector_t              block,
    unsigned long         size
)
{
    return get_block_bh(bdev, block, size, 0);
}

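/*
 * Dropping a reference: __brelse() first flushes the buffer while it
 * is still dirty, then removes it from the rb-tree and frees it once
 * the reference count reaches zero.  __bforget() simply discards the
 * dirty state before releasing.
 */
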
void __brelse(struct buffer_head *bh)
{
    struct block_device *bdev = bh->b_bdev;
    PEXT2_VCB Vcb = (PEXT2_VCB)bdev->bd_priv;
    KIRQL irql = 0;

    ASSERT(Vcb->Identifier.Type == EXT2VCB);

    /* write data in case it's dirty */
    while (buffer_dirty(bh)) {
        ll_rw_block(WRITE, 1, &bh);
    }

    spin_lock_irqsave(&bdev->bd_bh_lock, irql);
    if (!atomic_dec_and_test(&bh->b_count)) {
        spin_unlock_irqrestore(&bdev->bd_bh_lock, irql);
        return;
    }
    buffer_head_remove(bdev, bh);
    spin_unlock_irqrestore(&bdev->bd_bh_lock, irql);

    DEBUG(DL_BH, ("brelse: cnt=%u size=%u blk=%10.10xh bh=%p ptr=%p\n",
                  atomic_read(&g_jbh.bh_count) - 1, bh->b_size,
                  bh->b_blocknr, bh, bh->b_data ));

    free_buffer_head(bh);
    atomic_dec(&g_jbh.bh_count);
}


void __bforget(struct buffer_head *bh)
{
    clear_buffer_dirty(bh);
    __brelse(bh);
}

void __lock_buffer(struct buffer_head *bh)
{
}

void unlock_buffer(struct buffer_head *bh)
{
    clear_buffer_locked(bh);
}

void __wait_on_buffer(struct buffer_head *bh)
{
}

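/*
 * ll_rw_block() mirrors the Linux helper: writes go out only for
 * buffers whose dirty bit can be cleared, reads only for buffers that
 * are not yet up to date; everything else is unlocked and skipped.
 * submit_bh() returns after handing the data to the cache manager,
 * which is presumably why __wait_on_buffer() above can stay a no-op.
 */
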
void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
{
    int i;

    for (i = 0; i < nr; i++) {

        struct buffer_head *bh = bhs[i];

        if (rw == SWRITE)
            lock_buffer(bh);
        else if (test_set_buffer_locked(bh))
            continue;

        if (rw == WRITE || rw == SWRITE) {
            if (test_clear_buffer_dirty(bh)) {
                get_bh(bh);
                submit_bh(WRITE, bh);
                continue;
            }
        } else {
            if (!buffer_uptodate(bh)) {
                get_bh(bh);
                submit_bh(rw, bh);
                continue;
            }
        }
        unlock_buffer(bh);
    }
}

int bh_submit_read(struct buffer_head *bh)
{
    ll_rw_block(READ, 1, &bh);
    return 0;
}

int sync_dirty_buffer(struct buffer_head *bh)
{
    int ret = 0;

    ASSERT(atomic_read(&bh->b_count) <= 1);
    lock_buffer(bh);
    if (test_clear_buffer_dirty(bh)) {
        get_bh(bh);
        ret = submit_bh(WRITE, bh);
        wait_on_buffer(bh);
    } else {
        unlock_buffer(bh);
    }
    return ret;
}

void mark_buffer_dirty(struct buffer_head *bh)
{
    set_buffer_dirty(bh);
}

int sync_blockdev(struct block_device *bdev)
{
    PEXT2_VCB Vcb = (PEXT2_VCB) bdev->bd_priv;

    if (0 == atomic_read(&g_jbh.bh_count)) {
        Ext2FlushVolume(NULL, Vcb, FALSE);
    }
    return 0;
}

/*
 * Perform a pagecache lookup for the matching buffer. If it's there,
 * refresh it in the LRU and mark it as accessed. If it is not present
 * then return NULL.
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned long size)
{
    return __getblk(bdev, block, size);
}

//
// inode block mapping
//

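/*
 * bmap() translates a file-relative block number into a volume block
 * number by asking Ext2BuildExtents() for the extent covering that
 * offset; it returns 0 when no mapping exists (e.g. a hole or error).
 */
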
ULONGLONG bmap(struct inode *i, ULONGLONG b)
{
    ULONGLONG lcn = 0;
    struct super_block *s = i->i_sb;

    PEXT2_MCB    Mcb = (PEXT2_MCB)i->i_priv;
    PEXT2_VCB    Vcb = (PEXT2_VCB)s->s_priv;
    PEXT2_EXTENT extent = NULL;
    ULONGLONG    offset = (ULONGLONG)b;
    NTSTATUS     status;

    if (!Mcb || !Vcb) {
        goto errorout;
    }

    offset <<= BLOCK_BITS;
    status = Ext2BuildExtents(
                 NULL,
                 Vcb,
                 Mcb,
                 offset,
                 BLOCK_SIZE,
                 FALSE,
                 &extent
             );

    if (!NT_SUCCESS(status)) {
        goto errorout;
    }

    if (extent == NULL) {
        goto errorout;
    }

    lcn = (unsigned long)(extent->Lba >> BLOCK_BITS);

errorout:

    if (extent) {
        Ext2FreeExtent(extent);
    }

    return lcn;
}

void iget(struct inode *inode)
{
    atomic_inc(&inode->i_count);
}

void iput(struct inode *inode)
{
    if (atomic_dec_and_test(&inode->i_count)) {
        kfree(inode);
    }
}

//
// initializer and destructor
//

int
ext2_init_linux()
{
    int rc = 0;

    rc = ext2_init_bh();
    if (rc != 0) {
        goto errorout;
    }

errorout:

    return rc;
}

void
ext2_destroy_linux()
{
    ext2_destroy_bh();
}