reactos/drivers/filesystems/ext2/src/linux.c
/*
 * COPYRIGHT:        See COPYRIGHT.TXT
 * PROJECT:          Ext2 File System Driver for WinNT/2K/XP
 * FILE:             linux.c
 * PROGRAMMER:       Matt Wu <mattwu@163.com>
 * HOMEPAGE:         http://www.ext2fsd.com
 * UPDATE HISTORY:
 */

/* INCLUDES *****************************************************************/

#include <ext2fs.h>
#include <linux/jbd.h>
#include <linux/errno.h>

/* GLOBALS ***************************************************************/

extern PEXT2_GLOBAL Ext2Global;

/* DEFINITIONS *************************************************************/

#ifdef ALLOC_PRAGMA
#pragma alloc_text(PAGE, kzalloc)
#endif

struct task_struct current_task = {
    /* pid  */ 0,
    /* tid  */ 1,
    /* comm */ "current\0",
    /* journal_info */ NULL
};
struct task_struct *current = &current_task;

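/*
 * kzalloc: allocate and zero a buffer, mirroring the Linux helper of the
 * same name; a thin wrapper over kmalloc() plus memset().
 */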
void *kzalloc(int size, int flags)
{
    void *buffer = kmalloc(size, flags);
    if (buffer) {
        memset(buffer, 0, size);
    }
    return buffer;
}

//
// slab routines
//

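/*
 * The Linux slab API is emulated with an NT non-paged lookaside list:
 * kmem_cache_create() wraps ExInitializeNPagedLookasideList(), while
 * kmem_cache_alloc()/kmem_cache_free() map to the allocate/free calls and
 * maintain per-cache counters.  A minimal usage sketch (the cache name and
 * object type below are hypothetical, shown only for illustration):
 *
 *     kmem_cache_t *cachep;
 *     struct foo   *obj;
 *
 *     cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
 *                                0, SLAB_TEMPORARY, NULL);
 *     obj = kmem_cache_alloc(cachep, GFP_KERNEL);
 *     ...
 *     kmem_cache_free(cachep, obj);
 *     kmem_cache_destroy(cachep);
 */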
kmem_cache_t *
kmem_cache_create(
    const char *    name,
    size_t          size,
    size_t          offset,
    unsigned long   flags,
    kmem_cache_cb_t ctor
)
{
    kmem_cache_t *kc = NULL;

    kc = kmalloc(sizeof(kmem_cache_t), GFP_KERNEL);
    if (kc == NULL) {
        goto errorout;
    }

    memset(kc, 0, sizeof(kmem_cache_t));
    ExInitializeNPagedLookasideList(
        &kc->la,
        NULL,
        NULL,
        0,
        size,
        'JBKC',
        0);

    kc->size = size;
    strncpy(kc->name, name, 31);
    kc->constructor = ctor;

errorout:

    return kc;
}

int kmem_cache_destroy(kmem_cache_t * kc)
{
    ASSERT(kc != NULL);

    ExDeleteNPagedLookasideList(&(kc->la));
    kfree(kc);

    return 0;
}

void* kmem_cache_alloc(kmem_cache_t *kc, int flags)
{
    PVOID ptr = NULL;
    ptr = ExAllocateFromNPagedLookasideList(&(kc->la));
    if (ptr) {
        atomic_inc(&kc->count);
        atomic_inc(&kc->acount);
    }
    return ptr;
}

void kmem_cache_free(kmem_cache_t *kc, void *p)
{
    if (p) {
        atomic_dec(&kc->count);
        ExFreeToNPagedLookasideList(&(kc->la), p);
    }
}

//
// wait queue routines
//

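/*
 * Wait queues are emulated with a spinlock-protected waiter list; each
 * waiter allocated by wait_queue_create() carries an NT synchronization
 * event.  Callers follow the usual Linux pairing of prepare_to_wait()/
 * prepare_to_wait_exclusive() with finish_wait(), which here also frees
 * the waiter via wait_queue_destroy().  Note that wake_up() is currently
 * a stub that does not signal the event.
 */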
void init_waitqueue_head(wait_queue_head_t *q)
{
    spin_lock_init(&q->lock);
    INIT_LIST_HEAD(&q->task_list);
}

struct __wait_queue *
wait_queue_create()
{
    struct __wait_queue * wait = NULL;
    wait = kmalloc(sizeof(struct __wait_queue), GFP_KERNEL);
    if (!wait) {
        return NULL;
    }

    memset(wait, 0, sizeof(struct __wait_queue));
    wait->flags = WQ_FLAG_AUTO_REMOVAL;
    wait->private = (void *)KeGetCurrentThread();
    INIT_LIST_HEAD(&wait->task_list);
    KeInitializeEvent(&(wait->event),
                      SynchronizationEvent,
                      FALSE);

    return wait;
}

void
wait_queue_destroy(struct __wait_queue * wait)
{
    kfree(wait);
}

static inline void __add_wait_queue(wait_queue_head_t *head, struct __wait_queue *new)
{
    list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
                                         struct __wait_queue *new)
{
    list_add_tail(&new->task_list, &head->task_list);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
                                       struct __wait_queue *old)
{
    list_del(&old->task_list);
}

void add_wait_queue(wait_queue_head_t *q, wait_queue_t *waiti)
{
    unsigned long flags;
    struct __wait_queue *wait = *waiti;

    wait->flags &= ~WQ_FLAG_EXCLUSIVE;
    spin_lock_irqsave(&q->lock, flags);
    __add_wait_queue(q, wait);
    spin_unlock_irqrestore(&q->lock, flags);
}

void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *waiti)
{
    unsigned long flags;
    struct __wait_queue *wait = *waiti;

    wait->flags |= WQ_FLAG_EXCLUSIVE;
    spin_lock_irqsave(&q->lock, flags);
    __add_wait_queue_tail(q, wait);
    spin_unlock_irqrestore(&q->lock, flags);
}

void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *waiti)
{
    unsigned long flags;
    struct __wait_queue *wait = *waiti;

    spin_lock_irqsave(&q->lock, flags);
    __remove_wait_queue(q, wait);
    spin_unlock_irqrestore(&q->lock, flags);
}

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *waiti, int state)
{
    unsigned long flags;
    struct __wait_queue *wait = *waiti;

    wait->flags &= ~WQ_FLAG_EXCLUSIVE;
    spin_lock_irqsave(&q->lock, flags);
    if (list_empty(&wait->task_list))
        __add_wait_queue(q, wait);
    /*
     * don't alter the task state if this is just going to
     * queue an async wait queue callback
     */
    if (is_sync_wait(wait))
        set_current_state(state);
    spin_unlock_irqrestore(&q->lock, flags);
}

void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *waiti, int state)
{
    unsigned long flags;
    struct __wait_queue *wait = *waiti;

    wait->flags |= WQ_FLAG_EXCLUSIVE;
    spin_lock_irqsave(&q->lock, flags);
    if (list_empty(&wait->task_list))
        __add_wait_queue_tail(q, wait);
    /*
     * don't alter the task state if this is just going to
     * queue an async wait queue callback
     */
    if (is_sync_wait(wait))
        set_current_state(state);
    spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void finish_wait(wait_queue_head_t *q, wait_queue_t *waiti)
{
    unsigned long flags;
    struct __wait_queue *wait = *waiti;

    __set_current_state(TASK_RUNNING);
    /*
     * We can check for list emptiness outside the lock
     * IFF:
     *  - we use the "careful" check that verifies both
     *    the next and prev pointers, so that there cannot
     *    be any half-pending updates in progress on other
     *    CPU's that we haven't seen yet (and that might
     *    still change the stack area), and
     *  - all other users take the lock (ie we can only
     *    have _one_ other CPU that looks at or modifies
     *    the list).
     */
    if (!list_empty_careful(&wait->task_list)) {
        spin_lock_irqsave(&q->lock, flags);
        list_del_init(&wait->task_list);
        spin_unlock_irqrestore(&q->lock, flags);
    }

    /* free wait */
    wait_queue_destroy(wait);
}

int wake_up(wait_queue_head_t *queue)
{
    return 0; /* KeSetEvent(&wait->event, 0, FALSE); */
}


//
// kernel timer routines
//

//
// buffer head routines
//

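/*
 * Buffer heads are carved from a dedicated slab cache (g_jbh.bh_cache);
 * bh_count tracks live buffer heads, while bh_acount appears to be a
 * cumulative allocation counter (it is only ever incremented).  Each block
 * device keeps its buffer heads in a red-black tree keyed by block number
 * (see get_block_bh below), with the data windows pinned through the NT
 * Cache Manager and mapped via MDLs.
 */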
struct _EXT2_BUFFER_HEAD {
    kmem_cache_t *  bh_cache;
    atomic_t        bh_count;
    atomic_t        bh_acount;
} g_jbh = {NULL, ATOMIC_INIT(0), ATOMIC_INIT(0)};

int
ext2_init_bh()
{
    g_jbh.bh_count.counter = 0;
    g_jbh.bh_acount.counter = 0;
    g_jbh.bh_cache = kmem_cache_create(
                         "ext2_bh",      /* bh */
                         sizeof(struct buffer_head),
                         0,              /* offset */
                         SLAB_TEMPORARY, /* flags */
                         NULL);          /* ctor */
    if (g_jbh.bh_cache == NULL) {
        printk(KERN_EMERG "EXT2: failed to create buffer head cache\n");
        return -ENOMEM;
    }
    return 0;
}

void
ext2_destroy_bh()
{
    if (g_jbh.bh_cache) {
        kmem_cache_destroy(g_jbh.bh_cache);
        g_jbh.bh_cache = NULL;
    }
}

struct buffer_head *
new_buffer_head()
{
    struct buffer_head * bh = NULL;
    bh = kmem_cache_alloc(g_jbh.bh_cache, GFP_NOFS);
    if (bh) {
        memset(bh, 0, sizeof(struct buffer_head));
        DEBUG(DL_BH, ("bh=%p allocated.\n", bh));
        INC_MEM_COUNT(PS_BUFF_HEAD, bh, sizeof(struct buffer_head));
    }
    return bh;
}

void
free_buffer_head(struct buffer_head * bh)
{
    if (bh) {
        if (bh->b_mdl) {

            DEBUG(DL_BH, ("bh=%p mdl=%p (Flags:%xh VA:%p) released.\n", bh, bh->b_mdl,
                          bh->b_mdl->MdlFlags, bh->b_mdl->MappedSystemVa));
            if (IsFlagOn(bh->b_mdl->MdlFlags, MDL_PAGES_LOCKED)) {
                /* MmUnlockPages will release its VA */
                MmUnlockPages(bh->b_mdl);
            } else if (IsFlagOn(bh->b_mdl->MdlFlags, MDL_MAPPED_TO_SYSTEM_VA)) {
                MmUnmapLockedPages(bh->b_mdl->MappedSystemVa, bh->b_mdl);
            }

            Ext2DestroyMdl(bh->b_mdl);
        }
        DEBUG(DL_BH, ("bh=%p freed.\n", bh));
        DEC_MEM_COUNT(PS_BUFF_HEAD, bh, sizeof(struct buffer_head));
        kmem_cache_free(g_jbh.bh_cache, bh);
    }
}

//
// Red-black tree insert routine.
//

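/*
 * The per-device buffer head index is a red-black tree keyed by b_blocknr.
 * __buffer_head_search() walks the tree for a lookup,
 * buffer_head_blocknr_cmp() provides the ordering used by rb_insert(), and
 * buffer_head_insert()/buffer_head_remove() are called with
 * bdev->bd_bh_lock held (as the callers in get_block_bh and __brelse do).
 */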
static struct buffer_head *__buffer_head_search(struct rb_root *root,
                                                sector_t blocknr)
{
    struct rb_node *new = root->rb_node;

    /* walk the tree looking for the given block number */
    while (new) {
        struct buffer_head *bh =
            container_of(new, struct buffer_head, b_rb_node);
        s64 result = blocknr - bh->b_blocknr;

        if (result < 0)
            new = new->rb_left;
        else if (result > 0)
            new = new->rb_right;
        else
            return bh;

    }

    return NULL;
}

static int buffer_head_blocknr_cmp(struct rb_node *a, struct rb_node *b)
{
    struct buffer_head *a_bh, *b_bh;
    s64 result;
    a_bh = container_of(a, struct buffer_head, b_rb_node);
    b_bh = container_of(b, struct buffer_head, b_rb_node);
    result = a_bh->b_blocknr - b_bh->b_blocknr;

    if (result < 0)
        return -1;
    if (result > 0)
        return 1;
    return 0;
}

static struct buffer_head *buffer_head_search(struct block_device *bdev,
                                              sector_t blocknr)
{
    struct rb_root *root;
    root = &bdev->bd_bh_root;
    return __buffer_head_search(root, blocknr);
}

static void buffer_head_insert(struct block_device *bdev, struct buffer_head *bh)
{
    rb_insert(&bdev->bd_bh_root, &bh->b_rb_node, buffer_head_blocknr_cmp);
}

static void buffer_head_remove(struct block_device *bdev, struct buffer_head *bh)
{
    rb_erase(&bh->b_rb_node, &bdev->bd_bh_root);
}

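/*
 * get_block_bh: return a referenced buffer head for the given block.
 * The flow is: look the block up in the device's rb-tree; if missing,
 * allocate a new buffer head, pin the block through the Cache Manager
 * (CcPreparePinWrite when 'zero' is set, CcPinRead otherwise), map the
 * pinned data non-cached via an MDL so journal recovery can touch it at
 * DISPATCH_LEVEL, then re-check the tree under the lock to resolve races
 * before inserting the new entry.
 */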
struct buffer_head *
get_block_bh(
    struct block_device *   bdev,
    sector_t                block,
    unsigned long           size,
    int                     zero
)
{
    PEXT2_VCB Vcb = bdev->bd_priv;
    LARGE_INTEGER offset;
    PVOID         bcb = NULL;
    PVOID         ptr = NULL;

    KIRQL irql = 0;
    struct list_head *entry;

    /* allocate buffer_head and initialize it */
    struct buffer_head *bh = NULL, *tbh = NULL;

    /* check whether the block is valid or not */
    if (block >= TOTAL_BLOCKS) {
        DbgBreak();
        goto errorout;
    }

    /* search the bdev bh list */
    spin_lock_irqsave(&bdev->bd_bh_lock, irql);
    tbh = buffer_head_search(bdev, block);
    if (tbh) {
        bh = tbh;
        get_bh(bh);
        spin_unlock_irqrestore(&bdev->bd_bh_lock, irql);
        goto errorout;
    }
    spin_unlock_irqrestore(&bdev->bd_bh_lock, irql);

    bh = new_buffer_head();
    if (!bh) {
        goto errorout;
    }
    bh->b_bdev = bdev;
    bh->b_blocknr = block;
    bh->b_size = size;
    bh->b_data = NULL;
    atomic_inc(&g_jbh.bh_count);
    atomic_inc(&g_jbh.bh_acount);

again:

    offset.QuadPart = (s64) bh->b_blocknr;
    offset.QuadPart <<= BLOCK_BITS;

    if (zero) {
        if (!CcPreparePinWrite(Vcb->Volume,
                               &offset,
                               bh->b_size,
                               FALSE,
                               PIN_WAIT | PIN_EXCLUSIVE,
                               &bcb,
                               &ptr)) {
            Ext2Sleep(100);
            goto again;
        }
    } else {
        if (!CcPinRead( Vcb->Volume,
                        &offset,
                        bh->b_size,
                        PIN_WAIT,
                        &bcb,
                        &ptr)) {
            Ext2Sleep(100);
            goto again;
        }
        set_buffer_uptodate(bh);
    }

    bh->b_mdl = Ext2CreateMdl(ptr, TRUE, bh->b_size, IoModifyAccess);
    if (bh->b_mdl) {
        /* must map the PTEs as NonCached: journal recovery will
           access them under spinlock at DISPATCH_LEVEL IRQL */
        bh->b_data = MmMapLockedPagesSpecifyCache(
                         bh->b_mdl, KernelMode, MmNonCached,
                         NULL, FALSE, HighPagePriority);
    }
    if (!bh->b_mdl || !bh->b_data) {
        free_buffer_head(bh);
        bh = NULL;
        goto errorout;
    }

    get_bh(bh);

    DEBUG(DL_BH, ("getblk: Vcb=%p bhcount=%u block=%u bh=%p mdl=%p (Flags:%xh VA:%p)\n",
                  Vcb, atomic_read(&g_jbh.bh_count), block, bh, bh->b_mdl, bh->b_mdl->MdlFlags, bh->b_data));

    spin_lock_irqsave(&bdev->bd_bh_lock, irql);

    /* do the search again in case someone else inserted it meanwhile */
    tbh = buffer_head_search(bdev, block);
    if (tbh) {
        free_buffer_head(bh);
        bh = tbh;
        get_bh(bh);
        spin_unlock_irqrestore(&bdev->bd_bh_lock, irql);
        goto errorout;
    } else {
        buffer_head_insert(bdev, bh);
    }

    spin_unlock_irqrestore(&bdev->bd_bh_lock, irql);

    /* we got it */
errorout:

    if (bcb)
        CcUnpinData(bcb);

    return bh;
}

struct buffer_head *
__getblk(
    struct block_device *   bdev,
    sector_t                block,
    unsigned long           size
)
{
    return get_block_bh(bdev, block, size, 0);
}

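/*
 * submit_bh: hand a buffer head to the storage stack.  Only writes are
 * expected here; the data already lives in the Cache Manager mapping, so a
 * write simply marks the pinned range dirty (CcSetDirtyPinnedData) and
 * records the block range via Ext2AddBlockExtent.  A read request trips
 * DbgBreak(), since reads are satisfied when the buffer is pinned in
 * get_block_bh.
 */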
int submit_bh(int rw, struct buffer_head *bh)
{
    struct block_device *bdev = bh->b_bdev;
    PEXT2_VCB Vcb = bdev->bd_priv;
    PBCB      Bcb;
    PVOID     Buffer;
    LARGE_INTEGER Offset;

    ASSERT(Vcb->Identifier.Type == EXT2VCB);
    ASSERT(bh->b_data);

    if (rw == WRITE) {

        if (IsVcbReadOnly(Vcb)) {
            goto errorout;
        }

        SetFlag(Vcb->Volume->Flags, FO_FILE_MODIFIED);
        Offset.QuadPart = ((LONGLONG)bh->b_blocknr) << BLOCK_BITS;
        if (CcPreparePinWrite(
                    Vcb->Volume,
                    &Offset,
                    BLOCK_SIZE,
                    FALSE,
                    PIN_WAIT | PIN_EXCLUSIVE,
                    &Bcb,
                    &Buffer )) {
#if 0
            if (memcmp(Buffer, bh->b_data, BLOCK_SIZE) != 0) {
                DbgBreak();
            }
            memmove(Buffer, bh->b_data, BLOCK_SIZE);
#endif
            CcSetDirtyPinnedData(Bcb, NULL);
            Ext2AddBlockExtent( Vcb, NULL,
                                (ULONG)bh->b_blocknr,
                                (ULONG)bh->b_blocknr,
                                (bh->b_size >> BLOCK_BITS));
            CcUnpinData(Bcb);
        } else {

            Ext2AddBlockExtent( Vcb, NULL,
                                (ULONG)bh->b_blocknr,
                                (ULONG)bh->b_blocknr,
                                (bh->b_size >> BLOCK_BITS));
        }

    } else {

        DbgBreak();
    }

errorout:

    unlock_buffer(bh);
    put_bh(bh);
    return 0;
}

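/*
 * __brelse: drop a reference on a buffer head.  Dirty buffers are written
 * back first via ll_rw_block(WRITE, ...), then b_count is dropped under
 * bd_bh_lock; when it reaches zero the buffer head is removed from the
 * rb-tree and freed.
 */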
void __brelse(struct buffer_head *bh)
{
    struct block_device *bdev = bh->b_bdev;
    PEXT2_VCB Vcb = (PEXT2_VCB)bdev->bd_priv;
    KIRQL irql = 0;

    ASSERT(Vcb->Identifier.Type == EXT2VCB);

    /* write the data back in case it's dirty */
    while (buffer_dirty(bh)) {
        ll_rw_block(WRITE, 1, &bh);
    }

    spin_lock_irqsave(&bdev->bd_bh_lock, irql);
    if (!atomic_dec_and_test(&bh->b_count)) {
        spin_unlock_irqrestore(&bdev->bd_bh_lock, irql);
        return;
    }
    buffer_head_remove(bdev, bh);
    spin_unlock_irqrestore(&bdev->bd_bh_lock, irql);

    DEBUG(DL_BH, ("brelse: cnt=%u size=%u blk=%10.10xh bh=%p ptr=%p\n",
                  atomic_read(&g_jbh.bh_count) - 1, bh->b_size,
                  bh->b_blocknr, bh, bh->b_data ));

    free_buffer_head(bh);
    atomic_dec(&g_jbh.bh_count);
}

void __bforget(struct buffer_head *bh)
{
    clear_buffer_dirty(bh);
    __brelse(bh);
}

void __lock_buffer(struct buffer_head *bh)
{
    /* no-op in this port */
}

void unlock_buffer(struct buffer_head *bh)
{
    clear_buffer_locked(bh);
}

void __wait_on_buffer(struct buffer_head *bh)
{
    /* no-op in this port */
}

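/*
 * ll_rw_block: start I/O on an array of buffer heads, following the Linux
 * helper of the same name.  WRITE/SWRITE submit dirty buffers through
 * submit_bh(); for reads, only buffers that are not yet uptodate are
 * submitted.  Buffers that need no I/O are simply unlocked.
 */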
void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
{
    int i;

    for (i = 0; i < nr; i++) {

        struct buffer_head *bh = bhs[i];

        if (rw == SWRITE)
            lock_buffer(bh);
        else if (test_set_buffer_locked(bh))
            continue;

        if (rw == WRITE || rw == SWRITE) {
            if (test_clear_buffer_dirty(bh)) {
                get_bh(bh);
                submit_bh(WRITE, bh);
                continue;
            }
        } else {
            if (!buffer_uptodate(bh)) {
                get_bh(bh);
                submit_bh(rw, bh);
                continue;
            }
        }
        unlock_buffer(bh);
    }
}

int bh_submit_read(struct buffer_head *bh)
{
    ll_rw_block(READ, 1, &bh);
    return 0;
}

int sync_dirty_buffer(struct buffer_head *bh)
{
    int ret = 0;

    ASSERT(atomic_read(&bh->b_count) <= 1);
    lock_buffer(bh);
    if (test_clear_buffer_dirty(bh)) {
        get_bh(bh);
        ret = submit_bh(WRITE, bh);
        wait_on_buffer(bh);
    } else {
        unlock_buffer(bh);
    }
    return ret;
}

void mark_buffer_dirty(struct buffer_head *bh)
{
    set_buffer_dirty(bh);
}

int sync_blockdev(struct block_device *bdev)
{
    PEXT2_VCB Vcb = (PEXT2_VCB) bdev->bd_priv;

    if (0 == atomic_read(&g_jbh.bh_count)) {
        Ext2FlushVolume(NULL, Vcb, FALSE);
    }
    return 0;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there,
 * refresh it in the LRU and mark it as accessed.  If it is not present
 * then return NULL.
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned long size)
{
    return __getblk(bdev, block, size);
}

//
// inode block mapping
//

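/*
 * bmap: translate a file-relative block number into a volume block number
 * (LCN), Linux bmap() style.  The mapping is obtained from the extent
 * cache via Ext2BuildExtents(); a return value of 0 means the block could
 * not be mapped (e.g. a hole or an error).
 */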
ULONGLONG bmap(struct inode *i, ULONGLONG b)
{
    ULONGLONG lcn = 0;
    struct super_block *s = i->i_sb;

    PEXT2_MCB    Mcb = (PEXT2_MCB)i->i_priv;
    PEXT2_VCB    Vcb = (PEXT2_VCB)s->s_priv;
    PEXT2_EXTENT extent = NULL;
    ULONGLONG    offset = (ULONGLONG)b;
    NTSTATUS     status;

    if (!Mcb || !Vcb) {
        goto errorout;
    }

    offset <<= BLOCK_BITS;
    status = Ext2BuildExtents(
                 NULL,
                 Vcb,
                 Mcb,
                 offset,
                 BLOCK_SIZE,
                 FALSE,
                 &extent
             );

    if (!NT_SUCCESS(status)) {
        goto errorout;
    }

    if (extent == NULL) {
        goto errorout;
    }

    lcn = (unsigned long)(extent->Lba >> BLOCK_BITS);

errorout:

    if (extent) {
        Ext2FreeExtent(extent);
    }

    return lcn;
}

void iget(struct inode *inode)
{
    atomic_inc(&inode->i_count);
}

void iput(struct inode *inode)
{
    if (atomic_dec_and_test(&inode->i_count)) {
        kfree(inode);
    }
}

//
// initializer and destructor
//

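/*
 * ext2_init_linux()/ext2_destroy_linux() bring the Linux-emulation layer
 * up and down; at present that amounts to creating and destroying the
 * buffer head cache.
 */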
int
ext2_init_linux()
{
    int rc = 0;

    rc = ext2_init_bh();
    if (rc != 0) {
        goto errorout;
    }

errorout:

    return rc;
}

void
ext2_destroy_linux()
{
    ext2_destroy_bh();
}