[FASTFAT_NEW] Fix build with FASTFATDBG set
[reactos.git] drivers/filesystems/fastfat_new/allocsup.c
1 /*++
2
3 Copyright (c) 1990-2000 Microsoft Corporation
4
5 Module Name:
6
7 AllocSup.c
8
9 Abstract:
10
11 This module implements the Allocation support routines for Fat.
12
13
14 --*/
15
16 #include "fatprocs.h"
17
18 //
19 // The Bug check file id for this module
20 //
21
22 #define BugCheckFileId (FAT_BUG_CHECK_ALLOCSUP)
23
24 //
25 // Local debug trace level
26 //
27
28 #define Dbg (DEBUG_TRACE_ALLOCSUP)
29
30 #define FatMin(a, b) ((a) < (b) ? (a) : (b))
31
32 //
33 // This structure is used by FatLookupFatEntry to remember a pinned page
34 // of fat.
35 //
36
37 typedef struct _FAT_ENUMERATION_CONTEXT {
38
39 VBO VboOfPinnedPage;
40 PBCB Bcb;
41 PVOID PinnedPage;
42
43 } FAT_ENUMERATION_CONTEXT, *PFAT_ENUMERATION_CONTEXT;
44
45 //
46 // Local support routine prototypes
47 //
48
49 VOID
50 FatLookupFatEntry(
51 IN PIRP_CONTEXT IrpContext,
52 IN PVCB Vcb,
53 IN ULONG FatIndex,
54 IN OUT PULONG FatEntry,
55 IN OUT PFAT_ENUMERATION_CONTEXT Context
56 );
57
58 VOID
59 FatSetFatRun(
60 IN PIRP_CONTEXT IrpContext,
61 IN PVCB Vcb,
62 IN ULONG StartingFatIndex,
63 IN ULONG ClusterCount,
64 IN BOOLEAN ChainTogether
65 );
66
67 UCHAR
68 FatLogOf(
69 IN ULONG Value
70 );
71
72 //
73 // Note that the KdPrint below will ONLY fire when the assert does. Leave it
74 // alone.
75 //
76
77 #if DBG
78 #define ASSERT_CURRENT_WINDOW_GOOD(VCB) { \
79 ULONG FreeClusterBitMapClear; \
80 ASSERT( (VCB)->FreeClusterBitMap.Buffer != NULL ); \
81 FreeClusterBitMapClear = RtlNumberOfClearBits(&(VCB)->FreeClusterBitMap); \
82 if ((VCB)->CurrentWindow->ClustersFree != FreeClusterBitMapClear) { \
83 KdPrint(("FAT: ClustersFree %x h != FreeClusterBitMapClear %x h\n", \
84 (VCB)->CurrentWindow->ClustersFree, \
85 FreeClusterBitMapClear)); \
86 } \
87 ASSERT( (VCB)->CurrentWindow->ClustersFree == FreeClusterBitMapClear ); \
88 }
89 #else
90 #define ASSERT_CURRENT_WINDOW_GOOD(VCB)
91 #endif
92
93 //
94 // The following macros provide a convenient way of hiding the details
95 // of bitmap allocation schemes.
96 //
97
98
99 //
100 // VOID
101 // FatLockFreeClusterBitMap (
102 // IN PVCB Vcb
103 // );
104 //
105
106 #define FatLockFreeClusterBitMap(VCB) { \
107 ASSERT(KeAreApcsDisabled()); \
108 ExAcquireFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex ); \
109 ASSERT_CURRENT_WINDOW_GOOD(VCB) \
110 }
111
112 //
113 // VOID
114 // FatUnlockFreeClusterBitMap (
115 // IN PVCB Vcb
116 // );
117 //
118
119 #define FatUnlockFreeClusterBitMap(VCB) { \
120 ASSERT_CURRENT_WINDOW_GOOD(VCB) \
121 ASSERT(KeAreApcsDisabled()); \
122 ExReleaseFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex ); \
123 }
124
125 //
126 // BOOLEAN
127 // FatIsClusterFree (
128 // IN PIRP_CONTEXT IrpContext,
129 // IN PVCB Vcb,
130 // IN ULONG FatIndex
131 // );
132 //
133
134 #define FatIsClusterFree(IRPCONTEXT,VCB,FAT_INDEX) \
135 (RtlCheckBit(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2) == 0)
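
//
// Note that bit N of the FreeClusterBitMap describes cluster N + 2,
// since the first two FAT entries are reserved. For example, testing
// cluster 2 (the first data cluster) checks bit 0 of the bitmap.
//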
136
137 //
138 // VOID
139 // FatFreeClusters (
140 // IN PIRP_CONTEXT IrpContext,
141 // IN PVCB Vcb,
142 // IN ULONG FatIndex,
143 // IN ULONG ClusterCount
144 // );
145 //
146
147 #define FatFreeClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) { \
148 if ((CLUSTER_COUNT) == 1) { \
149 FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_AVAILABLE); \
150 } else { \
151 FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),FALSE); \
152 } \
153 }
154
155 //
156 // VOID
157 // FatAllocateClusters (
158 // IN PIRP_CONTEXT IrpContext,
159 // IN PVCB Vcb,
160 // IN ULONG FatIndex,
161 // IN ULONG ClusterCount
162 // );
163 //
164
165 #define FatAllocateClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) { \
166 if ((CLUSTER_COUNT) == 1) { \
167 FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_LAST); \
168 } else { \
169 FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),TRUE); \
170 } \
171 }
172
173 //
174 // VOID
175 // FatUnreserveClusters (
176 // IN PIRP_CONTEXT IrpContext,
177 // IN PVCB Vcb,
178 // IN ULONG FatIndex,
179 // IN ULONG ClusterCount
180 // );
181 //
182
183 #define FatUnreserveClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) { \
184 ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap ); \
185 ASSERT( (FAT_INDEX) >= 2); \
186 RtlClearBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT)); \
187 if ((FAT_INDEX) < (VCB)->ClusterHint) { \
188 (VCB)->ClusterHint = (FAT_INDEX); \
189 } \
190 }
191
192 //
193 // VOID
194 // FatReserveClusters (
195 // IN PIRP_CONTEXT IrpContext,
196 // IN PVCB Vcb,
197 // IN ULONG FatIndex,
198 // IN ULONG ClusterCount
199 // );
200 //
201 // Handle wrapping the hint back to the front.
202 //
203
204 #define FatReserveClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) { \
205 ULONG _AfterRun = (FAT_INDEX) + (CLUSTER_COUNT); \
206 ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap ); \
207 ASSERT( (FAT_INDEX) >= 2); \
208 RtlSetBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT)); \
209 \
210 if (_AfterRun - 2 >= (VCB)->FreeClusterBitMap.SizeOfBitMap) { \
211 _AfterRun = 2; \
212 } \
213 if (RtlCheckBit(&(VCB)->FreeClusterBitMap, _AfterRun - 2)) { \
214 (VCB)->ClusterHint = RtlFindClearBits( &(VCB)->FreeClusterBitMap, 1, _AfterRun - 2) + 2; \
215 if (1 == (VCB)->ClusterHint) { \
216 (VCB)->ClusterHint = 2; \
217 } \
218 } \
219 else { \
220 (VCB)->ClusterHint = _AfterRun; \
221 } \
222 }
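
//
// As a worked example of the wrap handling: reserving a run that ends
// exactly at the top of the bitmap leaves _AfterRun one past the last
// bit, so it is folded back to cluster 2. If that cluster is in use,
// the hint advances to the next clear bit; should RtlFindClearBits
// fail, it returns -1, making the hint 1, which the final check above
// resets to the first valid cluster, 2.
//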
223
224 //
225 // ULONG
226 // FatFindFreeClusterRun (
227 // IN PIRP_CONTEXT IrpContext,
228 // IN PVCB Vcb,
229 // IN ULONG ClusterCount,
230 // IN ULONG AlternateClusterHint
231 // );
232 //
233 // Do a special check if only one cluster is desired.
234 //
235
236 #define FatFindFreeClusterRun(IRPCONTEXT,VCB,CLUSTER_COUNT,CLUSTER_HINT) ( \
237 (CLUSTER_COUNT == 1) && \
238 FatIsClusterFree((IRPCONTEXT), (VCB), (CLUSTER_HINT)) ? \
239 (CLUSTER_HINT) : \
240 RtlFindClearBits( &(VCB)->FreeClusterBitMap, \
241 (CLUSTER_COUNT), \
242 (CLUSTER_HINT) - 2) + 2 \
243 )
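
//
// Note that if RtlFindClearBits fails it returns -1, so this
// expression yields 1 - a value the allocation path below tests for
// as the failure case.
//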
244
245 //
246 // FAT32: Define the maximum size of the FreeClusterBitMap to be the
247 // maximum size of a FAT16 FAT. If there are more clusters on the
248 // volume than can be represented by this many bits of bitmap, the
249 // FAT will be split into "buckets", each of which does fit.
250 //
251 // Note this count is in clusters/bits of bitmap.
252 //
253
254 #define MAX_CLUSTER_BITMAP_SIZE (1 << 16)
255
256 //
257 // Calculate the window a given cluster number is in.
258 //
259
260 #define FatWindowOfCluster(C) (((C) - 2) / MAX_CLUSTER_BITMAP_SIZE)
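
//
// For example, with MAX_CLUSTER_BITMAP_SIZE of 65536, cluster 2 (the
// first data cluster) lies in window 0, and cluster 65538 is the
// first cluster of window 1.
//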
261
262 #ifdef ALLOC_PRAGMA
263 #pragma alloc_text(PAGE, FatAddFileAllocation)
264 #pragma alloc_text(PAGE, FatAllocateDiskSpace)
265 #pragma alloc_text(PAGE, FatDeallocateDiskSpace)
266 #pragma alloc_text(PAGE, FatExamineFatEntries)
267 #pragma alloc_text(PAGE, FatInterpretClusterType)
268 #pragma alloc_text(PAGE, FatLogOf)
269 #pragma alloc_text(PAGE, FatLookupFatEntry)
270 #pragma alloc_text(PAGE, FatLookupFileAllocation)
271 #pragma alloc_text(PAGE, FatLookupFileAllocationSize)
272 #pragma alloc_text(PAGE, FatMergeAllocation)
273 #pragma alloc_text(PAGE, FatSetFatEntry)
274 #pragma alloc_text(PAGE, FatSetFatRun)
275 #pragma alloc_text(PAGE, FatSetupAllocationSupport)
276 #pragma alloc_text(PAGE, FatSplitAllocation)
277 #pragma alloc_text(PAGE, FatTearDownAllocationSupport)
278 #pragma alloc_text(PAGE, FatTruncateFileAllocation)
279 #endif
280
281 \f
282 INLINE
283 ULONG
284 FatSelectBestWindow(
285 IN PVCB Vcb
286 )
287 /*++
288
289 Routine Description:
290
291 Choose a window to allocate clusters from. Order of preference is:
292
293 1. First window with >50% free clusters
294 2. First empty window
295 3. Window with greatest number of free clusters.
296
297 Arguments:
298
299 Vcb - Supplies the Vcb for the volume
300
301 Return Value:
302
303 'Best window' number (index into Vcb->Windows[])
304
305 --*/
306 {
307 ULONG i, Fave = 0;
308 ULONG MaxFree = 0;
309 ULONG FirstEmpty = -1;
310 ULONG ClustersPerWindow = MAX_CLUSTER_BITMAP_SIZE;
311
312 ASSERT( 1 != Vcb->NumberOfWindows);
313
314 for (i = 0; i < Vcb->NumberOfWindows; i++) {
315
316 if (Vcb->Windows[i].ClustersFree == ClustersPerWindow) {
317
318 if (-1 == FirstEmpty) {
319
320 //
321 // Keep note of the first empty window on the disc
322 //
323
324 FirstEmpty = i;
325 }
326 }
327 else if (Vcb->Windows[i].ClustersFree > MaxFree) {
328
329 //
330 // This window has the most free clusters, so far
331 //
332
333 MaxFree = Vcb->Windows[i].ClustersFree;
334 Fave = i;
335
336 //
337 // If this window has >50% free clusters, then we will take it,
338 // so don't bother considering more windows.
339 //
340
341 if (MaxFree >= (ClustersPerWindow >> 1)) {
342
343 break;
344 }
345 }
346 }
347
348 //
349 // If there were no windows with 50% or more freespace, then select the
350 // first empty window on the disc, if any - otherwise we'll just go with
351 // the one with the most free clusters.
352 //
353
354 if ((MaxFree < (ClustersPerWindow >> 1)) && (-1 != FirstEmpty)) {
355
356 Fave = FirstEmpty;
357 }
358
359 return Fave;
360 }
361
362 \f
363 VOID
364 FatSetupAllocationSupport (
365 IN PIRP_CONTEXT IrpContext,
366 IN PVCB Vcb
367 )
368
369 /*++
370
371 Routine Description:
372
373 This routine fills in the Allocation Support structure in the Vcb.
374 Most entries are computed using fat.h macros supplied with data from
375 the Bios Parameter Block. The free cluster count, however, requires
376 going to the Fat and actually counting free clusters. At the same time
377 the free cluster bit map is initialized.
378
379 Arguments:
380
381 Vcb - Supplies the Vcb to fill in.
382
383 --*/
384
385 {
386 #ifndef __REACTOS__
387 ULONG BitMapSize;
388 PVOID BitMapBuffer;
389 #endif
390 ULONG BitIndex;
391
392 #ifndef __REACTOS__
393 PBCB Bcb;
394
395 ULONG Page;
396 ULONG Offset;
397 ULONG FatIndexBitSize;
398 #endif
399 ULONG ClustersDescribableByFat;
400
401 PAGED_CODE();
402
403 DebugTrace(+1, Dbg, "FatSetupAllocationSupport\n", 0);
404 DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
405
406 //
407 // Compute a number of fields for Vcb.AllocationSupport
408 //
409
410 Vcb->AllocationSupport.RootDirectoryLbo = FatRootDirectoryLbo( &Vcb->Bpb );
411 Vcb->AllocationSupport.RootDirectorySize = FatRootDirectorySize( &Vcb->Bpb );
412
413 Vcb->AllocationSupport.FileAreaLbo = FatFileAreaLbo( &Vcb->Bpb );
414
415 Vcb->AllocationSupport.NumberOfClusters = FatNumberOfClusters( &Vcb->Bpb );
416
417 Vcb->AllocationSupport.FatIndexBitSize = FatIndexBitSize( &Vcb->Bpb );
418
419 Vcb->AllocationSupport.LogOfBytesPerSector = FatLogOf(Vcb->Bpb.BytesPerSector);
420 Vcb->AllocationSupport.LogOfBytesPerCluster = FatLogOf(FatBytesPerCluster( &Vcb->Bpb ));
421 Vcb->AllocationSupport.NumberOfFreeClusters = 0;
422
423 //
424 // Deal with a bug in DOS 5 format: if the Fat is not big enough to
425 // describe all the clusters on the disk, reduce this number. We expect
426 // that fat32 volumes will not have this problem.
427 //
428 // Turns out this was not a good assumption. We have to do this always now.
429 //
430
431 ClustersDescribableByFat = ( ((FatIsFat32(Vcb)? Vcb->Bpb.LargeSectorsPerFat :
432 Vcb->Bpb.SectorsPerFat) *
433 Vcb->Bpb.BytesPerSector * 8)
434 / FatIndexBitSize(&Vcb->Bpb) ) - 2;
435
436 if (Vcb->AllocationSupport.NumberOfClusters > ClustersDescribableByFat) {
437
438 Vcb->AllocationSupport.NumberOfClusters = ClustersDescribableByFat;
439 }
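
//
// As a worked example: a FAT16 volume with 512-byte sectors and 256
// sectors per FAT can describe at most (256 * 512 * 8) / 16 - 2 ==
// 65534 data clusters, whatever the BPB sector counts might imply.
//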
440
441 //
442 // Extend the virtual volume file to include the Fat
443 //
444
445 {
446 CC_FILE_SIZES FileSizes;
447
448 FileSizes.AllocationSize.QuadPart =
449 FileSizes.FileSize.QuadPart = (FatReservedBytes( &Vcb->Bpb ) +
450 FatBytesPerFat( &Vcb->Bpb ));
451 FileSizes.ValidDataLength = FatMaxLarge;
452
453 if ( Vcb->VirtualVolumeFile->PrivateCacheMap == NULL ) {
454
455 CcInitializeCacheMap( Vcb->VirtualVolumeFile,
456 &FileSizes,
457 TRUE,
458 &FatData.CacheManagerNoOpCallbacks,
459 Vcb );
460
461 } else {
462
463 CcSetFileSizes( Vcb->VirtualVolumeFile, &FileSizes );
464 }
465 }
466
467 _SEH2_TRY {
468
469 if (FatIsFat32(Vcb) &&
470 Vcb->AllocationSupport.NumberOfClusters > MAX_CLUSTER_BITMAP_SIZE) {
471
472 Vcb->NumberOfWindows = (Vcb->AllocationSupport.NumberOfClusters +
473 MAX_CLUSTER_BITMAP_SIZE - 1) /
474 MAX_CLUSTER_BITMAP_SIZE;
475
476 #ifndef __REACTOS__
477 BitMapSize = MAX_CLUSTER_BITMAP_SIZE;
478 #endif
479
480 } else {
481
482 Vcb->NumberOfWindows = 1;
483 #ifndef __REACTOS__
484 BitMapSize = Vcb->AllocationSupport.NumberOfClusters;
485 #endif
486 }
487
488 Vcb->Windows = FsRtlAllocatePoolWithTag( PagedPool,
489 Vcb->NumberOfWindows * sizeof(FAT_WINDOW),
490 TAG_FAT_WINDOW );
491
492 RtlInitializeBitMap( &Vcb->FreeClusterBitMap,
493 NULL,
494 0 );
495
496 //
497 // Choose a FAT window to begin operation in.
498 //
499
500 if (Vcb->NumberOfWindows > 1) {
501
502 //
503 // Read the fat and count up free clusters. We bias by the two reserved
504 // entries in the FAT.
505 //
506
507 FatExamineFatEntries( IrpContext, Vcb,
508 2,
509 Vcb->AllocationSupport.NumberOfClusters + 2 - 1,
510 TRUE,
511 NULL,
512 NULL);
513
514
515 //
516 // Pick a window to begin allocating from
517 //
518
519 Vcb->CurrentWindow = &Vcb->Windows[ FatSelectBestWindow( Vcb)];
520
521 } else {
522
523 Vcb->CurrentWindow = &Vcb->Windows[0];
524
525 //
526 // Carefully bias ourselves by the two reserved entries in the FAT.
527 //
528
529 Vcb->CurrentWindow->FirstCluster = 2;
530 Vcb->CurrentWindow->LastCluster = Vcb->AllocationSupport.NumberOfClusters + 2 - 1;
531 }
532
533 //
534 // Now transition to the FAT window we have chosen.
535 //
536
537 FatExamineFatEntries( IrpContext, Vcb,
538 0,
539 0,
540 FALSE,
541 Vcb->CurrentWindow,
542 NULL);
543
544 //
545 // Now set the ClusterHint to the first free bit in our favorite
546 // window (except the ClusterHint is off by two).
547 //
548
549 Vcb->ClusterHint =
550 (BitIndex = RtlFindClearBits( &Vcb->FreeClusterBitMap, 1, 0 )) != -1 ?
551 BitIndex + 2 : 2;
552
553 } _SEH2_FINALLY {
554
555 DebugUnwind( FatSetupAllocationSupport );
556
557 //
558 // If we hit an exception, back out.
559 //
560
561 if (_SEH2_AbnormalTermination()) {
562
563 FatTearDownAllocationSupport( IrpContext, Vcb );
564 }
565 } _SEH2_END;
566
567 return;
568 }
569
570 \f
571 VOID
572 FatTearDownAllocationSupport (
573 IN PIRP_CONTEXT IrpContext,
574 IN PVCB Vcb
575 )
576
577 /*++
578
579 Routine Description:
580
581 This routine prepares the volume for closing. Specifically, we must
582 release the free fat bit map buffer, and uninitialize the dirty fat
583 Mcb.
584
585 Arguments:
586
587 Vcb - Supplies the Vcb to fill in.
588
589 Return Value:
590
591 VOID
592
593 --*/
594
595 {
596 DebugTrace(+1, Dbg, "FatTearDownAllocationSupport\n", 0);
597 DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
598
599 PAGED_CODE();
600
601 //
602 // If there are FAT buckets, free them.
603 //
604
605 if ( Vcb->Windows != NULL ) {
606
607 ExFreePool( Vcb->Windows );
608 Vcb->Windows = NULL;
609 }
610
611 //
612 // Free the memory associated with the free cluster bitmap.
613 //
614
615 if ( Vcb->FreeClusterBitMap.Buffer != NULL ) {
616
617 ExFreePool( Vcb->FreeClusterBitMap.Buffer );
618
619 //
620 // NULL this field as a flag.
621 //
622
623 Vcb->FreeClusterBitMap.Buffer = NULL;
624 }
625
626 //
627 // And remove all the runs in the dirty fat Mcb
628 //
629
630 FatRemoveMcbEntry( Vcb, &Vcb->DirtyFatMcb, 0, 0xFFFFFFFF );
631
632 DebugTrace(-1, Dbg, "FatTearDownAllocationSupport -> (VOID)\n", 0);
633
634 UNREFERENCED_PARAMETER( IrpContext );
635
636 return;
637 }
638
639 \f
640 VOID
641 FatLookupFileAllocation (
642 IN PIRP_CONTEXT IrpContext,
643 IN PFCB FcbOrDcb,
644 IN VBO Vbo,
645 OUT PLBO Lbo,
646 OUT PULONG ByteCount,
647 OUT PBOOLEAN Allocated,
648 OUT PBOOLEAN EndOnMax,
649 OUT PULONG Index
650 )
651
652 /*++
653
654 Routine Description:
655
656 This routine looks up the existing mapping of VBO to LBO for a
657 file/directory. The information it queries is either stored in the
658 mcb field of the fcb/dcb or it is stored in the fat table and
659 needs to be retrieved and decoded, and updated in the mcb.
660
661 Arguments:
662
663 FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being queried
664
665 Vbo - Supplies the VBO whose LBO we want returned
666
667 Lbo - Receives the LBO corresponding to the input Vbo if one exists
668
669 ByteCount - Receives the number of bytes remaining in the run,
670 counted from the input vbo and output lbo.
671
672 Allocated - Receives TRUE if the Vbo does have a corresponding Lbo
673 and FALSE otherwise.
674
675 EndOnMax - Receives TRUE if the run ends in the maximal FAT cluster,
676 which results in a fractional bytecount.
677
678 Index - Receives the Index of the run
679
680 --*/
681
682 {
683 VBO CurrentVbo;
684 LBO CurrentLbo;
685 LBO PriorLbo;
686
687 VBO FirstVboOfCurrentRun;
688 LBO FirstLboOfCurrentRun;
689
690 BOOLEAN LastCluster;
691 ULONG Runs;
692
693 PVCB Vcb;
694 FAT_ENTRY FatEntry;
695 ULONG BytesPerCluster;
696 ULARGE_INTEGER BytesOnVolume;
697
698 FAT_ENUMERATION_CONTEXT Context;
699
700 PAGED_CODE();
701
702 DebugTrace(+1, Dbg, "FatLookupFileAllocation\n", 0);
703 DebugTrace( 0, Dbg, " FcbOrDcb = %8lx\n", FcbOrDcb);
704 DebugTrace( 0, Dbg, " Vbo = %8lx\n", Vbo);
705 DebugTrace( 0, Dbg, " Lbo = %8lx\n", Lbo);
706 DebugTrace( 0, Dbg, " ByteCount = %8lx\n", ByteCount);
707 DebugTrace( 0, Dbg, " Allocated = %8lx\n", Allocated);
708
709 Context.Bcb = NULL;
710
711 Vcb = FcbOrDcb->Vcb;
712
713 *EndOnMax = FALSE;
714
715 //
716 // Check the trivial case that the mapping is already in our
717 // Mcb.
718 //
719
720 if ( FatLookupMcbEntry(Vcb, &FcbOrDcb->Mcb, Vbo, Lbo, ByteCount, Index) ) {
721
722 *Allocated = TRUE;
723
724 ASSERT( ByteCount != 0);
725
726 //
727 // Detect the overflow case, trim and claim the condition.
728 //
729
730 if (Vbo + *ByteCount == 0) {
731
732 *EndOnMax = TRUE;
733 }
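
//
// (For example, a run that extends to the 4GB boundary has
// Vbo + *ByteCount == 2^32, which wraps to zero in 32-bit
// arithmetic - the only way the sum can be zero here.)
//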
734
735 DebugTrace( 0, Dbg, "Found run in Mcb.\n", 0);
736 DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
737 return;
738 }
739
740 //
741 // Initialize the Vcb, the cluster size, LastCluster, and
742 // FirstLboOfCurrentRun (to be used as an indication of the first
743 // iteration through the following while loop).
744 //
745
746 BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;
747
748 BytesOnVolume.QuadPart = UInt32x32To64( Vcb->AllocationSupport.NumberOfClusters, BytesPerCluster );
749
750 LastCluster = FALSE;
751 FirstLboOfCurrentRun = 0;
752
753 //
754 // Discard the case that the request extends beyond the end of
755 // allocation. Note that if the allocation size is not known
756 // AllocationSize is set to 0xffffffff.
757 //
758
759 if ( Vbo >= FcbOrDcb->Header.AllocationSize.LowPart ) {
760
761 *Allocated = FALSE;
762
763 DebugTrace( 0, Dbg, "Vbo beyond end of file.\n", 0);
764 DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
765 return;
766 }
767
768 //
769 // The Vbo is beyond the last Mcb entry. So we adjust Current Vbo/Lbo
770 // and FatEntry to describe the beginning of the last entry in the Mcb.
771 // This is used as initialization for the following loop.
772 //
773 // If the Mcb was empty, we start at the beginning of the file with
774 // CurrentVbo set to 0 to indicate a new run.
775 //
776
777 if (FatLookupLastMcbEntry( Vcb, &FcbOrDcb->Mcb, &CurrentVbo, &CurrentLbo, &Runs )) {
778
779 DebugTrace( 0, Dbg, "Current Mcb size = %8lx.\n", CurrentVbo + 1);
780
781 CurrentVbo -= (BytesPerCluster - 1);
782 CurrentLbo -= (BytesPerCluster - 1);
783
784 //
785 // Convert an index to a count.
786 //
787
788 Runs += 1;
789
790 } else {
791
792 DebugTrace( 0, Dbg, "Mcb empty.\n", 0);
793
794 //
795 // Check for an FcbOrDcb that has no allocation
796 //
797
798 if (FcbOrDcb->FirstClusterOfFile == 0) {
799
800 *Allocated = FALSE;
801
802 DebugTrace( 0, Dbg, "File has no allocation.\n", 0);
803 DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
804 return;
805
806 } else {
807
808 CurrentVbo = 0;
809 CurrentLbo = FatGetLboFromIndex( Vcb, FcbOrDcb->FirstClusterOfFile );
810 FirstVboOfCurrentRun = CurrentVbo;
811 FirstLboOfCurrentRun = CurrentLbo;
812
813 Runs = 0;
814
815 DebugTrace( 0, Dbg, "First Lbo of file = %8lx\n", CurrentLbo);
816 }
817 }
818
819 //
820 // Now we know that we are looking up a valid Vbo, but it is
821 // not in the Mcb, which is a monotonically increasing list of
822 // Vbo's. Thus we have to go to the Fat, and update
823 // the Mcb as we go. We use a try-finally to unpin the page
824 // of fat hanging around. Also we mark *Allocated = FALSE, so that
825 // the caller won't try to use the data if we hit an exception.
826 //
827
828 *Allocated = FALSE;
829
830 _SEH2_TRY {
831
832 FatEntry = (FAT_ENTRY)FatGetIndexFromLbo( Vcb, CurrentLbo );
833
834 //
835 // ASSERT that CurrentVbo and CurrentLbo are now cluster aligned.
836 // The assumption here, is that only whole clusters of Vbos and Lbos
837 // are mapped in the Mcb.
838 //
839
840 ASSERT( ((CurrentLbo - Vcb->AllocationSupport.FileAreaLbo)
841 % BytesPerCluster == 0) &&
842 (CurrentVbo % BytesPerCluster == 0) );
843
844 //
845 // Starting from the first Vbo after the last Mcb entry, scan through
846 // the Fat looking for our Vbo. We continue through the Fat until we
847 // hit a noncontiguity beyond the desired Vbo, or the last cluster.
848 //
849
850 while ( !LastCluster ) {
851
852 //
853 // Get the next fat entry, and update our Current variables.
854 //
855
856 #ifndef __REACTOS__
857 FatLookupFatEntry( IrpContext, Vcb, FatEntry, &FatEntry, &Context );
858 #else
859 FatLookupFatEntry( IrpContext, Vcb, FatEntry, (PULONG)&FatEntry, &Context );
860 #endif
861
862 PriorLbo = CurrentLbo;
863 CurrentLbo = FatGetLboFromIndex( Vcb, FatEntry );
864 CurrentVbo += BytesPerCluster;
865
866 switch ( FatInterpretClusterType( Vcb, FatEntry )) {
867
868 //
869 // Check for a break in the Fat allocation chain.
870 //
871
872 case FatClusterAvailable:
873 case FatClusterReserved:
874 case FatClusterBad:
875
876 DebugTrace( 0, Dbg, "Break in allocation chain, entry = %d\n", FatEntry);
877 DebugTrace(-1, Dbg, "FatLookupFileAllocation -> Fat Corrupt. Raise Status.\n", 0);
878
879 FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
880 FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
881 break;
882
883 //
884 // If this is the last cluster, we must update the Mcb and
885 // exit the loop.
886 //
887
888 case FatClusterLast:
889
890 //
891 // Assert we know where the current run started. If the
892 // Mcb was empty when we were called, then FirstLboOfCurrentRun
893 // was set to the start of the file. If the Mcb contained an
894 // entry, then FirstLboOfCurrentRun was set on the first
895 // iteration through the loop. Thus if FirstLboOfCurrentRun
896 // is 0, then there was an Mcb entry and we are on our first
897 // iteration, meaning that the last cluster in the Mcb was
898 // really the last allocated cluster, but we checked Vbo
899 // against AllocationSize, and found it OK, thus AllocationSize
900 // must be too large.
901 //
902 // Note that, when we finally arrive here, CurrentVbo is actually
903 // the first Vbo beyond the file allocation and CurrentLbo is
904 // meaningless.
905 //
906
907 DebugTrace( 0, Dbg, "Read last cluster of file.\n", 0);
908
909 //
910 // Detect the case of the maximal file. Note that this really isn't
911 // a proper Vbo - those are zero-based, and this is a one-based number.
912 // The maximal file, of 2^32 - 1 bytes, has a maximum byte offset of
913 // 2^32 - 2.
914 //
915 // Just so we don't get confused here.
916 //
917
918 if (CurrentVbo == 0) {
919
920 *EndOnMax = TRUE;
921 CurrentVbo -= 1;
922 }
923
924 LastCluster = TRUE;
925
926 if (FirstLboOfCurrentRun != 0 ) {
927
928 DebugTrace( 0, Dbg, "Adding a run to the Mcb.\n", 0);
929 DebugTrace( 0, Dbg, " Vbo = %08lx.\n", FirstVboOfCurrentRun);
930 DebugTrace( 0, Dbg, " Lbo = %08lx.\n", FirstLboOfCurrentRun);
931 DebugTrace( 0, Dbg, " Length = %08lx.\n", CurrentVbo - FirstVboOfCurrentRun);
932
933 (VOID)FatAddMcbEntry( Vcb,
934 &FcbOrDcb->Mcb,
935 FirstVboOfCurrentRun,
936 FirstLboOfCurrentRun,
937 CurrentVbo - FirstVboOfCurrentRun );
938
939 Runs += 1;
940 }
941
942 //
943 // Being at the end of allocation, make sure we have found
944 // the Vbo. If we haven't, seeing as we checked VBO
945 // against AllocationSize, the real disk allocation is less
946 // than that of AllocationSize. This comes about when the
947 // real allocation is not yet known, and AllocationSize
948 // contains MAXULONG.
949 //
950 // KLUDGE! - If we were called by FatLookupFileAllocationSize
951 // Vbo is set to MAXULONG - 1, and AllocationSize to the lookup
952 // hint. Thus we merrily go along looking for a match that isn't
953 // there, but in the meantime building an Mcb. If this is
954 // the case, fill in AllocationSize and return.
955 //
956
957 if ( Vbo == MAXULONG - 1 ) {
958
959 *Allocated = FALSE;
960 FcbOrDcb->Header.AllocationSize.QuadPart = CurrentVbo;
961
962 DebugTrace( 0, Dbg, "New file allocation size = %08lx.\n", CurrentVbo);
963 try_return ( NOTHING );
964 }
965
966 //
967 // We will lie ever so slightly if we really terminated on the
968 // maximal byte of a file. It is really allocated.
969 //
970
971 if (Vbo >= CurrentVbo && !*EndOnMax) {
972
973 *Allocated = FALSE;
974 try_return ( NOTHING );
975 }
976
977 break;
978
979 //
980 // This is a continuation in the chain. If the run has a
981 // discontiguity at this point, update the Mcb, and if we are beyond
982 // the desired Vbo, this is the end of the run, so set LastCluster
983 // and exit the loop.
984 //
985
986 case FatClusterNext:
987
988 //
989 // This is the loop check. The Vbo must not be bigger than the size of
990 // the volume, and the Vbo must not have a) wrapped and b) not been at the
991 // very last cluster in the chain, for the case of the maximal file.
992 //
993
994 if ( CurrentVbo == 0 ||
995 (BytesOnVolume.HighPart == 0 && CurrentVbo > BytesOnVolume.LowPart)) {
996
997 FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
998 FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
999 }
1000
1001 if ( PriorLbo + BytesPerCluster != CurrentLbo ) {
1002
1003 //
1004 // Note that on the first time through the loop
1005 // (FirstLboOfCurrentRun == 0), we don't add the
1006 // run to the Mcb since it corresponds to the last
1007 // run already stored in the Mcb.
1008 //
1009
1010 if ( FirstLboOfCurrentRun != 0 ) {
1011
1012 DebugTrace( 0, Dbg, "Adding a run to the Mcb.\n", 0);
1013 DebugTrace( 0, Dbg, " Vbo = %08lx.\n", FirstVboOfCurrentRun);
1014 DebugTrace( 0, Dbg, " Lbo = %08lx.\n", FirstLboOfCurrentRun);
1015 DebugTrace( 0, Dbg, " Length = %08lx.\n", CurrentVbo - FirstVboOfCurrentRun);
1016
1017 FatAddMcbEntry( Vcb,
1018 &FcbOrDcb->Mcb,
1019 FirstVboOfCurrentRun,
1020 FirstLboOfCurrentRun,
1021 CurrentVbo - FirstVboOfCurrentRun );
1022
1023 Runs += 1;
1024 }
1025
1026 //
1027 // Since we are at a run boundary, with CurrentLbo and
1028 // CurrentVbo being the first cluster of the next run,
1029 // we see if the run we just added encompasses the desired
1030 // Vbo, and if so exit. Otherwise we set up two new
1031 // First*boOfCurrentRun, and continue.
1032 //
1033
1034 if (CurrentVbo > Vbo) {
1035
1036 LastCluster = TRUE;
1037
1038 } else {
1039
1040 FirstVboOfCurrentRun = CurrentVbo;
1041 FirstLboOfCurrentRun = CurrentLbo;
1042 }
1043 }
1044 break;
1045
1046 default:
1047
1048 DebugTrace(0, Dbg, "Illegal Cluster Type.\n", FatEntry);
1049
1050 FatBugCheck( 0, 0, 0 );
1051
1052 break;
1053
1054 } // switch()
1055 } // while()
1056
1057 //
1058 // Load up the return parameters.
1059 //
1060 // On exit from the loop, Vbo still contains the desired Vbo, and
1061 // CurrentVbo is the first byte after the run that contained the
1062 // desired Vbo.
1063 //
1064
1065 *Allocated = TRUE;
1066
1067 *Lbo = FirstLboOfCurrentRun + (Vbo - FirstVboOfCurrentRun);
1068
1069 *ByteCount = CurrentVbo - Vbo;
1070
1071 if (ARGUMENT_PRESENT(Index)) {
1072
1073 //
1074 // Note that Runs only needs to be accurate with respect to where we
1075 // ended. Since partial-lookup cases will occur without exclusive
1076 // synchronization, the Mcb itself may be much bigger by now.
1077 //
1078
1079 *Index = Runs - 1;
1080 }
1081
1082 try_exit: NOTHING;
1083
1084 } _SEH2_FINALLY {
1085
1086 DebugUnwind( FatLookupFileAllocation );
1087
1088 //
1089 // We are done reading the Fat, so unpin the last page of fat
1090 // that is hanging around
1091 //
1092
1093 FatUnpinBcb( IrpContext, Context.Bcb );
1094
1095 DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
1096 } _SEH2_END;
1097
1098 return;
1099 }
1100
1101 \f
1102 VOID
1103 FatAddFileAllocation (
1104 IN PIRP_CONTEXT IrpContext,
1105 IN PFCB FcbOrDcb,
1106 IN PFILE_OBJECT FileObject OPTIONAL,
1107 IN ULONG DesiredAllocationSize
1108 )
1109
1110 /*++
1111
1112 Routine Description:
1113
1114 This routine adds additional allocation to the specified file/directory.
1115 Additional allocation is added by appending clusters to the file/directory.
1116
1117 If the file already has a sufficient allocation then this procedure
1118 is effectively a noop.
1119
1120 Arguments:
1121
1122 FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified.
1123 This parameter must not specify the root dcb.
1124
1125 FileObject - If supplied inform the cache manager of the change.
1126
1127 DesiredAllocationSize - Supplies the minimum size, in bytes, that we want
1128 allocated to the file/directory.
1129
1130 --*/
1131
1132 {
1133 PVCB Vcb;
1134 LARGE_MCB NewMcb;
1135 PLARGE_MCB McbToCleanup = NULL;
1136 PDIRENT Dirent = NULL;
1137 ULONG NewAllocation;
1138 PBCB Bcb = NULL;
1139 BOOLEAN UnwindWeAllocatedDiskSpace = FALSE;
1140 BOOLEAN UnwindAllocationSizeSet = FALSE;
1141 BOOLEAN UnwindCacheManagerInformed = FALSE;
1142 BOOLEAN UnwindWeInitializedMcb = FALSE;
1143
1144 PAGED_CODE();
1145
1146 DebugTrace(+1, Dbg, "FatAddFileAllocation\n", 0);
1147 DebugTrace( 0, Dbg, " FcbOrDcb = %8lx\n", FcbOrDcb);
1148 DebugTrace( 0, Dbg, " DesiredAllocationSize = %8lx\n", DesiredAllocationSize);
1149
1150 //
1151 // If we haven't yet set the correct AllocationSize, do so.
1152 //
1153
1154 if (FcbOrDcb->Header.AllocationSize.QuadPart == FCB_LOOKUP_ALLOCATIONSIZE_HINT) {
1155
1156 FatLookupFileAllocationSize( IrpContext, FcbOrDcb );
1157 }
1158
1159 //
1160 // Check for the benign case that the desired allocation is already
1161 // within the allocation size.
1162 //
1163
1164 if (DesiredAllocationSize <= FcbOrDcb->Header.AllocationSize.LowPart) {
1165
1166 DebugTrace(0, Dbg, "Desired size within current allocation.\n", 0);
1167
1168 DebugTrace(-1, Dbg, "FatAddFileAllocation -> (VOID)\n", 0);
1169 return;
1170 }
1171
1172 DebugTrace( 0, Dbg, "InitialAllocation = %08lx.\n", FcbOrDcb->Header.AllocationSize.LowPart);
1173
1174 //
1175 // Get a chunk of disk space that will fulfill our needs. If there
1176 // was no initial allocation, start from the hint in the Vcb, otherwise
1177 // try to allocate from the cluster after the initial allocation.
1178 //
1179 // If there was no initial allocation to the file, we can just use the
1180 // Mcb in the FcbOrDcb, otherwise we have to use a new one, and merge
1181 // it to the one in the FcbOrDcb.
1182 //
1183
1184 Vcb = FcbOrDcb->Vcb;
1185
1186 _SEH2_TRY {
1187
1188 if (FcbOrDcb->Header.AllocationSize.LowPart == 0) {
1189
1190 LBO FirstLboOfFile;
1191
1192 ASSERT( FcbOrDcb->FcbCondition == FcbGood );
1193
1194 FatGetDirentFromFcbOrDcb( IrpContext,
1195 FcbOrDcb,
1196 &Dirent,
1197 &Bcb );
1198
1199 ASSERT( Bcb != NULL );
1200
1201 //
1202 // Set this dirty right now since this call can fail.
1203 //
1204
1205 FatSetDirtyBcb( IrpContext, Bcb, Vcb, TRUE );
1206
1207
1208 FatAllocateDiskSpace( IrpContext,
1209 Vcb,
1210 0,
1211 &DesiredAllocationSize,
1212 FALSE,
1213 &FcbOrDcb->Mcb );
1214
1215 UnwindWeAllocatedDiskSpace = TRUE;
1216 McbToCleanup = &FcbOrDcb->Mcb;
1217
1218 //
1219 // We have to update the dirent and FcbOrDcb copies of
1220 // FirstClusterOfFile since before it was 0
1221 //
1222
1223 FatLookupMcbEntry( FcbOrDcb->Vcb,
1224 &FcbOrDcb->Mcb,
1225 0,
1226 &FirstLboOfFile,
1227 (PULONG)NULL,
1228 NULL );
1229
1230 DebugTrace( 0, Dbg, "First Lbo of file will be %08lx.\n", FirstLboOfFile );
1231
1232 FcbOrDcb->FirstClusterOfFile = FatGetIndexFromLbo( Vcb, FirstLboOfFile );
1233
1234 Dirent->FirstClusterOfFile = (USHORT)FcbOrDcb->FirstClusterOfFile;
1235
1236 if ( FatIsFat32(Vcb) ) {
1237
1238 Dirent->FirstClusterOfFileHi = (USHORT)(FcbOrDcb->FirstClusterOfFile >> 16);
1239 }
1240
1241 //
1242 // Note the size of the allocation we need to tell the cache manager about.
1243 //
1244
1245 NewAllocation = DesiredAllocationSize;
1246
1247 } else {
1248
1249 LBO LastAllocatedLbo;
1250 VBO DontCare;
1251
1252 //
1253 // Get the first cluster following the current allocation. It is possible
1254 // the Mcb is empty (or short, etc.) so we need to be slightly careful
1255 // about making sure we don't lie with the hint.
1256 //
1257
1258 (void)FatLookupLastMcbEntry( FcbOrDcb->Vcb, &FcbOrDcb->Mcb, &DontCare, &LastAllocatedLbo, NULL );
1259
1260 //
1261 // Try to get some disk space starting from there.
1262 //
1263
1264 NewAllocation = DesiredAllocationSize - FcbOrDcb->Header.AllocationSize.LowPart;
1265
1266 FsRtlInitializeLargeMcb( &NewMcb, PagedPool );
1267 UnwindWeInitializedMcb = TRUE;
1268 McbToCleanup = &NewMcb;
1269
1270 FatAllocateDiskSpace( IrpContext,
1271 Vcb,
1272 (LastAllocatedLbo != ~0 ?
1273 FatGetIndexFromLbo(Vcb,LastAllocatedLbo + 1) :
1274 0),
1275 &NewAllocation,
1276 FALSE,
1277 &NewMcb );
1278
1279 UnwindWeAllocatedDiskSpace = TRUE;
1280 }
1281
1282 //
1283 // Now that we increased the allocation of the file, mark it in the
1284 // FcbOrDcb. Carefully prepare to handle an inability to grow the cache
1285 // structures.
1286 //
1287
1288 FcbOrDcb->Header.AllocationSize.LowPart += NewAllocation;
1289
1290 //
1291 // Handle the maximal file case, where we may have just wrapped. Note
1292 // that this must be the precise boundary case wrap, i.e. by one byte,
1293 // so that the new allocation is actually one byte "less" as far as we're
1294 // concerned. This is important for the extension case.
1295 //
1296
1297 if (FcbOrDcb->Header.AllocationSize.LowPart == 0) {
1298
1299 NewAllocation -= 1;
1300 FcbOrDcb->Header.AllocationSize.LowPart = 0xffffffff;
1301 }
1302
1303 UnwindAllocationSizeSet = TRUE;
1304
1305 //
1306 // Inform the cache manager to increase the section size
1307 //
1308
1309 if ( ARGUMENT_PRESENT(FileObject) && CcIsFileCached(FileObject) ) {
1310
1311 CcSetFileSizes( FileObject,
1312 (PCC_FILE_SIZES)&FcbOrDcb->Header.AllocationSize );
1313 UnwindCacheManagerInformed = TRUE;
1314 }
1315
1316 //
1317 // In the extension case, we have held off actually gluing the new
1318 // allocation onto the file. This simplifies exception cleanup since
1319 // if it was already added and the section grow failed, we'd have to
1320 // do extra work to unglue it. This way, we can assume that if we
1321 // raise the only thing we need to do is deallocate the disk space.
1322 //
1323 // Merge the allocation now.
1324 //
1325
1326 if (FcbOrDcb->Header.AllocationSize.LowPart != NewAllocation) {
1327
1328 //
1329 // Tack the new Mcb onto the end of the FcbOrDcb one.
1330 //
1331
1332 FatMergeAllocation( IrpContext,
1333 Vcb,
1334 &FcbOrDcb->Mcb,
1335 &NewMcb );
1336 }
1337
1338 } _SEH2_FINALLY {
1339
1340 DebugUnwind( FatAddFileAllocation );
1341
1342 //
1343 // Give FlushFileBuffer a clue here.
1344 //
1345
1346 SetFlag(FcbOrDcb->FcbState, FCB_STATE_FLUSH_FAT);
1347
1348 //
1349 // If we were dogged trying to complete this operation, we need to
1350 // back various things out.
1351 //
1352
1353 if (_SEH2_AbnormalTermination()) {
1354
1355 //
1356 // Pull off the allocation size we tried to add to this object if
1357 // we failed to grow cache structures or Mcb structures.
1358 //
1359
1360 if (UnwindAllocationSizeSet) {
1361
1362 FcbOrDcb->Header.AllocationSize.LowPart -= NewAllocation;
1363 }
1364
1365 if (UnwindCacheManagerInformed) {
1366
1367 CcSetFileSizes( FileObject,
1368 (PCC_FILE_SIZES)&FcbOrDcb->Header.AllocationSize );
1369 }
1370
1371 //
1372 // In the case of initial allocation, we used the Fcb's Mcb and have
1373 // to clean that up as well as the FAT chain references.
1374 //
1375
1376 if (FcbOrDcb->Header.AllocationSize.LowPart == 0) {
1377
1378 if (Dirent != NULL) {
1379
1380 FcbOrDcb->FirstClusterOfFile = 0;
1381 Dirent->FirstClusterOfFile = 0;
1382
1383 if ( FatIsFat32(Vcb) ) {
1384
1385 Dirent->FirstClusterOfFileHi = 0;
1386 }
1387 }
1388 }
1389
1390 //
1391 // ... and drop the dirent Bcb if we got it. Do it now
1392 // so we can afford to take the exception if we have to.
1393 //
1394
1395 FatUnpinBcb( IrpContext, Bcb );
1396
1397 _SEH2_TRY {
1398
1399 //
1400 // Note this can re-raise.
1401 //
1402
1403 if ( UnwindWeAllocatedDiskSpace ) {
1404
1405 FatDeallocateDiskSpace( IrpContext, Vcb, McbToCleanup );
1406 }
1407
1408 } _SEH2_FINALLY {
1409
1410 //
1411 // We always want to clean up the non-initial allocation temporary Mcb,
1412 // otherwise we have the Fcb's Mcb and we just truncate it away.
1413 //
1414
1415 if (UnwindWeInitializedMcb == TRUE) {
1416
1417 //
1418 // Note that we already know a raise is in progress. No danger
1419 // of encountering the normal case code below and doing this again.
1420 //
1421
1422 FsRtlUninitializeLargeMcb( McbToCleanup );
1423
1424 } else {
1425
1426 if (McbToCleanup) {
1427
1428 FsRtlTruncateLargeMcb( McbToCleanup, 0 );
1429 }
1430 }
1431 } _SEH2_END;
1432 }
1433
1434 DebugTrace(-1, Dbg, "FatAddFileAllocation -> (VOID)\n", 0);
1435 } _SEH2_END;
1436
1437 //
1438 // Non-exceptional cleanup we always want to do. In handling the re-raise possibilities
1439 // during exceptions we had to make sure these two steps always happened there beforehand.
1440 // So now we handle the usual case.
1441 //
1442
1443 FatUnpinBcb( IrpContext, Bcb );
1444
1445 if (UnwindWeInitializedMcb == TRUE) {
1446
1447 FsRtlUninitializeLargeMcb( &NewMcb );
1448 }
1449 }
1450
1451 \f
1452 VOID
1453 FatTruncateFileAllocation (
1454 IN PIRP_CONTEXT IrpContext,
1455 IN PFCB FcbOrDcb,
1456 IN ULONG DesiredAllocationSize
1457 )
1458
1459 /*++
1460
1461 Routine Description:
1462
1463 This routine truncates the allocation to the specified file/directory.
1464
1465 If the file is already smaller than the indicated size then this procedure
1466 is effectively a noop.
1467
1468
1469 Arguments:
1470
1471 FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified
1472 This parameter must not specify the root dcb.
1473
1474 DesiredAllocationSize - Supplies the maximum size, in bytes, that we want
1475 allocated to the file/directory. It is rounded
1476 up to the nearest cluster.
1477
1478 Return Value:
1479
1480 VOID
1482
1483 --*/
1484
1485 {
1486 PVCB Vcb;
1487 PBCB Bcb = NULL;
1488 LARGE_MCB RemainingMcb;
1489 ULONG BytesPerCluster;
1490 PDIRENT Dirent = NULL;
1491 BOOLEAN UpdatedDirent = FALSE;
1492
1493 ULONG UnwindInitialAllocationSize;
1494 ULONG UnwindInitialFirstClusterOfFile;
1495 BOOLEAN UnwindWeAllocatedMcb = FALSE;
1496
1497 PAGED_CODE();
1498
1499 DebugTrace(+1, Dbg, "FatTruncateFileAllocation\n", 0);
1500 DebugTrace( 0, Dbg, " FcbOrDcb = %8lx\n", FcbOrDcb);
1501 DebugTrace( 0, Dbg, " DesiredAllocationSize = %8lx\n", DesiredAllocationSize);
1502
1503 //
1504 // If the Fcb isn't in good condition, we have no business whacking around on
1505 // the disk after "its" clusters.
1506 //
1507 // Inspired by a Prefix complaint.
1508 //
1509
1510 ASSERT( FcbOrDcb->FcbCondition == FcbGood );
1511
1512 //
1513 // If we haven't yet set the correct AllocationSize, do so.
1514 //
1515
1516 if (FcbOrDcb->Header.AllocationSize.QuadPart == FCB_LOOKUP_ALLOCATIONSIZE_HINT) {
1517
1518 FatLookupFileAllocationSize( IrpContext, FcbOrDcb );
1519 }
1520
1521 //
1522 // Round up the Desired Allocation Size to the next cluster size
1523 //
1524
1525 Vcb = FcbOrDcb->Vcb;
1526
1527 BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;
1528
1529 //
1530 // Note if the desired allocation is zero, to distinguish this from
1531 // the wrap case below.
1532 //
1533
1534 if (DesiredAllocationSize != 0) {
1535
1536 DesiredAllocationSize = (DesiredAllocationSize + (BytesPerCluster - 1)) &
1537 ~(BytesPerCluster - 1);
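
//
// For example, with 4KB clusters a request of 0x1001 bytes rounds up
// to 0x2000 - adding BytesPerCluster - 1 and masking off the low bits
// is the usual power-of-two round-up.
//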
1538 //
1539 // Check for the benign case that the file is already smaller than
1540 // the desired truncation. Note that if it wraps, then a) it was
1541 // specifying an offset in the maximally allocatable cluster and
1542 // b) we're not asking to extend the file, either. So stop.
1543 //
1544
1545 if (DesiredAllocationSize == 0 ||
1546 DesiredAllocationSize >= FcbOrDcb->Header.AllocationSize.LowPart) {
1547
1548 DebugTrace(0, Dbg, "Desired size within current allocation.\n", 0);
1549
1550 DebugTrace(-1, Dbg, "FatTruncateFileAllocation -> (VOID)\n", 0);
1551 return;
1552 }
1553
1554 }
1555
1556 UnwindInitialAllocationSize = FcbOrDcb->Header.AllocationSize.LowPart;
1557 UnwindInitialFirstClusterOfFile = FcbOrDcb->FirstClusterOfFile;
1558
1559 //
1560 // Update the FcbOrDcb allocation size. If it is now zero, we have the
1561 // additional task of modifying the FcbOrDcb and Dirent copies of
1562 // FirstClusterOfFile.
1563 //
1564 // Note that we must pin the dirent before actually deallocating the
1565 // disk space since, in unwind, it would not be possible to reallocate
1566 // deallocated disk space as someone else may have reallocated it and
1567 // may cause an exception when you try to get some more disk space.
1568 // Thus FatDeallocateDiskSpace must be the final dangerous operation.
1569 //
1570
1571 _SEH2_TRY {
1572
1573 FcbOrDcb->Header.AllocationSize.QuadPart = DesiredAllocationSize;
1574
1575 //
1576 // Special case 0
1577 //
1578
1579 if (DesiredAllocationSize == 0) {
1580
1581 //
1582 // We have to update the dirent and FcbOrDcb copies of
1583 // FirstClusterOfFile since the file now has no allocation
1584 //
1585
1586 ASSERT( FcbOrDcb->FcbCondition == FcbGood );
1587
1588 FatGetDirentFromFcbOrDcb( IrpContext, FcbOrDcb, &Dirent, &Bcb );
1589
1590 ASSERT( Dirent && Bcb );
1591
1592 Dirent->FirstClusterOfFile = 0;
1593
1594 if (FatIsFat32(Vcb)) {
1595
1596 Dirent->FirstClusterOfFileHi = 0;
1597 }
1598
1599 FcbOrDcb->FirstClusterOfFile = 0;
1600
1601 FatSetDirtyBcb( IrpContext, Bcb, Vcb, TRUE );
1602 UpdatedDirent = TRUE;
1603
1604 FatDeallocateDiskSpace( IrpContext, Vcb, &FcbOrDcb->Mcb );
1605
1606 FatRemoveMcbEntry( FcbOrDcb->Vcb, &FcbOrDcb->Mcb, 0, 0xFFFFFFFF );
1607
1608 } else {
1609
1610 //
1611 // Split the existing allocation into two parts, one we will keep, and
1612 // one we will deallocate.
1613 //
1614
1615 FsRtlInitializeLargeMcb( &RemainingMcb, PagedPool );
1616 UnwindWeAllocatedMcb = TRUE;
1617
1618 FatSplitAllocation( IrpContext,
1619 Vcb,
1620 &FcbOrDcb->Mcb,
1621 DesiredAllocationSize,
1622 &RemainingMcb );
1623
1624 FatDeallocateDiskSpace( IrpContext, Vcb, &RemainingMcb );
1625
1626 FsRtlUninitializeLargeMcb( &RemainingMcb );
1627 }
1628
1629 } _SEH2_FINALLY {
1630
1631 DebugUnwind( FatTruncateFileAllocation );
1632
1633 //
1634 // Is this really the right backout strategy? It would be nice if we could
1635 // pretend the truncate worked if we knew that the file had gotten into
1636 // a consistent state. Leaving dangled clusters is probably quite preferable.
1637 //
1638
1639 if ( _SEH2_AbnormalTermination() ) {
1640
1641 FcbOrDcb->Header.AllocationSize.LowPart = UnwindInitialAllocationSize;
1642
1643 if ( (DesiredAllocationSize == 0) && (Dirent != NULL)) {
1644
1645 if (UpdatedDirent) {
1646
1647 //
1648 // If the dirent has been updated ok and marked dirty, then we
1649 // failed in FatDeallocateDiskSpace, and don't know what state
1650 // the on-disc fat chain is in. So we throw away the mcb,
1651 // and potentially lose a few clusters until the next
1652 // chkdsk. The operation has succeeded, but the exception
1653 // will still propagate. 5.1
1654 //
1655
1656 FatRemoveMcbEntry( Vcb, &FcbOrDcb->Mcb, 0, 0xFFFFFFFF );
1657 FcbOrDcb->Header.AllocationSize.QuadPart = 0;
1658 }
1659 else {
1660
1661 Dirent->FirstClusterOfFile = (USHORT)UnwindInitialFirstClusterOfFile;
1662
1663 if ( FatIsFat32(Vcb) ) {
1664
1665 Dirent->FirstClusterOfFileHi =
1666 (USHORT)(UnwindInitialFirstClusterOfFile >> 16);
1667 }
1668
1669 FcbOrDcb->FirstClusterOfFile = UnwindInitialFirstClusterOfFile;
1670 }
1671 }
1672
1673 if ( UnwindWeAllocatedMcb ) {
1674
1675 FsRtlUninitializeLargeMcb( &RemainingMcb );
1676 }
1677
1678 //
1679 // Note that in the non zero truncation case, we will also
1680 // leak clusters. However, apart from this, the in memory and on disc
1681 // structures will agree.
1682 }
1683
1684 FatUnpinBcb( IrpContext, Bcb );
1685
1686 //
1687 // Give FlushFileBuffer a clue here.
1688 //
1689
1690 SetFlag(FcbOrDcb->FcbState, FCB_STATE_FLUSH_FAT);
1691
1692 DebugTrace(-1, Dbg, "FatTruncateFileAllocation -> (VOID)\n", 0);
1693 } _SEH2_END;
1694 }
1695
1696 \f
1697 VOID
1698 FatLookupFileAllocationSize (
1699 IN PIRP_CONTEXT IrpContext,
1700 IN PFCB FcbOrDcb
1701 )
1702
1703 /*++
1704
1705 Routine Description:
1706
1707 This routine retrieves the current file allocation size for the
1708 specified file/directory.
1709
1710 Arguments:
1711
1712 FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified
1713
1714 --*/
1715
1716 {
1717 LBO Lbo;
1718 ULONG ByteCount;
1719 BOOLEAN DontCare;
1720
1721 PAGED_CODE();
1722
1723 DebugTrace(+1, Dbg, "FatLookupAllocationSize\n", 0);
1724 DebugTrace( 0, Dbg, " FcbOrDcb = %8lx\n", FcbOrDcb);
1725
1726 //
1727 // We call FatLookupFileAllocation with Vbo of 0xffffffff - 1.
1728 //
1729
1730 FatLookupFileAllocation( IrpContext,
1731 FcbOrDcb,
1732 MAXULONG - 1,
1733 &Lbo,
1734 &ByteCount,
1735 &DontCare,
1736 &DontCare,
1737 NULL );
1738
1739 //
1740 // FileSize was set at Fcb creation time from the contents of the directory entry,
1741 // and we are only now looking up the real length of the allocation chain. If it
1742 // cannot be contained, this is trash. Probably more where that came from.
1743 //
1744
1745 if (FcbOrDcb->Header.FileSize.LowPart > FcbOrDcb->Header.AllocationSize.LowPart) {
1746
1747 FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
1748 FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
1749 }
1750
1751 DebugTrace(-1, Dbg, "FatLookupFileAllocationSize -> (VOID)\n", 0);
1752 return;
1753 }
1754
1755 \f
1756 VOID
1757 FatAllocateDiskSpace (
1758 IN PIRP_CONTEXT IrpContext,
1759 IN PVCB Vcb,
1760 IN ULONG AbsoluteClusterHint,
1761 IN PULONG ByteCount,
1762 IN BOOLEAN ExactMatchRequired,
1763 OUT PLARGE_MCB Mcb
1764 )
1765
1766 /*++
1767
1768 Routine Description:
1769
1770 This procedure allocates additional disk space and builds an mcb
1771 representing the newly allocated space. If the space cannot be
1772 allocated then this procedure raises an appropriate status.
1773
1774 Searching starts from the hint index in the Vcb unless an alternative
1775 non-zero hint is given in AbsoluteClusterHint. If we are using the
1776 hint field in the Vcb, it is set to the cluster following our allocation
1777 when we are done.
1778
1779 Disk space can only be allocated in cluster units so this procedure
1780 will round up any byte count to the next cluster boundary.
1781
1782 Pictorially what is done is the following (where ! denotes the end of
1783 the fat chain (i.e., FAT_CLUSTER_LAST)):
1784
1785
1786 Mcb (empty)
1787
1788 becomes
1789
1790 Mcb |--a--|--b--|--c--!
1791
1792 ^
1793 ByteCount ----------+
1794
1795 Arguments:
1796
1797 Vcb - Supplies the VCB being modified
1798
1799 AbsoluteClusterHint - Supplies an alternate hint index to start the
1800 search from. If this is zero we use, and update,
1801 the Vcb hint field.
1802
1803 ByteCount - Supplies the number of bytes that we are requesting, and
1804 receives the number of bytes that we got.
1805
1806 ExactMatchRequired - Caller should set this to TRUE if only the precise run requested
1807 is acceptable.
1808
1809 Mcb - Receives the MCB describing the newly allocated disk space. The
1810 caller passes in an initialized Mcb that is filled in by this procedure.
1811
1812 Return Value:
1813
1814 VOID - This routine raises an appropriate status (e.g. STATUS_DISK_FULL)
1815 if the space cannot be allocated.
1816
1817 --*/
1818
1819 {
1820 UCHAR LogOfBytesPerCluster;
1821 ULONG BytesPerCluster;
1822 ULONG StartingCluster;
1823 ULONG ClusterCount;
1824 ULONG WindowRelativeHint;
1825 #if DBG
1826 #ifndef __REACTOS__
1827 ULONG i;
1828 #endif
1829 ULONG PreviousClear;
1830 #endif
1831
1832 PFAT_WINDOW Window;
1833 BOOLEAN Wait;
1834 BOOLEAN Result = TRUE;
1835
1836 PAGED_CODE();
1837
1838 DebugTrace(+1, Dbg, "FatAllocateDiskSpace\n", 0);
1839 DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
1840 DebugTrace( 0, Dbg, " *ByteCount = %8lx\n", *ByteCount);
1841 DebugTrace( 0, Dbg, " Mcb = %8lx\n", Mcb);
1842 DebugTrace( 0, Dbg, " Hint = %8lx\n", AbsoluteClusterHint);
1843
1844 ASSERT((AbsoluteClusterHint <= Vcb->AllocationSupport.NumberOfClusters + 2) && (1 != AbsoluteClusterHint));
1845
1846 //
1847 // Make sure byte count is not zero
1848 //
1849
1850 if (*ByteCount == 0) {
1851
1852 DebugTrace(0, Dbg, "Nothing to allocate.\n", 0);
1853
1854 DebugTrace(-1, Dbg, "FatAllocateDiskSpace -> (VOID)\n", 0);
1855 return;
1856 }
1857
1858 //
1859 // Compute the cluster count based on the byte count, rounding up
1860 // to the next cluster if there is any remainder. Note that the
1861 // pathological case ByteCount == 0 has been eliminated above.
1862 //
1863
1864 LogOfBytesPerCluster = Vcb->AllocationSupport.LogOfBytesPerCluster;
1865 BytesPerCluster = 1 << LogOfBytesPerCluster;
1866
1867 *ByteCount = (*ByteCount + (BytesPerCluster - 1))
1868 & ~(BytesPerCluster - 1);
1869
1870 //
1871 // If ByteCount is NOW zero, then we were asked for the maximal
1872 // filesize (or at least for bytes in the last allocatable sector).
1873 //
1874
1875 if (*ByteCount == 0) {
1876
1877 *ByteCount = 0xffffffff;
1878 ClusterCount = 1 << (32 - LogOfBytesPerCluster);
1879
1880 } else {
1881
1882 ClusterCount = (*ByteCount >> LogOfBytesPerCluster);
1883 }
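
//
// For example, with 4KB clusters a request that rounded up to 2^32
// wraps *ByteCount to zero, so it is pinned at 0xffffffff and
// ClusterCount becomes 1 << (32 - 12), i.e. 0x100000 clusters.
//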
1884
1885 //
1886 // Make sure there are enough free clusters to start with, and
1887 // take them now so that nobody else takes them from us.
1888 //
1889
1890 ExAcquireResourceSharedLite(&Vcb->ChangeBitMapResource, TRUE);
1891 FatLockFreeClusterBitMap( Vcb );
1892
1893 if (ClusterCount <= Vcb->AllocationSupport.NumberOfFreeClusters) {
1894
1895 Vcb->AllocationSupport.NumberOfFreeClusters -= ClusterCount;
1896
1897 } else {
1898
1899 FatUnlockFreeClusterBitMap( Vcb );
1900 ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
1901
1902 DebugTrace(0, Dbg, "Disk Full. Raise Status.\n", 0);
1903 FatRaiseStatus( IrpContext, STATUS_DISK_FULL );
1904 }
1905
1906 //
1907 // Did the caller supply a hint?
1908 //
1909
1910 if ((0 != AbsoluteClusterHint) && (AbsoluteClusterHint < (Vcb->AllocationSupport.NumberOfClusters + 2))) {
1911
1912 if (Vcb->NumberOfWindows > 1) {
1913
1914 //
1915 // If we're being called upon to allocate clusters outside the
1916 // current window (which happens only via MoveFile), it's a problem.
1917 // We address this by changing the current window to be the one which
1918 // contains the alternate cluster hint. Note that if the user's
1919 // request would cross a window boundary, he doesn't really get what
1920 // he wanted.
1921 //
1922
1923 if (AbsoluteClusterHint < Vcb->CurrentWindow->FirstCluster ||
1924 AbsoluteClusterHint > Vcb->CurrentWindow->LastCluster) {
1925
1926 ULONG BucketNum = FatWindowOfCluster( AbsoluteClusterHint );
1927
1928 ASSERT( BucketNum < Vcb->NumberOfWindows);
1929
1930 //
1931 // Drop our shared lock on the ChangeBitMapResource, and pick it up again
1932 // exclusive in preparation for making the window swap.
1933 //
1934
1935 FatUnlockFreeClusterBitMap(Vcb);
1936 ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
1937 ExAcquireResourceExclusiveLite(&Vcb->ChangeBitMapResource, TRUE);
1938 FatLockFreeClusterBitMap(Vcb);
1939
1940 Window = &Vcb->Windows[BucketNum];
1941
1942 //
1943 // Again, test the current window against the one we want - some other
1944 // thread could have sneaked in behind our backs and kindly set it to the one
1945 // we need, when we dropped and reacquired the ChangeBitMapResource above.
1946 //
1947
1948 if (Window != Vcb->CurrentWindow) {
1949
1950 _SEH2_TRY {
1951
1952 Wait = BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
1953 SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
1954
1955 //
1956 // Change to the new window (update Vcb->CurrentWindow) and scan it
1957 // to build up a freespace bitmap etc.
1958 //
1959
1960 FatExamineFatEntries( IrpContext, Vcb,
1961 0,
1962 0,
1963 FALSE,
1964 Window,
1965 NULL);
1966
1967 } _SEH2_FINALLY {
1968
1969 if (!Wait) {
1970
1971 ClearFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
1972 }
1973
1974 if (_SEH2_AbnormalTermination()) {
1975
1976 //
1977 // We will have raised as a result of failing to pick up the
1978 // chunk of the FAT for this window move. Release our resources
1979 // and return the cluster count to the volume.
1980 //
1981
1982 Vcb->AllocationSupport.NumberOfFreeClusters += ClusterCount;
1983
1984 FatUnlockFreeClusterBitMap( Vcb );
1985 ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
1986 }
1987 } _SEH2_END;
1988 }
1989 }
1990
1991 //
1992 // Make the hint cluster number relative to the base of the current window...
1993 //
1994 // CurrentWindow->FirstCluster is biased by +2 already, so we will lose the
1995 // bias already in AbsoluteClusterHint. Put it back....
1996 //
1997
1998 WindowRelativeHint = AbsoluteClusterHint - Vcb->CurrentWindow->FirstCluster + 2;
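
//
// For example, if the current window starts at cluster 65538 and the
// caller hinted at absolute cluster 70000, the window-relative hint
// is 70000 - 65538 + 2 == 4464.
//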
1999 }
2000 else {
2001
2002 //
2003 // Only one 'window', i.e. FAT16/12. No modification necessary.
2004 //
2005
2006 WindowRelativeHint = AbsoluteClusterHint;
2007 }
2008 }
2009 else {
2010
2011 //
2012 // Either no hint supplied, or it was out of range, so grab one from the Vcb
2013 //
2014 // NOTE: Clusterhint in the Vcb is not guaranteed to be set (may be -1)
2015 //
2016
2017 WindowRelativeHint = Vcb->ClusterHint;
2018 AbsoluteClusterHint = 0;
2019
2020 //
2021 // Vcb hint may not have been initialized yet. Force to valid cluster.
2022 //
2023
2024 if (-1 == WindowRelativeHint) {
2025
2026 WindowRelativeHint = 2;
2027 }
2028 }
2029
2030 ASSERT((WindowRelativeHint >= 2) && (WindowRelativeHint < Vcb->FreeClusterBitMap.SizeOfBitMap + 2));
2031
2032 //
2033 // Keep track of the window we're allocating from, so we can clean
2034 // up correctly if the current window changes after we unlock the
2035 // bitmap.
2036 //
2037
2038 Window = Vcb->CurrentWindow;
2039
2040 //
2041 // Try to find a run of free clusters large enough for us.
2042 //
2043
2044 StartingCluster = FatFindFreeClusterRun( IrpContext,
2045 Vcb,
2046 ClusterCount,
2047 WindowRelativeHint );
2048 //
2049 // If the above call was successful, we can just update the fat
2050 // and Mcb and exit. Otherwise we have to look for smaller free
2051 // runs.
2052 //
2053 // This test is a bit funky. Note that the error return from
2054 // RtlFindClearBits is -1, and adding two to that is 1.
2055 //
2056
2057 if ((StartingCluster != 1) &&
2058 ((0 == AbsoluteClusterHint) || (StartingCluster == WindowRelativeHint))
2059 ) {
2060
2061 #if DBG
2062 PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
2063 #endif // DBG
2064
2065 //
2066 // Take the clusters we found, and unlock the bit map.
2067 //
2068
2069 FatReserveClusters(IrpContext, Vcb, StartingCluster, ClusterCount);
2070
2071 Window->ClustersFree -= ClusterCount;
2072
2073 StartingCluster += Window->FirstCluster;
2074 StartingCluster -= 2;
2075
2076 ASSERT( PreviousClear - ClusterCount == Window->ClustersFree );
2077
2078 FatUnlockFreeClusterBitMap( Vcb );
2079
2080 //
2081 // Note that this call will never fail since there is always
2082 // room for one entry in an empty Mcb.
2083 //
2084
2085 FatAddMcbEntry( Vcb, Mcb,
2086 0,
2087 FatGetLboFromIndex( Vcb, StartingCluster ),
2088 *ByteCount);
2089 _SEH2_TRY {
2090
2091 //
2092 // Update the fat.
2093 //
2094
2095 FatAllocateClusters(IrpContext, Vcb,
2096 StartingCluster,
2097 ClusterCount);
2098
2099 } _SEH2_FINALLY {
2100
2101 DebugUnwind( FatAllocateDiskSpace );
2102
2103 //
2104 // If the allocate clusters failed, remove the run from the Mcb,
2105 // unreserve the clusters, and reset the free cluster count.
2106 //
2107
2108 if (_SEH2_AbnormalTermination()) {
2109
2110 FatRemoveMcbEntry( Vcb, Mcb, 0, *ByteCount );
2111
2112 FatLockFreeClusterBitMap( Vcb );
2113
2114 // Only clear bits if the bitmap window is the same.
2115
2116 if (Window == Vcb->CurrentWindow) {
2117
2118 // Both values (StartingCluster and Window->FirstCluster) are
2119 // already biased by 2, so will cancel, so we need to add in the 2 again.
2120
2121 FatUnreserveClusters( IrpContext, Vcb,
2122 StartingCluster - Window->FirstCluster + 2,
2123 ClusterCount );
2124 }
2125
2126 Window->ClustersFree += ClusterCount;
2127 Vcb->AllocationSupport.NumberOfFreeClusters += ClusterCount;
2128
2129 FatUnlockFreeClusterBitMap( Vcb );
2130 }
2131
2132 ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
2133 } _SEH2_END;
2134
2135 } else {
2136
2137 //
2138 // Note that Index is a zero-based window-relative number. When appropriate
2139 // it'll get converted into a true cluster number and put in Cluster, which
2140 // will be a volume relative true cluster number.
2141 //
2142
2143 ULONG Index;
2144 ULONG Cluster;
2145 ULONG CurrentVbo;
2146 ULONG PriorLastCluster;
2147 ULONG BytesFound;
2148
2149 ULONG ClustersFound = 0;
2150 ULONG ClustersRemaining;
2151
2152 BOOLEAN LockedBitMap = FALSE;
2153 BOOLEAN SelectNextContigWindow = FALSE;
2154
2155 //
2156 // Drop our shared lock on the ChangeBitMapResource, and pick it up again
2157 // exclusive in preparation for making a window swap.
2158 //
2159
2160 FatUnlockFreeClusterBitMap(Vcb);
2161 ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
2162 ExAcquireResourceExclusiveLite(&Vcb->ChangeBitMapResource, TRUE);
2163 FatLockFreeClusterBitMap(Vcb);
2164 LockedBitMap = TRUE;
2165
2166 _SEH2_TRY {
2167
2168 if ( ExactMatchRequired && (1 == Vcb->NumberOfWindows)) {
2169
2170 //
2171 // Give up right now, there are no more windows to search! RtlFindClearBits
2172 // searches the whole bitmap, so we would have found any contiguous run
2173 // large enough.
2174 //
2175
2176 try_leave( Result = FALSE);
2177 }
2178
2179 //
2180 // While the request is still incomplete, look for the largest
2181 // run of free clusters, mark them taken, allocate the run in
2182 // the Mcb and Fat, and if this isn't the first time through
2183 // the loop link it to prior run on the fat. The Mcb will
2184 // coalesce automatically.
2185 //
2186
2187 ClustersRemaining = ClusterCount;
2188 CurrentVbo = 0;
2189 PriorLastCluster = 0;
2190
2191 while (ClustersRemaining != 0) {
2192
2193 //
2194 // If we just entered the loop, the bit map is already locked
2195 //
2196
2197 if ( !LockedBitMap ) {
2198
2199 FatLockFreeClusterBitMap( Vcb );
2200 LockedBitMap = TRUE;
2201 }
2202
2203 //
2204 // Find the largest run of free clusters. If the run is
2205 // bigger than we need, only use what we need. Note that
2206 // this will then be the last while() iteration.
2207 //
2208
2209 // 12/3/95: we needed to bias the bitmap by 2 bits for the defrag
2210 // hooks, and the macro below became impossible to do without in-line
2211 // procedures.
2212 //
2213 // ClustersFound = FatLongestFreeClusterRun( IrpContext, Vcb, &Index );
2214
2215 ClustersFound = 0;
2216
2217 if (!SelectNextContigWindow) {
2218
2219 if ( 0 != WindowRelativeHint) {
2220
2221 ULONG Desired = Vcb->FreeClusterBitMap.SizeOfBitMap - (WindowRelativeHint - 2);
2222
2223 //
2224 // We will try to allocate contiguously. Try from the current hint to the
2225 // end of the current window. Don't try for more than we actually need.
2226 //
2227
2228 if (Desired > ClustersRemaining) {
2229
2230 Desired = ClustersRemaining;
2231 }
2232
2233 if (RtlAreBitsClear( &Vcb->FreeClusterBitMap,
2234 WindowRelativeHint - 2,
2235 Desired))
2236 {
2237 //
2238 // Clusters from hint->...windowend are free. Take them.
2239 //
2240
2241 Index = WindowRelativeHint - 2;
2242 ClustersFound = Desired;
2243
2244 if (FatIsFat32(Vcb)) {
2245
2246 //
2247 // We're now up against the end of the current window, so indicate that we
2248 // want the next window in the sequence next time around. (If we're not up
2249 // against the end of the window, then we got what we needed and won't be
2250 // coming around again anyway).
2251 //
2252
2253 SelectNextContigWindow = TRUE;
2254 WindowRelativeHint = 2;
2255 }
2256 else {
2257
2258 //
2259 // FAT 12/16 - we've run up against the end of the volume. Clear the
2260 // hint, since we now have no idea where to look.
2261 //
2262
2263 WindowRelativeHint = 0;
2264 }
2265 #if DBG
2266 PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
2267 #endif // DBG
2268 }
2269 else {
2270
2271 if (ExactMatchRequired) {
2272
2273 //
2274 // If our caller required an exact match, then we're hosed. Bail out now.
2275 //
2276
2277 try_leave( Result = FALSE);
2278 }
2279
2280 //
2281 // Hint failed, drop back to pot luck
2282 //
2283
2284 WindowRelativeHint = 0;
2285 }
2286 }
2287
2288 if ((0 == WindowRelativeHint) && (0 == ClustersFound)) {
2289
2290 if (ClustersRemaining <= Vcb->CurrentWindow->ClustersFree) {
2291
2292 //
2293 // The remaining allocation could be satisfied entirely from this
2294 // window. We will ask only for what we need, to try and avoid
2295 // unnecessarily fragmenting large runs of space by always using
2296 // (part of) the largest run we can find. This call will return the
2297 // first run large enough.
2298 //
2299
2300 Index = RtlFindClearBits( &Vcb->FreeClusterBitMap, ClustersRemaining, 0);
2301
2302 if (-1 != Index) {
2303
2304 ClustersFound = ClustersRemaining;
2305 }
2306 }
2307
2308 if (0 == ClustersFound) {
2309
2310 //
2311 // Still nothing, so just take the largest free run we can find.
2312 //
2313
2314 ClustersFound = RtlFindLongestRunClear( &Vcb->FreeClusterBitMap, &Index );
2315
2316 }
2317 #if DBG
2318 PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
2319 #endif // DBG
2320 if (ClustersFound >= ClustersRemaining) {
2321
2322 ClustersFound = ClustersRemaining;
2323 }
2324 else {
2325
2326 //
2327 // If we just ran up to the end of a window, set up a hint that
2328 // we'd like the next consecutive window after this one. (FAT32 only)
2329 //
2330
2331 if ( ((Index + ClustersFound) == Vcb->FreeClusterBitMap.SizeOfBitMap) &&
2332 FatIsFat32( Vcb)
2333 ) {
2334
2335 SelectNextContigWindow = TRUE;
2336 WindowRelativeHint = 2;
2337 }
2338 }
2339 }
2340 }
2341
2342 if (ClustersFound == 0) {
2343
2344 ULONG FaveWindow = 0;
2345 BOOLEAN SelectedWindow;
2346
2347 //
2348 // If we found no free clusters on a single-window FAT,
2349 // there was a bad problem with the free cluster count.
2350 //
2351
2352 if (1 == Vcb->NumberOfWindows) {
2353
2354 FatBugCheck( 0, 5, 0 );
2355 }
2356
2357 //
2358 // Switch to a new bucket. Possibly the next one if we're
2359 // currently on a roll (allocating contiguously)
2360 //
2361
2362 SelectedWindow = FALSE;
2363
2364 if ( SelectNextContigWindow) {
2365
2366 ULONG NextWindow;
2367
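//
// The byte offset of CurrentWindow from the base of the Windows
// array, divided by the size of a FAT_WINDOW, is the index of the
// current window; the next sequential window is that index plus one.
//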
2368 NextWindow = (((ULONG)((PUCHAR)Vcb->CurrentWindow - (PUCHAR)Vcb->Windows)) / sizeof( FAT_WINDOW)) + 1;
2369
2370 if ((NextWindow < Vcb->NumberOfWindows) &&
2371 ( Vcb->Windows[ NextWindow].ClustersFree > 0)
2372 ) {
2373
2374 FaveWindow = NextWindow;
2375 SelectedWindow = TRUE;
2376 }
2377 else {
2378
2379 if (ExactMatchRequired) {
2380
2381 //
2382 // Some dope tried to allocate a run past the end of the volume...
2383 //
2384
2385 try_leave( Result = FALSE);
2386 }
2387
2388 //
2389 // Give up on the contiguous allocation attempts
2390 //
2391
2392 WindowRelativeHint = 0;
2393 }
2394
2395 SelectNextContigWindow = FALSE;
2396 }
2397
2398 if (!SelectedWindow) {
2399
2400 //
2401 // Select a new window to begin allocating from
2402 //
2403
2404 FaveWindow = FatSelectBestWindow( Vcb);
2405 }
2406
2407 //
2408 // By now we'd better have found a window with some free clusters
2409 //
2410
2411 if (0 == Vcb->Windows[ FaveWindow].ClustersFree) {
2412
2413 FatBugCheck( 0, 5, 1 );
2414 }
2415
2416 Wait = BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
2417 SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
2418
2419 FatExamineFatEntries( IrpContext, Vcb,
2420 0,
2421 0,
2422 FALSE,
2423 &Vcb->Windows[FaveWindow],
2424 NULL);
2425
2426 if (!Wait) {
2427
2428 ClearFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
2429 }
2430
2431 //
2432 // Now we'll just go around the loop again, having switched windows,
2433 // and allocate....
2434 //
2435 #if DBG
2436 PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
2437 #endif //DBG
2438 } // if (clustersfound == 0)
2439 else {
2440
2441 //
2442 // Take the clusters we found, convert our index to a cluster number
2443 // and unlock the bit map.
2444 //
2445
2446 Window = Vcb->CurrentWindow;
2447
2448 FatReserveClusters( IrpContext, Vcb, (Index + 2), ClustersFound );
2449
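//
// Index is zero based and window-relative, while Window->FirstCluster
// already carries the +2 cluster bias, so their sum is the true,
// volume-relative cluster number.
//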
2450 Cluster = Index + Window->FirstCluster;
2451
2452 Window->ClustersFree -= ClustersFound;
2453 ASSERT( PreviousClear - ClustersFound == Window->ClustersFree );
2454
2455 FatUnlockFreeClusterBitMap( Vcb );
2456 LockedBitMap = FALSE;
2457
2458 //
2459 // Add the newly allocated run to the Mcb.
2460 //
2461
2462 BytesFound = ClustersFound << LogOfBytesPerCluster;
2463
2464 FatAddMcbEntry( Vcb, Mcb,
2465 CurrentVbo,
2466 FatGetLboFromIndex( Vcb, Cluster ),
2467 BytesFound );
2468
2469 //
2470 // Connect the last allocated run with this one, and allocate
2471 // this run on the Fat.
2472 //
2473
2474 if (PriorLastCluster != 0) {
2475
2476 FatSetFatEntry( IrpContext,
2477 Vcb,
2478 PriorLastCluster,
2479 (FAT_ENTRY)Cluster );
2480 }
2481
2482 //
2483 // Update the fat
2484 //
2485
2486 FatAllocateClusters( IrpContext, Vcb, Cluster, ClustersFound );
2487
2488 //
2489 // Prepare for the next iteration.
2490 //
2491
2492 CurrentVbo += BytesFound;
2493 ClustersRemaining -= ClustersFound;
2494 PriorLastCluster = Cluster + ClustersFound - 1;
2495 }
2496 } // while (clustersremaining)
2497
2498 } _SEH2_FINALLY {
2499
2500 DebugUnwind( FatAllocateDiskSpace );
2501
2502 ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
2503
2504 //
2505 // Is there any unwinding to do?
2506 //
2507
2508 if ( _SEH2_AbnormalTermination() || (FALSE == Result)) {
2509
2510 //
2511 // Flag to the caller that they're getting nothing
2512 //
2513
2514 *ByteCount = 0;
2515
2516 //
2517 // There are three places we could have taken this exception:
2518 // when switching the window (FatExamineFatEntries), adding
2519 // a found run to the Mcb (FatAddMcbEntry), or when writing
2520 // the changes to the FAT (FatSetFatEntry). In the first case
2521 // we don't have anything to unwind before deallocation, and
2522 // can detect this by seeing if we have the ClusterBitmap
2523 // mutex out.
2524
2525 if (!LockedBitMap) {
2526
2527 FatLockFreeClusterBitMap( Vcb );
2528
2529 //
2530 // In these cases, we have the possibility that the FAT
2531 // window is still in place and we need to clear the bits.
2532 // If the Mcb entry isn't there (we raised trying to add
2533 // it), the effect of trying to remove it is a noop.
2534 //
2535
2536 if (Window == Vcb->CurrentWindow) {
2537
2538 //
2539 // Cluster reservation works on cluster 2 based window-relative
2540 // numbers, so we must convert. The subtraction will lose the
2541 // cluster 2 base, so bias the result.
2542 //
2543
2544 FatUnreserveClusters( IrpContext, Vcb,
2545 (Cluster - Window->FirstCluster) + 2,
2546 ClustersFound );
2547 }
2548
2549 //
2550 // Note that FatDeallocateDiskSpace will take care of adjusting
2551 // to account for the entries in the Mcb. All we have to account
2552 // for is the last run that didn't make it.
2553 //
2554
2555 Window->ClustersFree += ClustersFound;
2556 Vcb->AllocationSupport.NumberOfFreeClusters += ClustersFound;
2557
2558 FatUnlockFreeClusterBitMap( Vcb );
2559
2560 FatRemoveMcbEntry( Vcb, Mcb, CurrentVbo, BytesFound );
2561
2562 } else {
2563
2564 //
2565 // Just drop the mutex now - we didn't manage to do anything
2566 // that needs to be backed out.
2567 //
2568
2569 FatUnlockFreeClusterBitMap( Vcb );
2570 }
2571
2572 _SEH2_TRY {
2573
2574 //
2575 // Now we have tidied up, we are ready to just send the Mcb
2576 // off to deallocate disk space
2577 //
2578
2579 FatDeallocateDiskSpace( IrpContext, Vcb, Mcb );
2580
2581 } _SEH2_FINALLY {
2582
2583 //
2584 // Now finally (really), remove all the entries from the mcb
2585 //
2586
2587 FatRemoveMcbEntry( Vcb, Mcb, 0, 0xFFFFFFFF );
2588 } _SEH2_END;
2589 }
2590
2591 DebugTrace(-1, Dbg, "FatAllocateDiskSpace -> (VOID)\n", 0);
2592
2593 } _SEH2_END; // finally
2594 }
2595
2596 return;
2597 }
2598
2599 \f
2600 VOID
2601 FatDeallocateDiskSpace (
2602 IN PIRP_CONTEXT IrpContext,
2603 IN PVCB Vcb,
2604 IN PLARGE_MCB Mcb
2605 )
2606
2607 /*++
2608
2609 Routine Description:
2610
2611 This procedure deallocates the disk space denoted by an input
2612 mcb. Note that the input MCB need not describe a chain that ends
2613 with a FAT_CLUSTER_LAST entry.
2614
2615 Pictorially what is done is the following
2616
2617 Fat |--a--|--b--|--c--|
2618 Mcb |--a--|--b--|--c--|
2619
2620 becomes
2621
2622 Fat |--0--|--0--|--0--|
2623 Mcb |--a--|--b--|--c--|
2624
2625 Arguments:
2626
2627 Vcb - Supplies the VCB being modified
2628
2629 Mcb - Supplies the MCB describing the disk space to deallocate. Note
2630 that Mcb is unchanged by this procedure.
2631
2632
2633 Return Value:
2634
2635 None.
2636
2637 --*/
2638
2639 {
2640 LBO Lbo;
2641 VBO Vbo;
2642
2643 ULONG RunsInMcb;
2644 ULONG ByteCount;
2645 ULONG ClusterCount;
2646 ULONG ClusterIndex;
2647 ULONG McbIndex;
2648
2649 UCHAR LogOfBytesPerCluster;
2650
2651 PFAT_WINDOW Window;
2652
2653 PAGED_CODE();
2654
2655 DebugTrace(+1, Dbg, "FatDeallocateDiskSpace\n", 0);
2656 DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
2657 DebugTrace( 0, Dbg, " Mcb = %8lx\n", Mcb);
2658
2659 LogOfBytesPerCluster = Vcb->AllocationSupport.LogOfBytesPerCluster;
2660
2661 RunsInMcb = FsRtlNumberOfRunsInLargeMcb( Mcb );
2662
2663 if ( RunsInMcb == 0 ) {
2664
2665 DebugTrace(-1, Dbg, "FatDeallocateDiskSpace -> (VOID)\n", 0);
2666 return;
2667 }
2668
2669 _SEH2_TRY {
2670
2671 //
2672 // Run through the Mcb, freeing all the runs in the fat.
2673 //
2674 // We do this in two steps (first update the fat, then the bitmap
2675 // (which can't fail)) to prevent other people from taking clusters
2676 // that we need to re-allocate in the event of unwind.
2677 //
2678
2679 ExAcquireResourceSharedLite(&Vcb->ChangeBitMapResource, TRUE);
2680
2681 RunsInMcb = FsRtlNumberOfRunsInLargeMcb( Mcb );
2682
2683 for ( McbIndex = 0; McbIndex < RunsInMcb; McbIndex++ ) {
2684
2685 FatGetNextMcbEntry( Vcb, Mcb, McbIndex, &Vbo, &Lbo, &ByteCount );
2686
2687 //
2688 // Assert that Fat files have no holes.
2689 //
2690
2691 ASSERT( Lbo != 0 );
2692
2693 //
2694 // Write FAT_CLUSTER_AVAILABLE to each cluster in the run.
2695 //
2696
2697 ClusterCount = ByteCount >> LogOfBytesPerCluster;
2698 ClusterIndex = FatGetIndexFromLbo( Vcb, Lbo );
2699
2700 FatFreeClusters( IrpContext, Vcb, ClusterIndex, ClusterCount );
2701 }
2702
2703 //
2704 // From now on, nothing can go wrong .... (as in raise)
2705 //
2706
2707 FatLockFreeClusterBitMap( Vcb );
2708
2709 for ( McbIndex = 0; McbIndex < RunsInMcb; McbIndex++ ) {
2710
2711 ULONG ClusterEnd;
2712 ULONG MyStart, MyLength, count;
2713 #if DBG
2714 #ifndef __REACTOS__
2715 ULONG PreviousClear, i;
2716 #else
2717 ULONG i;
2718 #endif
2719 #endif
2720
2721 FatGetNextMcbEntry( Vcb, Mcb, McbIndex, &Vbo, &Lbo, &ByteCount );
2722
2723 //
2724 // Mark the bits clear in the FreeClusterBitMap.
2725 //
2726
2727 ClusterCount = ByteCount >> LogOfBytesPerCluster;
2728 ClusterIndex = FatGetIndexFromLbo( Vcb, Lbo );
2729
2730 Window = Vcb->CurrentWindow;
2731
2732 //
2733 // If we've divided the bitmap, elide bitmap manipulation for
2734 // runs that are outside the current bucket.
2735 //
2736
2737 ClusterEnd = ClusterIndex + ClusterCount - 1;
2738
2739 if (!(ClusterIndex > Window->LastCluster ||
2740 ClusterEnd < Window->FirstCluster)) {
2741
2742 //
2743 // The run being freed overlaps the current bucket, so we'll
2744 // have to clear some bits.
2745 //
2746
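//
// Three overlap shapes are possible: the run engulfs the bucket,
// starts before it, or starts inside it. In each case we clip
// MyStart/MyLength to the portion inside the bucket.
//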
2747 if (ClusterIndex < Window->FirstCluster &&
2748 ClusterEnd > Window->LastCluster) {
2749
2750 MyStart = Window->FirstCluster;
2751 MyLength = Window->LastCluster - Window->FirstCluster + 1;
2752
2753 } else if (ClusterIndex < Window->FirstCluster) {
2754
2755 MyStart = Window->FirstCluster;
2756 MyLength = ClusterEnd - Window->FirstCluster + 1;
2757
2758 } else {
2759
2760 //
2761 // The range being freed starts in the bucket, and may possibly
2762 // extend beyond the bucket.
2763 //
2764
2765 MyStart = ClusterIndex;
2766
2767 if (ClusterEnd <= Window->LastCluster) {
2768
2769 MyLength = ClusterCount;
2770
2771 } else {
2772
2773 MyLength = Window->LastCluster - ClusterIndex + 1;
2774 }
2775 }
2776
2777 if (MyLength == 0) {
2778
2779 continue;
2780 }
2781
2782 #if DBG
2783 #ifndef __REACTOS__
2784 PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
2785 #endif
2786
2787
2788 //
2789 // Verify that the Bits are all really set.
2790 //
2791
2792 ASSERT( MyStart + MyLength - Window->FirstCluster <= Vcb->FreeClusterBitMap.SizeOfBitMap );
2793
2794 for (i = 0; i < MyLength; i++) {
2795
2796 ASSERT( RtlCheckBit(&Vcb->FreeClusterBitMap,
2797 MyStart - Window->FirstCluster + i) == 1 );
2798 }
2799 #endif // DBG
2800
2801 FatUnreserveClusters( IrpContext, Vcb,
2802 MyStart - Window->FirstCluster + 2,
2803 MyLength );
2804 }
2805
2806 //
2807 // Adjust the ClustersFree count for each bitmap window, even the ones
2808 // that are not the current window.
2809 //
2810
2811 if (FatIsFat32(Vcb)) {
2812
2813 Window = &Vcb->Windows[FatWindowOfCluster( ClusterIndex )];
2814
2815 } else {
2816
2817 Window = &Vcb->Windows[0];
2818 }
2819
2820 MyStart = ClusterIndex;
2821
2822 for (MyLength = ClusterCount; MyLength > 0; MyLength -= count) {
2823
2824 count = FatMin(Window->LastCluster - MyStart + 1, MyLength);
2825 Window->ClustersFree += count;
2826
2827 //
2828 // If this was not the last window this allocation spanned,
2829 // advance to the next.
2830 //
2831
2832 if (MyLength != count) {
2833
2834 Window++;
2835 MyStart = Window->FirstCluster;
2836 }
2837 }
2838
2839 //
2840 // Deallocation is now complete. Adjust the free cluster count.
2841 //
2842
2843 Vcb->AllocationSupport.NumberOfFreeClusters += ClusterCount;
2844 }
2845
2846 #if DBG
2847 if (Vcb->CurrentWindow->ClustersFree !=
2848 RtlNumberOfClearBits(&Vcb->FreeClusterBitMap)) {
2849
2850 DbgPrint("%x vs %x\n", Vcb->CurrentWindow->ClustersFree,
2851 RtlNumberOfClearBits(&Vcb->FreeClusterBitMap));
2852
2853 DbgPrint("%x for %x\n", ClusterIndex, ClusterCount);
2854 }
2855 #endif
2856
2857 FatUnlockFreeClusterBitMap( Vcb );
2858
2859
2860 } _SEH2_FINALLY {
2861
2862 DebugUnwind( FatDeallocateDiskSpace );
2863
2864 //
2865 // Is there any unwinding to do?
2866 //
2867
2868 ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
2869
2870 if ( _SEH2_AbnormalTermination() ) {
2871
2872 LBO Lbo;
2873 VBO Vbo;
2874
2875 ULONG Index;
2876 ULONG Clusters;
2877 ULONG FatIndex;
2878 ULONG PriorLastIndex;
2879
2880 //
2881 // For each entry we already deallocated, reallocate it,
2882 // chaining together as necessary. Note that we continue
2883 // up to and including the last "for" iteration even though
2884 // the SetFatRun could not have been successful. This
2885 // allows us a convenient way to re-link the final successful
2886 // SetFatRun.
2887 //
2888 // It is possible that the reason we got here will prevent us
2889 // from succeeding in this operation.
2890 //
2891
2892 PriorLastIndex = 0;
2893
2894 for (Index = 0; Index <= McbIndex; Index++) {
2895
2896 FatGetNextMcbEntry(Vcb, Mcb, Index, &Vbo, &Lbo, &ByteCount);
2897
2898 FatIndex = FatGetIndexFromLbo( Vcb, Lbo );
2899 Clusters = ByteCount >> LogOfBytesPerCluster;
2900
2901 //
2902 // We must always restore the prior iteration's last
2903 // entry, pointing it to the first cluster of this run.
2904 //
2905
2906 if (PriorLastIndex != 0) {
2907
2908 FatSetFatEntry( IrpContext,
2909 Vcb,
2910 PriorLastIndex,
2911 (FAT_ENTRY)FatIndex );
2912 }
2913
2914 //
2915 // If this is not the last entry (the one that failed)
2916 // then reallocate the disk space on the fat.
2917 //
2918
2919 if ( Index < McbIndex ) {
2920
2921 FatAllocateClusters(IrpContext, Vcb, FatIndex, Clusters);
2922
2923 PriorLastIndex = FatIndex + Clusters - 1;
2924 }
2925 }
2926 }
2927
2928 DebugTrace(-1, Dbg, "FatDeallocateDiskSpace -> (VOID)\n", 0);
2929 } _SEH2_END;
2930
2931 return;
2932 }
2933
2934 \f
2935 VOID
2936 FatSplitAllocation (
2937 IN PIRP_CONTEXT IrpContext,
2938 IN PVCB Vcb,
2939 IN OUT PLARGE_MCB Mcb,
2940 IN VBO SplitAtVbo,
2941 OUT PLARGE_MCB RemainingMcb
2942 )
2943
2944 /*++
2945
2946 Routine Description:
2947
2948 This procedure takes a single mcb and splits its allocation into
2949 two separate allocation units. The separation must only be done
2950 on cluster boundaries, otherwise we bugcheck.
2951
2952 On the disk this actually works by inserting a FAT_CLUSTER_LAST into
2953 the last index of the first part being split out.
2954
2955 Pictorially what is done is the following (where ! denotes the end of
2956 the fat chain (i.e., FAT_CLUSTER_LAST)):
2957
2958
2959 Mcb |--a--|--b--|--c--|--d--|--e--|--f--|
2960
2961 ^
2962 SplitAtVbo ---------------------+
2963
2964 RemainingMcb (empty)
2965
2966 becomes
2967
2968 Mcb |--a--|--b--|--c--!
2969
2970
2971 RemainingMcb |--d--|--e--|--f--|
2972
2973 Arguments:
2974
2975 Vcb - Supplies the VCB being modified
2976
2977 Mcb - Supplies the MCB describing the allocation being split into
2978 two parts. Upon return this Mcb now contains the first chain.
2979
2980 SplitAtVbo - Supplies the VBO of the first byte for the second chain
2981 that we are creating.
2982
2983 RemainingMcb - Receives the MCB describing the second chain of allocated
2984 disk space. The caller passes in an initialized Mcb that
2985 is filled in by this procedure STARTING AT VBO 0.
2986
2987 Return Value:
2988
2989 None.
2990
2991
2992 --*/
2993
2994 {
2995 VBO SourceVbo;
2996 VBO TargetVbo;
2997 VBO DontCare;
2998
2999 LBO Lbo;
3000
3001 ULONG ByteCount;
3002 ULONG BytesPerCluster;
3003
3004 PAGED_CODE();
3005
3006 DebugTrace(+1, Dbg, "FatSplitAllocation\n", 0);
3007 DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
3008 DebugTrace( 0, Dbg, " Mcb = %8lx\n", Mcb);
3009 DebugTrace( 0, Dbg, " SplitAtVbo = %8lx\n", SplitAtVbo);
3010 DebugTrace( 0, Dbg, " RemainingMcb = %8lx\n", RemainingMcb);
3011
3012 BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;
3013
3014 //
3015 // Assert that the split point is cluster aligned
3016 //
3017
3018 ASSERT( (SplitAtVbo & (BytesPerCluster - 1)) == 0 );
3019
3020 //
3021 // We should never be handed an empty source MCB and asked to split
3022 // at a non zero point.
3023 //
3024
3025 ASSERT( !((0 != SplitAtVbo) && (0 == FsRtlNumberOfRunsInLargeMcb( Mcb))));
3026
3027 //
3028 // Assert we were given an empty target Mcb.
3029 //
3030
3031 //
3032 // This assert is commented out to avoid hitting in the Ea error
3033 // path. In that case we will be using the same Mcb's to split the
3034 // allocation that we used to merge them. The target Mcb will contain
3035 // the runs that the split will attempt to insert.
3036 //
3037 //
3038 // ASSERT( FsRtlNumberOfRunsInMcb( RemainingMcb ) == 0 );
3039 //
3040
3041 _SEH2_TRY {
3042
3043 //
3044 // Move the runs after SplitAtVbo from the source to the target
3045 //
3046
3047 SourceVbo = SplitAtVbo;
3048 TargetVbo = 0;
3049
3050 while (FatLookupMcbEntry(Vcb, Mcb, SourceVbo, &Lbo, &ByteCount, NULL)) {
3051
3052 FatAddMcbEntry( Vcb, RemainingMcb, TargetVbo, Lbo, ByteCount );
3053
3054 FatRemoveMcbEntry( Vcb, Mcb, SourceVbo, ByteCount );
3055
3056 TargetVbo += ByteCount;
3057 SourceVbo += ByteCount;
3058
3059 //
3060 // If SourceVbo overflows, we were actually snipping off the end
3061 // of the maximal file ... and are now done.
3062 //
3063
3064 if (SourceVbo == 0) {
3065
3066 break;
3067 }
3068 }
3069
3070 //
3071 // Mark the last pre-split cluster as FAT_CLUSTER_LAST
3072 //
3073
3074 if ( SplitAtVbo != 0 ) {
3075
3076 FatLookupLastMcbEntry( Vcb, Mcb, &DontCare, &Lbo, NULL );
3077
3078 FatSetFatEntry( IrpContext,
3079 Vcb,
3080 FatGetIndexFromLbo( Vcb, Lbo ),
3081 FAT_CLUSTER_LAST );
3082 }
3083
3084 } _SEH2_FINALLY {
3085
3086 DebugUnwind( FatSplitAllocation );
3087
3088 //
3089 // If we got an exception, we must glue back together the Mcbs
3090 //
3091
3092 if ( _SEH2_AbnormalTermination() ) {
3093
3094 TargetVbo = SplitAtVbo;
3095 SourceVbo = 0;
3096
3097 while (FatLookupMcbEntry(Vcb, RemainingMcb, SourceVbo, &Lbo, &ByteCount, NULL)) {
3098
3099 FatAddMcbEntry( Vcb, Mcb, TargetVbo, Lbo, ByteCount );
3100
3101 FatRemoveMcbEntry( Vcb, RemainingMcb, SourceVbo, ByteCount );
3102
3103 TargetVbo += ByteCount;
3104 SourceVbo += ByteCount;
3105 }
3106 }
3107
3108 DebugTrace(-1, Dbg, "FatSplitAllocation -> (VOID)\n", 0);
3109 } _SEH2_END;
3110
3111 return;
3112 }
3113
3114 \f
3115 VOID
3116 FatMergeAllocation (
3117 IN PIRP_CONTEXT IrpContext,
3118 IN PVCB Vcb,
3119 IN OUT PLARGE_MCB Mcb,
3120 IN PLARGE_MCB SecondMcb
3121 )
3122
3123 /*++
3124
3125 Routine Description:
3126
3127 This routine takes two separate allocations described by two MCBs and
3128 joins them together into one allocation.
3129
3130 Pictorially what is done is the following (where ! denotes the end of
3131 the fat chain (i.e., FAT_CLUSTER_LAST)):
3132
3133
3134 Mcb |--a--|--b--|--c--!
3135
3136 SecondMcb |--d--|--e--|--f--|
3137
3138 becomes
3139
3140 Mcb |--a--|--b--|--c--|--d--|--e--|--f--|
3141
3142 SecondMcb |--d--|--e--|--f--|
3143
3144
3145 Arguments:
3146
3147 Vcb - Supplies the VCB being modified
3148
3149 Mcb - Supplies the MCB of the first allocation that is being modified.
3150 Upon return this Mcb will also describe the newly enlarged
3151 allocation
3152
3153 SecondMcb - Supplies the ZERO VBO BASED MCB of the second allocation
3154 that is being appended to the first allocation. This
3155 procedure leaves SecondMcb unchanged.
3156
3157 Return Value:
3158
3159 None.
3160
3161
3162 --*/
3163
3164 {
3165 VBO SpliceVbo;
3166 LBO SpliceLbo;
3167
3168 VBO SourceVbo;
3169 VBO TargetVbo;
3170
3171 LBO Lbo;
3172
3173 ULONG ByteCount;
3174
3175 PAGED_CODE();
3176
3177 DebugTrace(+1, Dbg, "FatMergeAllocation\n", 0);
3178 DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
3179 DebugTrace( 0, Dbg, " Mcb = %8lx\n", Mcb);
3180 DebugTrace( 0, Dbg, " SecondMcb = %8lx\n", SecondMcb);
3181
3182 _SEH2_TRY {
3183
3184 //
3185 // Append the runs from SecondMcb to Mcb
3186 //
3187
3188 (void)FatLookupLastMcbEntry( Vcb, Mcb, &SpliceVbo, &SpliceLbo, NULL );
3189
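//
// SpliceVbo is the offset of the last valid byte described by Mcb,
// so the appended runs begin at the very next byte.
//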
3190 SourceVbo = 0;
3191 TargetVbo = SpliceVbo + 1;
3192
3193 while (FatLookupMcbEntry(Vcb, SecondMcb, SourceVbo, &Lbo, &ByteCount, NULL)) {
3194
3195 FatAddMcbEntry( Vcb, Mcb, TargetVbo, Lbo, ByteCount );
3196
3197 SourceVbo += ByteCount;
3198 TargetVbo += ByteCount;
3199 }
3200
3201 //
3202 // Link the last pre-merge cluster to the first cluster of SecondMcb
3203 //
3204
3205 FatLookupMcbEntry( Vcb, SecondMcb, 0, &Lbo, (PULONG)NULL, NULL );
3206
3207 FatSetFatEntry( IrpContext,
3208 Vcb,
3209 FatGetIndexFromLbo( Vcb, SpliceLbo ),
3210 (FAT_ENTRY)FatGetIndexFromLbo( Vcb, Lbo ) );
3211
3212 } _SEH2_FINALLY {
3213
3214 DebugUnwind( FatMergeAllocation );
3215
3216 //
3217 // If we got an exception, we must remove the runs added to Mcb
3218 //
3219
3220 if ( _SEH2_AbnormalTermination() ) {
3221
3222 ULONG CutLength;
3223
3224 if ((CutLength = TargetVbo - (SpliceVbo + 1)) != 0) {
3225
3226 FatRemoveMcbEntry( Vcb, Mcb, SpliceVbo + 1, CutLength);
3227 }
3228 }
3229
3230 DebugTrace(-1, Dbg, "FatMergeAllocation -> (VOID)\n", 0);
3231 } _SEH2_END;
3232
3233 return;
3234 }
3235
3236 \f
3237 //
3238 // Internal support routine
3239 //
3240
3241 CLUSTER_TYPE
3242 FatInterpretClusterType (
3243 IN PVCB Vcb,
3244 IN FAT_ENTRY Entry
3245 )
3246
3247 /*++
3248
3249 Routine Description:
3250
3251 This procedure tells the caller how to interpret the input fat table
3252 entry. It will indicate if the fat cluster is available, reserved,
3253 bad, the last one, or another fat index. This procedure can deal
3254 with 12, 16, and 32 bit fats.
3255
3256 Arguments:
3257
3258 Vcb - Supplies the Vcb to examine, yields 12/16 bit info
3259
3260 Entry - Supplies the fat entry to examine
3261
3262 Return Value:
3263
3264 CLUSTER_TYPE - Is the type of the input Fat entry
3265
3266 --*/
3267
3268 {
3269 DebugTrace(+1, Dbg, "InterpretClusterType\n", 0);
3270 DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
3271 DebugTrace( 0, Dbg, " Entry = %8lx\n", Entry);
3272
3273 PAGED_CODE();
3274
3275 switch(Vcb->AllocationSupport.FatIndexBitSize ) {
3276 case 32:
3277 Entry &= FAT32_ENTRY_MASK;
3278 break;
3279
3280 case 12:
3281 ASSERT( Entry <= 0xfff );
3282 if (Entry >= 0x0ff0) {
3283 Entry |= 0x0FFFF000;
3284 }
3285 break;
3286
3287 default:
3288 case 16:
3289 ASSERT( Entry <= 0xffff );
3290 if (Entry >= 0x0fff0) {
3291 Entry |= 0x0FFF0000;
3292 }
3293 break;
3294 }
3295
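//
// To illustrate with a FAT16 volume: a raw entry of 0xFFF8 is
// normalized above to 0x0FFFFFF8, which is greater than
// FAT_CLUSTER_BAD and so is classified below as FatClusterLast.
//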
3296 if (Entry == FAT_CLUSTER_AVAILABLE) {
3297
3298 DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterAvailable\n", 0);
3299
3300 return FatClusterAvailable;
3301
3302 } else if (Entry < FAT_CLUSTER_RESERVED) {
3303
3304 DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterNext\n", 0);
3305
3306 return FatClusterNext;
3307
3308 } else if (Entry < FAT_CLUSTER_BAD) {
3309
3310 DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterReserved\n", 0);
3311
3312 return FatClusterReserved;
3313
3314 } else if (Entry == FAT_CLUSTER_BAD) {
3315
3316 DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterBad\n", 0);
3317
3318 return FatClusterBad;
3319
3320 } else {
3321
3322 DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterLast\n", 0);
3323
3324 return FatClusterLast;
3325 }
3326 }
3327
3328 \f
3329 //
3330 // Internal support routine
3331 //
3332
3333 VOID
3334 FatLookupFatEntry (
3335 IN PIRP_CONTEXT IrpContext,
3336 IN PVCB Vcb,
3337 IN ULONG FatIndex,
3338 IN OUT PULONG FatEntry,
3339 IN OUT PFAT_ENUMERATION_CONTEXT Context
3340 )
3341
3342 /*++
3343
3344 Routine Description:
3345
3346 This routine takes an index into the fat and gives back the value
3347 in the Fat at this index. At any given time, for a 16 bit fat, this
3348 routine allows only one page per volume of the fat to be pinned in
3349 memory. For a 12 bit fat, the entire fat (max 6k) is pinned. This
3350 extra layer of caching makes the vast majority of requests very
3351 fast. The context for this caching is stored in the caller-supplied Context.
3352
3353 Arguments:
3354
3355 Vcb - Supplies the Vcb to examine, yields 12/16 bit info,
3356 fat access context, etc.
3357
3358 FatIndex - Supplies the fat index to examine.
3359
3360 FatEntry - Receives the fat entry pointed to by FatIndex. Note that
3361 it must point to non-paged pool.
3362
3363 Context - This structure keeps track of a page of pinned fat between calls.
3364
3365 --*/
3366
3367 {
3368 PAGED_CODE();
3369
3370 DebugTrace(+1, Dbg, "FatLookupFatEntry\n", 0);
3371 DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
3372 DebugTrace( 0, Dbg, " FatIndex = %4x\n", FatIndex);
3373 DebugTrace( 0, Dbg, " FatEntry = %8lx\n", FatEntry);
3374
3375 //
3376 // Make sure they gave us a valid fat index.
3377 //
3378
3379 FatVerifyIndexIsValid(IrpContext, Vcb, FatIndex);
3380
3381 //
3382 // Case on 12 or 16 bit fats.
3383 //
3384 // In the 12 bit case (mostly floppies) we always have the whole fat
3385 // (max 6k bytes) pinned during allocation operations. This is possibly
3386 // a wee bit slower, but saves headaches over fat entries with 8 bits
3387 // on one page, and 4 bits on the next.
3388 //
3389 // The 16 bit case always keeps the last used page pinned until all
3390 // operations are done and it is unpinned.
3391 //
3392
3393 //
3394 // DEAL WITH 12 BIT CASE
3395 //
3396
3397 if (Vcb->AllocationSupport.FatIndexBitSize == 12) {
3398
3399 //
3400 // Check to see if the fat is already pinned, otherwise pin it.
3401 //
3402
3403 if (Context->Bcb == NULL) {
3404
3405 FatReadVolumeFile( IrpContext,
3406 Vcb,
3407 FatReservedBytes( &Vcb->Bpb ),
3408 FatBytesPerFat( &Vcb->Bpb ),
3409 &Context->Bcb,
3410 &Context->PinnedPage );
3411 }
3412
3413 //
3414 // Load the return value.
3415 //
3416
3417
3418 FatLookup12BitEntry( Context->PinnedPage, FatIndex, FatEntry );
3419
3420 } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) {
3421
3422 //
3423 // DEAL WITH 32 BIT CASE
3424 //
3425
3426 ULONG PageEntryOffset;
3427 ULONG OffsetIntoVolumeFile;
3428
3429 //
3430 // Initialize two local variables that help us.
3431 //
3432 OffsetIntoVolumeFile = FatReservedBytes(&Vcb->Bpb) + FatIndex * sizeof(FAT_ENTRY);
3433 PageEntryOffset = (OffsetIntoVolumeFile % PAGE_SIZE) / sizeof(FAT_ENTRY);
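//
// For example (illustrative values): with 1024 reserved bytes and
// FatIndex 0x500, the entry is at volume offset 1024 + 0x500 * 4 ==
// 0x1800; assuming a 4K page, that is slot (0x1800 % 0x1000) / 4 ==
// 0x200 within the pinned page.
//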
3434
3435 //
3436 // Check to see if we need to read in a new page of fat
3437 //
3438
3439 if ((Context->Bcb == NULL) ||
3440 (OffsetIntoVolumeFile / PAGE_SIZE != Context->VboOfPinnedPage / PAGE_SIZE)) {
3441
3442 //
3443 // The entry wasn't in the pinned page, so we must unpin the current
3444 // page (if any) and read in a new page.
3445 //
3446
3447 FatUnpinBcb( IrpContext, Context->Bcb );
3448
3449 FatReadVolumeFile( IrpContext,
3450 Vcb,
3451 OffsetIntoVolumeFile & ~(PAGE_SIZE - 1),
3452 PAGE_SIZE,
3453 &Context->Bcb,
3454 &Context->PinnedPage );
3455
3456 Context->VboOfPinnedPage = OffsetIntoVolumeFile & ~(PAGE_SIZE - 1);
3457 }
3458
3459 //
3460 // Grab the fat entry from the pinned page, and return
3461 //
3462
3463 *FatEntry = ((PULONG)(Context->PinnedPage))[PageEntryOffset] & FAT32_ENTRY_MASK;
3464
3465 } else {
3466
3467 //
3468 // DEAL WITH 16 BIT CASE
3469 //
3470
3471 ULONG PageEntryOffset;
3472 ULONG OffsetIntoVolumeFile;
3473
3474 //
3475 // Initialize two local variables that help us.
3476 //
3477
3478 OffsetIntoVolumeFile = FatReservedBytes(&Vcb->Bpb) + FatIndex * sizeof(USHORT);
3479 PageEntryOffset = (OffsetIntoVolumeFile % PAGE_SIZE) / sizeof(USHORT);
3480
3481 //
3482 // Check to see if we need to read in a new page of fat
3483 //
3484
3485 if ((Context->Bcb == NULL) ||
3486 (OffsetIntoVolumeFile / PAGE_SIZE != Context->VboOfPinnedPage / PAGE_SIZE)) {
3487
3488 //
3489 // The entry wasn't in the pinned page, so we must unpin the current
3490 // page (if any) and read in a new page.
3491 //
3492
3493 FatUnpinBcb( IrpContext, Context->Bcb );
3494
3495 FatReadVolumeFile( IrpContext,
3496 Vcb,
3497 OffsetIntoVolumeFile & ~(PAGE_SIZE - 1),
3498 PAGE_SIZE,
3499 &Context->Bcb,
3500 &Context->PinnedPage );
3501
3502 Context->VboOfPinnedPage = OffsetIntoVolumeFile & ~(PAGE_SIZE - 1);
3503 }
3504
3505 //
3506 // Grab the fat entry from the pinned page, and return
3507 //
3508
3509 *FatEntry = ((PUSHORT)(Context->PinnedPage))[PageEntryOffset];
3510 }
3511
3512 DebugTrace(-1, Dbg, "FatLookupFatEntry -> (VOID)\n", 0);
3513 return;
3514 }
3515
3516 \f
3517 VOID
3518 FatSetFatEntry (
3519 IN PIRP_CONTEXT IrpContext,
3520 IN PVCB Vcb,
3521 IN ULONG FatIndex,
3522 IN FAT_ENTRY FatEntry
3523 )
3524
3525 /*++
3526
3527 Routine Description:
3528
3529 This routine takes an index into the fat and puts a value in the Fat
3530 at this index. The routine special cases 12, 16 and 32 bit fats. In
3531 all cases we go to the cache manager for a piece of the fat.
3532
3533 We have a special form of this call for setting the DOS-style dirty bit.
3534 Unlike the dirty bit in the boot sector, we do not go to special effort
3535 to make sure that this hits the disk synchronously - if the system goes
3536 down in the window between the dirty bit being set in the boot sector
3537 and the FAT index zero dirty bit being lazy written, then life is tough.
3538
3539 The only possible scenario is that Win9x may see what it thinks is a clean
3540 volume that really isn't (hopefully Memphis will pay attention to our dirty
3541 bit as well). The dirty bit will get out quickly, and if heavy activity is
3542 occurring, then the dirty bit should actually be there virtually all of the
3543 // time since the act of cleaning the volume is the "rare" occurrence.
3544
3545 There are synchronization concerns that would crop up if we tried to make
3546 this synchronous. This thread may already own the Bcb shared for the first
3547 sector of the FAT (so we can't get it exclusive for a writethrough). This
3548 would require some more serious replumbing to work around than I want to
3549 consider at this time.
3550
3551 We can and do, however, synchronously set the bit clean.
3552
3553 At this point the reader should understand why the NT dirty bit is where it is.
3554
3555 Arguments:
3556
3557 Vcb - Supplies the Vcb to examine, yields 12/16/32 bit info, etc.
3558
3559 FatIndex - Supplies the destination fat index.
3560
3561 FatEntry - Supplies the source fat entry.
3562
3563 --*/
3564
3565 {
3566 LBO Lbo;
3567 PBCB Bcb = NULL;
3568 ULONG SectorSize;
3569 ULONG OffsetIntoVolumeFile;
3570 ULONG WasWait = TRUE;
3571 BOOLEAN RegularOperation = TRUE;
3572 BOOLEAN CleaningOperation = FALSE;
3573 BOOLEAN ReleaseMutex = FALSE;
3574
3575 PAGED_CODE();
3576
3577 DebugTrace(+1, Dbg, "FatSetFatEntry\n", 0);
3578 DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
3579 DebugTrace( 0, Dbg, " FatIndex = %4x\n", FatIndex);
3580 DebugTrace( 0, Dbg, " FatEntry = %4x\n", FatEntry);
3581
3582 //
3583 // Make sure they gave us a valid fat index if this isn't the special
3584 // clean-bit modifying call.
3585 //
3586
3587 if (FatIndex == FAT_DIRTY_BIT_INDEX) {
3588
3589 //
3590 // We are setting the clean bit state. Of course, we could
3591 // have corruption that would cause us to try to fiddle the
3592 // reserved index - we guard against this by having the
3593 // special entry values use the reserved high 4 bits that
3594 // we know that we'll never try to set.
3595 //
3596
3597 //
3598 // We don't want to repin the FAT pages involved here. Just
3599 // let the lazy writer hit them when it can.
3600 //
3601
3602 RegularOperation = FALSE;
3603
3604 switch (FatEntry) {
3605 case FAT_CLEAN_VOLUME:
3606 FatEntry = FAT_CLEAN_ENTRY;
3607 CleaningOperation = TRUE;
3608 break;
3609
3610 case FAT_DIRTY_VOLUME:
3611 switch (Vcb->AllocationSupport.FatIndexBitSize) {
3612 case 12:
3613 FatEntry = FAT12_DIRTY_ENTRY;
3614 break;
3615
3616 case 32:
3617 FatEntry = FAT32_DIRTY_ENTRY;
3618 break;
3619
3620 default:
3621 FatEntry = FAT16_DIRTY_ENTRY;
3622 break;
3623 }
3624 break;
3625
3626 default:
3627 FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
3628 break;
3629 }
3630
3631 //
3632 // Disable dirtying semantics for the duration of this operation. Force this
3633 // operation to wait for the duration.
3634 //
3635
3636 WasWait = FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT );
3637 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT | IRP_CONTEXT_FLAG_DISABLE_DIRTY );
3638
3639 } else {
3640
3641 ASSERT( !(FatEntry & ~FAT32_ENTRY_MASK) );
3642 FatVerifyIndexIsValid(IrpContext, Vcb, FatIndex);
3643 }
3644
3645 //
3646 // Set Sector Size
3647 //
3648
3649 SectorSize = 1 << Vcb->AllocationSupport.LogOfBytesPerSector;
3650
3651 //
3652 // Case on 12 or 16 bit fats.
3653 //
3654 // In the 12 bit case (mostly floppies) we always have the whole fat
3655 // (max 6k bytes) pinned during allocation operations. This is possibly
3656 // a wee bit slower, but saves headaches over fat entries with 8 bits
3657 // on one page, and 4 bits on the next.
3658 //
3659 // In the 16 bit case we only read the page that we need to set the fat
3660 // entry.
3661 //
3662
3663 //
3664 // DEAL WITH 12 BIT CASE
3665 //
3666
3667 _SEH2_TRY {
3668
3669 if (Vcb->AllocationSupport.FatIndexBitSize == 12) {
3670
3671 PVOID PinnedFat;
3672
3673 //
3674 // Make sure we have a valid entry
3675 //
3676
3677 FatEntry &= 0xfff;
3678
3679 //
3680 // We read in the entire fat. Note that using prepare write marks
3681 // the bcb pre-dirty, so we don't have to do it explicitly.
3682 //
3683
3684 OffsetIntoVolumeFile = FatReservedBytes( &Vcb->Bpb ) + FatIndex * 3 / 2;
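//
// A 12 bit entry occupies a byte and a half, hence the * 3 / 2. For
// example, with 512 reserved bytes and 512 byte sectors, entry 341
// starts at byte 512 + 341 * 3 / 2 == 1023, the final byte of its
// sector, and spills over into the sector that follows.
//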
3685
3686 FatPrepareWriteVolumeFile( IrpContext,
3687 Vcb,
3688 FatReservedBytes( &Vcb->Bpb ),
3689 FatBytesPerFat( &Vcb->Bpb ),
3690 &Bcb,
3691 &PinnedFat,
3692 RegularOperation,
3693 FALSE );
3694
3695 //
3696 // Mark the sector(s) dirty in the DirtyFatMcb. This call is
3697 // complicated somewhat for the 12 bit case since a single
3698 // entry write can span two sectors (and pages).
3699 //
3700 // Get the Lbo for the sector where the entry starts, and add it to
3701 // the dirty fat Mcb.
3702 //
3703
3704 Lbo = OffsetIntoVolumeFile & ~(SectorSize - 1);
3705
3706 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize);
3707
3708 //
3709 // If the entry started on the last byte of the sector, it continues
3710 // to the next sector, so mark the next sector dirty as well.
3711 //
3712 // Note that this entry will simply coalesce with the last entry,
3713 // so this operation cannot fail. Also if we get this far, we have
3714 // made it, so no unwinding will be needed.
3715 //
3716
3717 if ( (OffsetIntoVolumeFile & (SectorSize - 1)) == (SectorSize - 1) ) {
3718
3719 Lbo += SectorSize;
3720
3721 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
3722 }
3723
3724 //
3725 // Store the entry into the fat; we need a little synchronization
3726 // here and can't use a spinlock since the bytes might not be
3727 // resident.
3728 //
3729
3730 FatLockFreeClusterBitMap( Vcb );
3731 ReleaseMutex = TRUE;
3732
3733 FatSet12BitEntry( PinnedFat, FatIndex, FatEntry );
3734
3735 FatUnlockFreeClusterBitMap( Vcb );
3736 ReleaseMutex = FALSE;
3737
3738 } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) {
3739
3740 //
3741 // DEAL WITH 32 BIT CASE
3742 //
3743
3744 PULONG PinnedFatEntry32;
3745
3746 //
3747 // Read in a new page of fat
3748 //
3749
3750 OffsetIntoVolumeFile = FatReservedBytes( &Vcb->Bpb ) +
3751 FatIndex * sizeof( FAT_ENTRY );
3752
3753 FatPrepareWriteVolumeFile( IrpContext,
3754 Vcb,
3755 OffsetIntoVolumeFile,
3756 sizeof(FAT_ENTRY),
3757 &Bcb,
3758 (PVOID *)&PinnedFatEntry32,
3759 RegularOperation,
3760 FALSE );
3761 //
3762 // Mark the sector dirty in the DirtyFatMcb
3763 //
3764
3765 Lbo = OffsetIntoVolumeFile & ~(SectorSize - 1);
3766
3767 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize);
3768
3769 //
3770 // Store the FatEntry to the pinned page.
3771 //
3772 // Preserve the reserved bits in FAT32 entries in the file heap.
3773 //
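// Only the low 28 bits of a FAT32 entry are significant; the top
// four bits are reserved and must round-trip unchanged, which is
// what the masking below accomplishes.
//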
3774
3775 #ifdef ALPHA
3776 FatLockFreeClusterBitMap( Vcb );
3777 ReleaseMutex = TRUE;
3778 #endif // ALPHA
3779
3780 if (FatIndex != FAT_DIRTY_BIT_INDEX) {
3781
3782 *PinnedFatEntry32 = ((*PinnedFatEntry32 & ~FAT32_ENTRY_MASK) | FatEntry);
3783
3784 } else {
3785
3786 *PinnedFatEntry32 = FatEntry;
3787 }
3788
3789 #ifdef ALPHA
3790 FatUnlockFreeClusterBitMap( Vcb );
3791 ReleaseMutex = FALSE;
3792 #endif // ALPHA
3793
3794 } else {
3795
3796 //
3797 // DEAL WITH 16 BIT CASE
3798 //
3799
3800 PUSHORT PinnedFatEntry;
3801
3802 //
3803 // Read in a new page of fat
3804 //
3805
3806 OffsetIntoVolumeFile = FatReservedBytes( &Vcb->Bpb ) +
3807 FatIndex * sizeof(USHORT);
3808
3809 FatPrepareWriteVolumeFile( IrpContext,
3810 Vcb,
3811 OffsetIntoVolumeFile,
3812 sizeof(USHORT),
3813 &Bcb,
3814 (PVOID *)&PinnedFatEntry,
3815 RegularOperation,
3816 FALSE );
3817 //
3818 // Mark the sector dirty in the DirtyFatMcb
3819 //
3820
3821 Lbo = OffsetIntoVolumeFile & ~(SectorSize - 1);
3822
3823 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize);
3824
3825 //
3826 // Store the FatEntry to the pinned page.
3827 //
3828 // We need extra synchronization here for broken architectures
3829 // like the ALPHA that don't support atomic 16 bit writes.
3830 //
3831
3832 #ifdef ALPHA
3833 FatLockFreeClusterBitMap( Vcb );
3834 ReleaseMutex = TRUE;
3835 #endif // ALPHA
3836
3837 *PinnedFatEntry = (USHORT)FatEntry;
3838
3839 #ifdef ALPHA
3840 FatUnlockFreeClusterBitMap( Vcb );
3841 ReleaseMutex = FALSE;
3842 #endif // ALPHA
3843 }
3844
3845 } _SEH2_FINALLY {
3846
3847 DebugUnwind( FatSetFatEntry );
3848
3849 //
3850 // Re-enable volume dirtying in case this was a dirty bit operation.
3851 //
3852
3853 ClearFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_DISABLE_DIRTY );
3854
3855 //
3856 // Make this operation asynchronous again if needed.
3857 //
3858
3859 if (!WasWait) {
3860
3861 ClearFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT );
3862 }
3863
3864 //
3865 // If we still somehow have the Mutex, release it.
3866 //
3867
3868 if (ReleaseMutex) {
3869
3870 ASSERT( _SEH2_AbnormalTermination() );
3871
3872 FatUnlockFreeClusterBitMap( Vcb );
3873 }
3874
3875 //
3876 // Unpin the Bcb. For cleaning operations, we make this write-through.
3877 //
3878
3879 if (CleaningOperation && Bcb) {
3880
3881 IO_STATUS_BLOCK IgnoreStatus;
3882
3883 CcRepinBcb( Bcb );
3884 CcUnpinData( Bcb );
3885 DbgDoit( IrpContext->PinCount -= 1 );
3886 CcUnpinRepinnedBcb( Bcb, TRUE, &IgnoreStatus );
3887
3888 } else {
3889
3890 FatUnpinBcb(IrpContext, Bcb);
3891 }
3892
3893 DebugTrace(-1, Dbg, "FatSetFatEntry -> (VOID)\n", 0);
3894 } _SEH2_END;
3895
3896 return;
3897 }
3898
3899 \f
3900 //
3901 // Internal support routine
3902 //
3903
3904 VOID
3905 FatSetFatRun (
3906 IN PIRP_CONTEXT IrpContext,
3907 IN PVCB Vcb,
3908 IN ULONG StartingFatIndex,
3909 IN ULONG ClusterCount,
3910 IN BOOLEAN ChainTogether
3911 )
3912
3913 /*++
3914
3915 Routine Description:
3916
3917 This routine sets a continuous run of clusters in the fat. If ChainTogether
3918 is TRUE, then the clusters are linked together as in normal Fat fashion,
3919 with the last cluster receiving FAT_CLUSTER_LAST. If ChainTogether is
3920 FALSE, all the entries are set to FAT_CLUSTER_AVAILABLE, effectively
3921 freeing all the clusters in the run.
3922
3923 Arguments:
3924
3925 Vcb - Supplies the Vcb to examine, yields 12/16 bit info, etc.
3926
3927 StartingFatIndex - Supplies the destination fat index.
3928
3929 ClusterCount - Supplies the number of contiguous clusters to work on.
3930
3931 ChainTogether - Tells us whether to fill the entries with links, or
3932 FAT_CLUSTER_AVAILABLE
3933
3934
3935 Return Value:
3936
3937 VOID
3938
3939 --*/
3940
3941 {
3942 #define MAXCOUNTCLUS 0x10000
3943 #define COUNTSAVEDBCBS ((MAXCOUNTCLUS * sizeof(FAT_ENTRY) / PAGE_SIZE) + 2)
3944 PBCB SavedBcbs[COUNTSAVEDBCBS][2];
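//
// Assuming 4K pages, this bounds a single pass at 0x10000 FAT32
// entries, i.e. 256KB of FAT pinned through at most 66 saved Bcbs.
//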
3945
3946 ULONG SectorSize;
3947 ULONG Cluster;
3948
3949 LBO StartSectorLbo;
3950 LBO FinalSectorLbo;
3951 LBO Lbo;
3952
3953 PVOID PinnedFat;
3954
3955 #ifndef __REACTOS__
3956 ULONG StartingPage;
3957 #endif
3958
3959 BOOLEAN ReleaseMutex = FALSE;
3960
3961 ULONG SavedStartingFatIndex = StartingFatIndex;
3962
3963 PAGED_CODE();
3964
3965 DebugTrace(+1, Dbg, "FatSetFatRun\n", 0);
3966 DebugTrace( 0, Dbg, " Vcb = %8lx\n", Vcb);
3967 DebugTrace( 0, Dbg, " StartingFatIndex = %8x\n", StartingFatIndex);
3968 DebugTrace( 0, Dbg, " ClusterCount = %8lx\n", ClusterCount);
3969 DebugTrace( 0, Dbg, " ChainTogether = %s\n", ChainTogether ? "TRUE":"FALSE");
3970
3971 //
3972 // Make sure they gave us a valid fat run.
3973 //
3974
3975 FatVerifyIndexIsValid(IrpContext, Vcb, StartingFatIndex);
3976 FatVerifyIndexIsValid(IrpContext, Vcb, StartingFatIndex + ClusterCount - 1);
3977
3978 //
3979 // Check special case
3980 //
3981
3982 if (ClusterCount == 0) {
3983
3984 DebugTrace(-1, Dbg, "FatSetFatRun -> (VOID)\n", 0);
3985 return;
3986 }
3987
3988 //
3989 // Set Sector Size
3990 //
3991
3992 SectorSize = 1 << Vcb->AllocationSupport.LogOfBytesPerSector;
3993
3994 //
3995 // Case on 12 or 16 bit fats.
3996 //
3997 // In the 12 bit case (mostly floppies) we always have the whole fat
3998 // (max 6k bytes) pinned during allocation operations. This is possibly
3999 // a wee bit slower, but saves headaches over fat entries with 8 bits
4000 // on one page, and 4 bits on the next.
4001 //
4002 // In the 16 bit case we only read one page at a time, as needed.
4003 //
4004
4005 //
4006 // DEAL WITH 12 BIT CASE
4007 //
4008
4009 _SEH2_TRY {
4010
4011 if (Vcb->AllocationSupport.FatIndexBitSize == 12) {
4012
4013 #ifndef __REACTOS__
4014 StartingPage = 0;
4015 #endif
4016
4017 //
4018 // We read in the entire fat. Note that using prepare write marks
4019 // the bcb pre-dirty, so we don't have to do it explicitly.
4020 //
4021
4022 RtlZeroMemory( &SavedBcbs[0], 2 * sizeof(PBCB) * 2);
4023
4024 FatPrepareWriteVolumeFile( IrpContext,
4025 Vcb,
4026 FatReservedBytes( &Vcb->Bpb ),
4027 FatBytesPerFat( &Vcb->Bpb ),
4028 &SavedBcbs[0][0],
4029 &PinnedFat,
4030 TRUE,
4031 FALSE );
4032
4033 //
4034 // Mark the affected sectors dirty. Note that FinalSectorLbo is
4035 // the Lbo of the END of the entry (Thus * 3 + 2). This makes sure
4036 // we catch the case of a dirty fat entry straddling a sector boundary.
4037 //
4038 // Note that if the first AddMcbEntry succeeds, all following ones
4039 // will simply coalesce, and thus also succeed.
4040 //
4041
4042 StartSectorLbo = (FatReservedBytes( &Vcb->Bpb ) + StartingFatIndex * 3 / 2)
4043 & ~(SectorSize - 1);
4044
4045 FinalSectorLbo = (FatReservedBytes( &Vcb->Bpb ) + ((StartingFatIndex +
4046 ClusterCount) * 3 + 2) / 2) & ~(SectorSize - 1);
4047
4048 for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {
4049
4050 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
4051 }
4052
4053 //
4054 // Store the entries into the fat; we need a little
4055 // synchronization here and can't use a spinlock since the bytes
4056 // might not be resident.
4057 //
4058
4059 FatLockFreeClusterBitMap( Vcb );
4060 ReleaseMutex = TRUE;
4061
4062 for (Cluster = StartingFatIndex;
4063 Cluster < StartingFatIndex + ClusterCount - 1;
4064 Cluster++) {
4065
4066 FatSet12BitEntry( PinnedFat,
4067 Cluster,
4068 ChainTogether ? Cluster + 1 : FAT_CLUSTER_AVAILABLE );
4069 }
4070
4071 //
4072 // Save the last entry
4073 //
4074
4075 FatSet12BitEntry( PinnedFat,
4076 Cluster,
4077 ChainTogether ?
4078 FAT_CLUSTER_LAST & 0xfff : FAT_CLUSTER_AVAILABLE );
4079
4080 FatUnlockFreeClusterBitMap( Vcb );
4081 ReleaseMutex = FALSE;
4082
4083 } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) {
4084
4085 //
4086 // DEAL WITH 32 BIT CASE
4087 //
4088
4089 for (;;) {
4090
4091 VBO StartOffsetInVolume;
4092 VBO FinalOffsetInVolume;
4093
4094 ULONG Page;
4095 ULONG FinalCluster;
4096 PULONG FatEntry;
4097 ULONG ClusterCountThisRun;
4098
4099 StartOffsetInVolume = FatReservedBytes(&Vcb->Bpb) +
4100 StartingFatIndex * sizeof(FAT_ENTRY);
4101
4102 if (ClusterCount > MAXCOUNTCLUS) {
4103 ClusterCountThisRun = MAXCOUNTCLUS;
4104 } else {
4105 ClusterCountThisRun = ClusterCount;
4106 }
4107
4108 FinalOffsetInVolume = StartOffsetInVolume +
4109 (ClusterCountThisRun - 1) * sizeof(FAT_ENTRY);
4110
4111 #ifndef __REACTOS__
4112 StartingPage = StartOffsetInVolume / PAGE_SIZE;
4113 #endif
4114
4115 {
4116 ULONG NumberOfPages;
4117 ULONG Offset;
4118
4119 NumberOfPages = (FinalOffsetInVolume / PAGE_SIZE) -
4120 (StartOffsetInVolume / PAGE_SIZE) + 1;
4121
4122 RtlZeroMemory( &SavedBcbs[0][0], (NumberOfPages + 1) * sizeof(PBCB) * 2 );
4123
4124 for ( Page = 0, Offset = StartOffsetInVolume & ~(PAGE_SIZE - 1);
4125 Page < NumberOfPages;
4126 Page++, Offset += PAGE_SIZE ) {
4127
4128 FatPrepareWriteVolumeFile( IrpContext,
4129 Vcb,
4130 Offset,
4131 PAGE_SIZE,
4132 &SavedBcbs[Page][0],
4133 (PVOID *)&SavedBcbs[Page][1],
4134 TRUE,
4135 FALSE );
4136
4137 if (Page == 0) {
4138
4139 FatEntry = (PULONG)((PUCHAR)SavedBcbs[0][1] +
4140 (StartOffsetInVolume % PAGE_SIZE));
4141 }
4142 }
4143 }
4144
4145 //
4146 // Mark the run dirty
4147 //
4148
4149 StartSectorLbo = StartOffsetInVolume & ~(SectorSize - 1);
4150 FinalSectorLbo = FinalOffsetInVolume & ~(SectorSize - 1);
4151
4152 for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {
4153
4154 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO)Lbo, Lbo, SectorSize );
4155 }
4156
4157 //
4158 // Store the entries
4159 //
4160 // We need extra synchronization here for broken architectures
4161 // like the ALPHA that don't support atomic 16 bit writes.
4162 //
4163
4164 #ifdef ALPHA
4165 FatLockFreeClusterBitMap( Vcb );
4166 ReleaseMutex = TRUE;
4167 #endif // ALPHA
4168
4169 FinalCluster = StartingFatIndex + ClusterCountThisRun - 1;
4170 Page = 0;
4171
4172 for (Cluster = StartingFatIndex;
4173 Cluster <= FinalCluster;
4174 Cluster++, FatEntry++) {
4175
4176 //
4177 // If we just crossed a page boundary (as opposed to starting
4178 // on one), update our idea of FatEntry.
4179
4180 if ( (((ULONG_PTR)FatEntry & (PAGE_SIZE-1)) == 0) &&
4181 (Cluster != StartingFatIndex) ) {
4182
4183 Page += 1;
4184 FatEntry = (PULONG)SavedBcbs[Page][1];
4185 }
4186
4187 *FatEntry = ChainTogether ? (FAT_ENTRY)(Cluster + 1) :
4188 FAT_CLUSTER_AVAILABLE;
4189 }
4190
4191 //
4192 // Fix up the last entry if we were chaining together
4193 //
4194
4195 if ((ClusterCount <= MAXCOUNTCLUS) &&
4196 ChainTogether ) {
4197
4198 *(FatEntry-1) = FAT_CLUSTER_LAST;
4199 }
4200
4201 #ifdef ALPHA
4202 FatUnlockFreeClusterBitMap( Vcb );
4203 ReleaseMutex = FALSE;
4204 #endif // ALPHA
4205
4206 {
4207 ULONG i = 0;
4208 //
4209 // Unpin the Bcbs
4210 //
4211
4212 while ( SavedBcbs[i][0] != NULL ) {
4213
4214 FatUnpinBcb( IrpContext, SavedBcbs[i][0] );
4215 SavedBcbs[i][0] = NULL;
4216
4217 i += 1;
4218 }
4219 }
4220
4221 if (ClusterCount <= MAXCOUNTCLUS) {
4222
4223 break;
4224
4225 } else {
4226
4227 StartingFatIndex += MAXCOUNTCLUS;
4228 ClusterCount -= MAXCOUNTCLUS;
4229 }
4230 }
4231
4232 } else {
4233
4234 //
4235 // DEAL WITH 16 BIT CASE
4236 //
4237
4238 VBO StartOffsetInVolume;
4239 VBO FinalOffsetInVolume;
4240
4241 ULONG Page;
4242 ULONG FinalCluster;
4243 PUSHORT FatEntry;
4244
4245 StartOffsetInVolume = FatReservedBytes(&Vcb->Bpb) +
4246 StartingFatIndex * sizeof(USHORT);
4247
4248 FinalOffsetInVolume = StartOffsetInVolume +
4249 (ClusterCount - 1) * sizeof(USHORT);
4250
4251 #ifndef __REACTOS__
4252 StartingPage = StartOffsetInVolume / PAGE_SIZE;
4253 #endif
4254
4255 //
4256 // Read in one page of fat at a time. We cannot read in
4257 // all of the fat we need because of cache manager limitations.
4258 //
4259 // SavedBcbs was initialized to be able to hold the largest
4260 // possible number of pages in a fat plus an extra one to
4261 // accommodate the boot sector, plus one more to make sure there
4262 // is enough room for the RtlZeroMemory below that needs to mark
4263 // the first Bcb after all the ones we will use as an end marker.
4264 //
4265
4266 {
4267 ULONG NumberOfPages;
4268 ULONG Offset;
4269
4270 NumberOfPages = (FinalOffsetInVolume / PAGE_SIZE) -
4271 (StartOffsetInVolume / PAGE_SIZE) + 1;
4272
4273 RtlZeroMemory( &SavedBcbs[0][0], (NumberOfPages + 1) * sizeof(PBCB) * 2 );
4274
4275 for ( Page = 0, Offset = StartOffsetInVolume & ~(PAGE_SIZE - 1);
4276 Page < NumberOfPages;
4277 Page++, Offset += PAGE_SIZE ) {
4278
4279 FatPrepareWriteVolumeFile( IrpContext,
4280 Vcb,
4281 Offset,
4282 PAGE_SIZE,
4283 &SavedBcbs[Page][0],
4284 (PVOID *)&SavedBcbs[Page][1],
4285 TRUE,
4286 FALSE );
4287
4288 if (Page == 0) {
4289
4290 FatEntry = (PUSHORT)((PUCHAR)SavedBcbs[0][1] +
4291 (StartOffsetInVolume % PAGE_SIZE));
4292 }
4293 }
4294 }
4295
4296 //
4297 // Mark the run dirty
4298 //
4299
4300 StartSectorLbo = StartOffsetInVolume & ~(SectorSize - 1);
4301 FinalSectorLbo = FinalOffsetInVolume & ~(SectorSize - 1);
4302
4303 for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {
4304
4305 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
4306 }
4307
4308 //
4309 // Store the entries
4310 //
4311 // We need extra synchronization here for broken architectures
4312 // like the ALPHA that don't support atomic 16 bit writes.
4313 //
4314
4315 #ifdef ALPHA
4316 FatLockFreeClusterBitMap( Vcb );
4317 ReleaseMutex = TRUE;
4318 #endif // ALPHA
4319
4320 FinalCluster = StartingFatIndex + ClusterCount - 1;
4321 Page = 0;
4322
4323 for (Cluster = StartingFatIndex;
4324 Cluster <= FinalCluster;
4325 Cluster++, FatEntry++) {
4326
4327 //
4328 // If we just crossed a page boundary (as opposed to starting
4329 // on one), update our idea of FatEntry.
4330
4331 if ( (((ULONG_PTR)FatEntry & (PAGE_SIZE-1)) == 0) &&
4332 (Cluster != StartingFatIndex) ) {
4333
4334 Page += 1;
4335 FatEntry = (PUSHORT)SavedBcbs[Page][1];
4336 }
4337
4338 *FatEntry = (USHORT) (ChainTogether ? (FAT_ENTRY)(Cluster + 1) :
4339 FAT_CLUSTER_AVAILABLE);
4340 }
4341
4342 //
4343 // Fix up the last entry if we were chaining together
4344 //
4345
4346 if ( ChainTogether ) {
4347
4348 *(FatEntry-1) = (USHORT)FAT_CLUSTER_LAST;
4349 }
4350 #ifdef ALPHA
4351 FatUnlockFreeClusterBitMap( Vcb );
4352 ReleaseMutex = FALSE;
4353 #endif // ALPHA
4354 }
4355
4356 } _SEH2_FINALLY {
4357
4358 ULONG i = 0;
4359
4360 DebugUnwind( FatSetFatRun );
4361
4362 //
4363 // If we still somehow have the Mutex, release it.
4364 //
4365
4366 if (ReleaseMutex) {
4367
4368 ASSERT( _SEH2_AbnormalTermination() );
4369
4370 FatUnlockFreeClusterBitMap( Vcb );
4371 }
4372
4373 //
4374 // Unpin the Bcbs
4375 //
4376
4377 while ( SavedBcbs[i][0] != NULL ) {
4378
4379 FatUnpinBcb( IrpContext, SavedBcbs[i][0] );
4380
4381 i += 1;
4382 }
4383
4384 //
4385 // At this point nothing in this finally clause should have raised.
4386 // So, now comes the unsafe (sigh) stuff.
4387 //
4388
4389 if ( _SEH2_AbnormalTermination() &&
4390 (Vcb->AllocationSupport.FatIndexBitSize == 32) ) {
4391
4392 //
4393 // Fat32 unwind
4394 //
4395 // This case is more complex because the FAT12 and FAT16 cases
4396 // pin all the needed FAT pages (128K max) before changing any
4397 // FAT entries, after which nothing can fail. In the Fat32
4398 // case, it may not be practical to pin all the needed FAT
4399 // pages, because that could span many megabytes. So Fat32
4400 // attacks in chunks, and if a failure occurs once the first
4401 // chunk has been updated, we have to back out the updates.
4402 //
4403 // The unwind consists of walking back over each FAT entry we
4404 // have changed, setting it back to the previous value. Note
4405 // that the previous value will either be FAT_CLUSTER_AVAILABLE
4406 // (if ChainTogether==TRUE) or a simple link to the successor
4407 // (if ChainTogether==FALSE).
4408 //
4409 // We concede that any one of these calls could fail too; our
4410 // objective is to make this case no more likely than the case
4411 // for a file consisting of multiple disjoint runs.
4412 //
4413
4414 while ( StartingFatIndex > SavedStartingFatIndex ) {
4415
4416 StartingFatIndex--;
4417
4418 FatSetFatEntry( IrpContext, Vcb, StartingFatIndex,
4419 ChainTogether ?
4420 FAT_CLUSTER_AVAILABLE : StartingFatIndex + 1 );
4421 }
4422 }
4423
4424 DebugTrace(-1, Dbg, "FatSetFatRun -> (VOID)\n", 0);
4425 } _SEH2_END;
4426
4427 return;
4428 }
4429
4430 \f
4431 //
4432 // Internal support routine
4433 //
4434
4435 UCHAR
4436 FatLogOf (
4437 IN ULONG Value
4438 )
4439
4440 /*++
4441
4442 Routine Description:
4443
4444 This routine just computes the base 2 log of an integer. It is only used
4445 on objects that are known to be powers of two.
4446
4447 Arguments:
4448
4449 Value - The value to take the base 2 log of.
4450
4451 Return Value:
4452
4453 UCHAR - The base 2 log of Value.
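
For example, FatLogOf(0x200) is 9. A Value that is not a power of
two trips the bugcheck below.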
4454
4455 --*/
4456
4457 {
4458 UCHAR Log = 0;
4459
4460 PAGED_CODE();
4461
4462 DebugTrace(+1, Dbg, "LogOf\n", 0);
4463 DebugTrace( 0, Dbg, " Value = %8lx\n", Value);
4464
4465 //
4466 // Knock bits off until we get a one at position 0
4467 //
4468
4469 while ( (Value & 0xfffffffe) != 0 ) {
4470
4471 Log++;
4472 Value >>= 1;
4473 }
4474
4475 //
4476 // If there was more than one bit set, the file system messed up;
4477 // Bug Check.
4478 //
4479
4480 if (Value != 0x1) {
4481
4482 DebugTrace( 0, Dbg, "Received non power of 2.\n", 0);
4483
4484 FatBugCheck( Value, Log, 0 );
4485 }
4486
4487 DebugTrace(-1, Dbg, "LogOf -> %8lx\n", Log);
4488
4489 return Log;
4490 }
4491
4492 \f
4493 VOID
4494 FatExamineFatEntries(
4495 IN PIRP_CONTEXT IrpContext,
4496 IN PVCB Vcb,
4497 IN ULONG StartIndex OPTIONAL,
4498 IN ULONG EndIndex OPTIONAL,
4499 IN BOOLEAN SetupWindows,
4500 IN PFAT_WINDOW SwitchToWindow OPTIONAL,
4501 IN PULONG BitMapBuffer OPTIONAL
4502 )
4503 /*++
4504
4505 Routine Description:
4506
4507 This routine handles scanning a segment of the FAT into in-memory structures.
4508
4509 There are three fundamental cases, with variations depending on the FAT type:
4510
4511 1) During volume setup, FatSetupAllocations
4512
4513 1a) for FAT12/16, read the FAT into our free cluster bitmap
4514 1b) for FAT32, perform the initial scan for window free cluster counts
4515
4516 2) Switching FAT32 windows on the fly during system operation
4517
4518 3) Reading arbitrary segments of the FAT for the purposes of the GetVolumeBitmap
4519 call (only for FAT32)
4520
4521 There really is too much going on in here. At some point this should be
4522 substantially rewritten.
4523
4524 Arguments:
4525
4526 Vcb - Supplies the volume involved
4527
4528 StartIndex - Supplies the starting cluster, ignored if SwitchToWindow supplied
4529
4530 EndIndex - Supplies the ending cluster, ignored if SwitchToWindow supplied
4531
4532 SetupWindows - Indicates if we are doing the initial FAT32 scan
4533
4534 SwitchToWindow - Supplies the FAT window we are examining and will switch to
4535
4536 BitMapBuffer - Supplies a specific bitmap to fill in, if not supplied we fill
4537 in the volume free cluster bitmap if !SetupWindows
4538
4539 Return Value:
4540
4541 None. Lots of side effects.
4542
4543 --*/
4544 {
4545 ULONG FatIndexBitSize;
4546 ULONG Page;
4547 ULONG Offset;
4548 ULONG FatIndex;
4549 FAT_ENTRY FatEntry = FAT_CLUSTER_AVAILABLE;
4550 FAT_ENTRY FirstFatEntry = FAT_CLUSTER_AVAILABLE;
4551 PUSHORT FatBuffer;
4552 PVOID pv;
4553 PBCB Bcb;
4554 ULONG EntriesPerWindow;
4555 #ifndef __REACTOS__
4556 ULONG BitIndex;
4557 #endif
4558
4559 ULONG ClustersThisRun;
4560 ULONG StartIndexOfThisRun;
4561
4562 PULONG FreeClusterCount = NULL;
4563
4564 PFAT_WINDOW CurrentWindow = NULL;
4565
4566 PVOID NewBitMapBuffer = NULL;
4567 PRTL_BITMAP BitMap = NULL;
4568 RTL_BITMAP PrivateBitMap;
4569
4570 enum RunType {
4571 FreeClusters,
4572 AllocatedClusters,
4573 UnknownClusters
4574 } CurrentRun;
4575
4576 PAGED_CODE();
4577
4578 //
4579 // Now assert correct usage.
4580 //
4581
4582 FatIndexBitSize = Vcb->AllocationSupport.FatIndexBitSize;
4583
4584 ASSERT( !(SetupWindows && (SwitchToWindow || BitMapBuffer)));
4585 ASSERT( !(SetupWindows && FatIndexBitSize != 32));
4586
4587 if (Vcb->NumberOfWindows > 1) {
4588
4589 //
4590 // FAT32: Calculate the number of FAT entries covered by a window. This is
4591 // equal to the number of bits in the freespace bitmap, the size of which
4592 // is hardcoded.
4593 //
4594
4595 EntriesPerWindow = MAX_CLUSTER_BITMAP_SIZE;
4596
4597 } else {
4598
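//
// FAT12/16: the single window covers every cluster on the volume.
//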
4599 EntriesPerWindow = Vcb->AllocationSupport.NumberOfClusters;
4600 }
4601
4602 //
4603 // We will also fill in the cumulative count of free clusters for
4604 // the entire volume. If this is not appropriate, NULL it out
4605 // shortly.
4606 //
4607
4608 FreeClusterCount = &Vcb->AllocationSupport.NumberOfFreeClusters;
4609
4610 if (SetupWindows) {
4611
4612 ASSERT(BitMapBuffer == NULL);
4613
4614 //
4615 // In this case we're just supposed to scan the fat and set up
4616 // the information regarding where the buckets fall and how many
4617 // free clusters are in each.
4618 //
4619 // It is fine to monkey with the real windows; we must be able
4620 // to do this to activate the volume.
4621 //
4622
4623 BitMap = NULL;
4624
4625 CurrentWindow = &Vcb->Windows[0];
4626 CurrentWindow->FirstCluster = StartIndex;
4627 CurrentWindow->ClustersFree = 0;
4628
4629 //
4630 // We always wish to calculate total free clusters when
4631 // setting up the FAT windows.
4632 //
4633
4634 } else if (BitMapBuffer == NULL) {
4635
4636 //
4637 // We will be filling in the free cluster bitmap for the volume.
4638 // Careful: we can raise out of here and be hopelessly hosed if
4639 // we build this up in the main bitmap/window itself.
4640 //
4641 // For simplicity's sake, we'll do the swap for everyone. FAT32
4642 // provokes the need since we can't tolerate partial results
4643 // when switching windows.
4644 //
4645
4646 ASSERT( SwitchToWindow );
4647
4648 CurrentWindow = SwitchToWindow;
4649 StartIndex = CurrentWindow->FirstCluster;
4650 EndIndex = CurrentWindow->LastCluster;
4651
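//
// Allocate one bit per cluster in the window, rounding the buffer
// up to a whole number of bytes.
//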
4652 BitMap = &PrivateBitMap;
4653 NewBitMapBuffer = FsRtlAllocatePoolWithTag( PagedPool,
4654 (EntriesPerWindow + 7) / 8,
4655 TAG_FAT_BITMAP );
4656
4657 RtlInitializeBitMap( &PrivateBitMap,
4658 NewBitMapBuffer,
4659 EndIndex - StartIndex + 1);
4660
4661 if (FatIndexBitSize == 32) {
4662
4663 //
4664 // We do not wish to count total clusters here.
4665 //
4666
4667 FreeClusterCount = NULL;
4668
4669 }
4670
4671 } else {
4672
4673 BitMap = &PrivateBitMap;
4674 RtlInitializeBitMap(&PrivateBitMap,
4675 BitMapBuffer,
4676 EndIndex - StartIndex + 1);
4677
4678 //
4679 // We do not count total clusters here.
4680 //
4681
4682 FreeClusterCount = NULL;
4683 }
4684
4685 //
4686 // Now, our start index had better be in the file heap.
4687 //
4688
4689 ASSERT( StartIndex >= 2 );
4690
4691 //
4692 // Pick up the initial chunk of the FAT and first entry.
4693 //
4694
4695 if (FatIndexBitSize == 12) {
4696
4697 //
4698 // We read in the entire fat in the 12 bit case.
4699 //
4700
4701 FatReadVolumeFile( IrpContext,
4702 Vcb,
4703 FatReservedBytes( &Vcb->Bpb ),
4704 FatBytesPerFat( &Vcb->Bpb ),
4705 &Bcb,
4706 (PVOID *)&FatBuffer );
4707
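//
// Entry zero holds the media descriptor and is never free, so the
// run accounting below starts out as AllocatedClusters; if the
// first cluster scanned is actually free, a zero length run on the
// first loop pass corrects the state.
//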
4708 FatLookup12BitEntry(FatBuffer, 0, &FirstFatEntry);
4709
4710 } else {
4711
4712 //
4713 // Read in one page of fat at a time. We cannot read in all
4714 // of the fat we need because of cache manager limitations.
4715 //
4716
4717 ULONG BytesPerEntry = FatIndexBitSize >> 3;
4718 #ifndef __REACTOS__
4719 ULONG EntriesPerPage = PAGE_SIZE / BytesPerEntry;
4720 #endif
4721
4722 Page = (FatReservedBytes(&Vcb->Bpb) + StartIndex * BytesPerEntry) / PAGE_SIZE;
4723
4724 Offset = Page * PAGE_SIZE;
4725
4726 FatReadVolumeFile( IrpContext,
4727 Vcb,
4728 Offset,
4729 PAGE_SIZE,
4730 &Bcb,
4731 &pv);
4732
4733 if (FatIndexBitSize == 32) {
4734
4735
4736 FatBuffer = (PUSHORT)((PUCHAR)pv +
4737 (FatReservedBytes(&Vcb->Bpb) + StartIndex * BytesPerEntry) %
4738 PAGE_SIZE);
4739
4740 FirstFatEntry = *((PULONG)FatBuffer);
4741 FirstFatEntry = FirstFatEntry & FAT32_ENTRY_MASK;
4742
4743 } else {
4744
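//
// FAT16: step past the two reserved FAT entries (indices 0 and 1)
// so that FatBuffer points at the entry for cluster two, the first
// real cluster.
//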
4745 FatBuffer = (PUSHORT)((PUCHAR)pv +
4746 FatReservedBytes(&Vcb->Bpb) % PAGE_SIZE) + 2;
4747
4748 FirstFatEntry = *FatBuffer;
4749 }
4750
4751 }
4752
4753 CurrentRun = (FirstFatEntry == FAT_CLUSTER_AVAILABLE) ?
4754 FreeClusters : AllocatedClusters;
4755
4756 StartIndexOfThisRun = StartIndex;
4757
4758 _SEH2_TRY {
4759
4760 for (FatIndex = StartIndex; FatIndex <= EndIndex; FatIndex++) {
4761
4762
4763 if (FatIndexBitSize == 12) {
4764
4765 FatLookup12BitEntry(FatBuffer, FatIndex, &FatEntry);
4766
4767 } else {
4768
4769 //
4770 // If we are setting up the FAT32 windows and have stepped into a new
4771 // bucket, finalize this one and move forward.
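// Cluster numbering starts at two, hence the bias in the modulus
// test below.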
4772 //
4773
4774 if (SetupWindows &&
4775 FatIndex > StartIndex &&
4776 (FatIndex - 2) % EntriesPerWindow == 0) {
4777
4778 CurrentWindow->LastCluster = FatIndex - 1;
4779
4780 if (CurrentRun == FreeClusters) {
4781
4782 //
4783 // We must be counting clusters in order to modify the
4784 // contents of the window.
4785 //
4786
4787 ASSERT( FreeClusterCount );
4788
4789
4790 ClustersThisRun = FatIndex - StartIndexOfThisRun;
4791 CurrentWindow->ClustersFree += ClustersThisRun;
4792
4793 if (FreeClusterCount) {
4794 *FreeClusterCount += ClustersThisRun;
4795 }
4796
4797 } else {
4798
4799 ASSERT(CurrentRun == AllocatedClusters);
4800
4801 ClustersThisRun = FatIndex - StartIndexOfThisRun;
4802 }
4803
4804 StartIndexOfThisRun = FatIndex;
4805 CurrentRun = UnknownClusters;
4806
4807 CurrentWindow++;
4808 CurrentWindow->ClustersFree = 0;
4809 CurrentWindow->FirstCluster = FatIndex;
4810 }
4811
4812 //
4813 // If we just stepped onto a new page, grab a new pointer.
4814 //
4815
4816 if (((ULONG_PTR)FatBuffer & (PAGE_SIZE - 1)) == 0) {
4817
4818 FatUnpinBcb( IrpContext, Bcb );
4819
4820 Page++;
4821 Offset += PAGE_SIZE;
4822
4823 FatReadVolumeFile( IrpContext,
4824 Vcb,
4825 Offset,
4826 PAGE_SIZE,
4827 &Bcb,
4828 &pv );
4829
4830 FatBuffer = (PUSHORT)pv;
4831 }
4832
4833 if (FatIndexBitSize == 32) {
4834
4835 #ifndef __REACTOS__
4836 FatEntry = *((PULONG)FatBuffer)++;
4837 #else
4838 FatEntry = *((PULONG)FatBuffer);
4839 FatBuffer += 2;
4840 #endif
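//
// Only the low 28 bits of a FAT32 entry are significant; the high
// four bits are reserved and masked off before the entry is used.
//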
4841 FatEntry = FatEntry & FAT32_ENTRY_MASK;
4842
4843 } else {
4844
4845 FatEntry = *FatBuffer;
4846 FatBuffer += 1;
4847 }
4848 }
4849
4850 if (CurrentRun == UnknownClusters) {
4851
4852 CurrentRun = (FatEntry == FAT_CLUSTER_AVAILABLE) ?
4853 FreeClusters : AllocatedClusters;
4854 }
4855
4856 //
4857 // Are we switching from a free run to an allocated run?
4858 //
4859
4860 if (CurrentRun == FreeClusters &&
4861 FatEntry != FAT_CLUSTER_AVAILABLE) {
4862
4863 ClustersThisRun = FatIndex - StartIndexOfThisRun;
4864
4865 if (FreeClusterCount) {
4866
4867 *FreeClusterCount += ClustersThisRun;
4868 CurrentWindow->ClustersFree += ClustersThisRun;
4869 }
4870
4871 if (BitMap) {
4872
4873 RtlClearBits( BitMap,
4874 StartIndexOfThisRun - StartIndex,
4875 ClustersThisRun );
4876 }
4877
4878 CurrentRun = AllocatedClusters;
4879 StartIndexOfThisRun = FatIndex;
4880 }
4881
4882 //
4883 // Are we switching from an allocated run to a free run?
4884 //
4885
4886 if (CurrentRun == AllocatedClusters &&
4887 FatEntry == FAT_CLUSTER_AVAILABLE) {
4888
4889 ClustersThisRun = FatIndex - StartIndexOfThisRun;
4890
4891 if (BitMap) {
4892
4893 RtlSetBits( BitMap,
4894 StartIndexOfThisRun - StartIndex,
4895 ClustersThisRun );
4896 }
4897
4898 CurrentRun = FreeClusters;
4899 StartIndexOfThisRun = FatIndex;
4900 }
4901 }
4902
4903 //
4904 // Now we have to record the final run we encountered
4905 //
4906
4907 ClustersThisRun = FatIndex - StartIndexOfThisRun;
4908
4909 if (CurrentRun == FreeClusters) {
4910
4911 if (FreeClusterCount) {
4912
4913 *FreeClusterCount += ClustersThisRun;
4914 CurrentWindow->ClustersFree += ClustersThisRun;
4915 }
4916
4917 if (BitMap) {
4918
4919 RtlClearBits( BitMap,
4920 StartIndexOfThisRun - StartIndex,
4921 ClustersThisRun );
4922 }
4923
4924 } else {
4925
4926 if (BitMap) {
4927
4928 RtlSetBits( BitMap,
4929 StartIndexOfThisRun - StartIndex,
4930 ClustersThisRun );
4931 }
4932 }
4933
4934 //
4935 // And finish the last window if we are in setup.
4936 //
4937
4938 if (SetupWindows) {
4939
4940 CurrentWindow->LastCluster = FatIndex - 1;
4941 }
4942
4943 //
4944 // Now switch the active window if required. We've successfully gotten everything
4945 // nailed down.
4946 //
4947 // If we were tracking the free cluster count, this means we should update the
4948 // window. This is the case of FAT12/16 initialization.
4949 //
4950
4951 if (SwitchToWindow) {
4952
4953 if (Vcb->FreeClusterBitMap.Buffer) {
4954
4955 ExFreePool( Vcb->FreeClusterBitMap.Buffer );
4956 }
4957
4958 RtlInitializeBitMap( &Vcb->FreeClusterBitMap,
4959 NewBitMapBuffer,
4960 EndIndex - StartIndex + 1 );
4961
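//
// The Vcb now owns the new buffer; clear the local pointer so the
// finally clause below does not free it.
//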
4962 NewBitMapBuffer = NULL;
4963
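//
// Make the new window current and invalidate the cluster hint,
// which would otherwise still refer into the old window.
//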
4964 Vcb->CurrentWindow = SwitchToWindow;
4965 Vcb->ClusterHint = -1;
4966
4967 if (FreeClusterCount) {
4968
4969 ASSERT( !SetupWindows );
4970 ASSERT( FatIndexBitSize != 32 );
4971
4972 Vcb->CurrentWindow->ClustersFree = *FreeClusterCount;
4973 }
4974 }
4975
4976 //
4977 // Make sure plausible things occurred ...
4978 //
4979
4980 if (!SetupWindows && BitMapBuffer == NULL) {
4981
4982 ASSERT_CURRENT_WINDOW_GOOD( Vcb );
4983 }
4984
4985 ASSERT(Vcb->AllocationSupport.NumberOfFreeClusters <= Vcb->AllocationSupport.NumberOfClusters);
4986
4987 } _SEH2_FINALLY {
4988
4989 //
4990 // Unpin the last bcb and drop the temporary bitmap buffer if it exists.
4991 //
4992
4993 FatUnpinBcb( IrpContext, Bcb );
4994
4995 if (NewBitMapBuffer) {
4996
4997 ExFreePool( NewBitMapBuffer );
4998 }
4999 } _SEH2_END;
5000 }
5001