1 /*++
2
3 Copyright (c) 1990-2000 Microsoft Corporation
4
5 Module Name:
6
7 AllocSup.c
8
9 Abstract:
10
11 This module implements the Allocation support routines for Fat.
12
13
14 --*/
15
16 #include "fatprocs.h"
17
18 //
19 // The Bug check file id for this module
20 //
21
22 #define BugCheckFileId (FAT_BUG_CHECK_ALLOCSUP)
23
24 //
25 // Local debug trace level
26 //
27
28 #define Dbg (DEBUG_TRACE_ALLOCSUP)
29
30 #define FatMin(a, b) ((a) < (b) ? (a) : (b))
31
32 //
33 // Define prefetch page count for the FAT
34 //
35
36 #define FAT_PREFETCH_PAGE_COUNT 0x100
37
38 //
39 // Local support routine prototypes
40 //
41
42 VOID
43 FatLookupFatEntry(
44 IN PIRP_CONTEXT IrpContext,
45 IN PVCB Vcb,
46 IN ULONG FatIndex,
47 IN OUT PULONG FatEntry,
48 IN OUT PFAT_ENUMERATION_CONTEXT Context
49 );
50
51 VOID
52 FatSetFatRun(
53 IN PIRP_CONTEXT IrpContext,
54 IN PVCB Vcb,
55 IN ULONG StartingFatIndex,
56 IN ULONG ClusterCount,
57 IN BOOLEAN ChainTogether
58 );
59
60 UCHAR
61 FatLogOf(
62 IN ULONG Value
63 );
64
65 //
66 // Note that the KdPrint below will ONLY fire when the assert does. Leave it
67 // alone.
68 //
69
70 #if DBG
71 #define ASSERT_CURRENT_WINDOW_GOOD(VCB) { \
72 ULONG FreeClusterBitMapClear; \
73 NT_ASSERT( (VCB)->FreeClusterBitMap.Buffer != NULL ); \
74 FreeClusterBitMapClear = RtlNumberOfClearBits(&(VCB)->FreeClusterBitMap); \
75 if ((VCB)->CurrentWindow->ClustersFree != FreeClusterBitMapClear) { \
76 KdPrint(("FAT: ClustersFree %x h != FreeClusterBitMapClear %x h\n", \
77 (VCB)->CurrentWindow->ClustersFree, \
78 FreeClusterBitMapClear)); \
79 } \
80 NT_ASSERT( (VCB)->CurrentWindow->ClustersFree == FreeClusterBitMapClear ); \
81 }
82 #else
83 #define ASSERT_CURRENT_WINDOW_GOOD(VCB)
84 #endif
85
86 //
87 // The following macros provide a convenient way of hiding the details
88 // of bitmap allocation schemes.
89 //
90
91
92 //
93 // VOID
94 // FatLockFreeClusterBitMap (
95 // IN PVCB Vcb
96 // );
97 //
98
99 #define FatLockFreeClusterBitMap(VCB) { \
100 NT_ASSERT(KeAreApcsDisabled()); \
101 ExAcquireFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex ); \
102 ASSERT_CURRENT_WINDOW_GOOD(VCB) \
103 }
104
105 //
106 // VOID
107 // FatUnlockFreeClusterBitMap (
108 // IN PVCB Vcb
109 // );
110 //
111
112 #define FatUnlockFreeClusterBitMap(VCB) { \
113 ASSERT_CURRENT_WINDOW_GOOD(VCB) \
114 NT_ASSERT(KeAreApcsDisabled()); \
115 ExReleaseFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex ); \
116 }
117
118 //
119 // BOOLEAN
120 // FatIsClusterFree (
121 // IN PIRP_CONTEXT IrpContext,
122 // IN PVCB Vcb,
123 // IN ULONG FatIndex
124 // );
125 //
126
127 #define FatIsClusterFree(IRPCONTEXT,VCB,FAT_INDEX) \
128 (RtlCheckBit(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2) == 0)
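//
// Note the "- 2" bias used here and throughout: FAT cluster numbering
// starts at 2 (FAT entries 0 and 1 are reserved), while the free cluster
// bitmap is zero based. Cluster 2 is therefore tracked by bit 0, and,
// for example, cluster 0x11 by bit 0xF.
//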
129
130 //
131 // VOID
132 // FatFreeClusters (
133 // IN PIRP_CONTEXT IrpContext,
134 // IN PVCB Vcb,
135 // IN ULONG FatIndex,
136 // IN ULONG ClusterCount
137 // );
138 //
139
140 #define FatFreeClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) { \
141 if ((CLUSTER_COUNT) == 1) { \
142 FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_AVAILABLE); \
143 } else { \
144 FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),FALSE); \
145 } \
146 }
147
148 //
149 // VOID
150 // FatAllocateClusters (
151 // IN PIRP_CONTEXT IrpContext,
152 // IN PVCB Vcb,
153 // IN ULONG FatIndex,
154 // IN ULONG ClusterCount
155 // );
156 //
157
158 #define FatAllocateClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) { \
159 if ((CLUSTER_COUNT) == 1) { \
160 FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_LAST); \
161 } else { \
162 FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),TRUE); \
163 } \
164 }
165
166 //
167 // VOID
168 // FatUnreserveClusters (
169 // IN PIRP_CONTEXT IrpContext,
170 // IN PVCB Vcb,
171 // IN ULONG FatIndex,
172 // IN ULONG ClusterCount
173 // );
174 //
175
176 #define FatUnreserveClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) { \
177 NT_ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap );\
178 NT_ASSERT( (FAT_INDEX) >= 2); \
179 RtlClearBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT)); \
180 if ((FAT_INDEX) < (VCB)->ClusterHint) { \
181 (VCB)->ClusterHint = (FAT_INDEX); \
182 } \
183 }
184
185 //
186 // VOID
187 // FatReserveClusters (
188 // IN PIRP_CONTEXT IrpContext,
189 // IN PVCB Vcb,
190 // IN ULONG FatIndex,
191 // IN ULONG ClusterCount
192 // );
193 //
194 // Handle wrapping the hint back to the front.
195 //
196
197 #define FatReserveClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) { \
198 ULONG _AfterRun = (FAT_INDEX) + (CLUSTER_COUNT); \
199 NT_ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap );\
200 NT_ASSERT( (FAT_INDEX) >= 2); \
201 RtlSetBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT)); \
202 \
203 if (_AfterRun - 2 >= (VCB)->FreeClusterBitMap.SizeOfBitMap) { \
204 _AfterRun = 2; \
205 } \
206 if (RtlCheckBit(&(VCB)->FreeClusterBitMap, _AfterRun - 2)) { \
207 (VCB)->ClusterHint = RtlFindClearBits( &(VCB)->FreeClusterBitMap, 1, _AfterRun - 2) + 2; \
208 if (1 == (VCB)->ClusterHint) { \
209 (VCB)->ClusterHint = 2; \
210 } \
211 } \
212 else { \
213 (VCB)->ClusterHint = _AfterRun; \
214 } \
215 }
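//
// A worked example of the wrap logic, with assumed numbers: on a window
// whose bitmap is 0x10000 bits, reserving 0x10 clusters at index 0xFFF2
// gives _AfterRun == 0x10002, so _AfterRun - 2 hits the end of the bitmap
// and is wrapped back to 2; if bit 0 is also set, the hint then becomes
// the next clear bit found from there (plus the usual +2 bias).
//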
216
217 //
218 // ULONG
219 // FatFindFreeClusterRun (
220 // IN PIRP_CONTEXT IrpContext,
221 // IN PVCB Vcb,
222 // IN ULONG ClusterCount,
223 // IN ULONG AlternateClusterHint
224 // );
225 //
226 // Do a special check if only one cluster is desired.
227 //
228
229 #define FatFindFreeClusterRun(IRPCONTEXT,VCB,CLUSTER_COUNT,CLUSTER_HINT) ( \
230 (CLUSTER_COUNT == 1) && \
231 FatIsClusterFree((IRPCONTEXT), (VCB), (CLUSTER_HINT)) ? \
232 (CLUSTER_HINT) : \
233 RtlFindClearBits( &(VCB)->FreeClusterBitMap, \
234 (CLUSTER_COUNT), \
235 (CLUSTER_HINT) - 2) + 2 \
236 )
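//
// Note that RtlFindClearBits returns MAXULONG (-1) when no run is found,
// so after the +2 bias a failed search yields the impossible cluster
// number 1 - callers such as FatAllocateDiskSpace test for exactly that.
//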
237
238 //
239 // FAT32: Define the maximum size of the FreeClusterBitMap to be the
240 // maximum size of a FAT16 FAT. If there are more clusters on the
241 // volume than can be represented by this many bytes of bitmap, the
242 // FAT will be split into "buckets", each of which does fit.
243 //
244 // Note this count is in clusters/bits of bitmap.
245 //
246
247 #define MAX_CLUSTER_BITMAP_SIZE (1 << 16)
248
249 //
250 // Calculate the window a given cluster number is in.
251 //
252
253 #define FatWindowOfCluster(C) (((C) - 2) / MAX_CLUSTER_BITMAP_SIZE)
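//
// A worked example, with assumed numbers: a FAT32 volume with 0x30000
// data clusters is covered by
//
//     (0x30000 + MAX_CLUSTER_BITMAP_SIZE - 1) / MAX_CLUSTER_BITMAP_SIZE == 3
//
// windows, and cluster 0x10001 lies in window (0x10001 - 2) / 0x10000 == 0,
// while cluster 0x10002 is the first cluster of window 1.
//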
254
255 #ifdef ALLOC_PRAGMA
256 #pragma alloc_text(PAGE, FatAddFileAllocation)
257 #pragma alloc_text(PAGE, FatAllocateDiskSpace)
258 #pragma alloc_text(PAGE, FatDeallocateDiskSpace)
259 #pragma alloc_text(PAGE, FatExamineFatEntries)
260 #pragma alloc_text(PAGE, FatInterpretClusterType)
261 #pragma alloc_text(PAGE, FatLogOf)
262 #pragma alloc_text(PAGE, FatLookupFatEntry)
263 #pragma alloc_text(PAGE, FatLookupFileAllocation)
264 #pragma alloc_text(PAGE, FatLookupFileAllocationSize)
265 #pragma alloc_text(PAGE, FatMergeAllocation)
266 #pragma alloc_text(PAGE, FatSetFatEntry)
267 #pragma alloc_text(PAGE, FatSetFatRun)
268 #pragma alloc_text(PAGE, FatSetupAllocationSupport)
269 #pragma alloc_text(PAGE, FatSplitAllocation)
270 #pragma alloc_text(PAGE, FatTearDownAllocationSupport)
271 #pragma alloc_text(PAGE, FatTruncateFileAllocation)
272 #endif
273
274 \f
275 INLINE
276 ULONG
277 FatSelectBestWindow(
278 IN PVCB Vcb
279 )
280 /*++
281
282 Routine Description:
283
284 Choose a window to allocate clusters from. Order of preference is:
285
286 1. First window with >50% free clusters
287 2. First empty window
288 3. Window with greatest number of free clusters.
289
290 Arguments:
291
292 Vcb - Supplies the Vcb for the volume
293
294 Return Value:
295
296 'Best window' number (index into Vcb->Windows[])
297
298 --*/
299 {
300 ULONG i, Fave = 0;
301 ULONG MaxFree = 0;
302 ULONG FirstEmpty = (ULONG)-1;
303 ULONG ClustersPerWindow = MAX_CLUSTER_BITMAP_SIZE;
304
305 NT_ASSERT( 1 != Vcb->NumberOfWindows);
306
307 for (i = 0; i < Vcb->NumberOfWindows; i++) {
308
309 if (Vcb->Windows[i].ClustersFree == ClustersPerWindow) {
310
311 if (-1 == FirstEmpty) {
312
313 //
314 // Keep note of the first empty window on the disc
315 //
316
317 FirstEmpty = i;
318 }
319 }
320 else if (Vcb->Windows[i].ClustersFree > MaxFree) {
321
322 //
323 // This window has the most free clusters, so far
324 //
325
326 MaxFree = Vcb->Windows[i].ClustersFree;
327 Fave = i;
328
329 //
330 // If this window has >50% free clusters, then we will take it,
331 // so don't bother considering more windows.
332 //
333
334 if (MaxFree >= (ClustersPerWindow >> 1)) {
335
336 break;
337 }
338 }
339 }
340
341 //
342 // If there were no windows with 50% or more freespace, then select the
343 // first empty window on the disc, if any - otherwise we'll just go with
344 // the one with the most free clusters.
345 //
346
347 if ((MaxFree < (ClustersPerWindow >> 1)) && (-1 != FirstEmpty)) {
348
349 Fave = FirstEmpty;
350 }
351
352 return Fave;
353 }
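//
// To illustrate the preference order with assumed numbers: given 0x10000
// cluster windows with free counts { 0x400, 0x10000, 0xA000 }, the scan
// notes window 1 as the first empty window and stops at window 2, since
// 0xA000 >= 0x8000 means it is over half free; 2 is returned. With
// counts { 0x400, 0x10000, 0x4000 } no window clears the 50% bar, so the
// first empty window, 1, wins instead.
//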
354
355 \f
356 VOID
357 FatSetupAllocationSupport (
358 IN PIRP_CONTEXT IrpContext,
359 IN PVCB Vcb
360 )
361
362 /*++
363
364 Routine Description:
365
366 This routine fills in the Allocation Support structure in the Vcb.
367 Most entries are computed using fat.h macros supplied with data from
368 the Bios Parameter Block. The free cluster count, however, requires
369 going to the Fat and actually counting free clusters. At the same time
370 the free cluster bit map is initialized.
371
372 Arguments:
373
374 Vcb - Supplies the Vcb to fill in.
375
376 --*/
377
378 {
379 ULONG BitIndex;
380 ULONG ClustersDescribableByFat;
381
382 PAGED_CODE();
383
384 DebugTrace(+1, Dbg, "FatSetupAllocationSupport\n", 0);
385 DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
386
387 //
388 // Compute a number of fields for Vcb.AllocationSupport
389 //
390
391 Vcb->AllocationSupport.RootDirectoryLbo = FatRootDirectoryLbo( &Vcb->Bpb );
392 Vcb->AllocationSupport.RootDirectorySize = FatRootDirectorySize( &Vcb->Bpb );
393
394 Vcb->AllocationSupport.FileAreaLbo = FatFileAreaLbo( &Vcb->Bpb );
395
396 Vcb->AllocationSupport.NumberOfClusters = FatNumberOfClusters( &Vcb->Bpb );
397
398 Vcb->AllocationSupport.FatIndexBitSize = FatIndexBitSize( &Vcb->Bpb );
399
400 Vcb->AllocationSupport.LogOfBytesPerSector = FatLogOf(Vcb->Bpb.BytesPerSector);
401 Vcb->AllocationSupport.LogOfBytesPerCluster = FatLogOf(FatBytesPerCluster( &Vcb->Bpb ));
402 Vcb->AllocationSupport.NumberOfFreeClusters = 0;
403
404
405 //
406 // Deal with a bug in DOS 5 format, if the Fat is not big enough to
407 // describe all the clusters on the disk, reduce this number. We expect
408 // that fat32 volumes will not have this problem.
409 //
410 // Turns out this was not a good assumption. We have to do this always now.
411 //
412
413 ClustersDescribableByFat = ( ((FatIsFat32(Vcb)? Vcb->Bpb.LargeSectorsPerFat :
414 Vcb->Bpb.SectorsPerFat) *
415 Vcb->Bpb.BytesPerSector * 8)
416 / FatIndexBitSize(&Vcb->Bpb) ) - 2;
417
418 if (Vcb->AllocationSupport.NumberOfClusters > ClustersDescribableByFat) {
419
420 Vcb->AllocationSupport.NumberOfClusters = ClustersDescribableByFat;
421 }
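    //
    // As a worked instance of the formula above, with assumed numbers: a
    // FAT16 volume with SectorsPerFat == 0x100 and BytesPerSector == 0x200
    // can describe (0x100 * 0x200 * 8) / 16 - 2 == 0xFFFE clusters, so a
    // BPB claiming more data clusters than that is clamped here.
    //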
422
423 //
424 // Extend the virtual volume file to include the Fat
425 //
426
427 {
428 CC_FILE_SIZES FileSizes;
429
430 FileSizes.AllocationSize.QuadPart =
431 FileSizes.FileSize.QuadPart = (FatReservedBytes( &Vcb->Bpb ) +
432 FatBytesPerFat( &Vcb->Bpb ));
433 FileSizes.ValidDataLength = FatMaxLarge;
434
435 if ( Vcb->VirtualVolumeFile->PrivateCacheMap == NULL ) {
436
437 FatInitializeCacheMap( Vcb->VirtualVolumeFile,
438 &FileSizes,
439 TRUE,
440 &FatData.CacheManagerNoOpCallbacks,
441 Vcb );
442
443 } else {
444
445 CcSetFileSizes( Vcb->VirtualVolumeFile, &FileSizes );
446 }
447 }
448
449 _SEH2_TRY {
450
451 if (FatIsFat32(Vcb) &&
452 Vcb->AllocationSupport.NumberOfClusters > MAX_CLUSTER_BITMAP_SIZE) {
453
454 Vcb->NumberOfWindows = (Vcb->AllocationSupport.NumberOfClusters +
455 MAX_CLUSTER_BITMAP_SIZE - 1) /
456 MAX_CLUSTER_BITMAP_SIZE;
457
458 } else {
459
460 Vcb->NumberOfWindows = 1;
461 }
462
463 Vcb->Windows = FsRtlAllocatePoolWithTag( PagedPool,
464 Vcb->NumberOfWindows * sizeof(FAT_WINDOW),
465 TAG_FAT_WINDOW );
466
467 RtlInitializeBitMap( &Vcb->FreeClusterBitMap,
468 NULL,
469 0 );
470
471 //
472 // Choose a FAT window to begin operation in.
473 //
474
475 if (Vcb->NumberOfWindows > 1) {
476
477 //
478 // Read the fat and count up free clusters. We bias by the two reserved
479 // entries in the FAT.
480 //
481
482 FatExamineFatEntries( IrpContext, Vcb,
483 2,
484 Vcb->AllocationSupport.NumberOfClusters + 2 - 1,
485 TRUE,
486 NULL,
487 NULL);
488
489
490 //
491 // Pick a window to begin allocating from
492 //
493
494 Vcb->CurrentWindow = &Vcb->Windows[ FatSelectBestWindow( Vcb)];
495
496 } else {
497
498 Vcb->CurrentWindow = &Vcb->Windows[0];
499
500 //
501 // Carefully bias ourselves by the two reserved entries in the FAT.
502 //
503
504 Vcb->CurrentWindow->FirstCluster = 2;
505 Vcb->CurrentWindow->LastCluster = Vcb->AllocationSupport.NumberOfClusters + 2 - 1;
506 }
507
508 //
509 // Now transition to the FAT window we have chosen.
510 //
511
512 FatExamineFatEntries( IrpContext, Vcb,
513 0,
514 0,
515 FALSE,
516 Vcb->CurrentWindow,
517 NULL);
518
519 //
520 // Now set the ClusterHint to the first free bit in our favorite
521 // window (except the ClusterHint is off by two).
522 //
523
524 Vcb->ClusterHint =
525 (BitIndex = RtlFindClearBits( &Vcb->FreeClusterBitMap, 1, 0 )) != -1 ?
526 BitIndex + 2 : 2;
527
528 } _SEH2_FINALLY {
529
530 DebugUnwind( FatSetupAllocationSupport );
531
532 //
533 // If we hit an exception, back out.
534 //
535
536 if (_SEH2_AbnormalTermination()) {
537
538 FatTearDownAllocationSupport( IrpContext, Vcb );
539 }
540 } _SEH2_END;
541
542 return;
543 }
544
545 \f
546 VOID
547 FatTearDownAllocationSupport (
548 IN PIRP_CONTEXT IrpContext,
549 IN PVCB Vcb
550 )
551
552 /*++
553
554 Routine Description:
555
556 This routine prepares the volume for closing. Specifically, we must
557 release the free fat bit map buffer, and uninitialize the dirty fat
558 Mcb.
559
560 Arguments:
561
562 Vcb - Supplies the Vcb to fill in.
563
564 Return Value:
565
566 VOID
567
568 --*/
569
570 {
571 DebugTrace(+1, Dbg, "FatTearDownAllocationSupport\n", 0);
572 DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
573
574 PAGED_CODE();
575
576 //
577 // If there are FAT buckets, free them.
578 //
579
580 if ( Vcb->Windows != NULL ) {
581
582 ExFreePool( Vcb->Windows );
583 Vcb->Windows = NULL;
584 }
585
586 //
587 // Free the memory associated with the free cluster bitmap.
588 //
589
590 if ( Vcb->FreeClusterBitMap.Buffer != NULL ) {
591
592 ExFreePool( Vcb->FreeClusterBitMap.Buffer );
593
594 //
595 // NULL this field as a flag.
596 //
597
598 Vcb->FreeClusterBitMap.Buffer = NULL;
599 }
600
601 //
602 // And remove all the runs in the dirty fat Mcb
603 //
604
605 FatRemoveMcbEntry( Vcb, &Vcb->DirtyFatMcb, 0, 0xFFFFFFFF );
606
607 DebugTrace(-1, Dbg, "FatTearDownAllocationSupport -> (VOID)\n", 0);
608
609 UNREFERENCED_PARAMETER( IrpContext );
610
611 return;
612 }
613
614 \f
615 _Requires_lock_held_(_Global_critical_region_)
616 VOID
617 FatLookupFileAllocation (
618 IN PIRP_CONTEXT IrpContext,
619 IN PFCB FcbOrDcb,
620 IN VBO Vbo,
621 OUT PLBO Lbo,
622 OUT PULONG ByteCount,
623 OUT PBOOLEAN Allocated,
624 OUT PBOOLEAN EndOnMax,
625 OUT PULONG Index
626 )
627
628 /*++
629
630 Routine Description:
631
632 This routine looks up the existing mapping of VBO to LBO for a
633 file/directory. The information it queries is either stored in the
634 mcb field of the fcb/dcb or it is stored on in the fat table and
635 needs to be retrieved and decoded, and updated in the mcb.
636
637 Arguments:
638
639 FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being queried
640
641 Vbo - Supplies the VBO whose LBO we want returned
642
643 Lbo - Receives the LBO corresponding to the input Vbo if one exists
644
645 ByteCount - Receives the number of bytes within the run that
646 correspond to the input Vbo and output Lbo.
647
648 Allocated - Receives TRUE if the Vbo does have a corresponding Lbo
649 and FALSE otherwise.
650
651 EndOnMax - Receives TRUE if the run ends in the maximal FAT cluster,
652 which results in a fractional bytecount.
653
654 Index - Receives the Index of the run
655
656 --*/
657
658 {
659 VBO CurrentVbo;
660 LBO CurrentLbo;
661 LBO PriorLbo;
662
663 VBO FirstVboOfCurrentRun = 0;
664 LBO FirstLboOfCurrentRun;
665
666 BOOLEAN LastCluster;
667 ULONG Runs;
668
669 PVCB Vcb;
670 FAT_ENTRY FatEntry;
671 ULONG BytesPerCluster;
672 ULARGE_INTEGER BytesOnVolume;
673
674 FAT_ENUMERATION_CONTEXT Context;
675
676 PAGED_CODE();
677
678 Vcb = FcbOrDcb->Vcb;
679
680
681 DebugTrace(+1, Dbg, "FatLookupFileAllocation\n", 0);
682 DebugTrace( 0, Dbg, " FcbOrDcb = %p\n", FcbOrDcb);
683 DebugTrace( 0, Dbg, " Vbo = %8lx\n", Vbo);
684 DebugTrace( 0, Dbg, " pLbo = %8lx\n", Lbo);
685 DebugTrace( 0, Dbg, " pByteCount = %8lx\n", ByteCount);
686 DebugTrace( 0, Dbg, " pAllocated = %8lx\n", Allocated);
687
688 Context.Bcb = NULL;
689
690 *EndOnMax = FALSE;
691
692 //
693 // Check the trivial case that the mapping is already in our
694 // Mcb.
695 //
696
697 if ( FatLookupMcbEntry(Vcb, &FcbOrDcb->Mcb, Vbo, Lbo, ByteCount, Index) ) {
698
699 *Allocated = TRUE;
700
701 NT_ASSERT( *ByteCount != 0 );
702
703 //
704 // Detect the overflow case, trim and claim the condition.
705 //
706
707 if (Vbo + *ByteCount == 0) {
708
709 *EndOnMax = TRUE;
710 }
711
712 DebugTrace( 0, Dbg, "Found run in Mcb.\n", 0);
713 DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
714 return;
715 }
716
717 //
718 // Initialize the Vcb, the cluster size, LastCluster, and
719 // FirstLboOfCurrentRun (to be used as an indication of the first
720 // iteration through the following while loop).
721 //
722
723 BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;
724
725 BytesOnVolume.QuadPart = UInt32x32To64( Vcb->AllocationSupport.NumberOfClusters, BytesPerCluster );
726
727 LastCluster = FALSE;
728 FirstLboOfCurrentRun = 0;
729
730 //
731 // Discard the case that the request extends beyond the end of
732 // allocation. Note that if the allocation size is not known
733 // AllocationSize is set to 0xffffffff.
734 //
735
736 if ( Vbo >= FcbOrDcb->Header.AllocationSize.LowPart ) {
737
738 *Allocated = FALSE;
739
740 DebugTrace( 0, Dbg, "Vbo beyond end of file.\n", 0);
741 DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
742 return;
743 }
744
745 //
746 // The Vbo is beyond the last Mcb entry. So we adjust Current Vbo/Lbo
747 // and FatEntry to describe the beginning of the last entry in the Mcb.
748 // This is used as initialization for the following loop.
749 //
750 // If the Mcb was empty, we start at the beginning of the file with
751 // CurrentVbo set to 0 to indicate a new run.
752 //
753
754 if (FatLookupLastMcbEntry( Vcb, &FcbOrDcb->Mcb, &CurrentVbo, &CurrentLbo, &Runs )) {
755
756 DebugTrace( 0, Dbg, "Current Mcb size = %8lx.\n", CurrentVbo + 1);
757
758 CurrentVbo -= (BytesPerCluster - 1);
759 CurrentLbo -= (BytesPerCluster - 1);
760
761 //
762 // Convert an index to a count.
763 //
764
765 Runs += 1;
766
767 } else {
768
769 DebugTrace( 0, Dbg, "Mcb empty.\n", 0);
770
771 //
772 // Check for an FcbOrDcb that has no allocation
773 //
774
775 if (FcbOrDcb->FirstClusterOfFile == 0) {
776
777 *Allocated = FALSE;
778
779 DebugTrace( 0, Dbg, "File has no allocation.\n", 0);
780 DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
781 return;
782
783 } else {
784
785 CurrentVbo = 0;
786 CurrentLbo = FatGetLboFromIndex( Vcb, FcbOrDcb->FirstClusterOfFile );
787 FirstVboOfCurrentRun = CurrentVbo;
788 FirstLboOfCurrentRun = CurrentLbo;
789
790 Runs = 0;
791
792 DebugTrace( 0, Dbg, "First Lbo of file = %8lx\n", CurrentLbo);
793 }
794 }
795
796 //
797 // Now we know that we are looking up a valid Vbo, but it is
798 // not in the Mcb, which is a monotonically increasing list of
799 // Vbo's. Thus we have to go to the Fat, and update
800 // the Mcb as we go. We use a try-finally to unpin the page
801 // of fat hanging around. Also we mark *Allocated = FALSE, so that
802 // the caller won't try to use the data if we hit an exception.
803 //
804
805 *Allocated = FALSE;
806
807 _SEH2_TRY {
808
809 FatEntry = (FAT_ENTRY)FatGetIndexFromLbo( Vcb, CurrentLbo );
810
811 //
812 // ASSERT that CurrentVbo and CurrentLbo are now cluster aligned.
813 // The assumption here, is that only whole clusters of Vbos and Lbos
814 // are mapped in the Mcb.
815 //
816
817 NT_ASSERT( ((CurrentLbo - Vcb->AllocationSupport.FileAreaLbo)
818 % BytesPerCluster == 0) &&
819 (CurrentVbo % BytesPerCluster == 0) );
820
821 //
822 // Starting from the first Vbo after the last Mcb entry, scan through
823 // the Fat looking for our Vbo. We continue through the Fat until we
824 // hit a noncontiguity beyond the desired Vbo, or the last cluster.
825 //
826
827 while ( !LastCluster ) {
828
829 //
830 // Get the next fat entry, and update our Current variables.
831 //
832
833 FatLookupFatEntry( IrpContext, Vcb, FatEntry, (PULONG)&FatEntry, &Context );
834
835 PriorLbo = CurrentLbo;
836 CurrentLbo = FatGetLboFromIndex( Vcb, FatEntry );
837 CurrentVbo += BytesPerCluster;
838
839 switch ( FatInterpretClusterType( Vcb, FatEntry )) {
840
841 //
842 // Check for a break in the Fat allocation chain.
843 //
844
845 case FatClusterAvailable:
846 case FatClusterReserved:
847 case FatClusterBad:
848
849 DebugTrace( 0, Dbg, "Break in allocation chain, entry = %d\n", FatEntry);
850 DebugTrace(-1, Dbg, "FatLookupFileAllocation -> Fat Corrupt. Raise Status.\n", 0);
851
852 FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
853 FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
854 break;
855
856 //
857 // If this is the last cluster, we must update the Mcb and
858 // exit the loop.
859 //
860
861 case FatClusterLast:
862
863 //
864 // Assert we know where the current run started. If the
865 // Mcb was empty when we were called, then FirstLboOfCurrentRun
866 // was set to the start of the file. If the Mcb contained an
867 // entry, then FirstLboOfCurrentRun was set on the first
868 // iteration through the loop. Thus if FirstLboOfCurrentRun
869 // is 0, then there was an Mcb entry and we are on our first
870 // iteration, meaning that the last cluster in the Mcb was
871 // really the last allocated cluster, but we checked Vbo
872 // against AllocationSize, and found it OK, thus AllocationSize
873 // must be too large.
874 //
875 // Note that, when we finally arrive here, CurrentVbo is actually
876 // the first Vbo beyond the file allocation and CurrentLbo is
877 // meaningless.
878 //
879
880 DebugTrace( 0, Dbg, "Read last cluster of file.\n", 0);
881
882 //
883 // Detect the case of the maximal file. Note that this really isn't
884 // a proper Vbo - those are zero-based, and this is a one-based number.
885 // The maximal file, of 2^32 - 1 bytes, has a maximum byte offset of
886 // 2^32 - 2.
887 //
888 // Just so we don't get confused here.
889 //
890
891 if (CurrentVbo == 0) {
892
893 *EndOnMax = TRUE;
894 CurrentVbo -= 1;
895 }
896
897 LastCluster = TRUE;
898
899 if (FirstLboOfCurrentRun != 0 ) {
900
901 DebugTrace( 0, Dbg, "Adding a run to the Mcb.\n", 0);
902 DebugTrace( 0, Dbg, " Vbo = %08lx.\n", FirstVboOfCurrentRun);
903 DebugTrace( 0, Dbg, " Lbo = %08lx.\n", FirstLboOfCurrentRun);
904 DebugTrace( 0, Dbg, " Length = %08lx.\n", CurrentVbo - FirstVboOfCurrentRun);
905
906 (VOID)FatAddMcbEntry( Vcb,
907 &FcbOrDcb->Mcb,
908 FirstVboOfCurrentRun,
909 FirstLboOfCurrentRun,
910 CurrentVbo - FirstVboOfCurrentRun );
911
912 Runs += 1;
913 }
914
915 //
916 // Being at the end of allocation, make sure we have found
917 // the Vbo. If we haven't, seeing as we checked VBO
918 // against AllocationSize, the real disk allocation is less
919 // than that of AllocationSize. This comes about when the
920 // real allocation is not yet known, and AllocationSize
921 // contains MAXULONG.
922 //
923 // KLUDGE! - If we were called by FatLookupFileAllocationSize
924 // Vbo is set to MAXULONG - 1, and AllocationSize to the lookup
925 // hint. Thus we merrily go along looking for a match that isn't
926 // there, but in the meantime building an Mcb. If this is
927 // the case, fill in AllocationSize and return.
928 //
929
930 if ( Vbo == MAXULONG - 1 ) {
931
932 *Allocated = FALSE;
933
934 FcbOrDcb->Header.AllocationSize.QuadPart = CurrentVbo;
935
936 DebugTrace( 0, Dbg, "New file allocation size = %08lx.\n", CurrentVbo);
937 try_return ( NOTHING );
938 }
939
940 //
941 // We will lie ever so slightly if we really terminated on the
942 // maximal byte of a file. It is really allocated.
943 //
944
945 if (Vbo >= CurrentVbo && !*EndOnMax) {
946
947 *Allocated = FALSE;
948 try_return ( NOTHING );
949 }
950
951 break;
952
953 //
954 // This is a continuation in the chain. If the run has a
955 // discontiguity at this point, update the Mcb, and if we are beyond
956 // the desired Vbo, this is the end of the run, so set LastCluster
957 // and exit the loop.
958 //
959
960 case FatClusterNext:
961
962 //
963 // This is the loop check. The Vbo must not be bigger than the size of
964 // the volume, and the Vbo must not have a) wrapped and b) not been at the
965 // very last cluster in the chain, for the case of the maximal file.
966 //
967
968 if ( CurrentVbo == 0 ||
969 (BytesOnVolume.HighPart == 0 && CurrentVbo > BytesOnVolume.LowPart)) {
970
971 FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
972 FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
973 }
974
975 if ( PriorLbo + BytesPerCluster != CurrentLbo ) {
976
977 //
978 // Note that on the first time through the loop
979 // (FirstLboOfCurrentRun == 0), we don't add the
980 // run to the Mcb since it corresponds to the last
981 // run already stored in the Mcb.
982 //
983
984 if ( FirstLboOfCurrentRun != 0 ) {
985
986 DebugTrace( 0, Dbg, "Adding a run to the Mcb.\n", 0);
987 DebugTrace( 0, Dbg, " Vbo = %08lx.\n", FirstVboOfCurrentRun);
988 DebugTrace( 0, Dbg, " Lbo = %08lx.\n", FirstLboOfCurrentRun);
989 DebugTrace( 0, Dbg, " Length = %08lx.\n", CurrentVbo - FirstVboOfCurrentRun);
990
991 FatAddMcbEntry( Vcb,
992 &FcbOrDcb->Mcb,
993 FirstVboOfCurrentRun,
994 FirstLboOfCurrentRun,
995 CurrentVbo - FirstVboOfCurrentRun );
996
997 Runs += 1;
998 }
999
1000 //
1001 // Since we are at a run boundary, with CurrentLbo and
1002 // CurrentVbo being the first cluster of the next run,
1003 // we see if the run we just added encompasses the desired
1004 // Vbo, and if so exit. Otherwise we set up two new
1005 // First*boOfCurrentRun, and continue.
1006 //
1007
1008 if (CurrentVbo > Vbo) {
1009
1010 LastCluster = TRUE;
1011
1012 } else {
1013
1014 FirstVboOfCurrentRun = CurrentVbo;
1015 FirstLboOfCurrentRun = CurrentLbo;
1016 }
1017 }
1018 break;
1019
1020 default:
1021
1022 DebugTrace(0, Dbg, "Illegal Cluster Type.\n", FatEntry);
1023
1024 #ifdef _MSC_VER
1025 #pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
1026 #endif
1027 FatBugCheck( 0, 0, 0 );
1028
1029 break;
1030
1031 } // switch()
1032 } // while()
1033
1034 //
1035 // Load up the return parameters.
1036 //
1037 // On exit from the loop, Vbo still contains the desired Vbo, and
1038 // CurrentVbo is the first byte after the run that contained the
1039 // desired Vbo.
1040 //
1041
1042 *Allocated = TRUE;
1043
1044 *Lbo = FirstLboOfCurrentRun + (Vbo - FirstVboOfCurrentRun);
1045
1046 *ByteCount = CurrentVbo - Vbo;
1047
1048 if (ARGUMENT_PRESENT(Index)) {
1049
1050 //
1051 // Note that Runs only needs to be accurate with respect to where we
1052 // ended. Since partial-lookup cases will occur without exclusive
1053 // synchronization, the Mcb itself may be much bigger by now.
1054 //
1055
1056 *Index = Runs - 1;
1057 }
1058
1059 try_exit: NOTHING;
1060
1061 } _SEH2_FINALLY {
1062
1063 DebugUnwind( FatLookupFileAllocation );
1064
1065 //
1066 // We are done reading the Fat, so unpin the last page of fat
1067 // that is hanging around
1068 //
1069
1070 FatUnpinBcb( IrpContext, Context.Bcb );
1071
1072 DebugTrace(-1, Dbg, "FatLookupFileAllocation -> (VOID)\n", 0);
1073 } _SEH2_END;
1074
1075 return;
1076 }
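//
// A minimal calling sketch (hypothetical caller, for exposition only;
// the real callers are the read/write and fileinfo paths):
//
//     LBO Lbo;
//     ULONG ByteCount;
//     BOOLEAN Allocated;
//     BOOLEAN EndOnMax;
//
//     FatLookupFileAllocation( IrpContext, Fcb, StartingVbo,
//                              &Lbo, &ByteCount,
//                              &Allocated, &EndOnMax, NULL );
//
//     if (Allocated) {
//
//         //
//         // Up to ByteCount bytes starting at Lbo map the run
//         // containing StartingVbo.
//         //
//     }
//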
1077
1078 \f
1079 _Requires_lock_held_(_Global_critical_region_)
1080 VOID
1081 FatAddFileAllocation (
1082 IN PIRP_CONTEXT IrpContext,
1083 IN PFCB FcbOrDcb,
1084 IN PFILE_OBJECT FileObject OPTIONAL,
1085 IN ULONG DesiredAllocationSize
1086 )
1087
1088 /*++
1089
1090 Routine Description:
1091
1092 This routine adds additional allocation to the specified file/directory.
1093 Additional allocation is added by appending clusters to the file/directory.
1094
1095 If the file already has a sufficient allocation then this procedure
1096 is effectively a noop.
1097
1098 Arguments:
1099
1100 FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified.
1101 This parameter must not specify the root dcb.
1102
1103 FileObject - If supplied inform the cache manager of the change.
1104
1105 DesiredAllocationSize - Supplies the minimum size, in bytes, that we want
1106 allocated to the file/directory.
1107
1108 --*/
1109
1110 {
1111 PVCB Vcb;
1112 LARGE_MCB NewMcb = {0};
1113 PLARGE_MCB McbToCleanup = NULL;
1114 PDIRENT Dirent = NULL;
1115 ULONG NewAllocation = 0;
1116 PBCB Bcb = NULL;
1117 BOOLEAN UnwindWeAllocatedDiskSpace = FALSE;
1118 BOOLEAN UnwindAllocationSizeSet = FALSE;
1119 BOOLEAN UnwindCacheManagerInformed = FALSE;
1120 BOOLEAN UnwindWeInitializedMcb = FALSE;
1121
1122 PAGED_CODE();
1123
1124 DebugTrace(+1, Dbg, "FatAddFileAllocation\n", 0);
1125 DebugTrace( 0, Dbg, " FcbOrDcb = %p\n", FcbOrDcb);
1126 DebugTrace( 0, Dbg, " DesiredAllocationSize = %8lx\n", DesiredAllocationSize);
1127
1128 Vcb = FcbOrDcb->Vcb;
1129
1130 //
1131 // If we haven't yet set the correct AllocationSize, do so.
1132 //
1133
1134 if (FcbOrDcb->Header.AllocationSize.QuadPart == FCB_LOOKUP_ALLOCATIONSIZE_HINT) {
1135
1136 FatLookupFileAllocationSize( IrpContext, FcbOrDcb );
1137 }
1138
1139 //
1140 // Check for the benign case that the desired allocation is already
1141 // within the allocation size.
1142 //
1143
1144 if (DesiredAllocationSize <= FcbOrDcb->Header.AllocationSize.LowPart) {
1145
1146 DebugTrace(0, Dbg, "Desired size within current allocation.\n", 0);
1147
1148 DebugTrace(-1, Dbg, "FatAddFileAllocation -> (VOID)\n", 0);
1149 return;
1150 }
1151
1152 DebugTrace( 0, Dbg, "InitialAllocation = %08lx.\n", FcbOrDcb->Header.AllocationSize.LowPart);
1153
1154 //
1155 // Get a chunk of disk space that will fulfill our needs. If there
1156 // was no initial allocation, start from the hint in the Vcb, otherwise
1157 // try to allocate from the cluster after the initial allocation.
1158 //
1159 // If there was no initial allocation to the file, we can just use the
1160 // Mcb in the FcbOrDcb, otherwise we have to use a new one, and merge
1161 // it to the one in the FcbOrDcb.
1162 //
1163
1164 _SEH2_TRY {
1165
1166 if (FcbOrDcb->Header.AllocationSize.LowPart == 0) {
1167
1168 LBO FirstLboOfFile;
1169
1170 NT_ASSERT( FcbOrDcb->FcbCondition == FcbGood );
1171
1172 FatGetDirentFromFcbOrDcb( IrpContext,
1173 FcbOrDcb,
1174 FALSE,
1175 &Dirent,
1176 &Bcb );
1177 //
1178 // Set this dirty right now since this call can fail.
1179 //
1180
1181 FatSetDirtyBcb( IrpContext, Bcb, Vcb, TRUE );
1182
1183 FatAllocateDiskSpace( IrpContext,
1184 Vcb,
1185 0,
1186 &DesiredAllocationSize,
1187 FALSE,
1188 &FcbOrDcb->Mcb );
1189
1190 UnwindWeAllocatedDiskSpace = TRUE;
1191 McbToCleanup = &FcbOrDcb->Mcb;
1192
1193 //
1194 // We have to update the dirent and FcbOrDcb copies of
1195 // FirstClusterOfFile since before it was 0
1196 //
1197
1198 FatLookupMcbEntry( FcbOrDcb->Vcb,
1199 &FcbOrDcb->Mcb,
1200 0,
1201 &FirstLboOfFile,
1202 (PULONG)NULL,
1203 NULL );
1204
1205 DebugTrace( 0, Dbg, "First Lbo of file will be %08lx.\n", FirstLboOfFile );
1206
1207 FcbOrDcb->FirstClusterOfFile = FatGetIndexFromLbo( Vcb, FirstLboOfFile );
1208
1209 Dirent->FirstClusterOfFile = (USHORT)FcbOrDcb->FirstClusterOfFile;
1210
1211 if ( FatIsFat32(Vcb) ) {
1212
1213 Dirent->FirstClusterOfFileHi = (USHORT)(FcbOrDcb->FirstClusterOfFile >> 16);
1214 }
1215
1216 //
1217 // Note the size of the allocation we need to tell the cache manager about.
1218 //
1219
1220 NewAllocation = DesiredAllocationSize;
1221
1222 } else {
1223
1224 LBO LastAllocatedLbo;
1225 VBO DontCare;
1226
1227 //
1228 // Get the first cluster following the current allocation. It is possible
1229 // the Mcb is empty (or short, etc.) so we need to be slightly careful
1230 // about making sure we don't lie with the hint.
1231 //
1232
1233 (void)FatLookupLastMcbEntry( FcbOrDcb->Vcb, &FcbOrDcb->Mcb, &DontCare, &LastAllocatedLbo, NULL );
1234
1235 //
1236 // Try to get some disk space starting from there.
1237 //
1238
1239 NewAllocation = DesiredAllocationSize - FcbOrDcb->Header.AllocationSize.LowPart;
1240
1241 FsRtlInitializeLargeMcb( &NewMcb, PagedPool );
1242 UnwindWeInitializedMcb = TRUE;
1243 McbToCleanup = &NewMcb;
1244
1245 FatAllocateDiskSpace( IrpContext,
1246 Vcb,
1247 (LastAllocatedLbo != ~0 ?
1248 FatGetIndexFromLbo(Vcb,LastAllocatedLbo + 1) :
1249 0),
1250 &NewAllocation,
1251 FALSE,
1252 &NewMcb );
1253
1254 UnwindWeAllocatedDiskSpace = TRUE;
1255 }
1256
1257 //
1258 // Now that we increased the allocation of the file, mark it in the
1259 // FcbOrDcb. Carefully prepare to handle an inability to grow the cache
1260 // structures.
1261 //
1262
1263 FcbOrDcb->Header.AllocationSize.LowPart += NewAllocation;
1264
1265 //
1266 // Handle the maximal file case, where we may have just wrapped. Note
1267 // that this must be the precise boundary case wrap, i.e. by one byte,
1268 // so that the new allocation is actually one byte "less" as far as we're
1269 // concerned. This is important for the extension case.
1270 //
1271
1272 if (FcbOrDcb->Header.AllocationSize.LowPart == 0) {
1273
1274 NewAllocation -= 1;
1275 FcbOrDcb->Header.AllocationSize.LowPart = 0xffffffff;
1276 }
1277
1278 UnwindAllocationSizeSet = TRUE;
1279
1280 //
1281 // Inform the cache manager to increase the section size
1282 //
1283
1284 if ( ARGUMENT_PRESENT(FileObject) && CcIsFileCached(FileObject) ) {
1285
1286 CcSetFileSizes( FileObject,
1287 (PCC_FILE_SIZES)&FcbOrDcb->Header.AllocationSize );
1288 UnwindCacheManagerInformed = TRUE;
1289 }
1290
1291 //
1292 // In the extension case, we have held off actually gluing the new
1293 // allocation onto the file. This simplifies exception cleanup since
1294 // if it was already added and the section grow failed, we'd have to
1295 // do extra work to unglue it. This way, we can assume that if we
1296 // raise the only thing we need to do is deallocate the disk space.
1297 //
1298 // Merge the allocation now.
1299 //
1300
1301 if (FcbOrDcb->Header.AllocationSize.LowPart != NewAllocation) {
1302
1303 //
1304 // Tack the new Mcb onto the end of the FcbOrDcb one.
1305 //
1306
1307 FatMergeAllocation( IrpContext,
1308 Vcb,
1309 &FcbOrDcb->Mcb,
1310 &NewMcb );
1311 }
1312
1313 } _SEH2_FINALLY {
1314
1315 DebugUnwind( FatAddFileAllocation );
1316
1317 //
1318 // Give FlushFileBuffer/Cleanup a clue here, regardless of success/fail..
1319 //
1320
1321 SetFlag(FcbOrDcb->FcbState, FCB_STATE_FLUSH_FAT);
1322
1323 //
1324 // If we were dogged trying to complete this operation, we need to
1325 // back various things out.
1326 //
1327
1328 if (_SEH2_AbnormalTermination()) {
1329
1330 //
1331 // Pull off the allocation size we tried to add to this object if
1332 // we failed to grow cache structures or Mcb structures.
1333 //
1334
1335 if (UnwindAllocationSizeSet) {
1336
1337 FcbOrDcb->Header.AllocationSize.LowPart -= NewAllocation;
1338 }
1339
1340 if (UnwindCacheManagerInformed) {
1341
1342 CcSetFileSizes( FileObject,
1343 (PCC_FILE_SIZES)&FcbOrDcb->Header.AllocationSize );
1344 }
1345
1346 //
1347 // In the case of initial allocation, we used the Fcb's Mcb and have
1348 // to clean that up as well as the FAT chain references.
1349 //
1350
1351 if (FcbOrDcb->Header.AllocationSize.LowPart == 0) {
1352
1353 if (Dirent != NULL) {
1354
1355 FcbOrDcb->FirstClusterOfFile = 0;
1356 Dirent->FirstClusterOfFile = 0;
1357
1358 if ( FatIsFat32(Vcb) ) {
1359
1360 Dirent->FirstClusterOfFileHi = 0;
1361 }
1362 }
1363 }
1364
1365 //
1366 // ... and drop the dirent Bcb if we got it. Do it now
1367 // so we can afford to take the exception if we have to.
1368 //
1369
1370 FatUnpinBcb( IrpContext, Bcb );
1371
1372 _SEH2_TRY {
1373
1374 //
1375 // Note this can re-raise.
1376 //
1377
1378 if ( UnwindWeAllocatedDiskSpace ) {
1379
1380 FatDeallocateDiskSpace( IrpContext, Vcb, McbToCleanup, FALSE );
1381 }
1382
1383 } _SEH2_FINALLY {
1384
1385 //
1386 // We always want to clean up the non-initial allocation temporary Mcb,
1387 // otherwise we have the Fcb's Mcb and we just truncate it away.
1388 //
1389
1390 if (UnwindWeInitializedMcb == TRUE) {
1391
1392 //
1393 // Note that we already know a raise is in progress. No danger
1394 // of encountering the normal case code below and doing this again.
1395 //
1396
1397 FsRtlUninitializeLargeMcb( McbToCleanup );
1398
1399 } else {
1400
1401 if (McbToCleanup) {
1402
1403 FsRtlTruncateLargeMcb( McbToCleanup, 0 );
1404 }
1405 }
1406 } _SEH2_END;
1407 }
1408
1409 DebugTrace(-1, Dbg, "FatAddFileAllocation -> (VOID)\n", 0);
1410 } _SEH2_END;
1411
1412 //
1413 // Non-exceptional cleanup we always want to do. In handling the re-raise possibilities
1414 // during exceptions we had to make sure these two steps always happened there beforehand.
1415 // So now we handle the usual case.
1416 //
1417
1418 FatUnpinBcb( IrpContext, Bcb );
1419
1420 if (UnwindWeInitializedMcb == TRUE) {
1421
1422 FsRtlUninitializeLargeMcb( &NewMcb );
1423 }
1424 }
1425
1426 _Requires_lock_held_(_Global_critical_region_)
1427 VOID
1428 FatTruncateFileAllocation (
1429 IN PIRP_CONTEXT IrpContext,
1430 IN PFCB FcbOrDcb,
1431 IN ULONG DesiredAllocationSize
1432 )
1433
1434 /*++
1435
1436 Routine Description:
1437
1438 This routine truncates the allocation to the specified file/directory.
1439
1440 If the file is already smaller than the indicated size then this procedure
1441 is effectively a noop.
1442
1443
1444 Arguments:
1445
1446 FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified
1447 This parameter must not specify the root dcb.
1448
1449 DesiredAllocationSize - Supplies the maximum size, in bytes, that we want
1450 allocated to the file/directory. It is rounded
1451 up to the nearest cluster.
1452
1453 Return Value:
1454
1455 VOID
1456
1457
1458 --*/
1459
1460 {
1461 PVCB Vcb;
1462 PBCB Bcb = NULL;
1463 LARGE_MCB RemainingMcb = {0};
1464 ULONG BytesPerCluster;
1465 PDIRENT Dirent = NULL;
1466 BOOLEAN UpdatedDirent = FALSE;
1467
1468 ULONG UnwindInitialAllocationSize;
1469 ULONG UnwindInitialFirstClusterOfFile;
1470 BOOLEAN UnwindWeAllocatedMcb = FALSE;
1471
1472 PAGED_CODE();
1473
1474 Vcb = FcbOrDcb->Vcb;
1475
1476 DebugTrace(+1, Dbg, "FatTruncateFileAllocation\n", 0);
1477 DebugTrace( 0, Dbg, " FcbOrDcb = %p\n", FcbOrDcb);
1478 DebugTrace( 0, Dbg, " DesiredAllocationSize = %8lx\n", DesiredAllocationSize);
1479
1480 //
1481 // If the Fcb isn't in good condition, we have no business whacking around on
1482 // the disk after "its" clusters.
1483 //
1484 // Inspired by a Prefix complaint.
1485 //
1486
1487 NT_ASSERT( FcbOrDcb->FcbCondition == FcbGood );
1488
1489 //
1490 // If we haven't yet set the correct AllocationSize, do so.
1491 //
1492
1493 if (FcbOrDcb->Header.AllocationSize.QuadPart == FCB_LOOKUP_ALLOCATIONSIZE_HINT) {
1494
1495 FatLookupFileAllocationSize( IrpContext, FcbOrDcb );
1496 }
1497
1498 //
1499 // Round up the Desired Allocation Size to the next cluster size
1500 //
1501
1502 BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;
1503
1504 //
1505 // Note if the desired allocation is zero, to distinguish this from
1506 // the wrap case below.
1507 //
1508
1509 if (DesiredAllocationSize != 0) {
1510
1511 DesiredAllocationSize = (DesiredAllocationSize + (BytesPerCluster - 1)) &
1512 ~(BytesPerCluster - 1);
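        //
        // For example, with 4KB clusters (BytesPerCluster == 0x1000), a
        // desired size of 0x1388 (5000) bytes rounds up to 0x2000 here.
        //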
1513 //
1514 // Check for the benign case that the file is already smaller than
1515 // the desired truncation. Note that if it wraps, then a) it was
1516 // specifying an offset in the maximally allocatable cluster and
1517 // b) we're not asking to extend the file, either. So stop.
1518 //
1519
1520 if (DesiredAllocationSize == 0 ||
1521 DesiredAllocationSize >= FcbOrDcb->Header.AllocationSize.LowPart) {
1522
1523 DebugTrace(0, Dbg, "Desired size within current allocation.\n", 0);
1524
1525 DebugTrace(-1, Dbg, "FatTruncateFileAllocation -> (VOID)\n", 0);
1526 return;
1527 }
1528
1529 }
1530
1531 //
1532 // This is a no-op if the allocation size is already what we want.
1533 //
1534
1535 if (DesiredAllocationSize == FcbOrDcb->Header.AllocationSize.LowPart) {
1536
1537 DebugTrace(0, Dbg, "Desired size equals current allocation.\n", 0);
1538 DebugTrace(-1, Dbg, "FatTruncateFileAllocation -> (VOID)\n", 0);
1539 return;
1540 }
1541
1542 UnwindInitialAllocationSize = FcbOrDcb->Header.AllocationSize.LowPart;
1543 UnwindInitialFirstClusterOfFile = FcbOrDcb->FirstClusterOfFile;
1544
1545 //
1546 // Update the FcbOrDcb allocation size. If it is now zero, we have the
1547 // additional task of modifying the FcbOrDcb and Dirent copies of
1548 // FirstClusterInFile.
1549 //
1550 // Note that we must pin the dirent before actually deallocating the
1551 // disk space since, in unwind, it would not be possible to reallocate
1552 // deallocated disk space as someone else may have reallocated it and
1553 // may cause an exception when you try to get some more disk space.
1554 // Thus FatDeallocateDiskSpace must be the final dangerous operation.
1555 //
1556
1557 _SEH2_TRY {
1558
1559 FcbOrDcb->Header.AllocationSize.QuadPart = DesiredAllocationSize;
1560
1561 //
1562 // Special case 0
1563 //
1564
1565 if (DesiredAllocationSize == 0) {
1566
1567 //
1568 // We have to update the dirent and FcbOrDcb copies of
1569 // FirstClusterOfFile since it will now be 0
1570 //
1571
1572 NT_ASSERT( FcbOrDcb->FcbCondition == FcbGood );
1573
1574 FatGetDirentFromFcbOrDcb( IrpContext, FcbOrDcb, FALSE, &Dirent, &Bcb );
1575
1576 Dirent->FirstClusterOfFile = 0;
1577
1578 if (FatIsFat32(Vcb)) {
1579
1580 Dirent->FirstClusterOfFileHi = 0;
1581 }
1582
1583 FcbOrDcb->FirstClusterOfFile = 0;
1584
1585 FatSetDirtyBcb( IrpContext, Bcb, Vcb, TRUE );
1586 UpdatedDirent = TRUE;
1587
1588 FatDeallocateDiskSpace( IrpContext, Vcb, &FcbOrDcb->Mcb, ((FcbOrDcb->FcbState & FCB_STATE_ZERO_ON_DEALLOCATION) != 0));
1589
1590 FatRemoveMcbEntry( FcbOrDcb->Vcb, &FcbOrDcb->Mcb, 0, 0xFFFFFFFF );
1591
1592 } else {
1593
1594 //
1595 // Split the existing allocation into two parts, one we will keep, and
1596 // one we will deallocate.
1597 //
1598
1599 FsRtlInitializeLargeMcb( &RemainingMcb, PagedPool );
1600 UnwindWeAllocatedMcb = TRUE;
1601
1602 FatSplitAllocation( IrpContext,
1603 Vcb,
1604 &FcbOrDcb->Mcb,
1605 DesiredAllocationSize,
1606 &RemainingMcb );
1607
1608 FatDeallocateDiskSpace( IrpContext, Vcb, &RemainingMcb, ((FcbOrDcb->FcbState & FCB_STATE_ZERO_ON_DEALLOCATION) != 0) );
1609
1610 FsRtlUninitializeLargeMcb( &RemainingMcb );
1611 }
1612
1613 } _SEH2_FINALLY {
1614
1615 DebugUnwind( FatTruncateFileAllocation );
1616
1617 //
1618 // Is this really the right backout strategy? It would be nice if we could
1619 // pretend the truncate worked if we knew that the file had gotten into
1620 // a consistent state. Leaving dangled clusters is probably quite preferable.
1621 //
1622
1623 if ( _SEH2_AbnormalTermination() ) {
1624
1625 FcbOrDcb->Header.AllocationSize.LowPart = UnwindInitialAllocationSize;
1626
1627 if ( (DesiredAllocationSize == 0) && (Dirent != NULL)) {
1628
1629 if (UpdatedDirent) {
1630
1631 //
1632 // If the dirent has been updated ok and marked dirty, then we
1633 // failed in FatDeallocateDiskSpace, and don't know what state
1634 // the on disc fat chain is in. So we throw away the mcb,
1635 // and potentially lose a few clusters until the next
1636 // chkdsk. The operation has succeeded, but the exception
1637 // will still propagate. 5.1
1638 //
1639
1640 FatRemoveMcbEntry( Vcb, &FcbOrDcb->Mcb, 0, 0xFFFFFFFF );
1641 FcbOrDcb->Header.AllocationSize.QuadPart = 0;
1642 }
1643 else if (FcbOrDcb->FirstClusterOfFile == 0) {
1644
1645 Dirent->FirstClusterOfFile = (USHORT)UnwindInitialFirstClusterOfFile;
1646
1647 if ( FatIsFat32(Vcb) ) {
1648
1649 Dirent->FirstClusterOfFileHi =
1650 (USHORT)(UnwindInitialFirstClusterOfFile >> 16);
1651 }
1652
1653 FcbOrDcb->FirstClusterOfFile = UnwindInitialFirstClusterOfFile;
1654 }
1655 }
1656
1657 if ( UnwindWeAllocatedMcb ) {
1658
1659 FsRtlUninitializeLargeMcb( &RemainingMcb );
1660 }
1661
1662 //
1663 // Note that in the non zero truncation case, we will also
1664 // leak clusters. However, apart from this, the in memory and on disc
1665 // structures will agree.
1666 }
1667
1668 FatUnpinBcb( IrpContext, Bcb );
1669
1670 //
1671 // Give FlushFileBuffer/Cleanup a clue here, regardless of success/fail.
1672 //
1673
1674 SetFlag(FcbOrDcb->FcbState, FCB_STATE_FLUSH_FAT);
1675
1676 DebugTrace(-1, Dbg, "FatTruncateFileAllocation -> (VOID)\n", 0);
1677 } _SEH2_END;
1678 }
1679
1680 \f
1681 _Requires_lock_held_(_Global_critical_region_)
1682 VOID
1683 FatLookupFileAllocationSize (
1684 IN PIRP_CONTEXT IrpContext,
1685 IN PFCB FcbOrDcb
1686 )
1687
1688 /*++
1689
1690 Routine Description:
1691
1692 This routine retrieves the current file allocation size for the
1693 specified file/directory.
1694
1695 Arguments:
1696
1697 FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified
1698
1699 --*/
1700
1701 {
1702 LBO Lbo;
1703 ULONG ByteCount;
1704 BOOLEAN DontCare;
1705
1706 PAGED_CODE();
1707
1708 DebugTrace(+1, Dbg, "FatLookupFileAllocationSize\n", 0);
1709 DebugTrace( 0, Dbg, " FcbOrDcb = %p\n", FcbOrDcb);
1710
1711 //
1712 // We call FatLookupFileAllocation with Vbo of 0xffffffff - 1.
1713 //
1714
1715 FatLookupFileAllocation( IrpContext,
1716 FcbOrDcb,
1717 MAXULONG - 1,
1718 &Lbo,
1719 &ByteCount,
1720 &DontCare,
1721 &DontCare,
1722 NULL );
1723
1724 //
1725 // FileSize was set at Fcb creation time from the contents of the directory entry,
1726 // and we are only now looking up the real length of the allocation chain. If it
1727 // cannot be contained, this is trash. Probably more where that came from.
1728 //
1729
1730 if (FcbOrDcb->Header.FileSize.LowPart > FcbOrDcb->Header.AllocationSize.LowPart) {
1731
1732 FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
1733 FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
1734 }
1735
1736 DebugTrace(-1, Dbg, "FatLookupFileAllocationSize -> (VOID)\n", 0);
1737 return;
1738 }
1739
1740 \f
1741 _Requires_lock_held_(_Global_critical_region_)
1742 VOID
1743 FatAllocateDiskSpace (
1744 IN PIRP_CONTEXT IrpContext,
1745 IN PVCB Vcb,
1746 IN ULONG AbsoluteClusterHint,
1747 IN PULONG ByteCount,
1748 IN BOOLEAN ExactMatchRequired,
1749 OUT PLARGE_MCB Mcb
1750 )
1751
1752 /*++
1753
1754 Routine Description:
1755
1756 This procedure allocates additional disk space and builds an mcb
1757 representing the newly allocated space. If the space cannot be
1758 allocated then this procedure raises an appropriate status.
1759
1760 Searching starts from the hint index in the Vcb unless an alternative
1761 non-zero hint is given in AbsoluteClusterHint. If we are using the
1762 hint field in the Vcb, it is set to the cluster following our allocation
1763 when we are done.
1764
1765 Disk space can only be allocated in cluster units so this procedure
1766 will round up any byte count to the next cluster boundary.
1767
1768 Pictorially what is done is the following (where ! denotes the end of
1769 the fat chain (i.e., FAT_CLUSTER_LAST)):
1770
1771
1772 Mcb (empty)
1773
1774 becomes
1775
1776 Mcb |--a--|--b--|--c--!
1777
1778 ^
1779 ByteCount ----------+
1780
1781 Arguments:
1782
1783 Vcb - Supplies the VCB being modified
1784
1785 AbsoluteClusterHint - Supplies an alternate hint index to start the
1786 search from. If this is zero we use, and update,
1787 the Vcb hint field.
1788
1789 ByteCount - Supplies the number of bytes that we are requesting, and
1790 receives the number of bytes that we got.
1791
1792 ExactMatchRequired - Caller should set this to TRUE if only the precise run requested
1793 is acceptable.
1794
1795 Mcb - Receives the MCB describing the newly allocated disk space. The
1796 caller passes in an initialized Mcb that is filled in by this procedure.
1797
1798 Return Value:
1799
1800 VOID - On failure to find enough free clusters this routine raises
1801 STATUS_DISK_FULL rather than returning a status.
1802
1803 --*/
1804
1805 {
1806 UCHAR LogOfBytesPerCluster;
1807 ULONG BytesPerCluster;
1808 ULONG StartingCluster;
1809 ULONG ClusterCount;
1810 ULONG WindowRelativeHint;
1811 #if DBG
1812 ULONG PreviousClear = 0;
1813 #endif
1814
1815 PFAT_WINDOW Window;
1816 BOOLEAN Wait = FALSE;
1817 BOOLEAN Result = TRUE;
1818
1819 PAGED_CODE();
1820
1821 DebugTrace(+1, Dbg, "FatAllocateDiskSpace\n", 0);
1822 DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
1823 DebugTrace( 0, Dbg, " *ByteCount = %8lx\n", *ByteCount);
1824 DebugTrace( 0, Dbg, " Mcb = %p\n", Mcb);
1825 DebugTrace( 0, Dbg, " Hint = %8lx\n", AbsoluteClusterHint);
1826
1827 NT_ASSERT((AbsoluteClusterHint <= Vcb->AllocationSupport.NumberOfClusters + 2) && (1 != AbsoluteClusterHint));
1828
1829 //
1830 // Make sure byte count is not zero
1831 //
1832
1833 if (*ByteCount == 0) {
1834
1835 DebugTrace(0, Dbg, "Nothing to allocate.\n", 0);
1836
1837 DebugTrace(-1, Dbg, "FatAllocateDiskSpace -> (VOID)\n", 0);
1838 return;
1839 }
1840
1841 //
1842 // Compute the cluster count based on the byte count, rounding up
1843 // to the next cluster if there is any remainder. Note that the
1844 // pathological case *ByteCount == 0 has been eliminated above.
1845 //
1846
1847 LogOfBytesPerCluster = Vcb->AllocationSupport.LogOfBytesPerCluster;
1848 BytesPerCluster = 1 << LogOfBytesPerCluster;
1849
1850 *ByteCount = (*ByteCount + (BytesPerCluster - 1))
1851 & ~(BytesPerCluster - 1);
1852
1853 //
1854 // If ByteCount is NOW zero, then we were asked for the maximal
1855 // filesize (or at least for bytes in the last allocatable sector).
1856 //
1857
1858 if (*ByteCount == 0) {
1859
1860 *ByteCount = 0xffffffff;
1861 ClusterCount = 1 << (32 - LogOfBytesPerCluster);
1862
1863 } else {
1864
1865 ClusterCount = (*ByteCount >> LogOfBytesPerCluster);
1866 }
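    //
    // To illustrate the maximal-size case above, assuming 4KB clusters
    // (LogOfBytesPerCluster == 12): a request of 0xFFFFF001 bytes rounds
    // up past 2^32 to 0, and is therefore rewritten as 0xffffffff bytes
    // in 1 << (32 - 12) == 0x100000 clusters.
    //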
1867
1868 //
1869 // Analysis tools don't figure out that ClusterCount is not zero because
1870 // of the ByteCount == 0 checks, so give them a hint.
1871 //
1872 _Analysis_assume_(ClusterCount > 0);
1873
1874 //
1875 // Make sure there are enough free clusters to start with, and
1876 // take them now so that nobody else takes them from us.
1877 //
1878
1879 ExAcquireResourceSharedLite(&Vcb->ChangeBitMapResource, TRUE);
1880 FatLockFreeClusterBitMap( Vcb );
1881
1882 if (ClusterCount <= Vcb->AllocationSupport.NumberOfFreeClusters) {
1883
1884 Vcb->AllocationSupport.NumberOfFreeClusters -= ClusterCount;
1885
1886 } else {
1887
1888 FatUnlockFreeClusterBitMap( Vcb );
1889 ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
1890
1891 DebugTrace(0, Dbg, "Disk Full. Raise Status.\n", 0);
1892 FatRaiseStatus( IrpContext, STATUS_DISK_FULL );
1893 }
1894
1895 //
1896 // Did the caller supply a hint?
1897 //
1898
1899 if ((0 != AbsoluteClusterHint) && (AbsoluteClusterHint < (Vcb->AllocationSupport.NumberOfClusters + 2))) {
1900
1901 if (Vcb->NumberOfWindows > 1) {
1902
1903 //
1904 // If we're being called upon to allocate clusters outside the
1905 // current window (which happens only via MoveFile), it's a problem.
1906 // We address this by changing the current window to be the one which
1907 // contains the alternate cluster hint. Note that if the user's
1908 // request would cross a window boundary, he doesn't really get what
1909 // he wanted.
1910 //
1911
1912 if (AbsoluteClusterHint < Vcb->CurrentWindow->FirstCluster ||
1913 AbsoluteClusterHint > Vcb->CurrentWindow->LastCluster) {
1914
1915 ULONG BucketNum = FatWindowOfCluster( AbsoluteClusterHint );
1916
1917 NT_ASSERT( BucketNum < Vcb->NumberOfWindows);
1918
1919 //
1920 // Drop our shared lock on the ChangeBitMapResource, and pick it up again
1921 // exclusive in preparation for making the window swap.
1922 //
1923
1924 FatUnlockFreeClusterBitMap(Vcb);
1925 ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
1926 ExAcquireResourceExclusiveLite(&Vcb->ChangeBitMapResource, TRUE);
1927 FatLockFreeClusterBitMap(Vcb);
1928
1929 Window = &Vcb->Windows[BucketNum];
1930
1931 //
1932 // Again, test the current window against the one we want - some other
1933 // thread could have sneaked in behind our backs and kindly set it to the one
1934 // we need, when we dropped and reacquired the ChangeBitMapResource above.
1935 //
1936
1937 if (Window != Vcb->CurrentWindow) {
1938
1939 _SEH2_TRY {
1940
1941 Wait = BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
1942 SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
1943
1944 //
1945 // Change to the new window (update Vcb->CurrentWindow) and scan it
1946 // to build up a freespace bitmap etc.
1947 //
1948
1949 FatExamineFatEntries( IrpContext, Vcb,
1950 0,
1951 0,
1952 FALSE,
1953 Window,
1954 NULL);
1955
1956 } _SEH2_FINALLY {
1957
1958 if (!Wait) {
1959
1960 ClearFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
1961 }
1962
1963 if (_SEH2_AbnormalTermination()) {
1964
1965 //
1966 // We will have raised as a result of failing to pick up the
1967 // chunk of the FAT for this window move. Release our resources
1968 // and return the cluster count to the volume.
1969 //
1970
1971 Vcb->AllocationSupport.NumberOfFreeClusters += ClusterCount;
1972
1973 FatUnlockFreeClusterBitMap( Vcb );
1974 ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
1975 }
1976 } _SEH2_END;
1977 }
1978 }
1979
1980 //
1981 // Make the hint cluster number relative to the base of the current window...
1982 //
1983 // CurrentWindow->FirstCluster is biased by +2 already, so we will lose the
1984 // bias already in AbsoluteClusterHint. Put it back...
1985 //
1986
1987 WindowRelativeHint = AbsoluteClusterHint - Vcb->CurrentWindow->FirstCluster + 2;
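            //
            // Example with assumed numbers: window 1 of a FAT32 volume has
            // FirstCluster == 0x10002 (2 + 0x10000), so an absolute hint of
            // 0x18000 becomes the window relative hint
            // 0x18000 - 0x10002 + 2 == 0x8000.
            //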
1988 }
1989 else {
1990
1991 //
1992 // Only one 'window', i.e. FAT16/12. No modification necessary.
1993 //
1994
1995 WindowRelativeHint = AbsoluteClusterHint;
1996 }
1997 }
1998 else {
1999
2000 //
2001 // Either no hint supplied, or it was out of range, so grab one from the Vcb
2002 //
2003 // NOTE: Clusterhint in the Vcb is not guaranteed to be set (may be -1)
2004 //
2005
2006 WindowRelativeHint = Vcb->ClusterHint;
2007 AbsoluteClusterHint = 0;
2008
2009 //
2010 // Vcb hint may not have been initialized yet. Force to valid cluster.
2011 //
2012
2013 if (-1 == WindowRelativeHint) {
2014
2015 WindowRelativeHint = 2;
2016 }
2017 }
2018
2019 NT_ASSERT((WindowRelativeHint >= 2) && (WindowRelativeHint < Vcb->FreeClusterBitMap.SizeOfBitMap + 2));
2020
2021 //
2022 // Keep track of the window we're allocating from, so we can clean
2023 // up correctly if the current window changes after we unlock the
2024 // bitmap.
2025 //
2026
2027 Window = Vcb->CurrentWindow;
2028
2029 //
2030 // Try to find a run of free clusters large enough for us.
2031 //
2032
2033 StartingCluster = FatFindFreeClusterRun( IrpContext,
2034 Vcb,
2035 ClusterCount,
2036 WindowRelativeHint );
2037 //
2038 // If the above call was successful, we can just update the fat
2039 // and Mcb and exit. Otherwise we have to look for smaller free
2040 // runs.
2041 //
2042 // This test is a bit funky. Note that the error return from
2043 // RtlFindClearBits is -1, and adding two to that is 1.
2044 //
2045
2046 if ((StartingCluster != 1) &&
2047 ((0 == AbsoluteClusterHint) || (StartingCluster == WindowRelativeHint))
2048 ) {
2049
2050 #if DBG
2051 PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
2052 #endif // DBG
2053
2054 //
2055 // Take the clusters we found, and unlock the bit map.
2056 //
2057
2058 FatReserveClusters(IrpContext, Vcb, StartingCluster, ClusterCount);
2059
2060 Window->ClustersFree -= ClusterCount;
2061
2062 StartingCluster += Window->FirstCluster;
2063 StartingCluster -= 2;
2064
2065 NT_ASSERT( PreviousClear - ClusterCount == Window->ClustersFree );
2066
2067 FatUnlockFreeClusterBitMap( Vcb );
2068
2069 //
2070 // Note that this call will never fail since there is always
2071 // room for one entry in an empty Mcb.
2072 //
2073
2074 FatAddMcbEntry( Vcb, Mcb,
2075 0,
2076 FatGetLboFromIndex( Vcb, StartingCluster ),
2077 *ByteCount);
2078 _SEH2_TRY {
2079
2080 //
2081 // Update the fat.
2082 //
2083
2084 FatAllocateClusters(IrpContext, Vcb,
2085 StartingCluster,
2086 ClusterCount);
2087
2088 } _SEH2_FINALLY {
2089
2090 DebugUnwind( FatAllocateDiskSpace );
2091
2092 //
2093 // If the allocate clusters failed, remove the run from the Mcb,
2094 // unreserve the clusters, and reset the free cluster count.
2095 //
2096
2097 if (_SEH2_AbnormalTermination()) {
2098
2099 FatRemoveMcbEntry( Vcb, Mcb, 0, *ByteCount );
2100
2101 FatLockFreeClusterBitMap( Vcb );
2102
2103 // Only clear bits if the bitmap window is the same.
2104
2105 if (Window == Vcb->CurrentWindow) {
2106
2107 // Both values (StartingCluster and Window->FirstCluster) are
2108 // already biased by 2, so they cancel, and we need to add the 2 back in.
2109
2110 FatUnreserveClusters( IrpContext, Vcb,
2111 StartingCluster - Window->FirstCluster + 2,
2112 ClusterCount );
2113 }
2114
2115 Window->ClustersFree += ClusterCount;
2116 Vcb->AllocationSupport.NumberOfFreeClusters += ClusterCount;
2117
2118 FatUnlockFreeClusterBitMap( Vcb );
2119 }
2120
2121 ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
2122 } _SEH2_END;
2123
2124 } else {
2125
2126 //
2127 // Note that Index is a zero-based window-relative number. When appropriate
2128 // it'll get converted into a true cluster number and put in Cluster, which
2129 // will be a volume relative true cluster number.
2130 //
2131
2132 ULONG Index = 0;
2133 ULONG Cluster = 0;
2134 ULONG CurrentVbo = 0;
2135 ULONG PriorLastCluster = 0;
2136 ULONG BytesFound = 0;
2137
2138 ULONG ClustersFound = 0;
2139 ULONG ClustersRemaining = 0;
2140
2141 BOOLEAN LockedBitMap = FALSE;
2142 BOOLEAN SelectNextContigWindow = FALSE;
2143
2144 //
2145 // Drop our shared lock on the ChangeBitMapResource, and pick it up again
2146 // exclusive in preparation for making a window swap.
2147 //
2148
2149 FatUnlockFreeClusterBitMap(Vcb);
2150 ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
2151 ExAcquireResourceExclusiveLite(&Vcb->ChangeBitMapResource, TRUE);
2152 FatLockFreeClusterBitMap(Vcb);
2153 LockedBitMap = TRUE;
2154
2155 _SEH2_TRY {
2156
2157 if ( ExactMatchRequired && (1 == Vcb->NumberOfWindows)) {
2158
2159 //
2160 // Give up right now, there are no more windows to search! RtlFindClearBits
2161 // searches the whole bitmap, so we would have found any contiguous run
2162 // large enough.
2163 //
2164
2165 try_leave( Result = FALSE);
2166 }
2167
2168 //
2169 // While the request is still incomplete, look for the largest
2170 // run of free clusters, mark them taken, allocate the run in
2171 // the Mcb and Fat, and if this isn't the first time through
2172 // the loop, link it to the prior run in the fat. The Mcb will
2173 // coalesce automatically.
2174 //
2175
2176 ClustersRemaining = ClusterCount;
2177 CurrentVbo = 0;
2178 PriorLastCluster = 0;
2179
2180 while (ClustersRemaining != 0) {
2181
2182 //
2183 // If we just entered the loop, the bit map is already locked
2184 //
2185
2186 if ( !LockedBitMap ) {
2187
2188 FatLockFreeClusterBitMap( Vcb );
2189 LockedBitMap = TRUE;
2190 }
2191
2192 //
2193 // Find the largest run of free clusters. If the run is
2194 // bigger than we need, only use what we need. Note that
2195 // this will then be the last while() iteration.
2196 //
2197
2198 // 12/3/95: we need to bias the bitmap by 2 bits for the defrag
2199 // hooks, and the macro below became impossible to express without
2200 // in-line procedures.
2201 //
2202 // ClustersFound = FatLongestFreeClusterRun( IrpContext, Vcb, &Index );
2203
2204 ClustersFound = 0;
2205
2206 if (!SelectNextContigWindow) {
2207
2208 if ( 0 != WindowRelativeHint) {
2209
2210 ULONG Desired = Vcb->FreeClusterBitMap.SizeOfBitMap - (WindowRelativeHint - 2);
2211
2212 //
2213 // We will try to allocate contiguously. Try from the current hint to the
2214 // end of the current window. Don't try for more than we actually need.
2215 //
2216
2217 if (Desired > ClustersRemaining) {
2218
2219 Desired = ClustersRemaining;
2220 }
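//
// E.g. (illustrative): with a 0x10000 bit window bitmap and a hint of
// 0xfffe, only 0x10000 - (0xfffe - 2) == 4 clusters remain before the
// end of the window, so Desired starts at 4 and is then clamped to
// ClustersRemaining if that is smaller still.
//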
2221
2222 if (RtlAreBitsClear( &Vcb->FreeClusterBitMap,
2223 WindowRelativeHint - 2,
2224 Desired))
2225 {
2226 //
2227 // Clusters from hint->...windowend are free. Take them.
2228 //
2229
2230 Index = WindowRelativeHint - 2;
2231 ClustersFound = Desired;
2232
2233 if (FatIsFat32(Vcb)) {
2234
2235 //
2236 // We're now up against the end of the current window, so indicate that we
2237 // want the next window in the sequence next time around. (If we're not up
2238 // against the end of the window, then we got what we needed and won't be
2239 // coming around again anyway).
2240 //
2241
2242 SelectNextContigWindow = TRUE;
2243 WindowRelativeHint = 2;
2244 }
2245 else {
2246
2247 //
2248 // FAT 12/16 - we've run up against the end of the volume. Clear the
2249 // hint, since we now have no idea where to look.
2250 //
2251
2252 WindowRelativeHint = 0;
2253 }
2254 #if DBG
2255 PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
2256 #endif // DBG
2257 }
2258 else {
2259
2260 if (ExactMatchRequired) {
2261
2262 //
2263 // If our caller required an exact match, then we're hosed. Bail out now.
2264 //
2265
2266 try_leave( Result = FALSE);
2267 }
2268
2269 //
2270 // Hint failed, drop back to pot luck
2271 //
2272
2273 WindowRelativeHint = 0;
2274 }
2275 }
2276
2277 if ((0 == WindowRelativeHint) && (0 == ClustersFound)) {
2278
2279 if (ClustersRemaining <= Vcb->CurrentWindow->ClustersFree) {
2280
2281 //
2282 // The remaining allocation could be satisfied entirely from this
2283 // window. We will ask only for what we need, to try and avoid
2284 // unnecessarily fragmenting large runs of space by always using
2285 // (part of) the largest run we can find. This call will return the
2286 // first run large enough.
2287 //
2288
2289 Index = RtlFindClearBits( &Vcb->FreeClusterBitMap, ClustersRemaining, 0);
2290
2291 if (-1 != Index) {
2292
2293 ClustersFound = ClustersRemaining;
2294 }
2295 }
2296
2297 if (0 == ClustersFound) {
2298
2299 //
2300 // Still nothing, so just take the largest free run we can find.
2301 //
2302
2303 ClustersFound = RtlFindLongestRunClear( &Vcb->FreeClusterBitMap, &Index );
2304
2305 }
2306 #if DBG
2307 PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
2308 #endif // DBG
2309 if (ClustersFound >= ClustersRemaining) {
2310
2311 ClustersFound = ClustersRemaining;
2312 }
2313 else {
2314
2315 //
2316 // If we just ran up to the end of a window, set up a hint that
2317 // we'd like the next consecutive window after this one. (FAT32 only)
2318 //
2319
2320 if ( ((Index + ClustersFound) == Vcb->FreeClusterBitMap.SizeOfBitMap) &&
2321 FatIsFat32( Vcb)
2322 ) {
2323
2324 SelectNextContigWindow = TRUE;
2325 WindowRelativeHint = 2;
2326 }
2327 }
2328 }
2329 }
2330
2331 if (ClustersFound == 0) {
2332
2333 ULONG FaveWindow = 0;
2334 BOOLEAN SelectedWindow;
2335
2336 //
2337 // If we found no free clusters on a single-window FAT,
2338 // there was a bad problem with the free cluster count.
2339 //
2340
2341 if (1 == Vcb->NumberOfWindows) {
2342
2343 #ifdef _MSC_VER
2344 #pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
2345 #endif
2346 FatBugCheck( 0, 5, 0 );
2347 }
2348
2349 //
2350 // Switch to a new bucket. Possibly the next one if we're
2351 // currently on a roll (allocating contiguously)
2352 //
2353
2354 SelectedWindow = FALSE;
2355
2356 if ( SelectNextContigWindow) {
2357
2358 ULONG NextWindow;
2359
2360 NextWindow = (((ULONG)((PUCHAR)Vcb->CurrentWindow - (PUCHAR)Vcb->Windows)) / sizeof( FAT_WINDOW)) + 1;
2361
2362 if ((NextWindow < Vcb->NumberOfWindows) &&
2363 ( Vcb->Windows[ NextWindow].ClustersFree > 0)
2364 ) {
2365
2366 FaveWindow = NextWindow;
2367 SelectedWindow = TRUE;
2368 }
2369 else {
2370
2371 if (ExactMatchRequired) {
2372
2373 //
2374 // Some dope tried to allocate a run past the end of the volume...
2375 //
2376
2377 try_leave( Result = FALSE);
2378 }
2379
2380 //
2381 // Give up on the contiguous allocation attempts
2382 //
2383
2384 WindowRelativeHint = 0;
2385 }
2386
2387 SelectNextContigWindow = FALSE;
2388 }
2389
2390 if (!SelectedWindow) {
2391
2392 //
2393 // Select a new window to begin allocating from
2394 //
2395
2396 FaveWindow = FatSelectBestWindow( Vcb);
2397 }
2398
2399 //
2400 // By now we'd better have found a window with some free clusters
2401 //
2402
2403 if (0 == Vcb->Windows[ FaveWindow].ClustersFree) {
2404
2405 #ifdef _MSC_VER
2406 #pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
2407 #endif
2408 FatBugCheck( 0, 5, 1 );
2409 }
2410
2411 Wait = BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
2412 SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
2413
2414 FatExamineFatEntries( IrpContext, Vcb,
2415 0,
2416 0,
2417 FALSE,
2418 &Vcb->Windows[FaveWindow],
2419 NULL);
2420
2421 if (!Wait) {
2422
2423 ClearFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
2424 }
2425
2426 //
2427 // Now we'll just go around the loop again, having switched windows,
2428 // and allocate....
2429 //
2430 #if DBG
2431 PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
2432 #endif //DBG
2433 } // if (clustersfound == 0)
2434 else {
2435
2436 //
2437 // Take the clusters we found, convert our index to a cluster number
2438 // and unlock the bit map.
2439 //
2440
2441 Window = Vcb->CurrentWindow;
2442
2443 FatReserveClusters( IrpContext, Vcb, (Index + 2), ClustersFound );
2444
2445 Cluster = Index + Window->FirstCluster;
2446
2447 Window->ClustersFree -= ClustersFound;
2448 NT_ASSERT( PreviousClear - ClustersFound == Window->ClustersFree );
2449
2450 FatUnlockFreeClusterBitMap( Vcb );
2451 LockedBitMap = FALSE;
2452
2453 //
2454 // Add the newly allocated run to the Mcb.
2455 //
2456
2457 BytesFound = ClustersFound << LogOfBytesPerCluster;
2458
2459 FatAddMcbEntry( Vcb, Mcb,
2460 CurrentVbo,
2461 FatGetLboFromIndex( Vcb, Cluster ),
2462 BytesFound );
2463
2464 //
2465 // Connect the last allocated run with this one, and allocate
2466 // this run on the Fat.
2467 //
2468
2469 if (PriorLastCluster != 0) {
2470
2471 FatSetFatEntry( IrpContext,
2472 Vcb,
2473 PriorLastCluster,
2474 (FAT_ENTRY)Cluster );
2475 }
2476
2477 //
2478 // Update the fat
2479 //
2480
2481 FatAllocateClusters( IrpContext, Vcb, Cluster, ClustersFound );
2482
2483 //
2484 // Prepare for the next iteration.
2485 //
2486
2487 CurrentVbo += BytesFound;
2488 ClustersRemaining -= ClustersFound;
2489 PriorLastCluster = Cluster + ClustersFound - 1;
2490 }
2491 } // while (clustersremaining)
2492
2493 } _SEH2_FINALLY {
2494
2495 DebugUnwind( FatAllocateDiskSpace );
2496
2497 ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
2498
2499 //
2500 // Is there any unwinding to do?
2501 //
2502
2503 if ( _SEH2_AbnormalTermination() || (FALSE == Result)) {
2504
2505 //
2506 // Flag to the caller that they're getting nothing
2507 //
2508
2509 *ByteCount = 0;
2510
2511 //
2512 // There are three places we could have taken this exception:
2513 // when switching the window (FatExamineFatEntries), adding
2514 // a found run to the Mcb (FatAddMcbEntry), or when writing
2515 // the changes to the FAT (FatSetFatEntry). In the first case
2516 // we don't have anything to unwind before deallocation, and
2517 // can detect this by seeing if we have the ClusterBitmap
2518 // mutex out.
2519
2520 if (!LockedBitMap) {
2521
2522 FatLockFreeClusterBitMap( Vcb );
2523
2524 //
2525 // In these cases, we have the possibility that the FAT
2526 // window is still in place and we need to clear the bits.
2527 // If the Mcb entry isn't there (we raised trying to add
2528 // it), the effect of trying to remove it is a noop.
2529 //
2530
2531 if (Window == Vcb->CurrentWindow) {
2532
2533 //
2534 // Cluster reservation works on cluster 2 based window-relative
2535 // numbers, so we must convert. The subtraction will lose the
2536 // cluster 2 base, so bias the result.
2537 //
2538
2539 FatUnreserveClusters( IrpContext, Vcb,
2540 (Cluster - Window->FirstCluster) + 2,
2541 ClustersFound );
2542 }
2543
2544 //
2545 // Note that FatDeallocateDiskSpace will take care of adjusting
2546 // to account for the entries in the Mcb. All we have to account
2547 // for is the last run that didn't make it.
2548 //
2549
2550 Window->ClustersFree += ClustersFound;
2551 Vcb->AllocationSupport.NumberOfFreeClusters += ClustersFound;
2552
2553 FatUnlockFreeClusterBitMap( Vcb );
2554
2555 FatRemoveMcbEntry( Vcb, Mcb, CurrentVbo, BytesFound );
2556
2557 } else {
2558
2559 //
2560 // Just drop the mutex now - we didn't manage to do anything
2561 // that needs to be backed out.
2562 //
2563
2564 FatUnlockFreeClusterBitMap( Vcb );
2565 }
2566
2567 _SEH2_TRY {
2568
2569 //
2570 // Now we have tidied up, we are ready to just send the Mcb
2571 // off to deallocate disk space
2572 //
2573
2574 FatDeallocateDiskSpace( IrpContext, Vcb, Mcb, FALSE );
2575
2576 } _SEH2_FINALLY {
2577
2578 //
2579 // Now finally (really), remove all the entries from the mcb
2580 //
2581
2582 FatRemoveMcbEntry( Vcb, Mcb, 0, 0xFFFFFFFF );
2583 } _SEH2_END;
2584 }
2585
2586 DebugTrace(-1, Dbg, "FatAllocateDiskSpace -> (VOID)\n", 0);
2587
2588 } _SEH2_END; // finally
2589 }
2590
2591 return;
2592 }
2593
2594 \f
2595
2596 //
2597 // Limit our zeroing writes to 1 MB.
2598 //
2599
2600 #define MAX_ZERO_MDL_SIZE (1*1024*1024)
2601
2602 _Requires_lock_held_(_Global_critical_region_)
2603 VOID
2604 FatDeallocateDiskSpace (
2605 IN PIRP_CONTEXT IrpContext,
2606 IN PVCB Vcb,
2607 IN PLARGE_MCB Mcb,
2608 IN BOOLEAN ZeroOnDeallocate
2609 )
2610
2611 /*++
2612
2613 Routine Description:
2614
2615 This procedure deallocates the disk space denoted by an input
2616 mcb. Note that the input MCB does not necessarily need to describe
2617 a chain that ends with a FAT_CLUSTER_LAST entry.
2618
2619 Pictorially what is done is the following
2620
2621 Fat |--a--|--b--|--c--|
2622 Mcb |--a--|--b--|--c--|
2623
2624 becomes
2625
2626 Fat |--0--|--0--|--0--|
2627 Mcb |--a--|--b--|--c--|
2628
2629 Arguments:
2630
2631 Vcb - Supplies the VCB being modified
2632
2633 Mcb - Supplies the MCB describing the disk space to deallocate. Note
2634 that Mcb is unchanged by this procedure.
2635
2636
2637 Return Value:
2638
2639 None.
2640
2641 --*/
2642
2643 {
2644 LBO Lbo;
2645 VBO Vbo;
2646
2647 ULONG RunsInMcb;
2648 ULONG ByteCount;
2649 ULONG ClusterCount = 0;
2650 ULONG ClusterIndex = 0;
2651 ULONG McbIndex = 0;
2652
2653 UCHAR LogOfBytesPerCluster;
2654
2655 PFAT_WINDOW Window;
2656
2657 NTSTATUS ZeroingStatus = STATUS_SUCCESS;
2658
2659 PAGED_CODE();
2660
2661 DebugTrace(+1, Dbg, "FatDeallocateDiskSpace\n", 0);
2662 DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
2663 DebugTrace( 0, Dbg, " Mcb = %p\n", Mcb);
2664
2665 LogOfBytesPerCluster = Vcb->AllocationSupport.LogOfBytesPerCluster;
2666
2667 RunsInMcb = FsRtlNumberOfRunsInLargeMcb( Mcb );
2668
2669 if ( RunsInMcb == 0 ) {
2670
2671 DebugTrace(-1, Dbg, "FatDeallocateDiskSpace -> (VOID)\n", 0);
2672 return;
2673 }
2674
2675 //
2676 // If we are supposed to zero out the allocation before freeing it, do so.
2677 //
2678
2679 if (ZeroOnDeallocate) {
2680
2681 _SEH2_TRY {
2682
2683 PIRP IoIrp;
2684 KEVENT IoEvent;
2685 IO_STATUS_BLOCK Iosb;
2686 PVOID Buffer = NULL;
2687 PMDL Mdl;
2688 ULONG ByteCountToZero;
2689 ULONG MdlSizeMapped;
2690
2691 //
2692 // Issue the writes down for each run in the Mcb
2693 //
2694
2695 KeInitializeEvent( &IoEvent,
2696 NotificationEvent,
2697 FALSE );
2698
2699 for ( McbIndex = 0; McbIndex < RunsInMcb; McbIndex++ ) {
2700
2701 FatGetNextMcbEntry( Vcb, Mcb, McbIndex, &Vbo, &Lbo, &ByteCount );
2702
2703 //
2704 // Assert that Fat files have no holes.
2705 //
2706
2707 NT_ASSERT( Lbo != 0 );
2708
2709 //
2710 // Set up our MDL for this run.
2711 //
2712
2713 if (ByteCount > MAX_ZERO_MDL_SIZE) {
2714 Mdl = FatBuildZeroMdl( IrpContext, MAX_ZERO_MDL_SIZE);
2715 } else {
2716 Mdl = FatBuildZeroMdl( IrpContext, ByteCount);
2717 }
2718
2719 if (!Mdl) {
2720 ZeroingStatus = STATUS_INSUFFICIENT_RESOURCES;
2721 goto try_exit;
2722 }
2723
2724 _SEH2_TRY {
2725
2726 //
2727 // Map the MDL.
2728 //
2729
2730 #ifndef __REACTOS__
2731 Buffer = MmGetSystemAddressForMdlSafe(Mdl, HighPagePriority|MdlMappingNoExecute);
2732 #else
2733 Buffer = MmGetSystemAddressForMdlSafe(Mdl, HighPagePriority);
2734 #endif
2735 if (!Buffer) {
2736 NT_ASSERT( FALSE );
2737 ZeroingStatus = STATUS_INSUFFICIENT_RESOURCES;
2738 goto try_exit2;
2739 }
2740
2741 //
2742 // We might not have been able to get an MDL big enough to map the whole
2743 // run. In this case, break up the write.
2744 //
2745
2746 MdlSizeMapped = min( ByteCount, Mdl->ByteCount );
2747 ByteCountToZero = ByteCount;
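//
// E.g. (sizes assumed for illustration): zeroing a 3.5 MB run with a
// 1 MB zero MDL takes three full 1 MB writes plus a final 0.5 MB
// write; Lbo advances by MdlSizeMapped each pass and the last pass
// clamps MdlSizeMapped down to the remaining byte count.
//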
2748
2749 //
2750 // Loop until there are no bytes left to write
2751 //
2752
2753 while (ByteCountToZero != 0) {
2754
2755 //
2756 // Write zeros to each run.
2757 //
2758
2759 KeClearEvent( &IoEvent );
2760
2761 IoIrp = IoBuildSynchronousFsdRequest( IRP_MJ_WRITE,
2762 Vcb->TargetDeviceObject,
2763 Buffer,
2764 MdlSizeMapped,
2765 (PLARGE_INTEGER)&Lbo,
2766 &IoEvent,
2767 &Iosb );
2768
2769 if (IoIrp == NULL) {
2770 NT_ASSERT( FALSE );
2771 ZeroingStatus = STATUS_INSUFFICIENT_RESOURCES;
2772 goto try_exit2;
2773 }
2774
2775 //
2776 // Set a flag indicating that we want to write through any
2777 // cache on the controller. This eliminates the need for
2778 // an explicit flush-device after the write.
2779 //
2780
2781 SetFlag( IoGetNextIrpStackLocation(IoIrp)->Flags, SL_WRITE_THROUGH );
2782
2783 ZeroingStatus = IoCallDriver( Vcb->TargetDeviceObject, IoIrp );
2784
2785 if (ZeroingStatus == STATUS_PENDING) {
2786
2787 (VOID)KeWaitForSingleObject( &IoEvent,
2788 Executive,
2789 KernelMode,
2790 FALSE,
2791 (PLARGE_INTEGER)NULL );
2792
2793 ZeroingStatus = Iosb.Status;
2794 }
2795
2796 if (!NT_SUCCESS( ZeroingStatus )) {
2797 NT_ASSERT( FALSE );
2798 goto try_exit2;
2799 }
2800
2801 //
2802 // Increment the starting offset where we will zero.
2803 //
2804
2805 Lbo += MdlSizeMapped;
2806
2807 //
2808 // Decrement ByteCount
2809 //
2810
2811 ByteCountToZero -= MdlSizeMapped;
2812
2813 if (ByteCountToZero < MdlSizeMapped) {
2814 MdlSizeMapped = ByteCountToZero;
2815 }
2816
2817 }
2818
2819 try_exit2:
2820
2821 NOTHING;
2822
2823 } _SEH2_FINALLY {
2824
2825 if (!FlagOn( Mdl->MdlFlags, MDL_SOURCE_IS_NONPAGED_POOL) &&
2826 FlagOn( Mdl->MdlFlags, MDL_MAPPED_TO_SYSTEM_VA )) {
2827
2828 MmUnmapLockedPages( Mdl->MappedSystemVa, Mdl );
2829 }
2830 IoFreeMdl( Mdl );
2831 } _SEH2_END;
2832
2833 }
2834
2835 try_exit:
2836
2837 NOTHING;
2838
2839 } _SEH2_EXCEPT(FatExceptionFilter( NULL, _SEH2_GetExceptionInformation() )) {
2840
2841 //
2842 // If we failed to zero for some reason, still go ahead and deallocate
2843 // the clusters. Otherwise we'll leak space from the volume.
2844 //
2845
2846 ZeroingStatus = _SEH2_GetExceptionCode();
2847
2848 } _SEH2_END;
2849
2850 }
2851
2852 NT_ASSERT( NT_SUCCESS(ZeroingStatus) );
2853
2854 _SEH2_TRY {
2855
2856 //
2857 // Run though the Mcb, freeing all the runs in the fat.
2858 //
2859 // We do this in two steps (first update the fat, then the bitmap
2860 // (which can't fail)) to prevent other people from taking clusters
2861 // that we need to re-allocate in the event of unwind.
2862 //
2863
2864 ExAcquireResourceSharedLite(&Vcb->ChangeBitMapResource, TRUE);
2865
2866 RunsInMcb = FsRtlNumberOfRunsInLargeMcb( Mcb );
2867
2868 for ( McbIndex = 0; McbIndex < RunsInMcb; McbIndex++ ) {
2869
2870 FatGetNextMcbEntry( Vcb, Mcb, McbIndex, &Vbo, &Lbo, &ByteCount );
2871
2872 //
2873 // Assert that Fat files have no holes.
2874 //
2875
2876 NT_ASSERT( Lbo != 0 );
2877
2878 //
2879 // Write FAT_CLUSTER_AVAILABLE to each cluster in the run.
2880 //
2881
2882 if (ByteCount == 0xFFFFFFFF) {
2883
2884 //
2885 // Special case the computation of ClusterCount
2886 // when the file is of max size (4 GiB - 1).
2887 //
2888
2889 ClusterCount = (1 << (32 - LogOfBytesPerCluster));
2890
2891 } else {
2892
2893 ClusterCount = ByteCount >> LogOfBytesPerCluster;
2894 }
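//
// E.g. with 4 KB clusters (LogOfBytesPerCluster == 12, illustrative),
// a maximally sized file maps to 1 << (32 - 12) == 0x100000 clusters,
// exactly the 4 GiB of allocation backing a 4 GiB - 1 byte file.
//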
2895
2896 ClusterIndex = FatGetIndexFromLbo( Vcb, Lbo );
2897
2898 FatFreeClusters( IrpContext, Vcb, ClusterIndex, ClusterCount );
2899 }
2900
2901 //
2902 // From now on, nothing can go wrong .... (as in raise)
2903 //
2904
2905 FatLockFreeClusterBitMap( Vcb );
2906
2907 for ( McbIndex = 0; McbIndex < RunsInMcb; McbIndex++ ) {
2908
2909 ULONG ClusterEnd;
2910 ULONG MyStart, MyLength, count;
2911 #if DBG
2912 #ifndef __REACTOS__
2913 ULONG PreviousClear = 0;
2914 #endif
2915 ULONG i = 0;
2916 #endif
2917
2918 FatGetNextMcbEntry( Vcb, Mcb, McbIndex, &Vbo, &Lbo, &ByteCount );
2919
2920 //
2921 // Mark the bits clear in the FreeClusterBitMap.
2922 //
2923
2924 if (ByteCount == 0xFFFFFFFF) {
2925
2926 //
2927 // Special case the computation of ClusterCount
2928 // when the file is of max size (2^32 - 1 bytes).
2929 //
2930
2931 ClusterCount = (1 << (32 - LogOfBytesPerCluster));
2932
2933 } else {
2934
2935 ClusterCount = ByteCount >> LogOfBytesPerCluster;
2936 }
2937
2938 ClusterIndex = FatGetIndexFromLbo( Vcb, Lbo );
2939
2940 Window = Vcb->CurrentWindow;
2941
2942 //
2943 // If we've divided the bitmap, elide bitmap manipulation for
2944 // runs that are outside the current bucket.
2945 //
2946
2947 ClusterEnd = ClusterIndex + ClusterCount - 1;
2948
2949 if (!(ClusterIndex > Window->LastCluster ||
2950 ClusterEnd < Window->FirstCluster)) {
2951
2952 //
2953 // The run being freed overlaps the current bucket, so we'll
2954 // have to clear some bits.
2955 //
2956
2957 if (ClusterIndex < Window->FirstCluster &&
2958 ClusterEnd > Window->LastCluster) {
2959
2960 MyStart = Window->FirstCluster;
2961 MyLength = Window->LastCluster - Window->FirstCluster + 1;
2962
2963 } else if (ClusterIndex < Window->FirstCluster) {
2964
2965 MyStart = Window->FirstCluster;
2966 MyLength = ClusterEnd - Window->FirstCluster + 1;
2967
2968 } else {
2969
2970 //
2971 // The range being freed starts in the bucket, and may possibly
2972 // extend beyond the bucket.
2973 //
2974
2975 MyStart = ClusterIndex;
2976
2977 if (ClusterEnd <= Window->LastCluster) {
2978
2979 MyLength = ClusterCount;
2980
2981 } else {
2982
2983 MyLength = Window->LastCluster - ClusterIndex + 1;
2984 }
2985 }
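//
// E.g. (illustrative): for a window covering clusters 0x1000..0x1fff
// and a freed run 0x0f00..0x17ff, only the overlap 0x1000..0x17ff is
// cleared: MyStart == 0x1000 and MyLength == 0x17ff - 0x1000 + 1 == 0x800.
//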
2986
2987 if (MyLength == 0) {
2988
2989 continue;
2990 }
2991
2992 #if DBG
2993 #ifndef __REACTOS__
2994 #ifdef _MSC_VER
2995 #pragma prefast( suppress:28931, "this is DBG build only" )
2996 #endif
2997 PreviousClear = RtlNumberOfClearBits( &Vcb->FreeClusterBitMap );
2998 #endif
2999
3000
3001 //
3002 // Verify that the Bits are all really set.
3003 //
3004
3005 NT_ASSERT( MyStart + MyLength - Window->FirstCluster <= Vcb->FreeClusterBitMap.SizeOfBitMap );
3006
3007 for (i = 0; i < MyLength; i++) {
3008
3009 NT_ASSERT( RtlCheckBit(&Vcb->FreeClusterBitMap,
3010 MyStart - Window->FirstCluster + i) == 1 );
3011 }
3012 #endif // DBG
3013
3014 FatUnreserveClusters( IrpContext, Vcb,
3015 MyStart - Window->FirstCluster + 2,
3016 MyLength );
3017 }
3018
3019 //
3020 // Adjust the ClustersFree count for each bitmap window, even the ones
3021 // that are not the current window.
3022 //
3023
3024 if (FatIsFat32(Vcb)) {
3025
3026 Window = &Vcb->Windows[FatWindowOfCluster( ClusterIndex )];
3027
3028 } else {
3029
3030 Window = &Vcb->Windows[0];
3031 }
3032
3033 MyStart = ClusterIndex;
3034
3035 for (MyLength = ClusterCount; MyLength > 0; MyLength -= count) {
3036
3037 count = FatMin(Window->LastCluster - MyStart + 1, MyLength);
3038 Window->ClustersFree += count;
3039
3040 //
3041 // If this was not the last window this allocation spanned,
3042 // advance to the next.
3043 //
3044
3045 if (MyLength != count) {
3046
3047 Window++;
3048 MyStart = Window->FirstCluster;
3049 }
3050 }
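//
// E.g. (illustrative): a freed run of 0x300 clusters that begins 0x100
// clusters before the end of one window credits 0x100 to that window
// and the remaining 0x200 to the next one.
//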
3051
3052 //
3053 // Deallocation is now complete. Adjust the free cluster count.
3054 //
3055
3056 Vcb->AllocationSupport.NumberOfFreeClusters += ClusterCount;
3057 }
3058
3059 #if DBG
3060 if (Vcb->CurrentWindow->ClustersFree !=
3061 RtlNumberOfClearBits(&Vcb->FreeClusterBitMap)) {
3062
3063 DbgPrint("%x vs %x\n", Vcb->CurrentWindow->ClustersFree,
3064 RtlNumberOfClearBits(&Vcb->FreeClusterBitMap));
3065
3066 DbgPrint("%x for %x\n", ClusterIndex, ClusterCount);
3067 }
3068 #endif
3069
3070 FatUnlockFreeClusterBitMap( Vcb );
3071
3072
3073 } _SEH2_FINALLY {
3074
3075 DebugUnwind( FatDeallocateDiskSpace );
3076
3077 //
3078 // Is there any unwinding to do?
3079 //
3080
3081 ExReleaseResourceLite(&Vcb->ChangeBitMapResource);
3082
3083 if ( _SEH2_AbnormalTermination() ) {
3084
3085 LBO LocalLbo;
3086 VBO LocalVbo;
3087
3088 ULONG Index;
3089 ULONG Clusters;
3090 ULONG FatIndex;
3091 ULONG PriorLastIndex;
3092
3093 //
3094 // For each entry we already deallocated, reallocate it,
3095 // chaining together as necessary. Note that we continue
3096 // up to and including the last "for" iteration even though
3097 // the SetFatRun could not have been successful. This
3098 // allows us a convenient way to re-link the final successful
3099 // SetFatRun.
3100 //
3101 // It is possible that the reason we got here will prevent us
3102 // from succeeding in this operation.
3103 //
3104
3105 PriorLastIndex = 0;
3106
3107 for (Index = 0; Index <= McbIndex; Index++) {
3108
3109 FatGetNextMcbEntry(Vcb, Mcb, Index, &LocalVbo, &LocalLbo, &ByteCount);
3110
3111 if (ByteCount == 0xFFFFFFFF) {
3112
3113 //
3114 // Special case the computation of ClusterCount
3115 // when the file is of max size (2^32 - 1 bytes).
3116 //
3117
3118 Clusters = (1 << (32 - LogOfBytesPerCluster));
3119
3120 } else {
3121
3122 Clusters = ByteCount >> LogOfBytesPerCluster;
3123 }
3124
3125 FatIndex = FatGetIndexFromLbo( Vcb, LocalLbo );
3126
3127 //
3128 // We must always restore the prior iteration's last
3129 // entry, pointing it to the first cluster of this run.
3130 //
3131
3132 if (PriorLastIndex != 0) {
3133
3134 FatSetFatEntry( IrpContext,
3135 Vcb,
3136 PriorLastIndex,
3137 (FAT_ENTRY)FatIndex );
3138 }
3139
3140 //
3141 // If this is not the last entry (the one that failed)
3142 // then reallocate the disk space on the fat.
3143 //
3144
3145 if ( Index < McbIndex ) {
3146
3147 FatAllocateClusters(IrpContext, Vcb, FatIndex, Clusters);
3148
3149 PriorLastIndex = FatIndex + Clusters - 1;
3150 }
3151 }
3152 }
3153
3154 DebugTrace(-1, Dbg, "FatDeallocateDiskSpace -> (VOID)\n", 0);
3155 } _SEH2_END;
3156
3157 return;
3158 }
3159
3160 \f
3161 _Requires_lock_held_(_Global_critical_region_)
3162 VOID
3163 FatSplitAllocation (
3164 IN PIRP_CONTEXT IrpContext,
3165 IN PVCB Vcb,
3166 IN OUT PLARGE_MCB Mcb,
3167 IN VBO SplitAtVbo,
3168 OUT PLARGE_MCB RemainingMcb
3169 )
3170
3171 /*++
3172
3173 Routine Description:
3174
3175 This procedure takes a single mcb and splits its allocation into
3176 two separate allocation units. The separation must only be done
3177 on cluster boundaries, otherwise we bugcheck.
3178
3179 On the disk this actually works by inserting a FAT_CLUSTER_LAST into
3180 the last index of the first part being split out.
3181
3182 Pictorially what is done is the following (where ! denotes the end of
3183 the fat chain (i.e., FAT_CLUSTER_LAST)):
3184
3185
3186 Mcb |--a--|--b--|--c--|--d--|--e--|--f--|
3187
3188 ^
3189 SplitAtVbo ---------------------+
3190
3191 RemainingMcb (empty)
3192
3193 becomes
3194
3195 Mcb |--a--|--b--|--c--!
3196
3197
3198 RemainingMcb |--d--|--e--|--f--|
3199
3200 Arguments:
3201
3202 Vcb - Supplies the VCB being modified
3203
3204 Mcb - Supplies the MCB describing the allocation being split into
3205 two parts. Upon return this Mcb now contains the first chain.
3206
3207 SplitAtVbo - Supplies the VBO of the first byte for the second chain
3208 that we are creating.
3209
3210 RemainingMcb - Receives the MCB describing the second chain of allocated
3211 disk space. The caller passes in an initialized Mcb that
3212 is filled in by this procedure STARTING AT VBO 0.
3213
3214 Return Value:
3215
3216 None.
3218
3219 --*/
3220
3221 {
3222 VBO SourceVbo;
3223 VBO TargetVbo;
3224 VBO DontCare;
3225
3226 LBO Lbo;
3227
3228 ULONG ByteCount;
3229
3230 #if DBG
3231 ULONG BytesPerCluster;
3232 #endif
3233
3234 PAGED_CODE();
3235
3236 DebugTrace(+1, Dbg, "FatSplitAllocation\n", 0);
3237 DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
3238 DebugTrace( 0, Dbg, " Mcb = %p\n", Mcb);
3239 DebugTrace( 0, Dbg, " SplitAtVbo = %8lx\n", SplitAtVbo);
3240 DebugTrace( 0, Dbg, " RemainingMcb = %p\n", RemainingMcb);
3241
3242 #if DBG
3243 BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;
3244 #endif
3245
3246 //
3247 // Assert that the split point is cluster aligned
3248 //
3249
3250 NT_ASSERT( (SplitAtVbo & (BytesPerCluster - 1)) == 0 );
3251
3252 //
3253 // We should never be handed an empty source MCB and asked to split
3254 // at a non zero point.
3255 //
3256
3257 NT_ASSERT( !((0 != SplitAtVbo) && (0 == FsRtlNumberOfRunsInLargeMcb( Mcb))));
3258
3259 //
3260 // Assert we were given an empty target Mcb.
3261 //
3262
3263 //
3264 // This assert is commented out to avoid hitting in the Ea error
3265 // path. In that case we will be using the same Mcb's to split the
3266 // allocation that we used to merge them. The target Mcb will contain
3267 // the runs that the split will attempt to insert.
3268 //
3269 //
3270 // NT_ASSERT( FsRtlNumberOfRunsInMcb( RemainingMcb ) == 0 );
3271 //
3272
3273 _SEH2_TRY {
3274
3275 //
3276 // Move the runs after SplitAtVbo from the source to the target
3277 //
3278
3279 SourceVbo = SplitAtVbo;
3280 TargetVbo = 0;
3281
3282 while (FatLookupMcbEntry(Vcb, Mcb, SourceVbo, &Lbo, &ByteCount, NULL)) {
3283
3284 FatAddMcbEntry( Vcb, RemainingMcb, TargetVbo, Lbo, ByteCount );
3285
3286 FatRemoveMcbEntry( Vcb, Mcb, SourceVbo, ByteCount );
3287
3288 TargetVbo += ByteCount;
3289 SourceVbo += ByteCount;
3290
3291 //
3292 // If SourceVbo overflows, we were actually snipping off the end
3293 // of the maximal file ... and are now done.
3294 //
3295
3296 if (SourceVbo == 0) {
3297
3298 break;
3299 }
3300 }
3301
3302 //
3303 // Mark the last pre-split cluster as a FAT_CLUSTER_LAST
3304 //
3305
3306 if ( SplitAtVbo != 0 ) {
3307
3308 FatLookupLastMcbEntry( Vcb, Mcb, &DontCare, &Lbo, NULL );
3309
3310 FatSetFatEntry( IrpContext,
3311 Vcb,
3312 FatGetIndexFromLbo( Vcb, Lbo ),
3313 FAT_CLUSTER_LAST );
3314 }
3315
3316 } _SEH2_FINALLY {
3317
3318 DebugUnwind( FatSplitAllocation );
3319
3320 //
3321 // If we got an exception, we must glue back together the Mcbs
3322 //
3323
3324 if ( _SEH2_AbnormalTermination() ) {
3325
3326 TargetVbo = SplitAtVbo;
3327 SourceVbo = 0;
3328
3329 while (FatLookupMcbEntry(Vcb, RemainingMcb, SourceVbo, &Lbo, &ByteCount, NULL)) {
3330
3331 FatAddMcbEntry( Vcb, Mcb, TargetVbo, Lbo, ByteCount );
3332
3333 FatRemoveMcbEntry( Vcb, RemainingMcb, SourceVbo, ByteCount );
3334
3335 TargetVbo += ByteCount;
3336 SourceVbo += ByteCount;
3337 }
3338 }
3339
3340 DebugTrace(-1, Dbg, "FatSplitAllocation -> (VOID)\n", 0);
3341 } _SEH2_END;
3342
3343 return;
3344 }
3345
3346 \f
3347 _Requires_lock_held_(_Global_critical_region_)
3348 VOID
3349 FatMergeAllocation (
3350 IN PIRP_CONTEXT IrpContext,
3351 IN PVCB Vcb,
3352 IN OUT PLARGE_MCB Mcb,
3353 IN PLARGE_MCB SecondMcb
3354 )
3355
3356 /*++
3357
3358 Routine Description:
3359
3360 This routine takes two separate allocations described by two MCBs and
3361 joins them together into one allocation.
3362
3363 Pictorially what is done is the following (where ! denotes the end of
3364 the fat chain (i.e., FAT_CLUSTER_LAST)):
3365
3366
3367 Mcb |--a--|--b--|--c--!
3368
3369 SecondMcb |--d--|--e--|--f--|
3370
3371 becomes
3372
3373 Mcb |--a--|--b--|--c--|--d--|--e--|--f--|
3374
3375 SecondMcb |--d--|--e--|--f--|
3376
3377
3378 Arguments:
3379
3380 Vcb - Supplies the VCB being modified
3381
3382 Mcb - Supplies the MCB of the first allocation that is being modified.
3383 Upon return this Mcb will also describe the newly enlarged
3384 allocation
3385
3386 SecondMcb - Supplies the ZERO VBO BASED MCB of the second allocation
3387 that is being appended to the first allocation. This
3388 procedure leaves SecondMcb unchanged.
3389
3390 Return Value:
3391
3392 None.
3394
3395 --*/
3396
3397 {
3398 VBO SpliceVbo = 0;
3399 LBO SpliceLbo;
3400
3401 VBO SourceVbo;
3402 VBO TargetVbo = 0;
3403
3404 LBO Lbo;
3405
3406 ULONG ByteCount;
3407
3408 PAGED_CODE();
3409
3410 DebugTrace(+1, Dbg, "FatMergeAllocation\n", 0);
3411 DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
3412 DebugTrace( 0, Dbg, " Mcb = %p\n", Mcb);
3413 DebugTrace( 0, Dbg, " SecondMcb = %p\n", SecondMcb);
3414
3415 _SEH2_TRY {
3416
3417 //
3418 // Append the runs from SecondMcb to Mcb
3419 //
3420
3421 (void)FatLookupLastMcbEntry( Vcb, Mcb, &SpliceVbo, &SpliceLbo, NULL );
3422
3423 SourceVbo = 0;
3424 TargetVbo = SpliceVbo + 1;
3425
3426 while (FatLookupMcbEntry(Vcb, SecondMcb, SourceVbo, &Lbo, &ByteCount, NULL)) {
3427
3428 FatAddMcbEntry( Vcb, Mcb, TargetVbo, Lbo, ByteCount );
3429
3430 SourceVbo += ByteCount;
3431 TargetVbo += ByteCount;
3432 }
3433
3434 //
3435 // Link the last pre-merge cluster to the first cluster of SecondMcb
3436 //
3437
3438 FatLookupMcbEntry( Vcb, SecondMcb, 0, &Lbo, (PULONG)NULL, NULL );
3439
3440 FatSetFatEntry( IrpContext,
3441 Vcb,
3442 FatGetIndexFromLbo( Vcb, SpliceLbo ),
3443 (FAT_ENTRY)FatGetIndexFromLbo( Vcb, Lbo ) );
3444
3445 } _SEH2_FINALLY {
3446
3447 DebugUnwind( FatMergeAllocation );
3448
3449 //
3450 // If we got an exception, we must remove the runs added to Mcb
3451 //
3452
3453 if ( _SEH2_AbnormalTermination() ) {
3454
3455 ULONG CutLength;
3456
3457 if ((CutLength = TargetVbo - (SpliceVbo + 1)) != 0) {
3458
3459 FatRemoveMcbEntry( Vcb, Mcb, SpliceVbo + 1, CutLength);
3460 }
3461 }
3462
3463 DebugTrace(-1, Dbg, "FatMergeAllocation -> (VOID)\n", 0);
3464 } _SEH2_END;
3465
3466 return;
3467 }
3468
3469 \f
3470 //
3471 // Internal support routine
3472 //
3473
3474 CLUSTER_TYPE
3475 FatInterpretClusterType (
3476 IN PVCB Vcb,
3477 IN FAT_ENTRY Entry
3478 )
3479
3480 /*++
3481
3482 Routine Description:
3483
3484 This procedure tells the caller how to interpret the input fat table
3485 entry. It will indicate if the fat cluster is available, reserved,
3486 bad, the last one, or another fat index. This procedure can deal
3487 with 12, 16, and 32 bit fats.
3488
3489 Arguments:
3490
3491 Vcb - Supplies the Vcb to examine, yields 12/16/32 bit info
3492
3493 Entry - Supplies the fat entry to examine
3494
3495 Return Value:
3496
3497 CLUSTER_TYPE - Is the type of the input Fat entry
3498
3499 --*/
3500
3501 {
3502 DebugTrace(+1, Dbg, "InterpretClusterType\n", 0);
3503 DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
3504 DebugTrace( 0, Dbg, " Entry = %8lx\n", Entry);
3505
3506 PAGED_CODE();
3507
3508 switch(Vcb->AllocationSupport.FatIndexBitSize ) {
3509 case 32:
3510 Entry &= FAT32_ENTRY_MASK;
3511 break;
3512
3513 case 12:
3514 NT_ASSERT( Entry <= 0xfff );
3515 if (Entry >= 0x0ff0) {
3516 Entry |= 0x0FFFF000;
3517 }
3518 break;
3519
3520 default:
3521 case 16:
3522 NT_ASSERT( Entry <= 0xffff );
3523 if (Entry >= 0x0fff0) {
3524 Entry |= 0x0FFF0000;
3525 }
3526 break;
3527 }
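//
// For example (values from fat.h), a raw 12 bit entry of 0xff7 is
// extended to 0x0ffffff7 (FAT_CLUSTER_BAD) and 0xfff to 0x0fffffff
// (FAT_CLUSTER_LAST), so the range tests below behave identically
// for all three FAT widths.
//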
3528
3529 if (Entry == FAT_CLUSTER_AVAILABLE) {
3530
3531 DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterAvailable\n", 0);
3532
3533 return FatClusterAvailable;
3534
3535 } else if (Entry < FAT_CLUSTER_RESERVED) {
3536
3537 DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterNext\n", 0);
3538
3539 return FatClusterNext;
3540
3541 } else if (Entry < FAT_CLUSTER_BAD) {
3542
3543 DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterReserved\n", 0);
3544
3545 return FatClusterReserved;
3546
3547 } else if (Entry == FAT_CLUSTER_BAD) {
3548
3549 DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterBad\n", 0);
3550
3551 return FatClusterBad;
3552
3553 } else {
3554
3555 DebugTrace(-1, Dbg, "FatInterpretClusterType -> FatClusterLast\n", 0);
3556
3557 return FatClusterLast;
3558 }
3559 }
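//
// A minimal usage sketch (illustrative, not part of the driver):
// walking a FAT chain with FatLookupFatEntry and
// FatInterpretClusterType. "StartIndex" is a hypothetical first
// cluster; the enumeration context caches a pinned FAT page and is
// unpinned when the walk is done.
//
// FAT_ENUMERATION_CONTEXT Context;
// ULONG Entry = StartIndex;
//
// RtlZeroMemory( &Context, sizeof(Context) );
//
// do {
//
//     FatLookupFatEntry( IrpContext, Vcb, Entry, &Entry, &Context );
//
// } while (FatInterpretClusterType( Vcb, (FAT_ENTRY)Entry ) == FatClusterNext);
//
// FatUnpinBcb( IrpContext, Context.Bcb );
//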
3560
3561 \f
3562 //
3563 // Internal support routine
3564 //
3565
3566 VOID
3567 FatLookupFatEntry (
3568 IN PIRP_CONTEXT IrpContext,
3569 IN PVCB Vcb,
3570 IN ULONG FatIndex,
3571 IN OUT PULONG FatEntry,
3572 IN OUT PFAT_ENUMERATION_CONTEXT Context
3573 )
3574
3575 /*++
3576
3577 Routine Description:
3578
3579 This routine takes an index into the fat and gives back the value
3580 in the Fat at this index. At any given time, for a 16 bit fat, this
3581 routine allows only one page per volume of the fat to be pinned in
3582 memory. For a 12 bit fat, the entire fat (max 6k) is pinned. This
3583 extra layer of caching makes the vast majority of requests very
3584 fast. The context for this caching is stored in a structure in the Vcb.
3585
3586 Arguments:
3587
3588 Vcb - Supplies the Vcb to examine, yields 12/16 bit info,
3589 fat access context, etc.
3590
3591 FatIndex - Supplies the fat index to examine.
3592
3593 FatEntry - Receives the fat entry pointed to by FatIndex. Note that
3594 it must point to non-paged pool.
3595
3596 Context - This structure keeps track of a page of pinned fat between calls.
3597
3598 --*/
3599
3600 {
3601 PAGED_CODE();
3602
3603 DebugTrace(+1, Dbg, "FatLookupFatEntry\n", 0);
3604 DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
3605 DebugTrace( 0, Dbg, " FatIndex = %4x\n", FatIndex);
3606 DebugTrace( 0, Dbg, " FatEntry = %8lx\n", FatEntry);
3607
3608 //
3609 // Make sure they gave us a valid fat index.
3610 //
3611
3612 FatVerifyIndexIsValid(IrpContext, Vcb, FatIndex);
3613
3614 //
3615 // Case on 12 or 16 bit fats.
3616 //
3617 // In the 12 bit case (mostly floppies) we always have the whole fat
3618 // (max 6k bytes) pinned during allocation operations. This is possibly
3619 // a wee bit slower, but saves headaches over fat entries with 8 bits
3620 // on one page, and 4 bits on the next.
3621 //
3622 // The 16 bit case always keeps the last used page pinned until all
3623 // operations are done and it is unpinned.
3624 //
3625
3626 //
3627 // DEAL WITH 12 BIT CASE
3628 //
3629
3630 if (Vcb->AllocationSupport.FatIndexBitSize == 12) {
3631
3632 //
3633 // Check to see if the fat is already pinned, otherwise pin it.
3634 //
3635
3636 if (Context->Bcb == NULL) {
3637
3638 FatReadVolumeFile( IrpContext,
3639 Vcb,
3640 FatReservedBytes( &Vcb->Bpb ),
3641 FatBytesPerFat( &Vcb->Bpb ),
3642 &Context->Bcb,
3643 &Context->PinnedPage );
3644 }
3645
3646 //
3647 // Load the return value.
3648 //
3649
3650
3651 FatLookup12BitEntry( Context->PinnedPage, FatIndex, FatEntry );
3652
3653 } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) {
3654
3655 //
3656 // DEAL WITH 32 BIT CASE
3657 //
3658
3659 ULONG PageEntryOffset;
3660 ULONG OffsetIntoVolumeFile;
3661
3662 //
3663 // Initialize two local variables that help us.
3664 //
3665 OffsetIntoVolumeFile = FatReservedBytes(&Vcb->Bpb) + FatIndex * sizeof(FAT_ENTRY);
3666 PageEntryOffset = (OffsetIntoVolumeFile % PAGE_SIZE) / sizeof(FAT_ENTRY);
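//
// E.g. (illustrative, PAGE_SIZE == 0x1000): with 0x400 bytes of
// reserved area, FatIndex 0x500 lives at volume offset
// 0x400 + 0x500 * 4 == 0x1800, i.e. page 1 of the volume file, at
// entry (0x1800 % 0x1000) / 4 == 0x200 within that page.
//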
3667
3668 //
3669 // Check to see if we need to read in a new page of fat
3670 //
3671
3672 if ((Context->Bcb == NULL) ||
3673 (OffsetIntoVolumeFile / PAGE_SIZE != Context->VboOfPinnedPage / PAGE_SIZE)) {
3674
3675 //
3676 // The entry wasn't in the pinned page, so we must unpin the current
3677 // page (if any) and read in a new page.
3678 //
3679
3680 FatUnpinBcb( IrpContext, Context->Bcb );
3681
3682 FatReadVolumeFile( IrpContext,
3683 Vcb,
3684 OffsetIntoVolumeFile & ~(PAGE_SIZE - 1),
3685 PAGE_SIZE,
3686 &Context->Bcb,
3687 &Context->PinnedPage );
3688
3689 Context->VboOfPinnedPage = OffsetIntoVolumeFile & ~(PAGE_SIZE - 1);
3690 }
3691
3692 //
3693 // Grab the fat entry from the pinned page, and return
3694 //
3695
3696 *FatEntry = ((PULONG)(Context->PinnedPage))[PageEntryOffset] & FAT32_ENTRY_MASK;
3697
3698 } else {
3699
3700 //
3701 // DEAL WITH 16 BIT CASE
3702 //
3703
3704 ULONG PageEntryOffset;
3705 ULONG OffsetIntoVolumeFile;
3706
3707 //
3708 // Initialize two local variables that help us.
3709 //
3710
3711 OffsetIntoVolumeFile = FatReservedBytes(&Vcb->Bpb) + FatIndex * sizeof(USHORT);
3712 PageEntryOffset = (OffsetIntoVolumeFile % PAGE_SIZE) / sizeof(USHORT);
3713
3714 //
3715 // Check to see if we need to read in a new page of fat
3716 //
3717
3718 if ((Context->Bcb == NULL) ||
3719 (OffsetIntoVolumeFile / PAGE_SIZE != Context->VboOfPinnedPage / PAGE_SIZE)) {
3720
3721 //
3722 // The entry wasn't in the pinned page, so we must unpin the current
3723 // page (if any) and read in a new page.
3724 //
3725
3726 FatUnpinBcb( IrpContext, Context->Bcb );
3727
3728 FatReadVolumeFile( IrpContext,
3729 Vcb,
3730 OffsetIntoVolumeFile & ~(PAGE_SIZE - 1),
3731 PAGE_SIZE,
3732 &Context->Bcb,
3733 &Context->PinnedPage );
3734
3735 Context->VboOfPinnedPage = OffsetIntoVolumeFile & ~(PAGE_SIZE - 1);
3736 }
3737
3738 //
3739 // Grab the fat entry from the pinned page, and return
3740 //
3741
3742 *FatEntry = ((PUSHORT)(Context->PinnedPage))[PageEntryOffset];
3743 }
3744
3745 DebugTrace(-1, Dbg, "FatLookupFatEntry -> (VOID)\n", 0);
3746 return;
3747 }
3748
3749 \f
3750 _Requires_lock_held_(_Global_critical_region_)
3751 VOID
3752 FatSetFatEntry (
3753 IN PIRP_CONTEXT IrpContext,
3754 IN PVCB Vcb,
3755 IN ULONG FatIndex,
3756 IN FAT_ENTRY FatEntry
3757 )
3758
3759 /*++
3760
3761 Routine Description:
3762
3763 This routine takes an index into the fat and puts a value in the Fat
3764 at this index. The routine special cases 12, 16 and 32 bit fats. In
3765 all cases we go to the cache manager for a piece of the fat.
3766
3767 We have a special form of this call for setting the DOS-style dirty bit.
3768 Unlike the dirty bit in the boot sector, we do not go to special effort
3769 to make sure that this hits the disk synchronously - if the system goes
3770 down in the window between the dirty bit being set in the boot sector
3771 and the FAT index zero dirty bit being lazy written, then life is tough.
3772
3773 The only possible scenario is that Win9x may see what it thinks is a clean
3774 volume that really isn't (hopefully Memphis will pay attention to our dirty
3775 bit as well). The dirty bit will get out quickly, and if heavy activity is
3776 occurring, then the dirty bit should actually be there virtually all of the
3777 time since the act of cleaning the volume is the "rare" occurrence.
3778
3779 There are synchronization concerns that would crop up if we tried to make
3780 this synchronous. This thread may already own the Bcb shared for the first
3781 sector of the FAT (so we can't get it exclusive for a writethrough). This
3782 would require some more serious replumbing to work around than I want to
3783 consider at this time.
3784
3785 We can and do, however, synchronously set the bit clean.
3786
3787 At this point the reader should understand why the NT dirty bit is where it is.
3788
3789 Arguments:
3790
3791 Vcb - Supplies the Vcb to examine, yields 12/16/32 bit info, etc.
3792
3793 FatIndex - Supplies the destination fat index.
3794
3795 FatEntry - Supplies the source fat entry.
3796
3797 --*/
3798
3799 {
3800 LBO Lbo;
3801 PBCB Bcb = NULL;
3802 ULONG SectorSize;
3803 ULONG OffsetIntoVolumeFile;
3804 ULONG WasWait = TRUE;
3805 BOOLEAN RegularOperation = TRUE;
3806 BOOLEAN CleaningOperation = FALSE;
3807 BOOLEAN ReleaseMutex = FALSE;
3808
3809 PAGED_CODE();
3810
3811 DebugTrace(+1, Dbg, "FatSetFatEntry\n", 0);
3812 DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
3813 DebugTrace( 0, Dbg, " FatIndex = %4x\n", FatIndex);
3814 DebugTrace( 0, Dbg, " FatEntry = %4x\n", FatEntry);
3815
3816 //
3817 // Make sure they gave us a valid fat index if this isn't the special
3818 // clean-bit modifying call.
3819 //
3820
3821 if (FatIndex == FAT_DIRTY_BIT_INDEX) {
3822
3823 //
3824 // We are setting the clean bit state. Of course, we could
3825 // have corruption that would cause us to try to fiddle the
3826 // reserved index - we guard against this by having the
3827 // special entry values use the reserved high 4 bits that
3828 // we know that we'll never try to set.
3829 //
3830
3831 //
3832 // We don't want to repin the FAT pages involved here. Just
3833 // let the lazy writer hit them when it can.
3834 //
3835
3836 RegularOperation = FALSE;
3837
3838 switch (FatEntry) {
3839 case FAT_CLEAN_VOLUME:
3840 FatEntry = (FAT_ENTRY)FAT_CLEAN_ENTRY;
3841 CleaningOperation = TRUE;
3842 break;
3843
3844 case FAT_DIRTY_VOLUME:
3845 switch (Vcb->AllocationSupport.FatIndexBitSize) {
3846 case 12:
3847 FatEntry = FAT12_DIRTY_ENTRY;
3848 break;
3849
3850 case 32:
3851 FatEntry = FAT32_DIRTY_ENTRY;
3852 break;
3853
3854 default:
3855 FatEntry = FAT16_DIRTY_ENTRY;
3856 break;
3857 }
3858 break;
3859
3860 default:
3861 FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
3862 break;
3863 }
3864
3865 //
3866 // Disable dirtying semantics for the duration of this operation. Force this
3867 // operation to wait for the duration.
3868 //
3869
3870 WasWait = FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT );
3871 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT | IRP_CONTEXT_FLAG_DISABLE_DIRTY );
3872
3873 } else {
3874
3875 NT_ASSERT( !(FatEntry & ~FAT32_ENTRY_MASK) );
3876 FatVerifyIndexIsValid(IrpContext, Vcb, FatIndex);
3877 }
3878
3879 //
3880 // Set Sector Size
3881 //
3882
3883 SectorSize = 1 << Vcb->AllocationSupport.LogOfBytesPerSector;
3884
3885 //
3886 // Case on 12 or 16 bit fats.
3887 //
3888 // In the 12 bit case (mostly floppies) we always have the whole fat
3889 // (max 6k bytes) pinned during allocation operations. This is possibly
3890 // a wee bit slower, but saves headaches over fat entries with 8 bits
3891 // on one page, and 4 bits on the next.
3892 //
3893 // In the 16 bit case we only read the page that we need to set the fat
3894 // entry.
3895 //
3896
3897 //
3898 // DEAL WITH 12 BIT CASE
3899 //
3900
3901 _SEH2_TRY {
3902
3903 if (Vcb->AllocationSupport.FatIndexBitSize == 12) {
3904
3905 PVOID PinnedFat;
3906
3907 //
3908 // Make sure we have a valid entry
3909 //
3910
3911 FatEntry &= 0xfff;
3912
3913 //
3914 // We read in the entire fat. Note that using prepare write marks
3915 // the bcb pre-dirty, so we don't have to do it explicitly.
3916 //
3917
3918 OffsetIntoVolumeFile = FatReservedBytes( &Vcb->Bpb ) + FatIndex * 3 / 2;
3919
3920 FatPrepareWriteVolumeFile( IrpContext,
3921 Vcb,
3922 FatReservedBytes( &Vcb->Bpb ),
3923 FatBytesPerFat( &Vcb->Bpb ),
3924 &Bcb,
3925 &PinnedFat,
3926 RegularOperation,
3927 FALSE );
3928
3929 //
3930 // Mark the sector(s) dirty in the DirtyFatMcb. This call is
3931 // complicated somewhat for the 12 bit case since a single
3932 // entry write can span two sectors (and pages).
3933 //
3934 // Get the Lbo for the sector where the entry starts, and add it to
3935 // the dirty fat Mcb.
3936 //
3937
3938 Lbo = OffsetIntoVolumeFile & ~(SectorSize - 1);
3939
3940 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize);
3941
3942 //
3943 // If the entry started on the last byte of the sector, it continues
3944 // to the next sector, so mark the next sector dirty as well.
3945 //
3946 // Note that this entry will simply coalesce with the last entry,
3947 // so this operation cannot fail. Also if we get this far, we have
3948 // made it, so no unwinding will be needed.
3949 //
3950
3951 if ( (OffsetIntoVolumeFile & (SectorSize - 1)) == (SectorSize - 1) ) {
3952
3953 Lbo += SectorSize;
3954
3955 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
3956 }
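//
// E.g. (512 byte sectors assumed): 12 bit entry 0x155 starts at byte
// FatReservedBytes + 0x155 * 3 / 2 == FatReservedBytes + 0x1ff, the
// last byte of its sector, so that sector and the one after it are
// both marked dirty.
//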
3957
3958 //
3959 // Store the entry into the fat; we need a little synchronization
3960 // here and can't use a spinlock since the bytes might not be
3961 // resident.
3962 //
3963
3964 FatLockFreeClusterBitMap( Vcb );
3965 ReleaseMutex = TRUE;
3966
3967 FatSet12BitEntry( PinnedFat, FatIndex, FatEntry );
3968
3969 FatUnlockFreeClusterBitMap( Vcb );
3970 ReleaseMutex = FALSE;
3971
3972 } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) {
3973
3974 //
3975 // DEAL WITH 32 BIT CASE
3976 //
3977
3978 PULONG PinnedFatEntry32;
3979
3980 //
3981 // Read in a new page of fat
3982 //
3983
3984 OffsetIntoVolumeFile = FatReservedBytes( &Vcb->Bpb ) +
3985 FatIndex * sizeof( FAT_ENTRY );
3986
3987 FatPrepareWriteVolumeFile( IrpContext,
3988 Vcb,
3989 OffsetIntoVolumeFile,
3990 sizeof(FAT_ENTRY),
3991 &Bcb,
3992 (PVOID *)&PinnedFatEntry32,
3993 RegularOperation,
3994 FALSE );
3995 //
3996 // Mark the sector dirty in the DirtyFatMcb
3997 //
3998
3999 Lbo = OffsetIntoVolumeFile & ~(SectorSize - 1);
4000
4001 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize);
4002
4003 //
4004 // Store the FatEntry to the pinned page.
4005 //
4006 // Preserve the reserved bits in FAT32 entries in the file heap.
4007 //
4008
4009 #ifdef ALPHA
4010 FatLockFreeClusterBitMap( Vcb );
4011 ReleaseMutex = TRUE;
4012 #endif // ALPHA
4013
4014 if (FatIndex != FAT_DIRTY_BIT_INDEX) {
4015
4016 *PinnedFatEntry32 = ((*PinnedFatEntry32 & ~FAT32_ENTRY_MASK) | FatEntry);
4017
4018 } else {
4019
4020 *PinnedFatEntry32 = FatEntry;
4021 }
4022
4023 #ifdef ALPHA
4024 FatUnlockFreeClusterBitMap( Vcb );
4025 ReleaseMutex = FALSE;
4026 #endif // ALPHA
4027
4028 } else {
4029
4030 //
4031 // DEAL WITH 16 BIT CASE
4032 //
4033
4034 PUSHORT PinnedFatEntry;
4035
4036 //
4037 // Read in a new page of fat
4038 //
4039
4040 OffsetIntoVolumeFile = FatReservedBytes( &Vcb->Bpb ) +
4041 FatIndex * sizeof(USHORT);
4042
4043 FatPrepareWriteVolumeFile( IrpContext,
4044 Vcb,
4045 OffsetIntoVolumeFile,
4046 sizeof(USHORT),
4047 &Bcb,
4048 (PVOID *)&PinnedFatEntry,
4049 RegularOperation,
4050 FALSE );
4051 //
4052 // Mark the sector dirty in the DirtyFatMcb
4053 //
4054
4055 Lbo = OffsetIntoVolumeFile & ~(SectorSize - 1);
4056
4057 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize);
4058
4059 //
4060 // Store the FatEntry to the pinned page.
4061 //
4062 // We need extra synchronization here for broken architectures
4063 // like the ALPHA that don't support atomic 16 bit writes.
4064 //
4065
4066 #ifdef ALPHA
4067 FatLockFreeClusterBitMap( Vcb );
4068 ReleaseMutex = TRUE;
4069 #endif // ALPHA
4070
4071 *PinnedFatEntry = (USHORT)FatEntry;
4072
4073 #ifdef ALPHA
4074 FatUnlockFreeClusterBitMap( Vcb );
4075 ReleaseMutex = FALSE;
4076 #endif // ALPHA
4077 }
4078
4079 } _SEH2_FINALLY {
4080
4081 DebugUnwind( FatSetFatEntry );
4082
4083 //
4084 // Re-enable volume dirtying in case this was a dirty bit operation.
4085 //
4086
4087 ClearFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_DISABLE_DIRTY );
4088
4089 //
4090 // Make this operation asynchronous again if needed.
4091 //
4092
4093 if (!WasWait) {
4094
4095 ClearFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT );
4096 }
4097
4098 //
4099 // If we still somehow have the Mutex, release it.
4100 //
4101
4102 if (ReleaseMutex) {
4103
4104 NT_ASSERT( _SEH2_AbnormalTermination() );
4105
4106 FatUnlockFreeClusterBitMap( Vcb );
4107 }
4108
4109 //
4110 // Unpin the Bcb. For cleaning operations or if the corruption was detected while mounting we make this write-through.
4111 //
4112
4113 if ((CleaningOperation ||
4114 FlagOn(Vcb->VcbState, VCB_STATE_FLAG_MOUNT_IN_PROGRESS)) &&
4115 Bcb) {
4116
4117 IO_STATUS_BLOCK IgnoreStatus;
4118
4119 CcRepinBcb( Bcb );
4120 CcUnpinData( Bcb );
4121 DbgDoit( IrpContext->PinCount -= 1 );
4122 CcUnpinRepinnedBcb( Bcb, TRUE, &IgnoreStatus );
4123
4124 } else {
4125
4126 FatUnpinBcb(IrpContext, Bcb);
4127 }
4128
4129 DebugTrace(-1, Dbg, "FatSetFatEntry -> (VOID)\n", 0);
4130 } _SEH2_END;
4131
4132 return;
4133 }
4134
4135 \f
4136 //
4137 // Internal support routine
4138 //
4139
4140 VOID
4141 FatSetFatRun (
4142 IN PIRP_CONTEXT IrpContext,
4143 IN PVCB Vcb,
4144 IN ULONG StartingFatIndex,
4145 IN ULONG ClusterCount,
4146 IN BOOLEAN ChainTogether
4147 )
4148
4149 /*++
4150
4151 Routine Description:
4152
4153 This routine sets a continuous run of clusters in the fat. If ChainTogether
4154 is TRUE, then the clusters are linked together in normal Fat fashion,
4155 with the last cluster receiving FAT_CLUSTER_LAST. If ChainTogether is
4156 FALSE, all the entries are set to FAT_CLUSTER_AVAILABLE, effectively
4157 freeing all the clusters in the run.
4158
4159 Arguments:
4160
4161 Vcb - Supplies the Vcb to examine, yields 12/16 bit info, etc.
4162
4163 StartingFatIndex - Supplies the destination fat index.
4164
4165 ClusterCount - Supplies the number of contiguous clusters to work on.
4166
4167 ChainTogether - Tells us whether to fill the entries with links, or
4168 FAT_CLUSTER_AVAILABLE
4169
4170
4171 Return Value:
4172
4173 VOID
4174
4175 --*/
4176
4177 {
4178 #define MAXCOUNTCLUS 0x10000
4179 #define COUNTSAVEDBCBS ((MAXCOUNTCLUS * sizeof(FAT_ENTRY) / PAGE_SIZE) + 2)
4180 PBCB SavedBcbs[COUNTSAVEDBCBS][2];
4181
4182 ULONG SectorSize;
4183 ULONG Cluster;
4184
4185 LBO StartSectorLbo;
4186 LBO FinalSectorLbo;
4187 LBO Lbo;
4188
4189 PVOID PinnedFat;
4190
4191 BOOLEAN ReleaseMutex = FALSE;
4192
4193 ULONG SavedStartingFatIndex = StartingFatIndex;
4194
4195 PAGED_CODE();
4196
4197 DebugTrace(+1, Dbg, "FatSetFatRun\n", 0);
4198 DebugTrace( 0, Dbg, " Vcb = %p\n", Vcb);
4199 DebugTrace( 0, Dbg, " StartingFatIndex = %8x\n", StartingFatIndex);
4200 DebugTrace( 0, Dbg, " ClusterCount = %8lx\n", ClusterCount);
4201 DebugTrace( 0, Dbg, " ChainTogether = %s\n", ChainTogether ? "TRUE":"FALSE");
4202
4203 //
4204 // Make sure they gave us a valid fat run.
4205 //
4206
4207 FatVerifyIndexIsValid(IrpContext, Vcb, StartingFatIndex);
4208 FatVerifyIndexIsValid(IrpContext, Vcb, StartingFatIndex + ClusterCount - 1);
4209
4210 //
4211 // Check special case
4212 //
4213
4214 if (ClusterCount == 0) {
4215
4216 DebugTrace(-1, Dbg, "FatSetFatRun -> (VOID)\n", 0);
4217 return;
4218 }
4219
4220 //
4221 // Set Sector Size
4222 //
4223
4224 SectorSize = 1 << Vcb->AllocationSupport.LogOfBytesPerSector;
4225
4226 //
4227 // Case on 12 or 16 bit fats.
4228 //
4229 // In the 12 bit case (mostly floppies) we always have the whole fat
4230 // (max 6k bytes) pinned during allocation operations. This is possibly
4231 // a wee bit slower, but saves headaches over fat entries with 8 bits
4232 // on one page, and 4 bits on the next.
4233 //
4234 // In the 16 bit case we only read one page at a time, as needed.
4235 //
4236
4237 //
4238 // DEAL WITH 12 BIT CASE
4239 //
4240
4241 _SEH2_TRY {
4242
4243 if (Vcb->AllocationSupport.FatIndexBitSize == 12) {
4244
4245 //
4246 // We read in the entire fat. Note that using prepare write marks
4247 // the bcb pre-dirty, so we don't have to do it explicitly.
4248 //
4249
4250 RtlZeroMemory( &SavedBcbs[0][0], 2 * sizeof(PBCB) * 2);
4251
4252 FatPrepareWriteVolumeFile( IrpContext,
4253 Vcb,
4254 FatReservedBytes( &Vcb->Bpb ),
4255 FatBytesPerFat( &Vcb->Bpb ),
4256 &SavedBcbs[0][0],
4257 &PinnedFat,
4258 TRUE,
4259 FALSE );
4260
4261 //
4262 // Mark the affected sectors dirty. Note that FinalSectorLbo is
4263 // the Lbo of the END of the entry (Thus * 3 + 2). This makes sure
4264 // we catch the case of a dirty fat entry straddling a sector boundary.
4265 //
4266 // Note that if the first AddMcbEntry succeeds, all following ones
4267 // will simply coalesce, and thus also succeed.
4268 //
4269
4270 StartSectorLbo = (FatReservedBytes( &Vcb->Bpb ) + StartingFatIndex * 3 / 2)
4271 & ~(SectorSize - 1);
4272
4273 FinalSectorLbo = (FatReservedBytes( &Vcb->Bpb ) + ((StartingFatIndex +
4274 ClusterCount) * 3 + 2) / 2) & ~(SectorSize - 1);
4275
4276 for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {
4277
4278 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
4279 }
4280
4281 //
4282 // Store the entries into the fat; we need a little
4283 // synchronization here and can't use a spinlock since the bytes
4284 // might not be resident.
4285 //
4286
4287 FatLockFreeClusterBitMap( Vcb );
4288 ReleaseMutex = TRUE;
4289
4290 for (Cluster = StartingFatIndex;
4291 Cluster < StartingFatIndex + ClusterCount - 1;
4292 Cluster++) {
4293
4294 FatSet12BitEntry( PinnedFat,
4295 Cluster,
4296 ChainTogether ? Cluster + 1 : FAT_CLUSTER_AVAILABLE );
4297 }
4298
4299 //
4300 // Save the last entry
4301 //
4302
4303 FatSet12BitEntry( PinnedFat,
4304 Cluster,
4305 ChainTogether ?
4306 FAT_CLUSTER_LAST & 0xfff : FAT_CLUSTER_AVAILABLE );
4307
4308 FatUnlockFreeClusterBitMap( Vcb );
4309 ReleaseMutex = FALSE;
4310
4311 } else if (Vcb->AllocationSupport.FatIndexBitSize == 32) {
4312
4313 //
4314 // DEAL WITH 32 BIT CASE
4315 //
4316
4317 for (;;) {
4318
4319 VBO StartOffsetInVolume;
4320 VBO FinalOffsetInVolume;
4321
4322 ULONG Page;
4323 ULONG FinalCluster;
4324 PULONG FatEntry = NULL;
4325 ULONG ClusterCountThisRun;
4326
4327 StartOffsetInVolume = FatReservedBytes(&Vcb->Bpb) +
4328 StartingFatIndex * sizeof(FAT_ENTRY);
4329
4330 if (ClusterCount > MAXCOUNTCLUS) {
4331 ClusterCountThisRun = MAXCOUNTCLUS;
4332 } else {
4333 ClusterCountThisRun = ClusterCount;
4334 }
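//
// E.g. chaining a hypothetical run of 0x28000 clusters takes three
// passes: two full MAXCOUNTCLUS (0x10000) chunks, whose final entries
// naturally point at the first cluster of the next chunk, then a last
// 0x8000 chunk whose final entry gets the FAT_CLUSTER_LAST fixup below.
//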
4335
4336 FinalOffsetInVolume = StartOffsetInVolume +
4337 (ClusterCountThisRun - 1) * sizeof(FAT_ENTRY);
4338
4339 {
4340 ULONG NumberOfPages;
4341 ULONG Offset;
4342
4343 NumberOfPages = (FinalOffsetInVolume / PAGE_SIZE) -
4344 (StartOffsetInVolume / PAGE_SIZE) + 1;
4345
4346 RtlZeroMemory( &SavedBcbs[0][0], (NumberOfPages + 1) * sizeof(PBCB) * 2 );
4347
4348 for ( Page = 0, Offset = StartOffsetInVolume & ~(PAGE_SIZE - 1);
4349 Page < NumberOfPages;
4350 Page++, Offset += PAGE_SIZE ) {
4351
4352 FatPrepareWriteVolumeFile( IrpContext,
4353 Vcb,
4354 Offset,
4355 PAGE_SIZE,
4356 &SavedBcbs[Page][0],
4357 (PVOID *)&SavedBcbs[Page][1],
4358 TRUE,
4359 FALSE );
4360
4361 if (Page == 0) {
4362
4363 FatEntry = (PULONG)((PUCHAR)SavedBcbs[0][1] +
4364 (StartOffsetInVolume % PAGE_SIZE));
4365 }
4366 }
4367 }
4368
4369 //
4370 // Mark the run dirty
4371 //
4372
4373 StartSectorLbo = StartOffsetInVolume & ~(SectorSize - 1);
4374 FinalSectorLbo = FinalOffsetInVolume & ~(SectorSize - 1);
4375
4376 for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {
4377
4378 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO)Lbo, Lbo, SectorSize );
4379 }
4380
4381 //
4382 // Store the entries
4383 //
4384 // We need extra synchronization here for broken architectures
4385 // like the ALPHA that don't support atomic 16 bit writes.
4386 //
4387
4388 #ifdef ALPHA
4389 FatLockFreeClusterBitMap( Vcb );
4390 ReleaseMutex = TRUE;
4391 #endif // ALPHA
4392
4393 FinalCluster = StartingFatIndex + ClusterCountThisRun - 1;
4394 Page = 0;
4395
4396 for (Cluster = StartingFatIndex;
4397 Cluster <= FinalCluster;
4398 Cluster++, FatEntry++) {
4399
4400 //
4401 // If we just crossed a page boundary (as opposed to starting
4402 // on one), update our idea of FatEntry.
4403
4404 if ( (((ULONG_PTR)FatEntry & (PAGE_SIZE-1)) == 0) &&
4405 (Cluster != StartingFatIndex) ) {
4406
4407 Page += 1;
4408 FatEntry = (PULONG)SavedBcbs[Page][1];
4409 }
4410
4411 *FatEntry = ChainTogether ? (FAT_ENTRY)(Cluster + 1) :
4412 FAT_CLUSTER_AVAILABLE;
4413 }
4414
4415 //
4416 // Fix up the last entry if we were chaining together
4417 //
4418
4419 if ((ClusterCount <= MAXCOUNTCLUS) &&
4420 ChainTogether ) {
4421
4422 *(FatEntry-1) = FAT_CLUSTER_LAST;
4423 }
4424
4425 #ifdef ALPHA
4426 FatUnlockFreeClusterBitMap( Vcb );
4427 ReleaseMutex = FALSE;
4428 #endif // ALPHA
4429
4430 {
4431 ULONG i;
4432
4433 //
4434 // Unpin the Bcbs
4435 //
4436
4437 for (i = 0; (i < COUNTSAVEDBCBS) && (SavedBcbs[i][0] != NULL); i++) {
4438
4439 FatUnpinBcb( IrpContext, SavedBcbs[i][0] );
4440 SavedBcbs[i][0] = NULL;
4441 }
4442 }
4443
4444 if (ClusterCount <= MAXCOUNTCLUS) {
4445
4446 break;
4447
4448 } else {
4449
4450 StartingFatIndex += MAXCOUNTCLUS;
4451 ClusterCount -= MAXCOUNTCLUS;
4452 }
4453 }
4454
4455 } else {
4456
4457 //
4458 // DEAL WITH 16 BIT CASE
4459 //
4460
4461 VBO StartOffsetInVolume;
4462 VBO FinalOffsetInVolume;
4463
4464 ULONG Page;
4465 ULONG FinalCluster;
4466 PUSHORT FatEntry = NULL;
4467
4468 StartOffsetInVolume = FatReservedBytes(&Vcb->Bpb) +
4469 StartingFatIndex * sizeof(USHORT);
4470
4471 FinalOffsetInVolume = StartOffsetInVolume +
4472 (ClusterCount - 1) * sizeof(USHORT);
4473
4474 //
4475 // Read in one page of fat at a time. We cannot read in all
4476 // of the fat we need because of cache manager limitations.
4477 //
4478 // SavedBcbs was initialized to be able to hold the largest
4479 // possible number of pages in a fat plus an extra one to
4480 // accommodate the boot sector, plus one more to make sure there
4481 // is enough room for the RtlZeroMemory below that needs to mark
4482 // the first Bcb after all the ones we will use as an end marker.
4483 //
4484
4485 {
4486 ULONG NumberOfPages;
4487 ULONG Offset;
4488
4489 NumberOfPages = (FinalOffsetInVolume / PAGE_SIZE) -
4490 (StartOffsetInVolume / PAGE_SIZE) + 1;
4491
4492 RtlZeroMemory( &SavedBcbs[0][0], (NumberOfPages + 1) * sizeof(PBCB) * 2 );
4493
4494 for ( Page = 0, Offset = StartOffsetInVolume & ~(PAGE_SIZE - 1);
4495 Page < NumberOfPages;
4496 Page++, Offset += PAGE_SIZE ) {
4497
4498 FatPrepareWriteVolumeFile( IrpContext,
4499 Vcb,
4500 Offset,
4501 PAGE_SIZE,
4502 &SavedBcbs[Page][0],
4503 (PVOID *)&SavedBcbs[Page][1],
4504 TRUE,
4505 FALSE );
4506
4507 if (Page == 0) {
4508
4509 FatEntry = (PUSHORT)((PUCHAR)SavedBcbs[0][1] +
4510 (StartOffsetInVolume % PAGE_SIZE));
4511 }
4512 }
4513 }
4514
4515 //
4516 // Mark the run dirty
4517 //
4518
4519 StartSectorLbo = StartOffsetInVolume & ~(SectorSize - 1);
4520 FinalSectorLbo = FinalOffsetInVolume & ~(SectorSize - 1);
4521
4522 for (Lbo = StartSectorLbo; Lbo <= FinalSectorLbo; Lbo += SectorSize) {
4523
4524 FatAddMcbEntry( Vcb, &Vcb->DirtyFatMcb, (VBO) Lbo, Lbo, SectorSize );
4525 }
4526
4527 //
4528 // Store the entries
4529 //
4530 // We need extra synchronization here for broken architectures
4531 // like the ALPHA that don't support atomic 16 bit writes.
4532 //
4533
4534 #ifdef ALPHA
4535 FatLockFreeClusterBitMap( Vcb );
4536 ReleaseMutex = TRUE;
4537 #endif // ALPHA
4538
4539 FinalCluster = StartingFatIndex + ClusterCount - 1;
4540 Page = 0;
4541
4542 for (Cluster = StartingFatIndex;
4543 Cluster <= FinalCluster;
4544 Cluster++, FatEntry++) {
4545
4546 //
4547 // If we just crossed a page boundary (as opposed to starting
4548 // on one), update our idea of FatEntry.
4549
4550 if ( (((ULONG_PTR)FatEntry & (PAGE_SIZE-1)) == 0) &&
4551 (Cluster != StartingFatIndex) ) {
4552
4553 Page += 1;
4554 FatEntry = (PUSHORT)SavedBcbs[Page][1];
4555 }
4556
4557 *FatEntry = (USHORT) (ChainTogether ? (FAT_ENTRY)(Cluster + 1) :
4558 FAT_CLUSTER_AVAILABLE);
4559 }
4560
4561 //
4562 // Fix up the last entry if we were chaining together
4563 //
4564
4565 if ( ChainTogether ) {
4566
4567 #ifdef _MSC_VER
4568 #pragma warning( suppress: 4310 )
4569 #endif
4570 *(FatEntry-1) = (USHORT)FAT_CLUSTER_LAST;
4571
4572 }
4573 #ifdef ALPHA
4574 FatUnlockFreeClusterBitMap( Vcb );
4575 ReleaseMutex = FALSE;
4576 #endif // ALPHA
4577 }
4578
4579 } _SEH2_FINALLY {
4580
4581 ULONG i;
4582
4583 DebugUnwind( FatSetFatRun );
4584
4585 //
4586 // If we still somehow have the Mutex, release it.
4587 //
4588
4589 if (ReleaseMutex) {
4590
4591 NT_ASSERT( _SEH2_AbnormalTermination() );
4592
4593 FatUnlockFreeClusterBitMap( Vcb );
4594 }
4595
4596 //
4597 // Unpin the Bcbs
4598 //
4599
4600 for (i = 0; (i < COUNTSAVEDBCBS) && (SavedBcbs[i][0] != NULL); i++) {
4601
4602 FatUnpinBcb( IrpContext, SavedBcbs[i][0] );
4603 }
4604
4605 //
4606 // At this point nothing in this finally clause should have raised.
4607 // So, now comes the unsafe (sigh) stuff.
4608 //
4609
4610 if ( _SEH2_AbnormalTermination() &&
4611 (Vcb->AllocationSupport.FatIndexBitSize == 32) ) {
4612
4613 //
4614 // Fat32 unwind
4615 //
4616 // This case is more complex because the FAT12 and FAT16 cases
4617 // pin all the needed FAT pages (128K max), after which it
4618 // can't fail, before changing any FAT entries. In the Fat32
4619 // case, it may not be practical to pin all the needed FAT
4620 // pages, because that could span many megabytes. So Fat32
4621 // attacks in chunks, and if a failure occurs once the first
4622 // chunk has been updated, we have to back out the updates.
4623 //
4624 // The unwind consists of walking back over each FAT entry we
4625 // have changed, setting it back to the previous value. Note
4626 // that the previous value will either be FAT_CLUSTER_AVAILABLE
4627 // (if ChainTogether==TRUE) or a simple link to the successor
4628 // (if ChainTogether==FALSE).
4629 //
4630 // We concede that any one of these calls could fail too; our
4631 // objective is to make this case no more likely than the case
4632 // for a file consisting of multiple disjoint runs.
4633 //
4634
4635 while ( StartingFatIndex > SavedStartingFatIndex ) {
4636
4637 StartingFatIndex--;
4638
4639 FatSetFatEntry( IrpContext, Vcb, StartingFatIndex,
4640 ChainTogether ?
4641 StartingFatIndex + 1 : FAT_CLUSTER_AVAILABLE );
4642 }
4643 }
4644
4645 DebugTrace(-1, Dbg, "FatSetFatRun -> (VOID)\n", 0);
4646 } _SEH2_END;
4647
4648 return;
4649 }
4650
4651 \f
4652 //
4653 // Internal support routine
4654 //
4655
4656 UCHAR
4657 FatLogOf (
4658 IN ULONG Value
4659 )
4660
4661 /*++
4662
4663 Routine Description:
4664
4665 This routine just computes the base 2 log of an integer. It is only used
4666 on objects that are known to be powers of two.
4667
4668 Arguments:
4669
4670 Value - The value to take the base 2 log of.
4671
4672 Return Value:
4673
4674 UCHAR - The base 2 log of Value.
4675
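For example, FatLogOf(0x200) == 9 and FatLogOf(0x1000) == 12; a value
that is not a power of two causes a bugcheck.
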
4676 --*/
4677
4678 {
4679 UCHAR Log = 0;
4680
4681 #if FASTFATDBG
4682 ULONG OrigValue = Value;
4683 #endif
4684
4685 PAGED_CODE();
4686
4687 //
4688 // Knock bits off until we get a one at position 0
4689 //
4690
4691 while ( (Value & 0xfffffffe) != 0 ) {
4692
4693 Log++;
4694 Value >>= 1;
4695 }
4696
4697 //
4698 // If there was more than one bit set, the file system messed up;
4699 // bug check.
4700 //
4701
4702 if (Value != 0x1) {
4703
4704 DebugTrace(+1, Dbg, "LogOf\n", 0);
4705 DebugTrace( 0, Dbg, " Value = %8lx\n", OrigValue);
4706
4707 DebugTrace( 0, Dbg, "Received non power of 2.\n", 0);
4708
4709 DebugTrace(-1, Dbg, "LogOf -> %8lx\n", Log);
4710
4711 #ifdef _MSC_VER
4712 #pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
4713 #endif
4714 FatBugCheck( Value, Log, 0 );
4715 }
4716
4717 return Log;
4718 }
4719
4720 \f
4721 VOID
4722 FatExamineFatEntries(
4723 IN PIRP_CONTEXT IrpContext,
4724 IN PVCB Vcb,
4725 IN ULONG StartIndex OPTIONAL,
4726 IN ULONG EndIndex OPTIONAL,
4727 IN BOOLEAN SetupWindows,
4728 IN PFAT_WINDOW SwitchToWindow OPTIONAL,
4729 IN PULONG BitMapBuffer OPTIONAL
4730 )
4731 /*++
4732
4733 Routine Description:
4734
4735 This routine handles scanning a segment of the FAT into in-memory structures.
4736
4737 There are three fundamental cases, with variations depending on the FAT type:
4738
4739 1) During volume setup, FatSetupAllocations
4740
4741 1a) for FAT12/16, read the FAT into our free cluster bitmap
4742 1b) for FAT32, perform the initial scan for window free cluster counts
4743
4744 2) Switching FAT32 windows on the fly during system operation
4745
4746 3) Reading arbitrary segments of the FAT for the purposes of the GetVolumeBitmap
4747 call (only for FAT32)
4748
4749 There really is too much going on in here. At some point this should be
4750 substantially rewritten.
4751
4752 Arguments:
4753
4754 Vcb - Supplies the volume involved
4755
4756 StartIndex - Supplies the starting cluster, ignored if SwitchToWindow supplied
4757
4758 EndIndex - Supplies the ending cluster, ignored if SwitchToWindow supplied
4759
4760 SetupWindows - Indicates if we are doing the initial FAT32 scan
4761
4762 SwitchToWindow - Supplies the FAT window we are examining and will switch to
4763
4764 BitMapBuffer - Supplies a specific bitmap to fill in; if not supplied, we fill
4765 in the volume free cluster bitmap if !SetupWindows
4766
4767 Return Value:
4768
4769 None. Lots of side effects.
4770
4771 --*/
4772 {
4773 ULONG FatIndexBitSize;
4774 ULONG Page = 0;
4775 ULONG Offset = 0;
4776 ULONG FatIndex;
4777 FAT_ENTRY FatEntry = FAT_CLUSTER_AVAILABLE;
4778 FAT_ENTRY FirstFatEntry = FAT_CLUSTER_AVAILABLE;
4779 PUSHORT FatBuffer;
4780 PVOID pv;
4781 PBCB Bcb = NULL;
4782 ULONG EntriesPerWindow;
4783
4784 ULONG ClustersThisRun;
4785 ULONG StartIndexOfThisRun;
4786
4787 PULONG FreeClusterCount = NULL;
4788
4789 PFAT_WINDOW CurrentWindow = NULL;
4790
4791 PVOID NewBitMapBuffer = NULL;
4792 PRTL_BITMAP BitMap = NULL;
4793 RTL_BITMAP PrivateBitMap;
4794
4795 ULONG ClusterSize = 0;
4796 ULONG PrefetchPages = 0;
4797 ULONG FatPages = 0;
4798
4799 VBO BadClusterVbo = 0;
4800 LBO Lbo = 0;
4801
4802 enum RunType {
4803 FreeClusters,
4804 AllocatedClusters,
4805 UnknownClusters
4806 } CurrentRun;
4807
4808 PAGED_CODE();
4809
4810 //
4811 // Now assert correct usage.
4812 //
4813
4814 FatIndexBitSize = Vcb->AllocationSupport.FatIndexBitSize;
4815
4816 NT_ASSERT( !(SetupWindows && (SwitchToWindow || BitMapBuffer)));
4817 NT_ASSERT( !(SetupWindows && FatIndexBitSize != 32));
4818
4819 if (Vcb->NumberOfWindows > 1) {
4820
4821 //
4822 // FAT32: Calculate the number of FAT entries covered by a window. This is
4823 // equal to the number of bits in the freespace bitmap, the size of which
4824 // is hardcoded.
4825 //
4826
4827 EntriesPerWindow = MAX_CLUSTER_BITMAP_SIZE;
4828
4829 } else {
4830
4831 EntriesPerWindow = Vcb->AllocationSupport.NumberOfClusters;
4832 }
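
//
// For example, with EntriesPerWindow == E, the scan below closes out a
// window each time (FatIndex - 2) % E wraps to zero, so window k covers
// clusters 2 + k * E through 2 + (k + 1) * E - 1.
//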
4833
4834 //
4835 // We will also fill in the cumulative count of free clusters for
4836 // the entire volume. If this is not appropriate, NULL it out
4837 // shortly.
4838 //
4839
4840 FreeClusterCount = &Vcb->AllocationSupport.NumberOfFreeClusters;
4841
4842 if (SetupWindows) {
4843
4844 NT_ASSERT(BitMapBuffer == NULL);
4845
4846 //
4847 // In this case we're just supposed to scan the fat and set up
4848 // the information regarding where the buckets fall and how many
4849 // free clusters are in each.
4850 //
4851 // It is fine to monkey with the real windows; we must be able
4852 // to do this to activate the volume.
4853 //
4854
4855 BitMap = NULL;
4856
4857 CurrentWindow = &Vcb->Windows[0];
4858 CurrentWindow->FirstCluster = StartIndex;
4859 CurrentWindow->ClustersFree = 0;
4860
4861 //
4862 // We always wish to calculate total free clusters when
4863 // setting up the FAT windows.
4864 //
4865
4866 } else if (BitMapBuffer == NULL) {
4867
4868 //
4869 // We will be filling in the free cluster bitmap for the volume.
4870 // Careful, we can raise out of here and be hopelessly hosed if
4871 // we built this up in the main bitmap/window itself.
4872 //
4873 // For simplicity's sake, we'll do the swap for everyone. FAT32
4874 // provokes the need since we can't tolerate partial results
4875 // when switching windows.
4876 //
4877
4878 NT_ASSERT( SwitchToWindow );
4879
4880 CurrentWindow = SwitchToWindow;
4881 StartIndex = CurrentWindow->FirstCluster;
4882 EndIndex = CurrentWindow->LastCluster;
4883
4884 BitMap = &PrivateBitMap;
4885 NewBitMapBuffer = FsRtlAllocatePoolWithTag( PagedPool,
4886 (EntriesPerWindow + 7) / 8,
4887 TAG_FAT_BITMAP );
4888
4889 RtlInitializeBitMap( &PrivateBitMap,
4890 NewBitMapBuffer,
4891 EndIndex - StartIndex + 1);
4892
4893 if ((FatIndexBitSize == 32) &&
4894 (Vcb->NumberOfWindows > 1)) {
4895
4896 //
4897 // We do not wish to count total clusters here.
4898 //
4899
4900 FreeClusterCount = NULL;
4901
4902 }
4903
4904 } else {
4905
4906 BitMap = &PrivateBitMap;
4907 RtlInitializeBitMap(&PrivateBitMap,
4908 BitMapBuffer,
4909 EndIndex - StartIndex + 1);
4910
4911 //
4912 // We do not count total clusters here.
4913 //
4914
4915 FreeClusterCount = NULL;
4916 }
4917
4918 //
4919 // Now, our start index had better be in the file heap.
4920 //
4921
4922 NT_ASSERT( StartIndex >= 2 );
4923
4924 _SEH2_TRY {
4925
4926 //
4927 // Pick up the initial chunk of the FAT and first entry.
4928 //
4929
4930 if (FatIndexBitSize == 12) {
4931
4932 //
4933 // We read in the entire fat in the 12 bit case.
4934 //
4935
4936 FatReadVolumeFile( IrpContext,
4937 Vcb,
4938 FatReservedBytes( &Vcb->Bpb ),
4939 FatBytesPerFat( &Vcb->Bpb ),
4940 &Bcb,
4941 (PVOID *)&FatBuffer );
4942
4943 FatLookup12BitEntry(FatBuffer, 0, &FirstFatEntry);
4944
4945 } else {
4946
4947 //
4948 // Read in one page of fat at a time. We cannot read in all
4949 // of the fat we need because of cache manager limitations.
4950 //
4951
4952 ULONG BytesPerEntry = FatIndexBitSize >> 3;
4953
4954 FatPages = (FatReservedBytes(&Vcb->Bpb) + FatBytesPerFat(&Vcb->Bpb) + (PAGE_SIZE - 1)) / PAGE_SIZE;
4955 Page = (FatReservedBytes(&Vcb->Bpb) + StartIndex * BytesPerEntry) / PAGE_SIZE;
4956
4957 Offset = Page * PAGE_SIZE;
4958
4959 //
4960 // Prefetch the FAT entries in memory for optimal performance.
4961 //
4962
4963 PrefetchPages = FatPages - Page;
4964
4965 if (PrefetchPages > FAT_PREFETCH_PAGE_COUNT) {
4966
4967 PrefetchPages = ALIGN_UP_BY(Page, FAT_PREFETCH_PAGE_COUNT) - Page;
4968 }
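
//
// E.g., with FAT_PREFETCH_PAGE_COUNT == 0x100 and a scan starting at
// fat page 0x1A0, we first prefetch 0x60 pages (up to the 0x200
// boundary) so that later chunks begin on 0x100 aligned page numbers.
//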
4969
4970 #if (NTDDI_VERSION >= NTDDI_WIN8)
4971 FatPrefetchPages( IrpContext,
4972 Vcb->VirtualVolumeFile,
4973 Page,
4974 PrefetchPages );
4975 #endif
4976
4977 FatReadVolumeFile( IrpContext,
4978 Vcb,
4979 Offset,
4980 PAGE_SIZE,
4981 &Bcb,
4982 &pv);
4983
4984 if (FatIndexBitSize == 32) {
4985
4986 FatBuffer = (PUSHORT)((PUCHAR)pv +
4987 (FatReservedBytes(&Vcb->Bpb) + StartIndex * BytesPerEntry) %
4988 PAGE_SIZE);
4989
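//
// Only the low 28 bits of a FAT32 entry are significant; the top
// four bits are reserved, hence the masking below and in the scan
// loop.
//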
4990 FirstFatEntry = *((PULONG)FatBuffer);
4991 FirstFatEntry = FirstFatEntry & FAT32_ENTRY_MASK;
4992
4993 } else {
4994
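//
// The trailing "+ 2" below steps over the two reserved fat entries;
// the 16 bit path is only ever entered with StartIndex == 2 (the
// other cases of this routine are FAT32 only).
//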
4995 FatBuffer = (PUSHORT)((PUCHAR)pv +
4996 FatReservedBytes(&Vcb->Bpb) % PAGE_SIZE) + 2;
4997
4998 FirstFatEntry = *FatBuffer;
4999 }
5000
5001 }
5002
5003 ClusterSize = 1 << (Vcb->AllocationSupport.LogOfBytesPerCluster);
5004
5005 CurrentRun = (FirstFatEntry == FAT_CLUSTER_AVAILABLE) ?
5006 FreeClusters : AllocatedClusters;
5007
5008 StartIndexOfThisRun = StartIndex;
5009
5010 for (FatIndex = StartIndex; FatIndex <= EndIndex; FatIndex++) {
5011
5012 if (FatIndexBitSize == 12) {
5013
5014 FatLookup12BitEntry(FatBuffer, FatIndex, &FatEntry);
5015
5016 } else {
5017
5018 //
5019 // If we are setting up the FAT32 windows and have stepped into a new
5020 // bucket, finalize this one and move forward.
5021 //
5022
5023 if (SetupWindows &&
5024 FatIndex > StartIndex &&
5025 (FatIndex - 2) % EntriesPerWindow == 0) {
5026
5027 CurrentWindow->LastCluster = FatIndex - 1;
5028
5029 if (CurrentRun == FreeClusters) {
5030
5031 //
5032 // We must be counting clusters in order to modify the
5033 // contents of the window.
5034 //
5035
5036 NT_ASSERT( FreeClusterCount );
5037
5038 ClustersThisRun = FatIndex - StartIndexOfThisRun;
5039 CurrentWindow->ClustersFree += ClustersThisRun;
5040
5041 if (FreeClusterCount) {
5042 *FreeClusterCount += ClustersThisRun;
5043 }
5044
5045 } else {
5046
5047 NT_ASSERT(CurrentRun == AllocatedClusters);
5048
5049 }
5050
5051 StartIndexOfThisRun = FatIndex;
5052 CurrentRun = UnknownClusters;
5053
5054 CurrentWindow++;
5055 CurrentWindow->ClustersFree = 0;
5056 CurrentWindow->FirstCluster = FatIndex;
5057 }
5058
5059 //
5060 // If we just stepped onto a new page, grab a new pointer.
5061 //
5062
5063 if (((ULONG_PTR)FatBuffer & (PAGE_SIZE - 1)) == 0) {
5064
5065 FatUnpinBcb( IrpContext, Bcb );
5066
5067 Page++;
5068 Offset += PAGE_SIZE;
5069
5070 #if (NTDDI_VERSION >= NTDDI_WIN8)
5071 //
5072 // If we have exhausted all the prefetch pages, prefetch the next chunk.
5073 //
5074
5075 if (--PrefetchPages == 0) {
5076
5077 PrefetchPages = FatPages - Page;
5078
5079 if (PrefetchPages > FAT_PREFETCH_PAGE_COUNT) {
5080
5081 PrefetchPages = FAT_PREFETCH_PAGE_COUNT;
5082 }
5083
5084 FatPrefetchPages( IrpContext,
5085 Vcb->VirtualVolumeFile,
5086 Page,
5087 PrefetchPages );
5088 }
5089 #endif
5090
5091 FatReadVolumeFile( IrpContext,
5092 Vcb,
5093 Offset,
5094 PAGE_SIZE,
5095 &Bcb,
5096 &pv );
5097
5098 FatBuffer = (PUSHORT)pv;
5099 }
5100
5101 if (FatIndexBitSize == 32) {
5102
5103 #ifndef __REACTOS__
5104 #ifdef _MSC_VER
5105 #pragma warning( suppress: 4213 )
5106 #endif
5107 FatEntry = *((PULONG)FatBuffer)++;
5108 FatEntry = FatEntry & FAT32_ENTRY_MASK;
5109 #else
5110 FatEntry = *FatBuffer;
5111 FatBuffer += 1;
5112 FatEntry = FatEntry & FAT32_ENTRY_MASK;
5113 #endif
5114
5115 } else {
5116
5117 FatEntry = *FatBuffer;
5118 FatBuffer += 1;
5119 }
5120 }
5121
5122 if (CurrentRun == UnknownClusters) {
5123
5124 CurrentRun = (FatEntry == FAT_CLUSTER_AVAILABLE) ?
5125 FreeClusters : AllocatedClusters;
5126 }
5127
5128 //
5129 // Are we switching from a free run to an allocated run?
5130 //
5131
5132 if (CurrentRun == FreeClusters &&
5133 FatEntry != FAT_CLUSTER_AVAILABLE) {
5134
5135 ClustersThisRun = FatIndex - StartIndexOfThisRun;
5136
5137 if (FreeClusterCount) {
5138
5139 *FreeClusterCount += ClustersThisRun;
5140 CurrentWindow->ClustersFree += ClustersThisRun;
5141 }
5142
5143 if (BitMap) {
5144
5145 RtlClearBits( BitMap,
5146 StartIndexOfThisRun - StartIndex,
5147 ClustersThisRun );
5148 }
5149
5150 CurrentRun = AllocatedClusters;
5151 StartIndexOfThisRun = FatIndex;
5152 }
5153
5154 //
5155 // Are we switching from an allocated run to a free run?
5156 //
5157
5158 if (CurrentRun == AllocatedClusters &&
5159 FatEntry == FAT_CLUSTER_AVAILABLE) {
5160
5161 ClustersThisRun = FatIndex - StartIndexOfThisRun;
5162
5163 if (BitMap) {
5164
5165 RtlSetBits( BitMap,
5166 StartIndexOfThisRun - StartIndex,
5167 ClustersThisRun );
5168 }
5169
5170 CurrentRun = FreeClusters;
5171 StartIndexOfThisRun = FatIndex;
5172 }
5173
5174 //
5175 // If the entry is marked bad, add it to the bad block MCB
5176 //
5177
5178 if ((SetupWindows || (Vcb->NumberOfWindows == 1)) &&
5179 (FatInterpretClusterType( Vcb, FatEntry ) == FatClusterBad)) {
5180
5181 //
5182 // This cluster is marked bad.
5183 // Add it to the BadBlockMcb.
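//
// Each bad cluster is appended at the next virtual offset, so the
// bad block Mcb builds a dense virtual stream over the scattered
// bad cluster Lbos.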
5184 //
5185
5186 Lbo = FatGetLboFromIndex( Vcb, FatIndex );
5187 FatAddMcbEntry( Vcb, &Vcb->BadBlockMcb, BadClusterVbo, Lbo, ClusterSize );
5188 BadClusterVbo += ClusterSize;
5189 }
5190 }
5191
5192 //
5193 // If we finished the scan, then we know about all the possible bad clusters.
5194 //
5195
5196 SetFlag( Vcb->VcbState, VCB_STATE_FLAG_BAD_BLOCKS_POPULATED);
5197
5198 //
5199 // Now we have to record the final run we encountered
5200 //
5201
5202 ClustersThisRun = FatIndex - StartIndexOfThisRun;
5203
5204 if (CurrentRun == FreeClusters) {
5205
5206 if (FreeClusterCount) {
5207
5208 *FreeClusterCount += ClustersThisRun;
5209 CurrentWindow->ClustersFree += ClustersThisRun;
5210 }
5211
5212 if (BitMap) {
5213
5214 RtlClearBits( BitMap,
5215 StartIndexOfThisRun - StartIndex,
5216 ClustersThisRun );
5217 }
5218
5219 } else {
5220
5221 if (BitMap) {
5222
5223 RtlSetBits( BitMap,
5224 StartIndexOfThisRun - StartIndex,
5225 ClustersThisRun );
5226 }
5227 }
5228
5229 //
5230 // And finish the last window if we are in setup.
5231 //
5232
5233 if (SetupWindows) {
5234
5235 CurrentWindow->LastCluster = FatIndex - 1;
5236 }
5237
5238 //
5239 // Now switch the active window if required. We've successfully gotten everything
5240 // nailed down.
5241 //
5242 // If we were tracking the free cluster count, this means we should update the
5243 // window. This is the case of FAT12/16 initialization.
5244 //
5245
5246 if (SwitchToWindow) {
5247
5248 if (Vcb->FreeClusterBitMap.Buffer) {
5249
5250 ExFreePool( Vcb->FreeClusterBitMap.Buffer );
5251 }
5252
5253 RtlInitializeBitMap( &Vcb->FreeClusterBitMap,
5254 NewBitMapBuffer,
5255 EndIndex - StartIndex + 1 );
5256
5257 NewBitMapBuffer = NULL;
5258
5259 Vcb->CurrentWindow = SwitchToWindow;
5260 Vcb->ClusterHint = (ULONG)-1;
5261
5262 if (FreeClusterCount) {
5263
5264 NT_ASSERT( !SetupWindows );
5265
5266 Vcb->CurrentWindow->ClustersFree = *FreeClusterCount;
5267 }
5268 }
5269
5270 //
5271 // Make sure plausible things occurred ...
5272 //
5273
5274 if (!SetupWindows && BitMapBuffer == NULL) {
5275
5276 ASSERT_CURRENT_WINDOW_GOOD( Vcb );
5277 }
5278
5279 NT_ASSERT(Vcb->AllocationSupport.NumberOfFreeClusters <= Vcb->AllocationSupport.NumberOfClusters);
5280
5281 } _SEH2_FINALLY {
5282
5283 //
5284 // Unpin the last bcb and drop the temporary bitmap buffer if it exists.
5285 //
5286
5287 FatUnpinBcb( IrpContext, Bcb);
5288
5289 if (NewBitMapBuffer) {
5290
5291 ExFreePool( NewBitMapBuffer );
5292 }
5293 } _SEH2_END;
5294 }
5295