3 Copyright (c) 1990-2000 Microsoft Corporation
11 This module implements the Allocation support routines for Fat.
// NOTE(review): this chunk is a damaged extraction of allocsup - the original
// file's own line numbers are fused into the text and many lines are missing.
// Code below is kept byte-identical; only comments have been added.
19 // The Bug check file id for this module
22 #define BugCheckFileId (FAT_BUG_CHECK_ALLOCSUP)
25 // Local debug trace level
28 #define Dbg (DEBUG_TRACE_ALLOCSUP)
// Classic double-evaluation min macro: (a) and (b) are each evaluated twice,
// so callers must not pass expressions with side effects.
30 #define FatMin(a, b) ((a) < (b) ? (a) : (b))
33 // Define prefetch page count for the FAT
36 #define FAT_PREFETCH_PAGE_COUNT 0x100
39 // Local support routine prototypes
// Parameter fragments of a local prototype; the name line is missing from the
// extraction, but the signature matches the FatLookupFatEntry call made later
// in FatLookupFileAllocation (IrpContext, ..., PULONG FatEntry, Context).
44 IN PIRP_CONTEXT IrpContext
,
47 IN OUT PULONG FatEntry
,
48 IN OUT PFAT_ENUMERATION_CONTEXT Context
// Parameter fragments of a second local prototype; the name line is missing,
// but the shape matches FatSetFatRun as invoked by the FatFreeClusters /
// FatAllocateClusters macros below (ChainTogether selects link vs. free).
53 IN PIRP_CONTEXT IrpContext
,
55 IN ULONG StartingFatIndex
,
56 IN ULONG ClusterCount
,
57 IN BOOLEAN ChainTogether
66 // Note that the KdPrint below will ONLY fire when the assert does. Leave it
// NOTE(review): extraction damage - several closing-brace/else lines of these
// macros are missing here (original line numbers jump). Code kept byte-
// identical; comments only.
//
// Debug consistency check: the current window's free-cluster count must equal
// the number of clear bits in the bitmap. The second #define below is the
// retail (no-op) variant of the same macro.
71 #define ASSERT_CURRENT_WINDOW_GOOD(VCB) { \
72 ULONG FreeClusterBitMapClear; \
73 NT_ASSERT( (VCB)->FreeClusterBitMap.Buffer != NULL ); \
74 FreeClusterBitMapClear = RtlNumberOfClearBits(&(VCB)->FreeClusterBitMap); \
75 if ((VCB)->CurrentWindow->ClustersFree != FreeClusterBitMapClear) { \
76 KdPrint(("FAT: ClustersFree %x h != FreeClusterBitMapClear %x h\n", \
77 (VCB)->CurrentWindow->ClustersFree, \
78 FreeClusterBitMapClear)); \
80 NT_ASSERT( (VCB)->CurrentWindow->ClustersFree == FreeClusterBitMapClear ); \
83 #define ASSERT_CURRENT_WINDOW_GOOD(VCB)
87 // The following macros provide a convenient way of hiding the details
88 // of bitmap allocation schemes.
94 // FatLockFreeClusterBitMap (
// Acquire/release of the bitmap fast mutex "Unsafe": callers must already
// have APCs disabled (asserted via KeAreApcsDisabled).
99 #define FatLockFreeClusterBitMap(VCB) { \
100 NT_ASSERT(KeAreApcsDisabled()); \
101 ExAcquireFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex ); \
102 ASSERT_CURRENT_WINDOW_GOOD(VCB) \
107 // FatUnlockFreeClusterBitMap (
112 #define FatUnlockFreeClusterBitMap(VCB) { \
113 ASSERT_CURRENT_WINDOW_GOOD(VCB) \
114 NT_ASSERT(KeAreApcsDisabled()); \
115 ExReleaseFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex ); \
120 // FatIsClusterFree (
121 // IN PIRP_CONTEXT IrpContext,
// Bitmap bit N corresponds to FAT index N+2 (indices 0 and 1 are reserved),
// hence the pervasive "-2" bias in every bitmap access below.
127 #define FatIsClusterFree(IRPCONTEXT,VCB,FAT_INDEX) \
128 (RtlCheckBit(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2) == 0)
133 // IN PIRP_CONTEXT IrpContext,
135 // IN ULONG FatIndex,
136 // IN ULONG ClusterCount
// Single-cluster fast path writes FAT_CLUSTER_AVAILABLE directly; multi-
// cluster runs go through FatSetFatRun with ChainTogether == FALSE.
140 #define FatFreeClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) { \
141 if ((CLUSTER_COUNT) == 1) { \
142 FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_AVAILABLE); \
144 FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),FALSE); \
150 // FatAllocateClusters (
151 // IN PIRP_CONTEXT IrpContext,
153 // IN ULONG FatIndex,
154 // IN ULONG ClusterCount
// Mirror of FatFreeClusters: one cluster becomes FAT_CLUSTER_LAST, a run is
// chained together via FatSetFatRun with ChainTogether == TRUE.
158 #define FatAllocateClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) { \
159 if ((CLUSTER_COUNT) == 1) { \
160 FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_LAST); \
162 FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),TRUE); \
168 // FatUnreserveClusters (
169 // IN PIRP_CONTEXT IrpContext,
171 // IN ULONG FatIndex,
172 // IN ULONG ClusterCount
// Clear reservation bits and pull the allocation hint back if the freed run
// starts before the current hint.
176 #define FatUnreserveClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) { \
177 NT_ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap );\
178 NT_ASSERT( (FAT_INDEX) >= 2); \
179 RtlClearBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT)); \
180 if ((FAT_INDEX) < (VCB)->ClusterHint) { \
181 (VCB)->ClusterHint = (FAT_INDEX); \
187 // FatReserveClusters (
188 // IN PIRP_CONTEXT IrpContext,
190 // IN ULONG FatIndex,
191 // IN ULONG ClusterCount
194 // Handle wrapping the hint back to the front.
// Set reservation bits, then advance the hint past the run; if the cluster
// after the run is taken, hunt for the next clear bit (RtlFindClearBits
// returning -1 makes the "+ 2" arithmetic yield 1, handled by resetting to 2).
197 #define FatReserveClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) { \
198 ULONG _AfterRun = (FAT_INDEX) + (CLUSTER_COUNT); \
199 NT_ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap );\
200 NT_ASSERT( (FAT_INDEX) >= 2); \
201 RtlSetBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT)); \
203 if (_AfterRun - 2 >= (VCB)->FreeClusterBitMap.SizeOfBitMap) { \
206 if (RtlCheckBit(&(VCB)->FreeClusterBitMap, _AfterRun - 2)) { \
207 (VCB)->ClusterHint = RtlFindClearBits( &(VCB)->FreeClusterBitMap, 1, _AfterRun - 2) + 2; \
208 if (1 == (VCB)->ClusterHint) { \
209 (VCB)->ClusterHint = 2; \
213 (VCB)->ClusterHint = _AfterRun; \
219 // FatFindFreeClusterRun (
220 // IN PIRP_CONTEXT IrpContext,
222 // IN ULONG ClusterCount,
223 // IN ULONG AlternateClusterHint
226 // Do a special check if only one cluster is desired.
229 #define FatFindFreeClusterRun(IRPCONTEXT,VCB,CLUSTER_COUNT,CLUSTER_HINT) ( \
230 (CLUSTER_COUNT == 1) && \
231 FatIsClusterFree((IRPCONTEXT), (VCB), (CLUSTER_HINT)) ? \
233 RtlFindClearBits( &(VCB)->FreeClusterBitMap, \
235 (CLUSTER_HINT) - 2) + 2 \
239 // FAT32: Define the maximum size of the FreeClusterBitMap to be the
240 // maximum size of a FAT16 FAT. If there are more clusters on the
241 // volume than can be represented by this many bytes of bitmap, the
242 // FAT will be split into "buckets", each of which does fit.
244 // Note this count is in clusters/bits of bitmap.
247 #define MAX_CLUSTER_BITMAP_SIZE (1 << 16)
250 // Calculate the window a given cluster number is in.
// Cluster numbers are biased by the two reserved FAT entries before dividing
// into 64K-cluster windows.
253 #define FatWindowOfCluster(C) (((C) - 2) / MAX_CLUSTER_BITMAP_SIZE)
// Place all of this module's routines in pageable code (they run at
// PASSIVE_LEVEL under FsRtl locks, never at raised IRQL).
256 #pragma alloc_text(PAGE, FatAddFileAllocation)
257 #pragma alloc_text(PAGE, FatAllocateDiskSpace)
258 #pragma alloc_text(PAGE, FatDeallocateDiskSpace)
259 #pragma alloc_text(PAGE, FatExamineFatEntries)
260 #pragma alloc_text(PAGE, FatInterpretClusterType)
261 #pragma alloc_text(PAGE, FatLogOf)
262 #pragma alloc_text(PAGE, FatLookupFatEntry)
263 #pragma alloc_text(PAGE, FatLookupFileAllocation)
264 #pragma alloc_text(PAGE, FatLookupFileAllocationSize)
265 #pragma alloc_text(PAGE, FatMergeAllocation)
266 #pragma alloc_text(PAGE, FatSetFatEntry)
267 #pragma alloc_text(PAGE, FatSetFatRun)
268 #pragma alloc_text(PAGE, FatSetupAllocationSupport)
269 #pragma alloc_text(PAGE, FatSplitAllocation)
270 #pragma alloc_text(PAGE, FatTearDownAllocationSupport)
271 #pragma alloc_text(PAGE, FatTruncateFileAllocation)
// NOTE(review): fragment of FatSelectBestWindow (called later by
// FatSetupAllocationSupport). The function header line and several body lines
// (brace/else lines, the return) are missing from this extraction. Code kept
// byte-identical; comments only.
284 Choose a window to allocate clusters from. Order of preference is:
286 1. First window with >50% free clusters
287 2. First empty window
288 3. Window with greatest number of free clusters.
292 Vcb - Supplies the Vcb for the volume
296 'Best window' number (index into Vcb->Windows[])
// FirstEmpty is a sentinel: (ULONG)-1 means "no fully-empty window seen yet".
302 ULONG FirstEmpty
= (ULONG
)-1;
303 ULONG ClustersPerWindow
= MAX_CLUSTER_BITMAP_SIZE
;
// Callers only window-select on multi-window (FAT32) volumes.
305 NT_ASSERT( 1 != Vcb
->NumberOfWindows
);
307 for (i
= 0; i
< Vcb
->NumberOfWindows
; i
++) {
// A window whose free count equals the whole window is completely empty.
309 if (Vcb
->Windows
[i
].ClustersFree
== ClustersPerWindow
) {
311 if (-1 == FirstEmpty
) {
314 // Keep note of the first empty window on the disc
320 else if (Vcb
->Windows
[i
].ClustersFree
> MaxFree
) {
323 // This window has the most free clusters, so far
326 MaxFree
= Vcb
->Windows
[i
].ClustersFree
;
330 // If this window has >50% free clusters, then we will take it,
331 // so don't bother considering more windows.
334 if (MaxFree
>= (ClustersPerWindow
>> 1)) {
342 // If there were no windows with 50% or more freespace, then select the
343 // first empty window on the disc, if any - otherwise we'll just go with
344 // the one with the most free clusters.
347 if ((MaxFree
< (ClustersPerWindow
>> 1)) && (-1 != FirstEmpty
)) {
// NOTE(review): FatSetupAllocationSupport - extraction is missing many lines
// (braces, some arguments, the try/finally keywords). Code kept byte-
// identical; comments only. Mounts-time initialization of the Vcb's
// AllocationSupport: geometry fields, virtual volume file sizing, FAT window
// setup, and the initial free-cluster scan.
357 FatSetupAllocationSupport (
358 IN PIRP_CONTEXT IrpContext
,
366 This routine fills in the Allocation Support structure in the Vcb.
367 Most entries are computed using fat.h macros supplied with data from
368 the Bios Parameter Block. The free cluster count, however, requires
369 going to the Fat and actually counting free sectors. At the same time
370 the free cluster bit map is initalized.
374 Vcb - Supplies the Vcb to fill in.
380 ULONG ClustersDescribableByFat
;
384 DebugTrace(+1, Dbg
, "FatSetupAllocationSupport\n", 0);
385 DebugTrace( 0, Dbg
, " Vcb = %p\n", Vcb
);
388 // Compute a number of fields for Vcb.AllocationSupport
// Geometry derived purely from the BPB via fat.h macros.
391 Vcb
->AllocationSupport
.RootDirectoryLbo
= FatRootDirectoryLbo( &Vcb
->Bpb
);
392 Vcb
->AllocationSupport
.RootDirectorySize
= FatRootDirectorySize( &Vcb
->Bpb
);
394 Vcb
->AllocationSupport
.FileAreaLbo
= FatFileAreaLbo( &Vcb
->Bpb
);
396 Vcb
->AllocationSupport
.NumberOfClusters
= FatNumberOfClusters( &Vcb
->Bpb
);
398 Vcb
->AllocationSupport
.FatIndexBitSize
= FatIndexBitSize( &Vcb
->Bpb
);
400 Vcb
->AllocationSupport
.LogOfBytesPerSector
= FatLogOf(Vcb
->Bpb
.BytesPerSector
);
401 Vcb
->AllocationSupport
.LogOfBytesPerCluster
= FatLogOf(FatBytesPerCluster( &Vcb
->Bpb
));
// Free count starts at zero; the FatExamineFatEntries scan below accumulates
// the real value.
402 Vcb
->AllocationSupport
.NumberOfFreeClusters
= 0;
406 // Deal with a bug in DOS 5 format, if the Fat is not big enough to
407 // describe all the clusters on the disk, reduce this number. We expect
408 // that fat32 volumes will not have this problem.
410 // Turns out this was not a good assumption. We have to do this always now.
// Capacity of the FAT itself, in clusters: (sectors-per-FAT * bytes-per-
// sector * 8 bits) / bits-per-index, minus the two reserved entries.
413 ClustersDescribableByFat
= ( ((FatIsFat32(Vcb
)? Vcb
->Bpb
.LargeSectorsPerFat
:
414 Vcb
->Bpb
.SectorsPerFat
) *
415 Vcb
->Bpb
.BytesPerSector
* 8)
416 / FatIndexBitSize(&Vcb
->Bpb
) ) - 2;
418 if (Vcb
->AllocationSupport
.NumberOfClusters
> ClustersDescribableByFat
) {
420 Vcb
->AllocationSupport
.NumberOfClusters
= ClustersDescribableByFat
;
424 // Extend the virtual volume file to include the Fat
428 CC_FILE_SIZES FileSizes
;
// The virtual volume file spans the reserved area plus one FAT copy;
// ValidDataLength is pinned at the maximum so Cc never zero-fills it.
430 FileSizes
.AllocationSize
.QuadPart
=
431 FileSizes
.FileSize
.QuadPart
= (FatReservedBytes( &Vcb
->Bpb
) +
432 FatBytesPerFat( &Vcb
->Bpb
));
433 FileSizes
.ValidDataLength
= FatMaxLarge
;
435 if ( Vcb
->VirtualVolumeFile
->PrivateCacheMap
== NULL
) {
437 FatInitializeCacheMap( Vcb
->VirtualVolumeFile
,
440 &FatData
.CacheManagerNoOpCallbacks
,
445 CcSetFileSizes( Vcb
->VirtualVolumeFile
, &FileSizes
);
// FAT32 volumes too large for a single 64K-cluster bitmap get split into
// multiple windows; everything else uses exactly one window.
451 if (FatIsFat32(Vcb
) &&
452 Vcb
->AllocationSupport
.NumberOfClusters
> MAX_CLUSTER_BITMAP_SIZE
) {
454 Vcb
->NumberOfWindows
= (Vcb
->AllocationSupport
.NumberOfClusters
+
455 MAX_CLUSTER_BITMAP_SIZE
- 1) /
456 MAX_CLUSTER_BITMAP_SIZE
;
460 Vcb
->NumberOfWindows
= 1;
463 Vcb
->Windows
= FsRtlAllocatePoolWithTag( PagedPool
,
464 Vcb
->NumberOfWindows
* sizeof(FAT_WINDOW
),
467 RtlInitializeBitMap( &Vcb
->FreeClusterBitMap
,
472 // Chose a FAT window to begin operation in.
475 if (Vcb
->NumberOfWindows
> 1) {
478 // Read the fat and count up free clusters. We bias by the two reserved
479 // entries in the FAT.
482 FatExamineFatEntries( IrpContext
, Vcb
,
484 Vcb
->AllocationSupport
.NumberOfClusters
+ 2 - 1,
491 // Pick a window to begin allocating from
494 Vcb
->CurrentWindow
= &Vcb
->Windows
[ FatSelectBestWindow( Vcb
)];
498 Vcb
->CurrentWindow
= &Vcb
->Windows
[0];
501 // Carefully bias ourselves by the two reserved entries in the FAT.
504 Vcb
->CurrentWindow
->FirstCluster
= 2;
505 Vcb
->CurrentWindow
->LastCluster
= Vcb
->AllocationSupport
.NumberOfClusters
+ 2 - 1;
509 // Now transition to the FAT window we have chosen.
512 FatExamineFatEntries( IrpContext
, Vcb
,
520 // Now set the ClusterHint to the first free bit in our favorite
521 // window (except the ClusterHint is off by two).
525 (BitIndex
= RtlFindClearBits( &Vcb
->FreeClusterBitMap
, 1, 0 )) != -1 ?
530 DebugUnwind( FatSetupAllocationSupport
);
533 // If we hit an exception, back out.
// On abnormal termination, undo everything via the teardown routine.
536 if (_SEH2_AbnormalTermination()) {
538 FatTearDownAllocationSupport( IrpContext
, Vcb
);
547 FatTearDownAllocationSupport (
548 IN PIRP_CONTEXT IrpContext
,
556 This routine prepares the volume for closing. Specifically, we must
557 release the free fat bit map buffer, and uninitialize the dirty fat
562 Vcb - Supplies the Vcb to fill in.
571 DebugTrace(+1, Dbg
, "FatTearDownAllocationSupport\n", 0);
572 DebugTrace( 0, Dbg
, " Vcb = %p\n", Vcb
);
577 // If there are FAT buckets, free them.
580 if ( Vcb
->Windows
!= NULL
) {
582 ExFreePool( Vcb
->Windows
);
587 // Free the memory associated with the free cluster bitmap.
590 if ( Vcb
->FreeClusterBitMap
.Buffer
!= NULL
) {
592 ExFreePool( Vcb
->FreeClusterBitMap
.Buffer
);
595 // NULL this field as an flag.
598 Vcb
->FreeClusterBitMap
.Buffer
= NULL
;
602 // And remove all the runs in the dirty fat Mcb
605 FatRemoveMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
, 0, 0xFFFFFFFF );
607 DebugTrace(-1, Dbg
, "FatTearDownAllocationSupport -> (VOID)\n", 0);
609 UNREFERENCED_PARAMETER( IrpContext
);
// NOTE(review): FatLookupFileAllocation - extraction is missing many lines
// (locals, braces, try/finally keywords, several statements). The FAT-chain
// walk below is order-critical, so the code is kept byte-identical; comments
// only. Maps a file-relative VBO to an on-disk LBO, consulting the Fcb's Mcb
// cache first and walking the FAT (while caching runs into the Mcb) on a miss.
615 _Requires_lock_held_(_Global_critical_region_
)
617 FatLookupFileAllocation (
618 IN PIRP_CONTEXT IrpContext
,
622 OUT PULONG ByteCount
,
623 OUT PBOOLEAN Allocated
,
624 OUT PBOOLEAN EndOnMax
,
632 This routine looks up the existing mapping of VBO to LBO for a
633 file/directory. The information it queries is either stored in the
634 mcb field of the fcb/dcb or it is stored on in the fat table and
635 needs to be retrieved and decoded, and updated in the mcb.
639 FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being queried
641 Vbo - Supplies the VBO whose LBO we want returned
643 Lbo - Receives the LBO corresponding to the input Vbo if one exists
645 ByteCount - Receives the number of bytes within the run the run
646 that correpond between the input vbo and output lbo.
648 Allocated - Receives TRUE if the Vbo does have a corresponding Lbo
651 EndOnMax - Receives TRUE if the run ends in the maximal FAT cluster,
652 which results in a fractional bytecount.
654 Index - Receives the Index of the run
// FirstLboOfCurrentRun == 0 doubles as "first loop iteration" sentinel (see
// the long comment further down) - real file-area LBOs are never 0.
663 VBO FirstVboOfCurrentRun
= 0;
664 LBO FirstLboOfCurrentRun
;
671 ULONG BytesPerCluster
;
672 ULARGE_INTEGER BytesOnVolume
;
// Enumeration context holds the currently pinned FAT page (Bcb) across
// FatLookupFatEntry calls; unpinned in the finally clause at the bottom.
674 FAT_ENUMERATION_CONTEXT Context
;
681 DebugTrace(+1, Dbg
, "FatLookupFileAllocation\n", 0);
682 DebugTrace( 0, Dbg
, " FcbOrDcb = %p\n", FcbOrDcb
);
683 DebugTrace( 0, Dbg
, " Vbo = %8lx\n", Vbo
);
684 DebugTrace( 0, Dbg
, " pLbo = %8lx\n", Lbo
);
685 DebugTrace( 0, Dbg
, " pByteCount = %8lx\n", ByteCount
);
686 DebugTrace( 0, Dbg
, " pAllocated = %8lx\n", Allocated
);
693 // Check the trivial case that the mapping is already in our
// Fast path: the run is already cached in the Mcb.
697 if ( FatLookupMcbEntry(Vcb
, &FcbOrDcb
->Mcb
, Vbo
, Lbo
, ByteCount
, Index
) ) {
701 NT_ASSERT( *ByteCount
!= 0 );
704 // Detect the overflow case, trim and claim the condition.
// Vbo + *ByteCount wrapping to 0 means the run ends at the 2^32 boundary
// (the "maximal file" case flagged via EndOnMax).
707 if (Vbo
+ *ByteCount
== 0) {
712 DebugTrace( 0, Dbg
, "Found run in Mcb.\n", 0);
713 DebugTrace(-1, Dbg
, "FatLookupFileAllocation -> (VOID)\n", 0);
718 // Initialize the Vcb, the cluster size, LastCluster, and
719 // FirstLboOfCurrentRun (to be used as an indication of the first
720 // iteration through the following while loop).
723 BytesPerCluster
= 1 << Vcb
->AllocationSupport
.LogOfBytesPerCluster
;
725 BytesOnVolume
.QuadPart
= UInt32x32To64( Vcb
->AllocationSupport
.NumberOfClusters
, BytesPerCluster
);
728 FirstLboOfCurrentRun
= 0;
731 // Discard the case that the request extends beyond the end of
732 // allocation. Note that if the allocation size if not known
733 // AllocationSize is set to 0xffffffff.
736 if ( Vbo
>= FcbOrDcb
->Header
.AllocationSize
.LowPart
) {
740 DebugTrace( 0, Dbg
, "Vbo beyond end of file.\n", 0);
741 DebugTrace(-1, Dbg
, "FatLookupFileAllocation -> (VOID)\n", 0);
746 // The Vbo is beyond the last Mcb entry. So we adjust Current Vbo/Lbo
747 // and FatEntry to describe the beginning of the last entry in the Mcb.
748 // This is used as initialization for the following loop.
750 // If the Mcb was empty, we start at the beginning of the file with
751 // CurrentVbo set to 0 to indicate a new run.
754 if (FatLookupLastMcbEntry( Vcb
, &FcbOrDcb
->Mcb
, &CurrentVbo
, &CurrentLbo
, &Runs
)) {
756 DebugTrace( 0, Dbg
, "Current Mcb size = %8lx.\n", CurrentVbo
+ 1);
// Back up from the last byte of the last cached run to the start of its
// final cluster - the FAT walk resumes from there.
758 CurrentVbo
-= (BytesPerCluster
- 1);
759 CurrentLbo
-= (BytesPerCluster
- 1);
762 // Convert an index to a count.
769 DebugTrace( 0, Dbg
, "Mcb empty.\n", 0);
772 // Check for an FcbOrDcb that has no allocation
775 if (FcbOrDcb
->FirstClusterOfFile
== 0) {
779 DebugTrace( 0, Dbg
, "File has no allocation.\n", 0);
780 DebugTrace(-1, Dbg
, "FatLookupFileAllocation -> (VOID)\n", 0);
786 CurrentLbo
= FatGetLboFromIndex( Vcb
, FcbOrDcb
->FirstClusterOfFile
);
787 FirstVboOfCurrentRun
= CurrentVbo
;
788 FirstLboOfCurrentRun
= CurrentLbo
;
792 DebugTrace( 0, Dbg
, "First Lbo of file = %8lx\n", CurrentLbo
);
797 // Now we know that we are looking up a valid Vbo, but it is
798 // not in the Mcb, which is a monotonically increasing list of
799 // Vbo's. Thus we have to go to the Fat, and update
800 // the Mcb as we go. We use a try-finally to unpin the page
801 // of fat hanging around. Also we mark *Allocated = FALSE, so that
802 // the caller wont try to use the data if we hit an exception.
809 FatEntry
= (FAT_ENTRY
)FatGetIndexFromLbo( Vcb
, CurrentLbo
);
812 // ASSERT that CurrentVbo and CurrentLbo are now cluster alligned.
813 // The assumption here, is that only whole clusters of Vbos and Lbos
814 // are mapped in the Mcb.
817 NT_ASSERT( ((CurrentLbo
- Vcb
->AllocationSupport
.FileAreaLbo
)
818 % BytesPerCluster
== 0) &&
819 (CurrentVbo
% BytesPerCluster
== 0) );
822 // Starting from the first Vbo after the last Mcb entry, scan through
823 // the Fat looking for our Vbo. We continue through the Fat until we
824 // hit a noncontiguity beyond the desired Vbo, or the last cluster.
827 while ( !LastCluster
) {
830 // Get the next fat entry, and update our Current variables.
// FatEntry is both input (current index) and output (next link in chain).
833 FatLookupFatEntry( IrpContext
, Vcb
, FatEntry
, (PULONG
)&FatEntry
, &Context
);
835 PriorLbo
= CurrentLbo
;
836 CurrentLbo
= FatGetLboFromIndex( Vcb
, FatEntry
);
837 CurrentVbo
+= BytesPerCluster
;
839 switch ( FatInterpretClusterType( Vcb
, FatEntry
)) {
842 // Check for a break in the Fat allocation chain.
// Available/reserved entries inside a chain mean on-disk corruption.
845 case FatClusterAvailable
:
846 case FatClusterReserved
:
849 DebugTrace( 0, Dbg
, "Break in allocation chain, entry = %d\n", FatEntry
);
850 DebugTrace(-1, Dbg
, "FatLookupFileAllocation -> Fat Corrupt. Raise Status.\n", 0);
852 FatPopUpFileCorrupt( IrpContext
, FcbOrDcb
);
853 FatRaiseStatus( IrpContext
, STATUS_FILE_CORRUPT_ERROR
);
857 // If this is the last cluster, we must update the Mcb and
864 // Assert we know where the current run started. If the
865 // Mcb was empty when we were called, thenFirstLboOfCurrentRun
866 // was set to the start of the file. If the Mcb contained an
867 // entry, then FirstLboOfCurrentRun was set on the first
868 // iteration through the loop. Thus if FirstLboOfCurrentRun
869 // is 0, then there was an Mcb entry and we are on our first
870 // iteration, meaing that the last cluster in the Mcb was
871 // really the last allocated cluster, but we checked Vbo
872 // against AllocationSize, and found it OK, thus AllocationSize
873 // must be too large.
875 // Note that, when we finally arrive here, CurrentVbo is actually
876 // the first Vbo beyond the file allocation and CurrentLbo is
880 DebugTrace( 0, Dbg
, "Read last cluster of file.\n", 0);
883 // Detect the case of the maximal file. Note that this really isn't
884 // a proper Vbo - those are zero-based, and this is a one-based number.
885 // The maximal file, of 2^32 - 1 bytes, has a maximum byte offset of
888 // Just so we don't get confused here.
891 if (CurrentVbo
== 0) {
899 if (FirstLboOfCurrentRun
!= 0 ) {
901 DebugTrace( 0, Dbg
, "Adding a run to the Mcb.\n", 0);
902 DebugTrace( 0, Dbg
, " Vbo = %08lx.\n", FirstVboOfCurrentRun
);
903 DebugTrace( 0, Dbg
, " Lbo = %08lx.\n", FirstLboOfCurrentRun
);
904 DebugTrace( 0, Dbg
, " Length = %08lx.\n", CurrentVbo
- FirstVboOfCurrentRun
);
906 (VOID
)FatAddMcbEntry( Vcb
,
908 FirstVboOfCurrentRun
,
909 FirstLboOfCurrentRun
,
910 CurrentVbo
- FirstVboOfCurrentRun
);
916 // Being at the end of allocation, make sure we have found
917 // the Vbo. If we haven't, seeing as we checked VBO
918 // against AllocationSize, the real disk allocation is less
919 // than that of AllocationSize. This comes about when the
920 // real allocation is not yet known, and AllocaitonSize
921 // contains MAXULONG.
923 // KLUDGE! - If we were called by FatLookupFileAllocationSize
924 // Vbo is set to MAXULONG - 1, and AllocationSize to the lookup
925 // hint. Thus we merrily go along looking for a match that isn't
926 // there, but in the meantime building an Mcb. If this is
927 // the case, fill in AllocationSize and return.
930 if ( Vbo
== MAXULONG
- 1 ) {
934 FcbOrDcb
->Header
.AllocationSize
.QuadPart
= CurrentVbo
;
936 DebugTrace( 0, Dbg
, "New file allocation size = %08lx.\n", CurrentVbo
);
937 try_return ( NOTHING
);
941 // We will lie ever so slightly if we really terminated on the
942 // maximal byte of a file. It is really allocated.
945 if (Vbo
>= CurrentVbo
&& !*EndOnMax
) {
948 try_return ( NOTHING
);
954 // This is a continuation in the chain. If the run has a
955 // discontiguity at this point, update the Mcb, and if we are beyond
956 // the desired Vbo, this is the end of the run, so set LastCluster
957 // and exit the loop.
963 // This is the loop check. The Vbo must not be bigger than the size of
964 // the volume, and the Vbo must not have a) wrapped and b) not been at the
965 // very last cluster in the chain, for the case of the maximal file.
968 if ( CurrentVbo
== 0 ||
969 (BytesOnVolume
.HighPart
== 0 && CurrentVbo
> BytesOnVolume
.LowPart
)) {
971 FatPopUpFileCorrupt( IrpContext
, FcbOrDcb
);
972 FatRaiseStatus( IrpContext
, STATUS_FILE_CORRUPT_ERROR
);
// A discontiguity: the next cluster is not physically adjacent, so the
// current run ends here and gets cached in the Mcb.
975 if ( PriorLbo
+ BytesPerCluster
!= CurrentLbo
) {
978 // Note that on the first time through the loop
979 // (FirstLboOfCurrentRun == 0), we don't add the
980 // run to the Mcb since it curresponds to the last
981 // run already stored in the Mcb.
984 if ( FirstLboOfCurrentRun
!= 0 ) {
986 DebugTrace( 0, Dbg
, "Adding a run to the Mcb.\n", 0);
987 DebugTrace( 0, Dbg
, " Vbo = %08lx.\n", FirstVboOfCurrentRun
);
988 DebugTrace( 0, Dbg
, " Lbo = %08lx.\n", FirstLboOfCurrentRun
);
989 DebugTrace( 0, Dbg
, " Length = %08lx.\n", CurrentVbo
- FirstVboOfCurrentRun
);
993 FirstVboOfCurrentRun
,
994 FirstLboOfCurrentRun
,
995 CurrentVbo
- FirstVboOfCurrentRun
);
1001 // Since we are at a run boundry, with CurrentLbo and
1002 // CurrentVbo being the first cluster of the next run,
1003 // we see if the run we just added encompases the desired
1004 // Vbo, and if so exit. Otherwise we set up two new
1005 // First*boOfCurrentRun, and continue.
1008 if (CurrentVbo
> Vbo
) {
1014 FirstVboOfCurrentRun
= CurrentVbo
;
1015 FirstLboOfCurrentRun
= CurrentLbo
;
// Default switch arm: any other cluster type is an impossible internal
// state, hence the bugcheck.
1022 DebugTrace(0, Dbg
, "Illegal Cluster Type.\n", FatEntry
);
1025 #pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
1027 FatBugCheck( 0, 0, 0 );
1035 // Load up the return parameters.
1037 // On exit from the loop, Vbo still contains the desired Vbo, and
1038 // CurrentVbo is the first byte after the run that contained the
1044 *Lbo
= FirstLboOfCurrentRun
+ (Vbo
- FirstVboOfCurrentRun
);
1046 *ByteCount
= CurrentVbo
- Vbo
;
1048 if (ARGUMENT_PRESENT(Index
)) {
1051 // Note that Runs only needs to be accurate with respect to where we
1052 // ended. Since partial-lookup cases will occur without exclusive
1053 // synchronization, the Mcb itself may be much bigger by now.
1063 DebugUnwind( FatLookupFileAllocation
);
1066 // We are done reading the Fat, so unpin the last page of fat
1067 // that is hanging around
1070 FatUnpinBcb( IrpContext
, Context
.Bcb
);
1072 DebugTrace(-1, Dbg
, "FatLookupFileAllocation -> (VOID)\n", 0);
// NOTE(review): FatAddFileAllocation - extraction is missing many lines
// (locals such as Bcb/Vcb/FirstLboOfFile, braces, try/finally keywords,
// several call arguments). The SEH unwind ordering below is critical, so the
// code is kept byte-identical; comments only. Extends a file/directory's
// cluster allocation to at least DesiredAllocationSize bytes.
1079 _Requires_lock_held_(_Global_critical_region_
)
1081 FatAddFileAllocation (
1082 IN PIRP_CONTEXT IrpContext
,
1084 IN PFILE_OBJECT FileObject OPTIONAL
,
1085 IN ULONG DesiredAllocationSize
1090 Routine Description:
1092 This routine adds additional allocation to the specified file/directory.
1093 Additional allocation is added by appending clusters to the file/directory.
1095 If the file already has a sufficient allocation then this procedure
1096 is effectively a noop.
1100 FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified.
1101 This parameter must not specify the root dcb.
1103 FileObject - If supplied inform the cache manager of the change.
1105 DesiredAllocationSize - Supplies the minimum size, in bytes, that we want
1106 allocated to the file/directory.
// Unwind bookkeeping: each Unwind* boolean records a completed step so the
// exception handler at the bottom can back out exactly what was done.
1112 LARGE_MCB NewMcb
= {0};
1113 PLARGE_MCB McbToCleanup
= NULL
;
1114 PDIRENT Dirent
= NULL
;
1115 ULONG NewAllocation
= 0;
1117 BOOLEAN UnwindWeAllocatedDiskSpace
= FALSE
;
1118 BOOLEAN UnwindAllocationSizeSet
= FALSE
;
1119 BOOLEAN UnwindCacheManagerInformed
= FALSE
;
1120 BOOLEAN UnwindWeInitializedMcb
= FALSE
;
1124 DebugTrace(+1, Dbg
, "FatAddFileAllocation\n", 0);
1125 DebugTrace( 0, Dbg
, " FcbOrDcb = %p\n", FcbOrDcb
);
1126 DebugTrace( 0, Dbg
, " DesiredAllocationSize = %8lx\n", DesiredAllocationSize
);
1128 Vcb
= FcbOrDcb
->Vcb
;
1131 // If we haven't yet set the correct AllocationSize, do so.
1134 if (FcbOrDcb
->Header
.AllocationSize
.QuadPart
== FCB_LOOKUP_ALLOCATIONSIZE_HINT
) {
1136 FatLookupFileAllocationSize( IrpContext
, FcbOrDcb
);
1140 // Check for the benign case that the desired allocation is already
1141 // within the allocation size.
1144 if (DesiredAllocationSize
<= FcbOrDcb
->Header
.AllocationSize
.LowPart
) {
1146 DebugTrace(0, Dbg
, "Desired size within current allocation.\n", 0);
1148 DebugTrace(-1, Dbg
, "FatAddFileAllocation -> (VOID)\n", 0);
1152 DebugTrace( 0, Dbg
, "InitialAllocation = %08lx.\n", FcbOrDcb
->Header
.AllocationSize
.LowPart
);
1155 // Get a chunk of disk space that will fullfill our needs. If there
1156 // was no initial allocation, start from the hint in the Vcb, otherwise
1157 // try to allocate from the cluster after the initial allocation.
1159 // If there was no initial allocation to the file, we can just use the
1160 // Mcb in the FcbOrDcb, otherwise we have to use a new one, and merge
1161 // it to the one in the FcbOrDcb.
// Branch 1: file currently has NO allocation - allocate directly into the
// Fcb's own Mcb and stamp the dirent's FirstClusterOfFile.
1166 if (FcbOrDcb
->Header
.AllocationSize
.LowPart
== 0) {
1170 NT_ASSERT( FcbOrDcb
->FcbCondition
== FcbGood
);
1172 FatGetDirentFromFcbOrDcb( IrpContext
,
1178 // Set this dirty right now since this call can fail.
1181 FatSetDirtyBcb( IrpContext
, Bcb
, Vcb
, TRUE
);
1183 FatAllocateDiskSpace( IrpContext
,
1186 &DesiredAllocationSize
,
1190 UnwindWeAllocatedDiskSpace
= TRUE
;
1191 McbToCleanup
= &FcbOrDcb
->Mcb
;
1194 // We have to update the dirent and FcbOrDcb copies of
1195 // FirstClusterOfFile since before it was 0
1198 FatLookupMcbEntry( FcbOrDcb
->Vcb
,
1205 DebugTrace( 0, Dbg
, "First Lbo of file will be %08lx.\n", FirstLboOfFile
);
1207 FcbOrDcb
->FirstClusterOfFile
= FatGetIndexFromLbo( Vcb
, FirstLboOfFile
);
// FAT32 splits the 32-bit first-cluster index across two 16-bit dirent
// fields (low word here, high word just below).
1209 Dirent
->FirstClusterOfFile
= (USHORT
)FcbOrDcb
->FirstClusterOfFile
;
1211 if ( FatIsFat32(Vcb
) ) {
1213 Dirent
->FirstClusterOfFileHi
= (USHORT
)(FcbOrDcb
->FirstClusterOfFile
>> 16);
1217 // Note the size of the allocation we need to tell the cache manager about.
1220 NewAllocation
= DesiredAllocationSize
;
// Branch 2: extension of an existing allocation - allocate into a temporary
// NewMcb and merge it onto the Fcb's Mcb only after Cc has been grown.
1224 LBO LastAllocatedLbo
;
1228 // Get the first cluster following the current allocation. It is possible
1229 // the Mcb is empty (or short, etc.) so we need to be slightly careful
1230 // about making sure we don't lie with the hint.
1233 (void)FatLookupLastMcbEntry( FcbOrDcb
->Vcb
, &FcbOrDcb
->Mcb
, &DontCare
, &LastAllocatedLbo
, NULL
);
1236 // Try to get some disk space starting from there.
1239 NewAllocation
= DesiredAllocationSize
- FcbOrDcb
->Header
.AllocationSize
.LowPart
;
1241 FsRtlInitializeLargeMcb( &NewMcb
, PagedPool
);
1242 UnwindWeInitializedMcb
= TRUE
;
1243 McbToCleanup
= &NewMcb
;
1245 FatAllocateDiskSpace( IrpContext
,
1247 (LastAllocatedLbo
!= ~0 ?
1248 FatGetIndexFromLbo(Vcb
,LastAllocatedLbo
+ 1) :
1254 UnwindWeAllocatedDiskSpace
= TRUE
;
1258 // Now that we increased the allocation of the file, mark it in the
1259 // FcbOrDcb. Carefully prepare to handle an inability to grow the cache
1263 FcbOrDcb
->Header
.AllocationSize
.LowPart
+= NewAllocation
;
1266 // Handle the maximal file case, where we may have just wrapped. Note
1267 // that this must be the precise boundary case wrap, i.e. by one byte,
1268 // so that the new allocation is actually one byte "less" as far as we're
1269 // concerned. This is important for the extension case.
1272 if (FcbOrDcb
->Header
.AllocationSize
.LowPart
== 0) {
1275 FcbOrDcb
->Header
.AllocationSize
.LowPart
= 0xffffffff;
1278 UnwindAllocationSizeSet
= TRUE
;
1281 // Inform the cache manager to increase the section size
// CcSetFileSizes can raise (e.g. insufficient resources) - that is why the
// merge below is deferred until after this call succeeds.
1284 if ( ARGUMENT_PRESENT(FileObject
) && CcIsFileCached(FileObject
) ) {
1286 CcSetFileSizes( FileObject
,
1287 (PCC_FILE_SIZES
)&FcbOrDcb
->Header
.AllocationSize
);
1288 UnwindCacheManagerInformed
= TRUE
;
1292 // In the extension case, we have held off actually gluing the new
1293 // allocation onto the file. This simplifies exception cleanup since
1294 // if it was already added and the section grow failed, we'd have to
1295 // do extra work to unglue it. This way, we can assume that if we
1296 // raise the only thing we need to do is deallocate the disk space.
1298 // Merge the allocation now.
1301 if (FcbOrDcb
->Header
.AllocationSize
.LowPart
!= NewAllocation
) {
1304 // Tack the new Mcb onto the end of the FcbOrDcb one.
1307 FatMergeAllocation( IrpContext
,
1315 DebugUnwind( FatAddFileAllocation
);
1318 // Give FlushFileBuffer/Cleanup a clue here, regardless of success/fail..
1321 SetFlag(FcbOrDcb
->FcbState
, FCB_STATE_FLUSH_FAT
);
1324 // If we were dogged trying to complete this operation, we need to go
1325 // back various things out.
// Exception unwind: undo in reverse order of the Unwind* flags set above.
1328 if (_SEH2_AbnormalTermination()) {
1331 // Pull off the allocation size we tried to add to this object if
1332 // we failed to grow cache structures or Mcb structures.
1335 if (UnwindAllocationSizeSet
) {
1337 FcbOrDcb
->Header
.AllocationSize
.LowPart
-= NewAllocation
;
1340 if (UnwindCacheManagerInformed
) {
1342 CcSetFileSizes( FileObject
,
1343 (PCC_FILE_SIZES
)&FcbOrDcb
->Header
.AllocationSize
);
1347 // In the case of initial allocation, we used the Fcb's Mcb and have
1348 // to clean that up as well as the FAT chain references.
1351 if (FcbOrDcb
->Header
.AllocationSize
.LowPart
== 0) {
1353 if (Dirent
!= NULL
) {
1355 FcbOrDcb
->FirstClusterOfFile
= 0;
1356 Dirent
->FirstClusterOfFile
= 0;
1358 if ( FatIsFat32(Vcb
) ) {
1360 Dirent
->FirstClusterOfFileHi
= 0;
1366 // ... and drop the dirent Bcb if we got it. Do it now
1367 // so we can afford to take the exception if we have to.
1370 FatUnpinBcb( IrpContext
, Bcb
);
1375 // Note this can re-raise.
1378 if ( UnwindWeAllocatedDiskSpace
) {
1380 FatDeallocateDiskSpace( IrpContext
, Vcb
, McbToCleanup
, FALSE
);
1386 // We always want to clean up the non-initial allocation temporary Mcb,
1387 // otherwise we have the Fcb's Mcb and we just truncate it away.
1390 if (UnwindWeInitializedMcb
== TRUE
) {
1393 // Note that we already know a raise is in progress. No danger
1394 // of encountering the normal case code below and doing this again.
1397 FsRtlUninitializeLargeMcb( McbToCleanup
);
1403 FsRtlTruncateLargeMcb( McbToCleanup
, 0 );
1409 DebugTrace(-1, Dbg
, "FatAddFileAllocation -> (VOID)\n", 0);
1413 // Non-exceptional cleanup we always want to do. In handling the re-raise possibilities
1414 // during exceptions we had to make sure these two steps always happened there beforehand.
1415 // So now we handle the usual case.
1418 FatUnpinBcb( IrpContext
, Bcb
);
1420 if (UnwindWeInitializedMcb
== TRUE
) {
1422 FsRtlUninitializeLargeMcb( &NewMcb
);
//
//  FatTruncateFileAllocation — shrinks the allocation of a file/directory
//  to DesiredAllocationSize, rounded up to the next cluster.  If the new
//  size is zero the dirent/Fcb FirstClusterOfFile fields are cleared and
//  the whole chain is deallocated; otherwise the chain is split with
//  FatSplitAllocation and only the tail is deallocated.  On exception the
//  in-memory state is rolled back (possibly leaking on-disk clusters,
//  which chkdsk can reclaim, rather than leaving inconsistent structures).
//
//  NOTE(review): this extract was line-mangled by tooling — the embedded
//  original line numbers below are non-contiguous, so some interior lines
//  (parameter list, braces, _SEH2_TRY/_SEH2_FINALLY scaffolding) are
//  elided here.  Do not re-flow or hand-edit without the pristine source.
//
1426 _Requires_lock_held_(_Global_critical_region_
)
1428 FatTruncateFileAllocation (
1429 IN PIRP_CONTEXT IrpContext
,
1431 IN ULONG DesiredAllocationSize
1436 Routine Description:
1438 This routine truncates the allocation to the specified file/directory.
1440 If the file is already smaller than the indicated size then this procedure
1441 is effectively a noop.
1446 FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified
1447 This parameter must not specify the root dcb.
1449 DesiredAllocationSize - Supplies the maximum size, in bytes, that we want
1450 allocated to the file/directory. It is rounded
1451 up to the nearest cluster.
1455 VOID - TRUE if the operation completed and FALSE if it had to
1456 block but could not.
//
//  Locals: RemainingMcb receives the to-be-freed tail of a split; the
//  Unwind* values snapshot state so the exception path can restore it.
//
1463 LARGE_MCB RemainingMcb
= {0};
1464 ULONG BytesPerCluster
;
1465 PDIRENT Dirent
= NULL
;
1466 BOOLEAN UpdatedDirent
= FALSE
;
1468 ULONG UnwindInitialAllocationSize
;
1469 ULONG UnwindInitialFirstClusterOfFile
;
1470 BOOLEAN UnwindWeAllocatedMcb
= FALSE
;
1474 Vcb
= FcbOrDcb
->Vcb
;
1476 DebugTrace(+1, Dbg
, "FatTruncateFileAllocation\n", 0);
1477 DebugTrace( 0, Dbg
, " FcbOrDcb = %p\n", FcbOrDcb
);
1478 DebugTrace( 0, Dbg
, " DesiredAllocationSize = %8lx\n", DesiredAllocationSize
);
1481 // If the Fcb isn't in good condition, we have no business whacking around on
1482 // the disk after "its" clusters.
1484 // Inspired by a Prefix complaint.
1487 NT_ASSERT( FcbOrDcb
->FcbCondition
== FcbGood
);
1490 // If we haven't yet set the correct AllocationSize, do so.
1493 if (FcbOrDcb
->Header
.AllocationSize
.QuadPart
== FCB_LOOKUP_ALLOCATIONSIZE_HINT
) {
1495 FatLookupFileAllocationSize( IrpContext
, FcbOrDcb
);
1499 // Round up the Desired Allocation Size to the next cluster size
1502 BytesPerCluster
= 1 << Vcb
->AllocationSupport
.LogOfBytesPerCluster
;
1505 // Note if the desired allocation is zero, to distinguish this from
1506 // the wrap case below.
1509 if (DesiredAllocationSize
!= 0) {
1511 DesiredAllocationSize
= (DesiredAllocationSize
+ (BytesPerCluster
- 1)) &
1512 ~(BytesPerCluster
- 1);
1514 // Check for the benign case that the file is already smaller than
1515 // the desired truncation. Note that if it wraps, then a) it was
1516 // specifying an offset in the maximally allocatable cluster and
1517 // b) we're not asking to extend the file, either. So stop.
1520 if (DesiredAllocationSize
== 0 ||
1521 DesiredAllocationSize
>= FcbOrDcb
->Header
.AllocationSize
.LowPart
) {
1523 DebugTrace(0, Dbg
, "Desired size within current allocation.\n", 0);
1525 DebugTrace(-1, Dbg
, "FatTruncateFileAllocation -> (VOID)\n", 0);
1532 // This is a no-op if the allocation size is already what we want.
1535 if (DesiredAllocationSize
== FcbOrDcb
->Header
.AllocationSize
.LowPart
) {
1537 DebugTrace(0, Dbg
, "Desired size equals current allocation.\n", 0);
1538 DebugTrace(-1, Dbg
, "FatTruncateFileAllocation -> (VOID)\n", 0);
// Snapshot current state for the exception-unwind path below.
1542 UnwindInitialAllocationSize
= FcbOrDcb
->Header
.AllocationSize
.LowPart
;
1543 UnwindInitialFirstClusterOfFile
= FcbOrDcb
->FirstClusterOfFile
;
1546 // Update the FcbOrDcb allocation size. If it is now zero, we have the
1547 // additional task of modifying the FcbOrDcb and Dirent copies of
1548 // FirstClusterInFile.
1550 // Note that we must pin the dirent before actually deallocating the
1551 // disk space since, in unwind, it would not be possible to reallocate
1552 // deallocated disk space as someone else may have reallocated it and
1553 // may cause an exception when you try to get some more disk space.
1554 // Thus FatDeallocateDiskSpace must be the final dangerous operation.
1559 FcbOrDcb
->Header
.AllocationSize
.QuadPart
= DesiredAllocationSize
;
1565 if (DesiredAllocationSize
== 0) {
1568 // We have to update the dirent and FcbOrDcb copies of
1569 // FirstClusterOfFile since before it was 0
1572 NT_ASSERT( FcbOrDcb
->FcbCondition
== FcbGood
);
1574 FatGetDirentFromFcbOrDcb( IrpContext
, FcbOrDcb
, FALSE
, &Dirent
, &Bcb
);
1576 Dirent
->FirstClusterOfFile
= 0;
1578 if (FatIsFat32(Vcb
)) {
1580 Dirent
->FirstClusterOfFileHi
= 0;
1583 FcbOrDcb
->FirstClusterOfFile
= 0;
1585 FatSetDirtyBcb( IrpContext
, Bcb
, Vcb
, TRUE
);
1586 UpdatedDirent
= TRUE
;
1588 FatDeallocateDiskSpace( IrpContext
, Vcb
, &FcbOrDcb
->Mcb
, ((FcbOrDcb
->FcbState
& FCB_STATE_ZERO_ON_DEALLOCATION
) != 0));
1590 FatRemoveMcbEntry( FcbOrDcb
->Vcb
, &FcbOrDcb
->Mcb
, 0, 0xFFFFFFFF );
1595 // Split the existing allocation into two parts, one we will keep, and
1596 // one we will deallocate.
1599 FsRtlInitializeLargeMcb( &RemainingMcb
, PagedPool
);
1600 UnwindWeAllocatedMcb
= TRUE
;
1602 FatSplitAllocation( IrpContext
,
1605 DesiredAllocationSize
,
1608 FatDeallocateDiskSpace( IrpContext
, Vcb
, &RemainingMcb
, ((FcbOrDcb
->FcbState
& FCB_STATE_ZERO_ON_DEALLOCATION
) != 0) );
1610 FsRtlUninitializeLargeMcb( &RemainingMcb
);
//
// Exception unwind: restore the snapshotted Fcb state taken above.
//
1615 DebugUnwind( FatTruncateFileAllocation
);
1618 // Is this really the right backout strategy? It would be nice if we could
1619 // pretend the truncate worked if we knew that the file had gotten into
1620 // a consistent state. Leaving dangled clusters is probably quite preferable.
1623 if ( _SEH2_AbnormalTermination() ) {
1625 FcbOrDcb
->Header
.AllocationSize
.LowPart
= UnwindInitialAllocationSize
;
1627 if ( (DesiredAllocationSize
== 0) && (Dirent
!= NULL
)) {
1629 if (UpdatedDirent
) {
1632 // If the dirent has been updated ok and marked dirty, then we
1633 // failed in deallocatediscspace, and don't know what state
1634 // the on disc fat chain is in. So we throw away the mcb,
1635 // and potentially lose a few clusters until the next
1636 // chkdsk. The operation has succeeded, but the exception
1637 // will still propagate. 5.1
1640 FatRemoveMcbEntry( Vcb
, &FcbOrDcb
->Mcb
, 0, 0xFFFFFFFF );
1641 FcbOrDcb
->Header
.AllocationSize
.QuadPart
= 0;
1643 else if (FcbOrDcb
->FirstClusterOfFile
== 0) {
1645 Dirent
->FirstClusterOfFile
= (USHORT
)UnwindInitialFirstClusterOfFile
;
1647 if ( FatIsFat32(Vcb
) ) {
1649 Dirent
->FirstClusterOfFileHi
=
1650 (USHORT
)(UnwindInitialFirstClusterOfFile
>> 16);
1653 FcbOrDcb
->FirstClusterOfFile
= UnwindInitialFirstClusterOfFile
;
1657 if ( UnwindWeAllocatedMcb
) {
1659 FsRtlUninitializeLargeMcb( &RemainingMcb
);
1663 // Note that in the non zero truncation case, we will also
1664 // leak clusters. However, apart from this, the in memory and on disc
1665 // structures will agree.
1668 FatUnpinBcb( IrpContext
, Bcb
);
1671 // Give FlushFileBuffer/Cleanup a clue here, regardless of success/fail.
1674 SetFlag(FcbOrDcb
->FcbState
, FCB_STATE_FLUSH_FAT
);
1676 DebugTrace(-1, Dbg
, "FatTruncateFileAllocation -> (VOID)\n", 0);
//
//  FatLookupFileAllocationSize — walks the file's FAT chain (via
//  FatLookupFileAllocation with a near-maximal Vbo) to establish the real
//  Header.AllocationSize, then sanity-checks it against the dirent-derived
//  FileSize; a FileSize larger than the allocation means on-disk corruption
//  and raises STATUS_FILE_CORRUPT_ERROR after posting a popup.
//
//  NOTE(review): extract is line-mangled with interior lines elided
//  (non-contiguous embedded numbering) — e.g. the FcbOrDcb parameter
//  declaration and the FatLookupFileAllocation argument list are missing
//  here.  Consult the pristine source before editing.
//
1681 _Requires_lock_held_(_Global_critical_region_
)
1683 FatLookupFileAllocationSize (
1684 IN PIRP_CONTEXT IrpContext
,
1690 Routine Description:
1692 This routine retrieves the current file allocatio size for the
1693 specified file/directory.
1697 FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified
1708 DebugTrace(+1, Dbg
, "FatLookupAllocationSize\n", 0);
1709 DebugTrace( 0, Dbg
, " FcbOrDcb = %p\n", FcbOrDcb
);
1712 // We call FatLookupFileAllocation with Vbo of 0xffffffff - 1.
1715 FatLookupFileAllocation( IrpContext
,
1725 // FileSize was set at Fcb creation time from the contents of the directory entry,
1726 // and we are only now looking up the real length of the allocation chain. If it
1727 // cannot be contained, this is trash. Probably more where that came from.
1730 if (FcbOrDcb
->Header
.FileSize
.LowPart
> FcbOrDcb
->Header
.AllocationSize
.LowPart
) {
1732 FatPopUpFileCorrupt( IrpContext
, FcbOrDcb
);
1733 FatRaiseStatus( IrpContext
, STATUS_FILE_CORRUPT_ERROR
);
1736 DebugTrace(-1, Dbg
, "FatLookupFileAllocationSize -> (VOID)\n", 0);
//
//  FatAllocateDiskSpace — allocates *ByteCount bytes (rounded up to whole
//  clusters) of free disk space, recording the runs in the caller's Mcb
//  and linking them in the FAT.  Fast path: a single contiguous run found
//  via FatFindFreeClusterRun.  Slow path: repeatedly grab the best free
//  run in the current bitmap window (switching FAT32 windows as needed)
//  until the request is satisfied, chaining successive runs together with
//  FatSetFatEntry.  Free-cluster accounting is debited up front under the
//  FreeClusterBitMap mutex so concurrent allocators cannot oversubscribe;
//  on failure the exception path re-credits it and tears down the Mcb.
//
//  NOTE(review): extract is line-mangled with interior lines elided (the
//  embedded original numbering is non-contiguous) — several argument
//  lists, braces and the _SEH2_TRY/try_leave scaffolding are missing
//  here.  Do not re-flow or hand-edit without the pristine source.
//
1741 _Requires_lock_held_(_Global_critical_region_
)
1743 FatAllocateDiskSpace (
1744 IN PIRP_CONTEXT IrpContext
,
1746 IN ULONG AbsoluteClusterHint
,
1747 IN PULONG ByteCount
,
1748 IN BOOLEAN ExactMatchRequired
,
1754 Routine Description:
1756 This procedure allocates additional disk space and builds an mcb
1757 representing the newly allocated space. If the space cannot be
1758 allocated then this procedure raises an appropriate status.
1760 Searching starts from the hint index in the Vcb unless an alternative
1761 non-zero hint is given in AlternateClusterHint. If we are using the
1762 hint field in the Vcb, it is set to the cluster following our allocation
1765 Disk space can only be allocated in cluster units so this procedure
1766 will round up any byte count to the next cluster boundary.
1768 Pictorially what is done is the following (where ! denotes the end of
1769 the fat chain (i.e., FAT_CLUSTER_LAST)):
1776 Mcb |--a--|--b--|--c--!
1779 ByteCount ----------+
1783 Vcb - Supplies the VCB being modified
1785 AbsoluteClusterHint - Supplies an alternate hint index to start the
1786 search from. If this is zero we use, and update,
1789 ByteCount - Supplies the number of bytes that we are requesting, and
1790 receives the number of bytes that we got.
1792 ExactMatchRequired - Caller should set this to TRUE if only the precise run requested
1795 Mcb - Receives the MCB describing the newly allocated disk space. The
1796 caller passes in an initialized Mcb that is filled in by this procedure.
1801 FALSE - Failed to allocate exactly as requested (=> ExactMatchRequired was TRUE)
1806 UCHAR LogOfBytesPerCluster
;
1807 ULONG BytesPerCluster
;
1808 ULONG StartingCluster
;
1810 ULONG WindowRelativeHint
;
1812 ULONG PreviousClear
= 0;
1816 BOOLEAN Wait
= FALSE
;
1817 BOOLEAN Result
= TRUE
;
1821 DebugTrace(+1, Dbg
, "FatAllocateDiskSpace\n", 0);
1822 DebugTrace( 0, Dbg
, " Vcb = %p\n", Vcb
);
1823 DebugTrace( 0, Dbg
, " *ByteCount = %8lx\n", *ByteCount
);
1824 DebugTrace( 0, Dbg
, " Mcb = %p\n", Mcb
);
1825 DebugTrace( 0, Dbg
, " Hint = %8lx\n", AbsoluteClusterHint
);
1827 NT_ASSERT((AbsoluteClusterHint
<= Vcb
->AllocationSupport
.NumberOfClusters
+ 2) && (1 != AbsoluteClusterHint
));
1830 // Make sure byte count is not zero
1833 if (*ByteCount
== 0) {
1835 DebugTrace(0, Dbg
, "Nothing to allocate.\n", 0);
1837 DebugTrace(-1, Dbg
, "FatAllocateDiskSpace -> (VOID)\n", 0);
1842 // Compute the cluster count based on the byte count, rounding up
1843 // to the next cluster if there is any remainder. Note that the
1844 // pathological case BytesCount == 0 has been eliminated above.
1847 LogOfBytesPerCluster
= Vcb
->AllocationSupport
.LogOfBytesPerCluster
;
1848 BytesPerCluster
= 1 << LogOfBytesPerCluster
;
1850 *ByteCount
= (*ByteCount
+ (BytesPerCluster
- 1))
1851 & ~(BytesPerCluster
- 1);
1854 // If ByteCount is NOW zero, then we were asked for the maximal
1855 // filesize (or at least for bytes in the last allocatable sector).
1858 if (*ByteCount
== 0) {
1860 *ByteCount
= 0xffffffff;
1861 ClusterCount
= 1 << (32 - LogOfBytesPerCluster
);
1865 ClusterCount
= (*ByteCount
>> LogOfBytesPerCluster
);
1869 // Analysis tools don't figure out that ClusterCount is not zero because
1870 // of the ByteCount == 0 checks, so give them a hint.
1872 _Analysis_assume_(ClusterCount
> 0);
1875 // Make sure there are enough free clusters to start with, and
1876 // take them now so that nobody else takes them from us.
1879 ExAcquireResourceSharedLite(&Vcb
->ChangeBitMapResource
, TRUE
);
1880 FatLockFreeClusterBitMap( Vcb
);
1882 if (ClusterCount
<= Vcb
->AllocationSupport
.NumberOfFreeClusters
) {
// Debit the free count now, under the bitmap mutex, so a concurrent
// allocator can't claim the same clusters; re-credited on unwind.
1884 Vcb
->AllocationSupport
.NumberOfFreeClusters
-= ClusterCount
;
1888 FatUnlockFreeClusterBitMap( Vcb
);
1889 ExReleaseResourceLite(&Vcb
->ChangeBitMapResource
);
1891 DebugTrace(0, Dbg
, "Disk Full. Raise Status.\n", 0);
1892 FatRaiseStatus( IrpContext
, STATUS_DISK_FULL
);
1896 // Did the caller supply a hint?
1899 if ((0 != AbsoluteClusterHint
) && (AbsoluteClusterHint
< (Vcb
->AllocationSupport
.NumberOfClusters
+ 2))) {
1901 if (Vcb
->NumberOfWindows
> 1) {
1904 // If we're being called upon to allocate clusters outside the
1905 // current window (which happens only via MoveFile), it's a problem.
1906 // We address this by changing the current window to be the one which
1907 // contains the alternate cluster hint. Note that if the user's
1908 // request would cross a window boundary, he doesn't really get what
1912 if (AbsoluteClusterHint
< Vcb
->CurrentWindow
->FirstCluster
||
1913 AbsoluteClusterHint
> Vcb
->CurrentWindow
->LastCluster
) {
1915 ULONG BucketNum
= FatWindowOfCluster( AbsoluteClusterHint
);
1917 NT_ASSERT( BucketNum
< Vcb
->NumberOfWindows
);
1920 // Drop our shared lock on the ChangeBitMapResource, and pick it up again
1921 // exclusive in preparation for making the window swap.
1924 FatUnlockFreeClusterBitMap(Vcb
);
1925 ExReleaseResourceLite(&Vcb
->ChangeBitMapResource
);
1926 ExAcquireResourceExclusiveLite(&Vcb
->ChangeBitMapResource
, TRUE
);
1927 FatLockFreeClusterBitMap(Vcb
);
1929 Window
= &Vcb
->Windows
[BucketNum
];
1932 // Again, test the current window against the one we want - some other
1933 // thread could have sneaked in behind our backs and kindly set it to the one
1934 // we need, when we dropped and reacquired the ChangeBitMapResource above.
1937 if (Window
!= Vcb
->CurrentWindow
) {
1941 Wait
= BooleanFlagOn(IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
1942 SetFlag(IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
1945 // Change to the new window (update Vcb->CurrentWindow) and scan it
1946 // to build up a freespace bitmap etc.
1949 FatExamineFatEntries( IrpContext
, Vcb
,
1960 ClearFlag(IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
1963 if (_SEH2_AbnormalTermination()) {
1966 // We will have raised as a result of failing to pick up the
1967 // chunk of the FAT for this window move. Release our resources
1968 // and return the cluster count to the volume.
1971 Vcb
->AllocationSupport
.NumberOfFreeClusters
+= ClusterCount
;
1973 FatUnlockFreeClusterBitMap( Vcb
);
1974 ExReleaseResourceLite(&Vcb
->ChangeBitMapResource
);
1981 // Make the hint cluster number relative to the base of the current window...
1983 // Currentwindow->Firstcluster is biased by +2 already, so we will lose the
1984 // bias already in AbsoluteClusterHint. Put it back....
1987 WindowRelativeHint
= AbsoluteClusterHint
- Vcb
->CurrentWindow
->FirstCluster
+ 2;
1992 // Only one 'window', ie fat16/12. No modification necessary.
1995 WindowRelativeHint
= AbsoluteClusterHint
;
2001 // Either no hint supplied, or it was out of range, so grab one from the Vcb
2003 // NOTE: Clusterhint in the Vcb is not guaranteed to be set (may be -1)
2006 WindowRelativeHint
= Vcb
->ClusterHint
;
2007 AbsoluteClusterHint
= 0;
2010 // Vcb hint may not have been initialized yet. Force to valid cluster.
2013 if (-1 == WindowRelativeHint
) {
2015 WindowRelativeHint
= 2;
2019 NT_ASSERT((WindowRelativeHint
>= 2) && (WindowRelativeHint
< Vcb
->FreeClusterBitMap
.SizeOfBitMap
+ 2));
2022 // Keep track of the window we're allocating from, so we can clean
2023 // up correctly if the current window changes after we unlock the
2027 Window
= Vcb
->CurrentWindow
;
//
// Fast path: try to satisfy the whole request with one contiguous run.
//
2030 // Try to find a run of free clusters large enough for us.
2033 StartingCluster
= FatFindFreeClusterRun( IrpContext
,
2036 WindowRelativeHint
);
2038 // If the above call was successful, we can just update the fat
2039 // and Mcb and exit. Otherwise we have to look for smaller free
2042 // This test is a bit funky. Note that the error return from
2043 // RtlFindClearBits is -1, and adding two to that is 1.
2046 if ((StartingCluster
!= 1) &&
2047 ((0 == AbsoluteClusterHint
) || (StartingCluster
== WindowRelativeHint
))
2051 PreviousClear
= RtlNumberOfClearBits( &Vcb
->FreeClusterBitMap
);
2055 // Take the clusters we found, and unlock the bit map.
2058 FatReserveClusters(IrpContext
, Vcb
, StartingCluster
, ClusterCount
);
2060 Window
->ClustersFree
-= ClusterCount
;
2062 StartingCluster
+= Window
->FirstCluster
;
2063 StartingCluster
-= 2;
2065 NT_ASSERT( PreviousClear
- ClusterCount
== Window
->ClustersFree
);
2067 FatUnlockFreeClusterBitMap( Vcb
);
2070 // Note that this call will never fail since there is always
2071 // room for one entry in an empty Mcb.
2074 FatAddMcbEntry( Vcb
, Mcb
,
2076 FatGetLboFromIndex( Vcb
, StartingCluster
),
2084 FatAllocateClusters(IrpContext
, Vcb
,
2090 DebugUnwind( FatAllocateDiskSpace
);
2093 // If the allocate clusters failed, remove the run from the Mcb,
2094 // unreserve the clusters, and reset the free cluster count.
2097 if (_SEH2_AbnormalTermination()) {
2099 FatRemoveMcbEntry( Vcb
, Mcb
, 0, *ByteCount
);
2101 FatLockFreeClusterBitMap( Vcb
);
2103 // Only clear bits if the bitmap window is the same.
2105 if (Window
== Vcb
->CurrentWindow
) {
2107 // Both values (startingcluster and window->firstcluster) are
2108 // already biased by 2, so will cancel, so we need to add in the 2 again.
2110 FatUnreserveClusters( IrpContext
, Vcb
,
2111 StartingCluster
- Window
->FirstCluster
+ 2,
2115 Window
->ClustersFree
+= ClusterCount
;
2116 Vcb
->AllocationSupport
.NumberOfFreeClusters
+= ClusterCount
;
2118 FatUnlockFreeClusterBitMap( Vcb
);
2121 ExReleaseResourceLite(&Vcb
->ChangeBitMapResource
);
//
// Slow path: no single run sufficed — gather multiple runs, window by
// window, chaining them together in the FAT as we go.
//
2127 // Note that Index is a zero-based window-relative number. When appropriate
2128 // it'll get converted into a true cluster number and put in Cluster, which
2129 // will be a volume relative true cluster number.
2134 ULONG CurrentVbo
= 0;
2135 ULONG PriorLastCluster
= 0;
2136 ULONG BytesFound
= 0;
2138 ULONG ClustersFound
= 0;
2139 ULONG ClustersRemaining
= 0;
2141 BOOLEAN LockedBitMap
= FALSE
;
2142 BOOLEAN SelectNextContigWindow
= FALSE
;
2145 // Drop our shared lock on the ChangeBitMapResource, and pick it up again
2146 // exclusive in preparation for making a window swap.
2149 FatUnlockFreeClusterBitMap(Vcb
);
2150 ExReleaseResourceLite(&Vcb
->ChangeBitMapResource
);
2151 ExAcquireResourceExclusiveLite(&Vcb
->ChangeBitMapResource
, TRUE
);
2152 FatLockFreeClusterBitMap(Vcb
);
2153 LockedBitMap
= TRUE
;
2157 if ( ExactMatchRequired
&& (1 == Vcb
->NumberOfWindows
)) {
2160 // Give up right now, there are no more windows to search! RtlFindClearBits
2161 // searches the whole bitmap, so we would have found any contiguous run
2165 try_leave( Result
= FALSE
);
2169 // While the request is still incomplete, look for the largest
2170 // run of free clusters, mark them taken, allocate the run in
2171 // the Mcb and Fat, and if this isn't the first time through
2172 // the loop link it to prior run on the fat. The Mcb will
2173 // coalesce automatically.
2176 ClustersRemaining
= ClusterCount
;
2178 PriorLastCluster
= 0;
2180 while (ClustersRemaining
!= 0) {
2183 // If we just entered the loop, the bit map is already locked
2186 if ( !LockedBitMap
) {
2188 FatLockFreeClusterBitMap( Vcb
);
2189 LockedBitMap
= TRUE
;
2193 // Find the largest run of free clusters. If the run is
2194 // bigger than we need, only use what we need. Note that
2195 // this will then be the last while() iteration.
2198 // 12/3/95: need to bias bitmap by 2 bits for the defrag
2199 // hooks and the below macro became impossible to do without in-line
2202 // ClustersFound = FatLongestFreeClusterRun( IrpContext, Vcb, &Index );
2206 if (!SelectNextContigWindow
) {
2208 if ( 0 != WindowRelativeHint
) {
2210 ULONG Desired
= Vcb
->FreeClusterBitMap
.SizeOfBitMap
- (WindowRelativeHint
- 2);
2213 // We will try to allocate contiguously. Try from the current hint to the
2214 // end of current window. Don't try for more than we actually need.
2217 if (Desired
> ClustersRemaining
) {
2219 Desired
= ClustersRemaining
;
2222 if (RtlAreBitsClear( &Vcb
->FreeClusterBitMap
,
2223 WindowRelativeHint
- 2,
2227 // Clusters from hint->...windowend are free. Take them.
2230 Index
= WindowRelativeHint
- 2;
2231 ClustersFound
= Desired
;
2233 if (FatIsFat32(Vcb
)) {
2236 // We're now up against the end of the current window, so indicate that we
2237 // want the next window in the sequence next time around. (If we're not up
2238 // against the end of the window, then we got what we needed and won't be
2239 // coming around again anyway).
2242 SelectNextContigWindow
= TRUE
;
2243 WindowRelativeHint
= 2;
2248 // FAT 12/16 - we've run up against the end of the volume. Clear the
2249 // hint, since we now have no idea where to look.
2252 WindowRelativeHint
= 0;
2255 PreviousClear
= RtlNumberOfClearBits( &Vcb
->FreeClusterBitMap
);
2260 if (ExactMatchRequired
) {
2263 // If our caller required an exact match, then we're hosed. Bail out now.
2266 try_leave( Result
= FALSE
);
2270 // Hint failed, drop back to pot luck
2273 WindowRelativeHint
= 0;
2277 if ((0 == WindowRelativeHint
) && (0 == ClustersFound
)) {
2279 if (ClustersRemaining
<= Vcb
->CurrentWindow
->ClustersFree
) {
2282 // The remaining allocation could be satisfied entirely from this
2283 // window. We will ask only for what we need, to try and avoid
2284 // unnecessarily fragmenting large runs of space by always using
2285 // (part of) the largest run we can find. This call will return the
2286 // first run large enough.
2289 Index
= RtlFindClearBits( &Vcb
->FreeClusterBitMap
, ClustersRemaining
, 0);
2293 ClustersFound
= ClustersRemaining
;
2297 if (0 == ClustersFound
) {
2300 // Still nothing, so just take the largest free run we can find.
2303 ClustersFound
= RtlFindLongestRunClear( &Vcb
->FreeClusterBitMap
, &Index
);
2307 PreviousClear
= RtlNumberOfClearBits( &Vcb
->FreeClusterBitMap
);
2309 if (ClustersFound
>= ClustersRemaining
) {
2311 ClustersFound
= ClustersRemaining
;
2316 // If we just ran up to the end of a window, set up a hint that
2317 // we'd like the next consecutive window after this one. (FAT32 only)
2320 if ( ((Index
+ ClustersFound
) == Vcb
->FreeClusterBitMap
.SizeOfBitMap
) &&
2324 SelectNextContigWindow
= TRUE
;
2325 WindowRelativeHint
= 2;
2331 if (ClustersFound
== 0) {
2333 ULONG FaveWindow
= 0;
2334 BOOLEAN SelectedWindow
;
2337 // If we found no free clusters on a single-window FAT,
2338 // there was a bad problem with the free cluster count.
2341 if (1 == Vcb
->NumberOfWindows
) {
2344 #pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
2346 FatBugCheck( 0, 5, 0 );
2350 // Switch to a new bucket. Possibly the next one if we're
2351 // currently on a roll (allocating contiguously)
2354 SelectedWindow
= FALSE
;
2356 if ( SelectNextContigWindow
) {
2360 NextWindow
= (((ULONG
)((PUCHAR
)Vcb
->CurrentWindow
- (PUCHAR
)Vcb
->Windows
)) / sizeof( FAT_WINDOW
)) + 1;
2362 if ((NextWindow
< Vcb
->NumberOfWindows
) &&
2363 ( Vcb
->Windows
[ NextWindow
].ClustersFree
> 0)
2366 FaveWindow
= NextWindow
;
2367 SelectedWindow
= TRUE
;
2371 if (ExactMatchRequired
) {
2374 // Some dope tried to allocate a run past the end of the volume...
2377 try_leave( Result
= FALSE
);
2381 // Give up on the contiguous allocation attempts
2384 WindowRelativeHint
= 0;
2387 SelectNextContigWindow
= FALSE
;
2390 if (!SelectedWindow
) {
2393 // Select a new window to begin allocating from
2396 FaveWindow
= FatSelectBestWindow( Vcb
);
2400 // By now we'd better have found a window with some free clusters
2403 if (0 == Vcb
->Windows
[ FaveWindow
].ClustersFree
) {
2406 #pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
2408 FatBugCheck( 0, 5, 1 );
2411 Wait
= BooleanFlagOn(IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
2412 SetFlag(IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
2414 FatExamineFatEntries( IrpContext
, Vcb
,
2418 &Vcb
->Windows
[FaveWindow
],
2423 ClearFlag(IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
2427 // Now we'll just go around the loop again, having switched windows,
2431 PreviousClear
= RtlNumberOfClearBits( &Vcb
->FreeClusterBitMap
);
2433 } // if (clustersfound == 0)
2437 // Take the clusters we found, convert our index to a cluster number
2438 // and unlock the bit map.
2441 Window
= Vcb
->CurrentWindow
;
2443 FatReserveClusters( IrpContext
, Vcb
, (Index
+ 2), ClustersFound
);
2445 Cluster
= Index
+ Window
->FirstCluster
;
2447 Window
->ClustersFree
-= ClustersFound
;
2448 NT_ASSERT( PreviousClear
- ClustersFound
== Window
->ClustersFree
);
2450 FatUnlockFreeClusterBitMap( Vcb
);
2451 LockedBitMap
= FALSE
;
2454 // Add the newly allocated run to the Mcb.
2457 BytesFound
= ClustersFound
<< LogOfBytesPerCluster
;
2459 FatAddMcbEntry( Vcb
, Mcb
,
2461 FatGetLboFromIndex( Vcb
, Cluster
),
2465 // Connect the last allocated run with this one, and allocate
2466 // this run on the Fat.
2469 if (PriorLastCluster
!= 0) {
2471 FatSetFatEntry( IrpContext
,
2474 (FAT_ENTRY
)Cluster
);
2481 FatAllocateClusters( IrpContext
, Vcb
, Cluster
, ClustersFound
);
2484 // Prepare for the next iteration.
2487 CurrentVbo
+= BytesFound
;
2488 ClustersRemaining
-= ClustersFound
;
2489 PriorLastCluster
= Cluster
+ ClustersFound
- 1;
2491 } // while (clustersremaining)
//
// Finally/unwind: on failure, return reserved clusters to the bitmap
// counts and deallocate everything already recorded in the Mcb.
//
2495 DebugUnwind( FatAllocateDiskSpace
);
2497 ExReleaseResourceLite(&Vcb
->ChangeBitMapResource
);
2500 // Is there any unwinding to do?
2503 if ( _SEH2_AbnormalTermination() || (FALSE
== Result
)) {
2506 // Flag to the caller that they're getting nothing
2512 // There are three places we could have taken this exception:
2513 // when switching the window (FatExamineFatEntries), adding
2514 // a found run to the Mcb (FatAddMcbEntry), or when writing
2515 // the changes to the FAT (FatSetFatEntry). In the first case
2516 // we don't have anything to unwind before deallocation, and
2517 // can detect this by seeing if we have the ClusterBitmap
2520 if (!LockedBitMap
) {
2522 FatLockFreeClusterBitMap( Vcb
);
2525 // In these cases, we have the possibility that the FAT
2526 // window is still in place and we need to clear the bits.
2527 // If the Mcb entry isn't there (we raised trying to add
2528 // it), the effect of trying to remove it is a noop.
2531 if (Window
== Vcb
->CurrentWindow
) {
2534 // Cluster reservation works on cluster 2 based window-relative
2535 // numbers, so we must convert. The subtraction will lose the
2536 // cluster 2 base, so bias the result.
2539 FatUnreserveClusters( IrpContext
, Vcb
,
2540 (Cluster
- Window
->FirstCluster
) + 2,
2545 // Note that FatDeallocateDiskSpace will take care of adjusting
2546 // to account for the entries in the Mcb. All we have to account
2547 // for is the last run that didn't make it.
2550 Window
->ClustersFree
+= ClustersFound
;
2551 Vcb
->AllocationSupport
.NumberOfFreeClusters
+= ClustersFound
;
2553 FatUnlockFreeClusterBitMap( Vcb
);
2555 FatRemoveMcbEntry( Vcb
, Mcb
, CurrentVbo
, BytesFound
);
2560 // Just drop the mutex now - we didn't manage to do anything
2561 // that needs to be backed out.
2564 FatUnlockFreeClusterBitMap( Vcb
);
2570 // Now we have tidied up, we are ready to just send the Mcb
2571 // off to deallocate disk space
2574 FatDeallocateDiskSpace( IrpContext
, Vcb
, Mcb
, FALSE
);
2579 // Now finally (really), remove all the entries from the mcb
2582 FatRemoveMcbEntry( Vcb
, Mcb
, 0, 0xFFFFFFFF );
2586 DebugTrace(-1, Dbg
, "FatAllocateDiskSpace -> (VOID)\n", 0);
2588 } _SEH2_END
; // finally
2597 // Limit our zeroing writes to 1 MB.
//
// Cap on the MDL built by FatBuildZeroMdl in the zero-on-deallocate path
// of FatDeallocateDiskSpace: runs larger than this are zeroed with
// multiple bounded writes instead of one huge mapped buffer.
//
2600 #define MAX_ZERO_MDL_SIZE (1*1024*1024)
2602 _Requires_lock_held_(_Global_critical_region_
)
2604 FatDeallocateDiskSpace (
2605 IN PIRP_CONTEXT IrpContext
,
2608 IN BOOLEAN ZeroOnDeallocate
2613 Routine Description:
2615 This procedure deallocates the disk space denoted by an input
2616 mcb. Note that the input MCB does not need to necessarily describe
2617 a chain that ends with a FAT_CLUSTER_LAST entry.
2619 Pictorially what is done is the following
2621 Fat |--a--|--b--|--c--|
2622 Mcb |--a--|--b--|--c--|
2626 Fat |--0--|--0--|--0--|
2627 Mcb |--a--|--b--|--c--|
2631 Vcb - Supplies the VCB being modified
2633 Mcb - Supplies the MCB describing the disk space to deallocate. Note
2634 that Mcb is unchanged by this procedure.
2649 ULONG ClusterCount
= 0;
2650 ULONG ClusterIndex
= 0;
2653 UCHAR LogOfBytesPerCluster
;
2657 NTSTATUS ZeroingStatus
= STATUS_SUCCESS
;
2661 DebugTrace(+1, Dbg
, "FatDeallocateDiskSpace\n", 0);
2662 DebugTrace( 0, Dbg
, " Vcb = %p\n", Vcb
);
2663 DebugTrace( 0, Dbg
, " Mcb = %p\n", Mcb
);
2665 LogOfBytesPerCluster
= Vcb
->AllocationSupport
.LogOfBytesPerCluster
;
2667 RunsInMcb
= FsRtlNumberOfRunsInLargeMcb( Mcb
);
2669 if ( RunsInMcb
== 0 ) {
2671 DebugTrace(-1, Dbg
, "FatDeallocateDiskSpace -> (VOID)\n", 0);
2676 // If we are supposed to zero out the allocation before freeing it, do so.
2679 if (ZeroOnDeallocate
) {
2685 IO_STATUS_BLOCK Iosb
;
2686 PVOID Buffer
= NULL
;
2688 ULONG ByteCountToZero
;
2689 ULONG MdlSizeMapped
;
2692 // Issue the writes down for each run in the Mcb
2695 KeInitializeEvent( &IoEvent
,
2699 for ( McbIndex
= 0; McbIndex
< RunsInMcb
; McbIndex
++ ) {
2701 FatGetNextMcbEntry( Vcb
, Mcb
, McbIndex
, &Vbo
, &Lbo
, &ByteCount
);
2704 // Assert that Fat files have no holes.
2707 NT_ASSERT( Lbo
!= 0 );
2710 // Setup our MDL for the this run.
2713 if (ByteCount
> MAX_ZERO_MDL_SIZE
) {
2714 Mdl
= FatBuildZeroMdl( IrpContext
, MAX_ZERO_MDL_SIZE
);
2716 Mdl
= FatBuildZeroMdl( IrpContext
, ByteCount
);
2720 ZeroingStatus
= STATUS_INSUFFICIENT_RESOURCES
;
2731 Buffer
= MmGetSystemAddressForMdlSafe(Mdl
, HighPagePriority
|MdlMappingNoExecute
);
2733 Buffer
= MmGetSystemAddressForMdlSafe(Mdl
, HighPagePriority
);
2737 ZeroingStatus
= STATUS_INSUFFICIENT_RESOURCES
;
2742 // We might not have not been able to get an MDL big enough to map the whole
2743 // run. In this case, break up the write.
2746 MdlSizeMapped
= min( ByteCount
, Mdl
->ByteCount
);
2747 ByteCountToZero
= ByteCount
;
2750 // Loop until there are no bytes left to write
2753 while (ByteCountToZero
!= 0) {
2756 // Write zeros to each run.
2759 KeClearEvent( &IoEvent
);
2761 IoIrp
= IoBuildSynchronousFsdRequest( IRP_MJ_WRITE
,
2762 Vcb
->TargetDeviceObject
,
2765 (PLARGE_INTEGER
)&Lbo
,
2769 if (IoIrp
== NULL
) {
2771 ZeroingStatus
= STATUS_INSUFFICIENT_RESOURCES
;
2776 // Set a flag indicating that we want to write through any
2777 // cache on the controller. This eliminates the need for
2778 // an explicit flush-device after the write.
2781 SetFlag( IoGetNextIrpStackLocation(IoIrp
)->Flags
, SL_WRITE_THROUGH
);
2783 ZeroingStatus
= IoCallDriver( Vcb
->TargetDeviceObject
, IoIrp
);
2785 if (ZeroingStatus
== STATUS_PENDING
) {
2787 (VOID
)KeWaitForSingleObject( &IoEvent
,
2791 (PLARGE_INTEGER
)NULL
);
2793 ZeroingStatus
= Iosb
.Status
;
2796 if (!NT_SUCCESS( ZeroingStatus
)) {
2802 // Increment the starting offset where we will zero.
2805 Lbo
+= MdlSizeMapped
;
2808 // Decrement ByteCount
2811 ByteCountToZero
-= MdlSizeMapped
;
2813 if (ByteCountToZero
< MdlSizeMapped
) {
2814 MdlSizeMapped
= ByteCountToZero
;
2825 if (!FlagOn( Mdl
->MdlFlags
, MDL_SOURCE_IS_NONPAGED_POOL
) &&
2826 FlagOn( Mdl
->MdlFlags
, MDL_MAPPED_TO_SYSTEM_VA
)) {
2828 MmUnmapLockedPages( Mdl
->MappedSystemVa
, Mdl
);
2839 } _SEH2_EXCEPT(FatExceptionFilter( NULL
, _SEH2_GetExceptionInformation() )) {
2842 // If we failed to zero for some reason, still go ahead and deallocate
2843 // the clusters. Otherwise we'll leak space from the volume.
2846 ZeroingStatus
= _SEH2_GetExceptionCode();
2852 NT_ASSERT( NT_SUCCESS(ZeroingStatus
) );
2857 // Run though the Mcb, freeing all the runs in the fat.
2859 // We do this in two steps (first update the fat, then the bitmap
2860 // (which can't fail)) to prevent other people from taking clusters
2861 // that we need to re-allocate in the event of unwind.
2864 ExAcquireResourceSharedLite(&Vcb
->ChangeBitMapResource
, TRUE
);
2866 RunsInMcb
= FsRtlNumberOfRunsInLargeMcb( Mcb
);
2868 for ( McbIndex
= 0; McbIndex
< RunsInMcb
; McbIndex
++ ) {
2870 FatGetNextMcbEntry( Vcb
, Mcb
, McbIndex
, &Vbo
, &Lbo
, &ByteCount
);
2873 // Assert that Fat files have no holes.
2876 NT_ASSERT( Lbo
!= 0 );
2879 // Write FAT_CLUSTER_AVAILABLE to each cluster in the run.
2882 if (ByteCount
== 0xFFFFFFFF) {
2885 // Special case the computation of ClusterCout
2886 // when file is of max size (4GiB - 1).
2889 ClusterCount
= (1 << (32 - LogOfBytesPerCluster
));
2893 ClusterCount
= ByteCount
>> LogOfBytesPerCluster
;
2896 ClusterIndex
= FatGetIndexFromLbo( Vcb
, Lbo
);
2898 FatFreeClusters( IrpContext
, Vcb
, ClusterIndex
, ClusterCount
);
2902 // From now on, nothing can go wrong .... (as in raise)
2905 FatLockFreeClusterBitMap( Vcb
);
2907 for ( McbIndex
= 0; McbIndex
< RunsInMcb
; McbIndex
++ ) {
2910 ULONG MyStart
, MyLength
, count
;
2913 ULONG PreviousClear
= 0;
2918 FatGetNextMcbEntry( Vcb
, Mcb
, McbIndex
, &Vbo
, &Lbo
, &ByteCount
);
2921 // Mark the bits clear in the FreeClusterBitMap.
2924 if (ByteCount
== 0xFFFFFFFF) {
2927 // Special case the computation of ClusterCout
2928 // when file is of max size (2^32 - 1).
2931 ClusterCount
= (1 << (32 - LogOfBytesPerCluster
));
2935 ClusterCount
= ByteCount
>> LogOfBytesPerCluster
;
2938 ClusterIndex
= FatGetIndexFromLbo( Vcb
, Lbo
);
2940 Window
= Vcb
->CurrentWindow
;
2943 // If we've divided the bitmap, elide bitmap manipulation for
2944 // runs that are outside the current bucket.
2947 ClusterEnd
= ClusterIndex
+ ClusterCount
- 1;
2949 if (!(ClusterIndex
> Window
->LastCluster
||
2950 ClusterEnd
< Window
->FirstCluster
)) {
2953 // The run being freed overlaps the current bucket, so we'll
2954 // have to clear some bits.
2957 if (ClusterIndex
< Window
->FirstCluster
&&
2958 ClusterEnd
> Window
->LastCluster
) {
2960 MyStart
= Window
->FirstCluster
;
2961 MyLength
= Window
->LastCluster
- Window
->FirstCluster
+ 1;
2963 } else if (ClusterIndex
< Window
->FirstCluster
) {
2965 MyStart
= Window
->FirstCluster
;
2966 MyLength
= ClusterEnd
- Window
->FirstCluster
+ 1;
2971 // The range being freed starts in the bucket, and may possibly
2972 // extend beyond the bucket.
2975 MyStart
= ClusterIndex
;
2977 if (ClusterEnd
<= Window
->LastCluster
) {
2979 MyLength
= ClusterCount
;
2983 MyLength
= Window
->LastCluster
- ClusterIndex
+ 1;
2987 if (MyLength
== 0) {
2995 #pragma prefast( suppress:28931, "this is DBG build only" )
2997 PreviousClear
= RtlNumberOfClearBits( &Vcb
->FreeClusterBitMap
);
3002 // Verify that the Bits are all really set.
3005 NT_ASSERT( MyStart
+ MyLength
- Window
->FirstCluster
<= Vcb
->FreeClusterBitMap
.SizeOfBitMap
);
3007 for (i
= 0; i
< MyLength
; i
++) {
3009 NT_ASSERT( RtlCheckBit(&Vcb
->FreeClusterBitMap
,
3010 MyStart
- Window
->FirstCluster
+ i
) == 1 );
3014 FatUnreserveClusters( IrpContext
, Vcb
,
3015 MyStart
- Window
->FirstCluster
+ 2,
3020 // Adjust the ClustersFree count for each bitmap window, even the ones
3021 // that are not the current window.
3024 if (FatIsFat32(Vcb
)) {
3026 Window
= &Vcb
->Windows
[FatWindowOfCluster( ClusterIndex
)];
3030 Window
= &Vcb
->Windows
[0];
3033 MyStart
= ClusterIndex
;
3035 for (MyLength
= ClusterCount
; MyLength
> 0; MyLength
-= count
) {
3037 count
= FatMin(Window
->LastCluster
- MyStart
+ 1, MyLength
);
3038 Window
->ClustersFree
+= count
;
3041 // If this was not the last window this allocation spanned,
3042 // advance to the next.
3045 if (MyLength
!= count
) {
3048 MyStart
= Window
->FirstCluster
;
3053 // Deallocation is now complete. Adjust the free cluster count.
3056 Vcb
->AllocationSupport
.NumberOfFreeClusters
+= ClusterCount
;
3060 if (Vcb
->CurrentWindow
->ClustersFree
!=
3061 RtlNumberOfClearBits(&Vcb
->FreeClusterBitMap
)) {
3063 DbgPrint("%x vs %x\n", Vcb
->CurrentWindow
->ClustersFree
,
3064 RtlNumberOfClearBits(&Vcb
->FreeClusterBitMap
));
3066 DbgPrint("%x for %x\n", ClusterIndex
, ClusterCount
);
3070 FatUnlockFreeClusterBitMap( Vcb
);
3075 DebugUnwind( FatDeallocateDiskSpace
);
3078 // Is there any unwinding to do?
3081 ExReleaseResourceLite(&Vcb
->ChangeBitMapResource
);
3083 if ( _SEH2_AbnormalTermination() ) {
3091 ULONG PriorLastIndex
;
3094 // For each entry we already deallocated, reallocate it,
3095 // chaining together as necessary. Note that we continue
3096 // up to and including the last "for" iteration even though
3097 // the SetFatRun could not have been successful. This
3098 // allows us a convenient way to re-link the final successful
3101 // It is possible that the reason we got here will prevent us
3102 // from succeeding in this operation.
3107 for (Index
= 0; Index
<= McbIndex
; Index
++) {
3109 FatGetNextMcbEntry(Vcb
, Mcb
, Index
, &LocalVbo
, &LocalLbo
, &ByteCount
);
3111 if (ByteCount
== 0xFFFFFFFF) {
3114 // Special case the computation of ClusterCount
3115 // when file is of max size (2^32 - 1).
3118 Clusters
= (1 << (32 - LogOfBytesPerCluster
));
3122 Clusters
= ByteCount
>> LogOfBytesPerCluster
;
3125 FatIndex
= FatGetIndexFromLbo( Vcb
, LocalLbo
);
3128 // We must always restore the prior iteration's last
3129 // entry, pointing it to the first cluster of this run.
3132 if (PriorLastIndex
!= 0) {
3134 FatSetFatEntry( IrpContext
,
3137 (FAT_ENTRY
)FatIndex
);
3141 // If this is not the last entry (the one that failed)
3142 // then reallocate the disk space on the fat.
3145 if ( Index
< McbIndex
) {
3147 FatAllocateClusters(IrpContext
, Vcb
, FatIndex
, Clusters
);
3149 PriorLastIndex
= FatIndex
+ Clusters
- 1;
3154 DebugTrace(-1, Dbg
, "FatDeallocateDiskSpace -> (VOID)\n", 0);
3161 _Requires_lock_held_(_Global_critical_region_
)
3163 FatSplitAllocation (
3164 IN PIRP_CONTEXT IrpContext
,
3166 IN OUT PLARGE_MCB Mcb
,
3168 OUT PLARGE_MCB RemainingMcb
3173 Routine Description:
3175 This procedure takes a single mcb and splits its allocation into
3176 two separate allocation units. The separation must only be done
3177 on cluster boundaries, otherwise we bugcheck.
3179 On the disk this actually works by inserting a FAT_CLUSTER_LAST into
3180 the last index of the first part being split out.
3182 Pictorially what is done is the following (where ! denotes the end of
3183 the fat chain (i.e., FAT_CLUSTER_LAST)):
3186 Mcb |--a--|--b--|--c--|--d--|--e--|--f--|
3189 SplitAtVbo ---------------------+
3191 RemainingMcb (empty)
3195 Mcb |--a--|--b--|--c--!
3198 RemainingMcb |--d--|--e--|--f--|
3202 Vcb - Supplies the VCB being modified
3204 Mcb - Supplies the MCB describing the allocation being split into
3205 two parts. Upon return this Mcb now contains the first chain.
3207 SplitAtVbo - Supplies the VBO of the first byte for the second chain
3210 RemainingMcb - Receives the MCB describing the second chain of allocated
3211 disk space. The caller passes in an initialized Mcb that
3212 is filled in by this procedure STARTING AT VBO 0.
3216 VOID - TRUE if the operation completed and FALSE if it had to
3217 block but could not.
3231 ULONG BytesPerCluster
;
3236 DebugTrace(+1, Dbg
, "FatSplitAllocation\n", 0);
3237 DebugTrace( 0, Dbg
, " Vcb = %p\n", Vcb
);
3238 DebugTrace( 0, Dbg
, " Mcb = %p\n", Mcb
);
3239 DebugTrace( 0, Dbg
, " SplitAtVbo = %8lx\n", SplitAtVbo
);
3240 DebugTrace( 0, Dbg
, " RemainingMcb = %p\n", RemainingMcb
);
3243 BytesPerCluster
= 1 << Vcb
->AllocationSupport
.LogOfBytesPerCluster
;
3247 // Assert that the split point is cluster alligned
3250 NT_ASSERT( (SplitAtVbo
& (BytesPerCluster
- 1)) == 0 );
3253 // We should never be handed an empty source MCB and asked to split
3254 // at a non zero point.
3257 NT_ASSERT( !((0 != SplitAtVbo
) && (0 == FsRtlNumberOfRunsInLargeMcb( Mcb
))));
3260 // Assert we were given an empty target Mcb.
3264 // This assert is commented out to avoid hitting in the Ea error
3265 // path. In that case we will be using the same Mcb's to split the
3266 // allocation that we used to merge them. The target Mcb will contain
3267 // the runs that the split will attempt to insert.
3270 // NT_ASSERT( FsRtlNumberOfRunsInMcb( RemainingMcb ) == 0 );
3276 // Move the runs after SplitAtVbo from the souce to the target
3279 SourceVbo
= SplitAtVbo
;
3282 while (FatLookupMcbEntry(Vcb
, Mcb
, SourceVbo
, &Lbo
, &ByteCount
, NULL
)) {
3284 FatAddMcbEntry( Vcb
, RemainingMcb
, TargetVbo
, Lbo
, ByteCount
);
3286 FatRemoveMcbEntry( Vcb
, Mcb
, SourceVbo
, ByteCount
);
3288 TargetVbo
+= ByteCount
;
3289 SourceVbo
+= ByteCount
;
3292 // If SourceVbo overflows, we were actually snipping off the end
3293 // of the maximal file ... and are now done.
3296 if (SourceVbo
== 0) {
3303 // Mark the last pre-split cluster as a FAT_LAST_CLUSTER
3306 if ( SplitAtVbo
!= 0 ) {
3308 FatLookupLastMcbEntry( Vcb
, Mcb
, &DontCare
, &Lbo
, NULL
);
3310 FatSetFatEntry( IrpContext
,
3312 FatGetIndexFromLbo( Vcb
, Lbo
),
3318 DebugUnwind( FatSplitAllocation
);
3321 // If we got an exception, we must glue back together the Mcbs
3324 if ( _SEH2_AbnormalTermination() ) {
3326 TargetVbo
= SplitAtVbo
;
3329 while (FatLookupMcbEntry(Vcb
, RemainingMcb
, SourceVbo
, &Lbo
, &ByteCount
, NULL
)) {
3331 FatAddMcbEntry( Vcb
, Mcb
, TargetVbo
, Lbo
, ByteCount
);
3333 FatRemoveMcbEntry( Vcb
, RemainingMcb
, SourceVbo
, ByteCount
);
3335 TargetVbo
+= ByteCount
;
3336 SourceVbo
+= ByteCount
;
3340 DebugTrace(-1, Dbg
, "FatSplitAllocation -> (VOID)\n", 0);
3347 _Requires_lock_held_(_Global_critical_region_
)
3349 FatMergeAllocation (
3350 IN PIRP_CONTEXT IrpContext
,
3352 IN OUT PLARGE_MCB Mcb
,
3353 IN PLARGE_MCB SecondMcb
3358 Routine Description:
3360 This routine takes two separate allocations described by two MCBs and
3361 joins them together into one allocation.
3363 Pictorially what is done is the following (where ! denotes the end of
3364 the fat chain (i.e., FAT_CLUSTER_LAST)):
3367 Mcb |--a--|--b--|--c--!
3369 SecondMcb |--d--|--e--|--f--|
3373 Mcb |--a--|--b--|--c--|--d--|--e--|--f--|
3375 SecondMcb |--d--|--e--|--f--|
3380 Vcb - Supplies the VCB being modified
3382 Mcb - Supplies the MCB of the first allocation that is being modified.
3383 Upon return this Mcb will also describe the newly enlarged
3386 SecondMcb - Supplies the ZERO VBO BASED MCB of the second allocation
3387 that is being appended to the first allocation. This
3388 procedure leaves SecondMcb unchanged.
3392 VOID - TRUE if the operation completed and FALSE if it had to
3393 block but could not.
3410 DebugTrace(+1, Dbg
, "FatMergeAllocation\n", 0);
3411 DebugTrace( 0, Dbg
, " Vcb = %p\n", Vcb
);
3412 DebugTrace( 0, Dbg
, " Mcb = %p\n", Mcb
);
3413 DebugTrace( 0, Dbg
, " SecondMcb = %p\n", SecondMcb
);
3418 // Append the runs from SecondMcb to Mcb
3421 (void)FatLookupLastMcbEntry( Vcb
, Mcb
, &SpliceVbo
, &SpliceLbo
, NULL
);
3424 TargetVbo
= SpliceVbo
+ 1;
3426 while (FatLookupMcbEntry(Vcb
, SecondMcb
, SourceVbo
, &Lbo
, &ByteCount
, NULL
)) {
3428 FatAddMcbEntry( Vcb
, Mcb
, TargetVbo
, Lbo
, ByteCount
);
3430 SourceVbo
+= ByteCount
;
3431 TargetVbo
+= ByteCount
;
3435 // Link the last pre-merge cluster to the first cluster of SecondMcb
3438 FatLookupMcbEntry( Vcb
, SecondMcb
, 0, &Lbo
, (PULONG
)NULL
, NULL
);
3440 FatSetFatEntry( IrpContext
,
3442 FatGetIndexFromLbo( Vcb
, SpliceLbo
),
3443 (FAT_ENTRY
)FatGetIndexFromLbo( Vcb
, Lbo
) );
3447 DebugUnwind( FatMergeAllocation
);
3450 // If we got an exception, we must remove the runs added to Mcb
3453 if ( _SEH2_AbnormalTermination() ) {
3457 if ((CutLength
= TargetVbo
- (SpliceVbo
+ 1)) != 0) {
3459 FatRemoveMcbEntry( Vcb
, Mcb
, SpliceVbo
+ 1, CutLength
);
3463 DebugTrace(-1, Dbg
, "FatMergeAllocation -> (VOID)\n", 0);
3471 // Internal support routine
3475 FatInterpretClusterType (
3482 Routine Description:
3484 This procedure tells the caller how to interpret the input fat table
3485 entry. It will indicate if the fat cluster is available, resereved,
3486 bad, the last one, or the another fat index. This procedure can deal
3487 with both 12 and 16 bit fat.
3491 Vcb - Supplies the Vcb to examine, yields 12/16 bit info
3493 Entry - Supplies the fat entry to examine
3497 CLUSTER_TYPE - Is the type of the input Fat entry
3502 DebugTrace(+1, Dbg
, "InterpretClusterType\n", 0);
3503 DebugTrace( 0, Dbg
, " Vcb = %p\n", Vcb
);
3504 DebugTrace( 0, Dbg
, " Entry = %8lx\n", Entry
);
3508 switch(Vcb
->AllocationSupport
.FatIndexBitSize
) {
3510 Entry
&= FAT32_ENTRY_MASK
;
3514 NT_ASSERT( Entry
<= 0xfff );
3515 if (Entry
>= 0x0ff0) {
3516 Entry
|= 0x0FFFF000;
3522 NT_ASSERT( Entry
<= 0xffff );
3523 if (Entry
>= 0x0fff0) {
3524 Entry
|= 0x0FFF0000;
3529 if (Entry
== FAT_CLUSTER_AVAILABLE
) {
3531 DebugTrace(-1, Dbg
, "FatInterpretClusterType -> FatClusterAvailable\n", 0);
3533 return FatClusterAvailable
;
3535 } else if (Entry
< FAT_CLUSTER_RESERVED
) {
3537 DebugTrace(-1, Dbg
, "FatInterpretClusterType -> FatClusterNext\n", 0);
3539 return FatClusterNext
;
3541 } else if (Entry
< FAT_CLUSTER_BAD
) {
3543 DebugTrace(-1, Dbg
, "FatInterpretClusterType -> FatClusterReserved\n", 0);
3545 return FatClusterReserved
;
3547 } else if (Entry
== FAT_CLUSTER_BAD
) {
3549 DebugTrace(-1, Dbg
, "FatInterpretClusterType -> FatClusterBad\n", 0);
3551 return FatClusterBad
;
3555 DebugTrace(-1, Dbg
, "FatInterpretClusterType -> FatClusterLast\n", 0);
3557 return FatClusterLast
;
3563 // Internal support routine
3568 IN PIRP_CONTEXT IrpContext
,
3571 IN OUT PULONG FatEntry
,
3572 IN OUT PFAT_ENUMERATION_CONTEXT Context
3577 Routine Description:
3579 This routine takes an index into the fat and gives back the value
3580 in the Fat at this index. At any given time, for a 16 bit fat, this
3581 routine allows only one page per volume of the fat to be pinned in
3582 memory. For a 12 bit bit fat, the entire fat (max 6k) is pinned. This
3583 extra layer of caching makes the vast majority of requests very
3584 fast. The context for this caching stored in a structure in the Vcb.
3588 Vcb - Supplies the Vcb to examine, yields 12/16 bit info,
3589 fat access context, etc.
3591 FatIndex - Supplies the fat index to examine.
3593 FatEntry - Receives the fat entry pointed to by FatIndex. Note that
3594 it must point to non-paged pool.
3596 Context - This structure keeps track of a page of pinned fat between calls.
3603 DebugTrace(+1, Dbg
, "FatLookupFatEntry\n", 0);
3604 DebugTrace( 0, Dbg
, " Vcb = %p\n", Vcb
);
3605 DebugTrace( 0, Dbg
, " FatIndex = %4x\n", FatIndex
);
3606 DebugTrace( 0, Dbg
, " FatEntry = %8lx\n", FatEntry
);
3609 // Make sure they gave us a valid fat index.
3612 FatVerifyIndexIsValid(IrpContext
, Vcb
, FatIndex
);
3615 // Case on 12 or 16 bit fats.
3617 // In the 12 bit case (mostly floppies) we always have the whole fat
3618 // (max 6k bytes) pinned during allocation operations. This is possibly
3619 // a wee bit slower, but saves headaches over fat entries with 8 bits
3620 // on one page, and 4 bits on the next.
3622 // The 16 bit case always keeps the last used page pinned until all
3623 // operations are done and it is unpinned.
3627 // DEAL WITH 12 BIT CASE
3630 if (Vcb
->AllocationSupport
.FatIndexBitSize
== 12) {
3633 // Check to see if the fat is already pinned, otherwise pin it.
3636 if (Context
->Bcb
== NULL
) {
3638 FatReadVolumeFile( IrpContext
,
3640 FatReservedBytes( &Vcb
->Bpb
),
3641 FatBytesPerFat( &Vcb
->Bpb
),
3643 &Context
->PinnedPage
);
3647 // Load the return value.
3651 FatLookup12BitEntry( Context
->PinnedPage
, FatIndex
, FatEntry
);
3653 } else if (Vcb
->AllocationSupport
.FatIndexBitSize
== 32) {
3656 // DEAL WITH 32 BIT CASE
3659 ULONG PageEntryOffset
;
3660 ULONG OffsetIntoVolumeFile
;
3663 // Initialize two local variables that help us.
3665 OffsetIntoVolumeFile
= FatReservedBytes(&Vcb
->Bpb
) + FatIndex
* sizeof(FAT_ENTRY
);
3666 PageEntryOffset
= (OffsetIntoVolumeFile
% PAGE_SIZE
) / sizeof(FAT_ENTRY
);
3669 // Check to see if we need to read in a new page of fat
3672 if ((Context
->Bcb
== NULL
) ||
3673 (OffsetIntoVolumeFile
/ PAGE_SIZE
!= Context
->VboOfPinnedPage
/ PAGE_SIZE
)) {
3676 // The entry wasn't in the pinned page, so must we unpin the current
3677 // page (if any) and read in a new page.
3680 FatUnpinBcb( IrpContext
, Context
->Bcb
);
3682 FatReadVolumeFile( IrpContext
,
3684 OffsetIntoVolumeFile
& ~(PAGE_SIZE
- 1),
3687 &Context
->PinnedPage
);
3689 Context
->VboOfPinnedPage
= OffsetIntoVolumeFile
& ~(PAGE_SIZE
- 1);
3693 // Grab the fat entry from the pinned page, and return
3696 *FatEntry
= ((PULONG
)(Context
->PinnedPage
))[PageEntryOffset
] & FAT32_ENTRY_MASK
;
3701 // DEAL WITH 16 BIT CASE
3704 ULONG PageEntryOffset
;
3705 ULONG OffsetIntoVolumeFile
;
3708 // Initialize two local variables that help us.
3711 OffsetIntoVolumeFile
= FatReservedBytes(&Vcb
->Bpb
) + FatIndex
* sizeof(USHORT
);
3712 PageEntryOffset
= (OffsetIntoVolumeFile
% PAGE_SIZE
) / sizeof(USHORT
);
3715 // Check to see if we need to read in a new page of fat
3718 if ((Context
->Bcb
== NULL
) ||
3719 (OffsetIntoVolumeFile
/ PAGE_SIZE
!= Context
->VboOfPinnedPage
/ PAGE_SIZE
)) {
3722 // The entry wasn't in the pinned page, so must we unpin the current
3723 // page (if any) and read in a new page.
3726 FatUnpinBcb( IrpContext
, Context
->Bcb
);
3728 FatReadVolumeFile( IrpContext
,
3730 OffsetIntoVolumeFile
& ~(PAGE_SIZE
- 1),
3733 &Context
->PinnedPage
);
3735 Context
->VboOfPinnedPage
= OffsetIntoVolumeFile
& ~(PAGE_SIZE
- 1);
3739 // Grab the fat entry from the pinned page, and return
3742 *FatEntry
= ((PUSHORT
)(Context
->PinnedPage
))[PageEntryOffset
];
3745 DebugTrace(-1, Dbg
, "FatLookupFatEntry -> (VOID)\n", 0);
3750 _Requires_lock_held_(_Global_critical_region_
)
3753 IN PIRP_CONTEXT IrpContext
,
3756 IN FAT_ENTRY FatEntry
3761 Routine Description:
3763 This routine takes an index into the fat and puts a value in the Fat
3764 at this index. The routine special cases 12, 16 and 32 bit fats. In
3765 all cases we go to the cache manager for a piece of the fat.
3767 We have a special form of this call for setting the DOS-style dirty bit.
3768 Unlike the dirty bit in the boot sector, we do not go to special effort
3769 to make sure that this hits the disk synchronously - if the system goes
3770 down in the window between the dirty bit being set in the boot sector
3771 and the FAT index zero dirty bit being lazy written, then life is tough.
3773 The only possible scenario is that Win9x may see what it thinks is a clean
3774 volume that really isn't (hopefully Memphis will pay attention to our dirty
3775 bit as well). The dirty bit will get out quickly, and if heavy activity is
3776 occurring, then the dirty bit should actually be there virtually all of the
3777 time since the act of cleaning the volume is the "rare" occurance.
3779 There are synchronization concerns that would crop up if we tried to make
3780 this synchronous. This thread may already own the Bcb shared for the first
3781 sector of the FAT (so we can't get it exclusive for a writethrough). This
3782 would require some more serious replumbing to work around than I want to
3783 consider at this time.
3785 We can and do, however, synchronously set the bit clean.
3787 At this point the reader should understand why the NT dirty bit is where it is.
3791 Vcb - Supplies the Vcb to examine, yields 12/16/32 bit info, etc.
3793 FatIndex - Supplies the destination fat index.
3795 FatEntry - Supplies the source fat entry.
3803 ULONG OffsetIntoVolumeFile
;
3804 ULONG WasWait
= TRUE
;
3805 BOOLEAN RegularOperation
= TRUE
;
3806 BOOLEAN CleaningOperation
= FALSE
;
3807 BOOLEAN ReleaseMutex
= FALSE
;
3811 DebugTrace(+1, Dbg
, "FatSetFatEntry\n", 0);
3812 DebugTrace( 0, Dbg
, " Vcb = %p\n", Vcb
);
3813 DebugTrace( 0, Dbg
, " FatIndex = %4x\n", FatIndex
);
3814 DebugTrace( 0, Dbg
, " FatEntry = %4x\n", FatEntry
);
3817 // Make sure they gave us a valid fat index if this isn't the special
3818 // clean-bit modifying call.
3821 if (FatIndex
== FAT_DIRTY_BIT_INDEX
) {
3824 // We are setting the clean bit state. Of course, we could
3825 // have corruption that would cause us to try to fiddle the
3826 // reserved index - we guard against this by having the
3827 // special entry values use the reserved high 4 bits that
3828 // we know that we'll never try to set.
3832 // We don't want to repin the FAT pages involved here. Just
3833 // let the lazy writer hit them when it can.
3836 RegularOperation
= FALSE
;
3839 case FAT_CLEAN_VOLUME
:
3840 FatEntry
= (FAT_ENTRY
)FAT_CLEAN_ENTRY
;
3841 CleaningOperation
= TRUE
;
3844 case FAT_DIRTY_VOLUME
:
3845 switch (Vcb
->AllocationSupport
.FatIndexBitSize
) {
3847 FatEntry
= FAT12_DIRTY_ENTRY
;
3851 FatEntry
= FAT32_DIRTY_ENTRY
;
3855 FatEntry
= FAT16_DIRTY_ENTRY
;
3861 FatRaiseStatus( IrpContext
, STATUS_FILE_CORRUPT_ERROR
);
3866 // Disable dirtying semantics for the duration of this operation. Force this
3867 // operation to wait for the duration.
3870 WasWait
= FlagOn( IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
3871 SetFlag( IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
| IRP_CONTEXT_FLAG_DISABLE_DIRTY
);
3875 NT_ASSERT( !(FatEntry
& ~FAT32_ENTRY_MASK
) );
3876 FatVerifyIndexIsValid(IrpContext
, Vcb
, FatIndex
);
3883 SectorSize
= 1 << Vcb
->AllocationSupport
.LogOfBytesPerSector
;
3886 // Case on 12 or 16 bit fats.
3888 // In the 12 bit case (mostly floppies) we always have the whole fat
3889 // (max 6k bytes) pinned during allocation operations. This is possibly
3890 // a wee bit slower, but saves headaches over fat entries with 8 bits
3891 // on one page, and 4 bits on the next.
3893 // In the 16 bit case we only read the page that we need to set the fat
3898 // DEAL WITH 12 BIT CASE
3903 if (Vcb
->AllocationSupport
.FatIndexBitSize
== 12) {
3908 // Make sure we have a valid entry
3914 // We read in the entire fat. Note that using prepare write marks
3915 // the bcb pre-dirty, so we don't have to do it explicitly.
3918 OffsetIntoVolumeFile
= FatReservedBytes( &Vcb
->Bpb
) + FatIndex
* 3 / 2;
3920 FatPrepareWriteVolumeFile( IrpContext
,
3922 FatReservedBytes( &Vcb
->Bpb
),
3923 FatBytesPerFat( &Vcb
->Bpb
),
3930 // Mark the sector(s) dirty in the DirtyFatMcb. This call is
3931 // complicated somewhat for the 12 bit case since a single
3932 // entry write can span two sectors (and pages).
3934 // Get the Lbo for the sector where the entry starts, and add it to
3935 // the dirty fat Mcb.
3938 Lbo
= OffsetIntoVolumeFile
& ~(SectorSize
- 1);
3940 FatAddMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
, (VBO
) Lbo
, Lbo
, SectorSize
);
3943 // If the entry started on the last byte of the sector, it continues
3944 // to the next sector, so mark the next sector dirty as well.
3946 // Note that this entry will simply coalese with the last entry,
3947 // so this operation cannot fail. Also if we get this far, we have
3948 // made it, so no unwinding will be needed.
3951 if ( (OffsetIntoVolumeFile
& (SectorSize
- 1)) == (SectorSize
- 1) ) {
3955 FatAddMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
, (VBO
) Lbo
, Lbo
, SectorSize
);
3959 // Store the entry into the fat; we need a little synchonization
3960 // here and can't use a spinlock since the bytes might not be
3964 FatLockFreeClusterBitMap( Vcb
);
3965 ReleaseMutex
= TRUE
;
3967 FatSet12BitEntry( PinnedFat
, FatIndex
, FatEntry
);
3969 FatUnlockFreeClusterBitMap( Vcb
);
3970 ReleaseMutex
= FALSE
;
3972 } else if (Vcb
->AllocationSupport
.FatIndexBitSize
== 32) {
3975 // DEAL WITH 32 BIT CASE
3978 PULONG PinnedFatEntry32
;
3981 // Read in a new page of fat
3984 OffsetIntoVolumeFile
= FatReservedBytes( &Vcb
->Bpb
) +
3985 FatIndex
* sizeof( FAT_ENTRY
);
3987 FatPrepareWriteVolumeFile( IrpContext
,
3989 OffsetIntoVolumeFile
,
3992 (PVOID
*)&PinnedFatEntry32
,
3996 // Mark the sector dirty in the DirtyFatMcb
3999 Lbo
= OffsetIntoVolumeFile
& ~(SectorSize
- 1);
4001 FatAddMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
, (VBO
) Lbo
, Lbo
, SectorSize
);
4004 // Store the FatEntry to the pinned page.
4006 // Preserve the reserved bits in FAT32 entries in the file heap.
4010 FatLockFreeClusterBitMap( Vcb
);
4011 ReleaseMutex
= TRUE
;
4014 if (FatIndex
!= FAT_DIRTY_BIT_INDEX
) {
4016 *PinnedFatEntry32
= ((*PinnedFatEntry32
& ~FAT32_ENTRY_MASK
) | FatEntry
);
4020 *PinnedFatEntry32
= FatEntry
;
4024 FatUnlockFreeClusterBitMap( Vcb
);
4025 ReleaseMutex
= FALSE
;
4031 // DEAL WITH 16 BIT CASE
4034 PUSHORT PinnedFatEntry
;
4037 // Read in a new page of fat
4040 OffsetIntoVolumeFile
= FatReservedBytes( &Vcb
->Bpb
) +
4041 FatIndex
* sizeof(USHORT
);
4043 FatPrepareWriteVolumeFile( IrpContext
,
4045 OffsetIntoVolumeFile
,
4048 (PVOID
*)&PinnedFatEntry
,
4052 // Mark the sector dirty in the DirtyFatMcb
4055 Lbo
= OffsetIntoVolumeFile
& ~(SectorSize
- 1);
4057 FatAddMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
, (VBO
) Lbo
, Lbo
, SectorSize
);
4060 // Store the FatEntry to the pinned page.
4062 // We need extra synchronization here for broken architectures
4063 // like the ALPHA that don't support atomic 16 bit writes.
4067 FatLockFreeClusterBitMap( Vcb
);
4068 ReleaseMutex
= TRUE
;
4071 *PinnedFatEntry
= (USHORT
)FatEntry
;
4074 FatUnlockFreeClusterBitMap( Vcb
);
4075 ReleaseMutex
= FALSE
;
4081 DebugUnwind( FatSetFatEntry
);
4084 // Re-enable volume dirtying in case this was a dirty bit operation.
4087 ClearFlag( IrpContext
->Flags
, IRP_CONTEXT_FLAG_DISABLE_DIRTY
);
4090 // Make this operation asynchronous again if needed.
4095 ClearFlag( IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
4099 // If we still somehow have the Mutex, release it.
4104 NT_ASSERT( _SEH2_AbnormalTermination() );
4106 FatUnlockFreeClusterBitMap( Vcb
);
4110 // Unpin the Bcb. For cleaning operations or if the corruption was detected while mounting we make this write-through.
4113 if ((CleaningOperation
||
4114 FlagOn(Vcb
->VcbState
, VCB_STATE_FLAG_MOUNT_IN_PROGRESS
)) &&
4117 IO_STATUS_BLOCK IgnoreStatus
;
4121 DbgDoit( IrpContext
->PinCount
-= 1 );
4122 CcUnpinRepinnedBcb( Bcb
, TRUE
, &IgnoreStatus
);
4126 FatUnpinBcb(IrpContext
, Bcb
);
4129 DebugTrace(-1, Dbg
, "FatSetFatEntry -> (VOID)\n", 0);
4137 // Internal support routine
4142 IN PIRP_CONTEXT IrpContext
,
4144 IN ULONG StartingFatIndex
,
4145 IN ULONG ClusterCount
,
4146 IN BOOLEAN ChainTogether
4151 Routine Description:
4153 This routine sets a continuous run of clusters in the fat. If ChainTogether
4154 is TRUE, then the clusters are linked together as in normal Fat fasion,
4155 with the last cluster receiving FAT_CLUSTER_LAST. If ChainTogether is
4156 FALSE, all the entries are set to FAT_CLUSTER_AVAILABLE, effectively
4157 freeing all the clusters in the run.
4161 Vcb - Supplies the Vcb to examine, yields 12/16 bit info, etc.
4163 StartingFatIndex - Supplies the destination fat index.
4165 ClusterCount - Supplies the number of contiguous clusters to work on.
4167 ChainTogether - Tells us whether to fill the entries with links, or
4168 FAT_CLUSTER_AVAILABLE
4178 #define MAXCOUNTCLUS 0x10000
4179 #define COUNTSAVEDBCBS ((MAXCOUNTCLUS * sizeof(FAT_ENTRY) / PAGE_SIZE) + 2)
4180 PBCB SavedBcbs
[COUNTSAVEDBCBS
][2];
4191 BOOLEAN ReleaseMutex
= FALSE
;
4193 ULONG SavedStartingFatIndex
= StartingFatIndex
;
4197 DebugTrace(+1, Dbg
, "FatSetFatRun\n", 0);
4198 DebugTrace( 0, Dbg
, " Vcb = %p\n", Vcb
);
4199 DebugTrace( 0, Dbg
, " StartingFatIndex = %8x\n", StartingFatIndex
);
4200 DebugTrace( 0, Dbg
, " ClusterCount = %8lx\n", ClusterCount
);
4201 DebugTrace( 0, Dbg
, " ChainTogether = %s\n", ChainTogether
? "TRUE":"FALSE");
4204 // Make sure they gave us a valid fat run.
4207 FatVerifyIndexIsValid(IrpContext
, Vcb
, StartingFatIndex
);
4208 FatVerifyIndexIsValid(IrpContext
, Vcb
, StartingFatIndex
+ ClusterCount
- 1);
4211 // Check special case
4214 if (ClusterCount
== 0) {
4216 DebugTrace(-1, Dbg
, "FatSetFatRun -> (VOID)\n", 0);
4224 SectorSize
= 1 << Vcb
->AllocationSupport
.LogOfBytesPerSector
;
4227 // Case on 12 or 16 bit fats.
4229 // In the 12 bit case (mostly floppies) we always have the whole fat
4230 // (max 6k bytes) pinned during allocation operations. This is possibly
4231 // a wee bit slower, but saves headaches over fat entries with 8 bits
4232 // on one page, and 4 bits on the next.
4234 // In the 16 bit case we only read one page at a time, as needed.
4238 // DEAL WITH 12 BIT CASE
4243 if (Vcb
->AllocationSupport
.FatIndexBitSize
== 12) {
4246 // We read in the entire fat. Note that using prepare write marks
4247 // the bcb pre-dirty, so we don't have to do it explicitly.
4250 RtlZeroMemory( &SavedBcbs
[0][0], 2 * sizeof(PBCB
) * 2);
4252 FatPrepareWriteVolumeFile( IrpContext
,
4254 FatReservedBytes( &Vcb
->Bpb
),
4255 FatBytesPerFat( &Vcb
->Bpb
),
4262 // Mark the affected sectors dirty. Note that FinalSectorLbo is
4263 // the Lbo of the END of the entry (Thus * 3 + 2). This makes sure
4264 // we catch the case of a dirty fat entry straddling a sector boundry.
4266 // Note that if the first AddMcbEntry succeeds, all following ones
4267 // will simply coalese, and thus also succeed.
4270 StartSectorLbo
= (FatReservedBytes( &Vcb
->Bpb
) + StartingFatIndex
* 3 / 2)
4271 & ~(SectorSize
- 1);
4273 FinalSectorLbo
= (FatReservedBytes( &Vcb
->Bpb
) + ((StartingFatIndex
+
4274 ClusterCount
) * 3 + 2) / 2) & ~(SectorSize
- 1);
4276 for (Lbo
= StartSectorLbo
; Lbo
<= FinalSectorLbo
; Lbo
+= SectorSize
) {
4278 FatAddMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
, (VBO
) Lbo
, Lbo
, SectorSize
);
4282 // Store the entries into the fat; we need a little
4283 // synchonization here and can't use a spinlock since the bytes
4284 // might not be resident.
4287 FatLockFreeClusterBitMap( Vcb
);
4288 ReleaseMutex
= TRUE
;
4290 for (Cluster
= StartingFatIndex
;
4291 Cluster
< StartingFatIndex
+ ClusterCount
- 1;
4294 FatSet12BitEntry( PinnedFat
,
4296 ChainTogether
? Cluster
+ 1 : FAT_CLUSTER_AVAILABLE
);
4300 // Save the last entry
4303 FatSet12BitEntry( PinnedFat
,
4306 FAT_CLUSTER_LAST
& 0xfff : FAT_CLUSTER_AVAILABLE
);
4308 FatUnlockFreeClusterBitMap( Vcb
);
4309 ReleaseMutex
= FALSE
;
4311 } else if (Vcb
->AllocationSupport
.FatIndexBitSize
== 32) {
4314 // DEAL WITH 32 BIT CASE
4319 VBO StartOffsetInVolume
;
4320 VBO FinalOffsetInVolume
;
4324 PULONG FatEntry
= NULL
;
4325 ULONG ClusterCountThisRun
;
4327 StartOffsetInVolume
= FatReservedBytes(&Vcb
->Bpb
) +
4328 StartingFatIndex
* sizeof(FAT_ENTRY
);
4330 if (ClusterCount
> MAXCOUNTCLUS
) {
4331 ClusterCountThisRun
= MAXCOUNTCLUS
;
4333 ClusterCountThisRun
= ClusterCount
;
4336 FinalOffsetInVolume
= StartOffsetInVolume
+
4337 (ClusterCountThisRun
- 1) * sizeof(FAT_ENTRY
);
4340 ULONG NumberOfPages
;
4343 NumberOfPages
= (FinalOffsetInVolume
/ PAGE_SIZE
) -
4344 (StartOffsetInVolume
/ PAGE_SIZE
) + 1;
4346 RtlZeroMemory( &SavedBcbs
[0][0], (NumberOfPages
+ 1) * sizeof(PBCB
) * 2 );
4348 for ( Page
= 0, Offset
= StartOffsetInVolume
& ~(PAGE_SIZE
- 1);
4349 Page
< NumberOfPages
;
4350 Page
++, Offset
+= PAGE_SIZE
) {
4352 FatPrepareWriteVolumeFile( IrpContext
,
4356 &SavedBcbs
[Page
][0],
4357 (PVOID
*)&SavedBcbs
[Page
][1],
4363 FatEntry
= (PULONG
)((PUCHAR
)SavedBcbs
[0][1] +
4364 (StartOffsetInVolume
% PAGE_SIZE
));
4370 // Mark the run dirty
4373 StartSectorLbo
= StartOffsetInVolume
& ~(SectorSize
- 1);
4374 FinalSectorLbo
= FinalOffsetInVolume
& ~(SectorSize
- 1);
4376 for (Lbo
= StartSectorLbo
; Lbo
<= FinalSectorLbo
; Lbo
+= SectorSize
) {
4378 FatAddMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
, (VBO
)Lbo
, Lbo
, SectorSize
);
4382 // Store the entries
4384 // We need extra synchronization here for broken architectures
4385 // like the ALPHA that don't support atomic 16 bit writes.
4389 FatLockFreeClusterBitMap( Vcb
);
4390 ReleaseMutex
= TRUE
;
4393 FinalCluster
= StartingFatIndex
+ ClusterCountThisRun
- 1;
4396 for (Cluster
= StartingFatIndex
;
4397 Cluster
<= FinalCluster
;
4398 Cluster
++, FatEntry
++) {
4401 // If we just crossed a page boundry (as opposed to starting
4402 // on one), update our idea of FatEntry.
4404 if ( (((ULONG_PTR
)FatEntry
& (PAGE_SIZE
-1)) == 0) &&
4405 (Cluster
!= StartingFatIndex
) ) {
4408 FatEntry
= (PULONG
)SavedBcbs
[Page
][1];
4411 *FatEntry
= ChainTogether
? (FAT_ENTRY
)(Cluster
+ 1) :
4412 FAT_CLUSTER_AVAILABLE
;
4416 // Fix up the last entry if we were chaining together
4419 if ((ClusterCount
<= MAXCOUNTCLUS
) &&
4422 *(FatEntry
-1) = FAT_CLUSTER_LAST
;
4426 FatUnlockFreeClusterBitMap( Vcb
);
4427 ReleaseMutex
= FALSE
;
4437 for (i
= 0; (i
< COUNTSAVEDBCBS
) && (SavedBcbs
[i
][0] != NULL
); i
++) {
4439 FatUnpinBcb( IrpContext
, SavedBcbs
[i
][0] );
4440 SavedBcbs
[i
][0] = NULL
;
4444 if (ClusterCount
<= MAXCOUNTCLUS
) {
4450 StartingFatIndex
+= MAXCOUNTCLUS
;
4451 ClusterCount
-= MAXCOUNTCLUS
;
4458 // DEAL WITH 16 BIT CASE
4461 VBO StartOffsetInVolume
;
4462 VBO FinalOffsetInVolume
;
4466 PUSHORT FatEntry
= NULL
;
4468 StartOffsetInVolume
= FatReservedBytes(&Vcb
->Bpb
) +
4469 StartingFatIndex
* sizeof(USHORT
);
4471 FinalOffsetInVolume
= StartOffsetInVolume
+
4472 (ClusterCount
- 1) * sizeof(USHORT
);
4475 // Read in one page of fat at a time. We cannot read in the
4476 // all of the fat we need because of cache manager limitations.
4478 // SavedBcb was initialized to be able to hold the largest
4479 // possible number of pages in a fat plus and extra one to
4480 // accomadate the boot sector, plus one more to make sure there
4481 // is enough room for the RtlZeroMemory below that needs the mark
4482 // the first Bcb after all the ones we will use as an end marker.
4486 ULONG NumberOfPages
;
4489 NumberOfPages
= (FinalOffsetInVolume
/ PAGE_SIZE
) -
4490 (StartOffsetInVolume
/ PAGE_SIZE
) + 1;
4492 RtlZeroMemory( &SavedBcbs
[0][0], (NumberOfPages
+ 1) * sizeof(PBCB
) * 2 );
4494 for ( Page
= 0, Offset
= StartOffsetInVolume
& ~(PAGE_SIZE
- 1);
4495 Page
< NumberOfPages
;
4496 Page
++, Offset
+= PAGE_SIZE
) {
4498 FatPrepareWriteVolumeFile( IrpContext
,
4502 &SavedBcbs
[Page
][0],
4503 (PVOID
*)&SavedBcbs
[Page
][1],
4509 FatEntry
= (PUSHORT
)((PUCHAR
)SavedBcbs
[0][1] +
4510 (StartOffsetInVolume
% PAGE_SIZE
));
4516 // Mark the run dirty
4519 StartSectorLbo
= StartOffsetInVolume
& ~(SectorSize
- 1);
4520 FinalSectorLbo
= FinalOffsetInVolume
& ~(SectorSize
- 1);
4522 for (Lbo
= StartSectorLbo
; Lbo
<= FinalSectorLbo
; Lbo
+= SectorSize
) {
4524 FatAddMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
, (VBO
) Lbo
, Lbo
, SectorSize
);
4528 // Store the entries
4530 // We need extra synchronization here for broken architectures
4531 // like the ALPHA that don't support atomic 16 bit writes.
4535 FatLockFreeClusterBitMap( Vcb
);
4536 ReleaseMutex
= TRUE
;
4539 FinalCluster
= StartingFatIndex
+ ClusterCount
- 1;
4542 for (Cluster
= StartingFatIndex
;
4543 Cluster
<= FinalCluster
;
4544 Cluster
++, FatEntry
++) {
4547 // If we just crossed a page boundary (as opposed to starting
4548 // on one), update our idea of FatEntry.
4550 if ( (((ULONG_PTR
)FatEntry
& (PAGE_SIZE
-1)) == 0) &&
4551 (Cluster
!= StartingFatIndex
) ) {
4554 FatEntry
= (PUSHORT
)SavedBcbs
[Page
][1];
4557 *FatEntry
= (USHORT
) (ChainTogether
? (FAT_ENTRY
)(Cluster
+ 1) :
4558 FAT_CLUSTER_AVAILABLE
);
4562 // Fix up the last entry if we were chaining together
4565 if ( ChainTogether
) {
4568 #pragma warning( suppress: 4310 )
4570 *(FatEntry
-1) = (USHORT
)FAT_CLUSTER_LAST
;
4574 FatUnlockFreeClusterBitMap( Vcb
);
4575 ReleaseMutex
= FALSE
;
4583 DebugUnwind( FatSetFatRun
);
4586 // If we still somehow have the Mutex, release it.
4591 NT_ASSERT( _SEH2_AbnormalTermination() );
4593 FatUnlockFreeClusterBitMap( Vcb
);
4600 for (i
= 0; (i
< COUNTSAVEDBCBS
) && (SavedBcbs
[i
][0] != NULL
); i
++) {
4602 FatUnpinBcb( IrpContext
, SavedBcbs
[i
][0] );
4606 // At this point nothing in this finally clause should have raised.
4607 // So, now comes the unsafe (sigh) stuff.
4610 if ( _SEH2_AbnormalTermination() &&
4611 (Vcb
->AllocationSupport
.FatIndexBitSize
== 32) ) {
4616 // This case is more complex because the FAT12 and FAT16 cases
4617 // pin all the needed FAT pages (128K max), after which it
4618 // can't fail, before changing any FAT entries. In the Fat32
4619 // case, it may not be practical to pin all the needed FAT
4620 // pages, because that could span many megabytes. So Fat32
4621 // attacks in chunks, and if a failure occurs once the first
4622 // chunk has been updated, we have to back out the updates.
4624 // The unwind consists of walking back over each FAT entry we
4625 // have changed, setting it back to the previous value. Note
4626 // that the previous value will either be FAT_CLUSTER_AVAILABLE
4627 // (if ChainTogether==TRUE) or a simple link to the successor
4628 // (if ChainTogether==FALSE).
4630 // We concede that any one of these calls could fail too; our
4631 // objective is to make this case no more likely than the case
4632 // for a file consisting of multiple disjoint runs.
4635 while ( StartingFatIndex
> SavedStartingFatIndex
) {
4639 FatSetFatEntry( IrpContext
, Vcb
, StartingFatIndex
,
4641 StartingFatIndex
+ 1 : FAT_CLUSTER_AVAILABLE
);
4645 DebugTrace(-1, Dbg
, "FatSetFatRun -> (VOID)\n", 0);
4653 // Internal support routine
4663 Routine Description:
4665 This routine just computes the base 2 log of an integer. It is only used
4666 // on objects that are known to be powers of two.
4670 Value - The value to take the base 2 log of.
4674 UCHAR - The base 2 log of Value.
4682 ULONG OrigValue
= Value
;
4688 // Knock bits off until we get a one at position 0
4691 while ( (Value
& 0xfffffffe) != 0 ) {
4698 // If there was more than one bit set, the file system messed up,
4704 DebugTrace(+1, Dbg
, "LogOf\n", 0);
4705 DebugTrace( 0, Dbg
, " Value = %8lx\n", OrigValue
);
4707 DebugTrace( 0, Dbg
, "Received non power of 2.\n", 0);
4709 DebugTrace(-1, Dbg
, "LogOf -> %8lx\n", Log
);
4712 #pragma prefast( suppress: 28159, "we bugcheck here because our internal data structures are seriously corrupted if this happens" )
4714 FatBugCheck( Value
, Log
, 0 );
4722 FatExamineFatEntries(
4723 IN PIRP_CONTEXT IrpContext
,
4725 IN ULONG StartIndex OPTIONAL
,
4726 IN ULONG EndIndex OPTIONAL
,
4727 IN BOOLEAN SetupWindows
,
4728 IN PFAT_WINDOW SwitchToWindow OPTIONAL
,
4729 IN PULONG BitMapBuffer OPTIONAL
4733 Routine Description:
4735 This routine handles scanning a segment of the FAT into in-memory structures.
4737 There are three fundamental cases, with variations depending on the FAT type:
4739 1) During volume setup, FatSetupAllocations
4741 1a) for FAT12/16, read the FAT into our free clusterbitmap
4742 1b) for FAT32, perform the initial scan for window free cluster counts
4744 2) Switching FAT32 windows on the fly during system operation
4746 3) Reading arbitrary segments of the FAT for the purposes of the GetVolumeBitmap
4747 call (only for FAT32)
4749 There really is too much going on in here. At some point this should be
4750 substantially rewritten.
4754 Vcb - Supplies the volume involved
4756 StartIndex - Supplies the starting cluster, ignored if SwitchToWindow supplied
4758 EndIndex - Supplies the ending cluster, ignored if SwitchToWindow supplied
4760 SetupWindows - Indicates if we are doing the initial FAT32 scan
4762 SwitchToWindow - Supplies the FAT window we are examining and will switch to
4764 BitMapBuffer - Supplies a specific bitmap to fill in, if not supplied we fill
4765 in the volume free cluster bitmap if !SetupWindows
4769 None. Lots of side effects.
4773 ULONG FatIndexBitSize
;
4777 FAT_ENTRY FatEntry
= FAT_CLUSTER_AVAILABLE
;
4778 FAT_ENTRY FirstFatEntry
= FAT_CLUSTER_AVAILABLE
;
4782 ULONG EntriesPerWindow
;
4784 ULONG ClustersThisRun
;
4785 ULONG StartIndexOfThisRun
;
4787 PULONG FreeClusterCount
= NULL
;
4789 PFAT_WINDOW CurrentWindow
= NULL
;
4791 PVOID NewBitMapBuffer
= NULL
;
4792 PRTL_BITMAP BitMap
= NULL
;
4793 RTL_BITMAP PrivateBitMap
;
4795 ULONG ClusterSize
= 0;
4796 ULONG PrefetchPages
= 0;
4799 VBO BadClusterVbo
= 0;
4811 // Now assert correct usage.
4814 FatIndexBitSize
= Vcb
->AllocationSupport
.FatIndexBitSize
;
4816 NT_ASSERT( !(SetupWindows
&& (SwitchToWindow
|| BitMapBuffer
)));
4817 NT_ASSERT( !(SetupWindows
&& FatIndexBitSize
!= 32));
4819 if (Vcb
->NumberOfWindows
> 1) {
4822 // FAT32: Calculate the number of FAT entries covered by a window. This is
4823 // equal to the number of bits in the freespace bitmap, the size of which
4827 EntriesPerWindow
= MAX_CLUSTER_BITMAP_SIZE
;
4831 EntriesPerWindow
= Vcb
->AllocationSupport
.NumberOfClusters
;
4835 // We will also fill in the cumulative count of free clusters for
4836 // the entire volume. If this is not appropriate, NULL it out
4840 FreeClusterCount
= &Vcb
->AllocationSupport
.NumberOfFreeClusters
;
4844 NT_ASSERT(BitMapBuffer
== NULL
);
4847 // In this case we're just supposed to scan the fat and set up
4848 // the information regarding where the buckets fall and how many
4849 // free clusters are in each.
4851 // It is fine to monkey with the real windows, we must be able
4852 // to do this to activate the volume.
4857 CurrentWindow
= &Vcb
->Windows
[0];
4858 CurrentWindow
->FirstCluster
= StartIndex
;
4859 CurrentWindow
->ClustersFree
= 0;
4862 // We always wish to calculate total free clusters when
4863 // setting up the FAT windows.
4866 } else if (BitMapBuffer
== NULL
) {
4869 // We will be filling in the free cluster bitmap for the volume.
4870 // Careful, we can raise out of here and be hopelessly hosed if
4871 // we built this up in the main bitmap/window itself.
4873 // For simplicity's sake, we'll do the swap for everyone. FAT32
4874 // provokes the need since we can't tolerate partial results
4875 // when switching windows.
4878 NT_ASSERT( SwitchToWindow
);
4880 CurrentWindow
= SwitchToWindow
;
4881 StartIndex
= CurrentWindow
->FirstCluster
;
4882 EndIndex
= CurrentWindow
->LastCluster
;
4884 BitMap
= &PrivateBitMap
;
4885 NewBitMapBuffer
= FsRtlAllocatePoolWithTag( PagedPool
,
4886 (EntriesPerWindow
+ 7) / 8,
4889 RtlInitializeBitMap( &PrivateBitMap
,
4891 EndIndex
- StartIndex
+ 1);
4893 if ((FatIndexBitSize
== 32) &&
4894 (Vcb
->NumberOfWindows
> 1)) {
4897 // We do not wish to count total clusters here.
4900 FreeClusterCount
= NULL
;
4906 BitMap
= &PrivateBitMap
;
4907 RtlInitializeBitMap(&PrivateBitMap
,
4909 EndIndex
- StartIndex
+ 1);
4912 // We do not count total clusters here.
4915 FreeClusterCount
= NULL
;
4919 // Now, our start index better be in the file heap.
4922 NT_ASSERT( StartIndex
>= 2 );
4927 // Pick up the initial chunk of the FAT and first entry.
4930 if (FatIndexBitSize
== 12) {
4933 // We read in the entire fat in the 12 bit case.
4936 FatReadVolumeFile( IrpContext
,
4938 FatReservedBytes( &Vcb
->Bpb
),
4939 FatBytesPerFat( &Vcb
->Bpb
),
4941 (PVOID
*)&FatBuffer
);
4943 FatLookup12BitEntry(FatBuffer
, 0, &FirstFatEntry
);
4948 // Read in one page of fat at a time. We cannot read in
4949 // all of the fat we need because of cache manager limitations.
4952 ULONG BytesPerEntry
= FatIndexBitSize
>> 3;
4954 FatPages
= (FatReservedBytes(&Vcb
->Bpb
) + FatBytesPerFat(&Vcb
->Bpb
) + (PAGE_SIZE
- 1)) / PAGE_SIZE
;
4955 Page
= (FatReservedBytes(&Vcb
->Bpb
) + StartIndex
* BytesPerEntry
) / PAGE_SIZE
;
4957 Offset
= Page
* PAGE_SIZE
;
4960 // Prefetch the FAT entries in memory for optimal performance.
4963 PrefetchPages
= FatPages
- Page
;
4965 if (PrefetchPages
> FAT_PREFETCH_PAGE_COUNT
) {
4967 PrefetchPages
= ALIGN_UP_BY(Page
, FAT_PREFETCH_PAGE_COUNT
) - Page
;
4970 #if (NTDDI_VERSION >= NTDDI_WIN8)
4971 FatPrefetchPages( IrpContext
,
4972 Vcb
->VirtualVolumeFile
,
4977 FatReadVolumeFile( IrpContext
,
4984 if (FatIndexBitSize
== 32) {
4986 FatBuffer
= (PUSHORT
)((PUCHAR
)pv
+
4987 (FatReservedBytes(&Vcb
->Bpb
) + StartIndex
* BytesPerEntry
) %
4990 FirstFatEntry
= *((PULONG
)FatBuffer
);
4991 FirstFatEntry
= FirstFatEntry
& FAT32_ENTRY_MASK
;
4995 FatBuffer
= (PUSHORT
)((PUCHAR
)pv
+
4996 FatReservedBytes(&Vcb
->Bpb
) % PAGE_SIZE
) + 2;
4998 FirstFatEntry
= *FatBuffer
;
5003 ClusterSize
= 1 << (Vcb
->AllocationSupport
.LogOfBytesPerCluster
);
5005 CurrentRun
= (FirstFatEntry
== FAT_CLUSTER_AVAILABLE
) ?
5006 FreeClusters
: AllocatedClusters
;
5008 StartIndexOfThisRun
= StartIndex
;
5010 for (FatIndex
= StartIndex
; FatIndex
<= EndIndex
; FatIndex
++) {
5012 if (FatIndexBitSize
== 12) {
5014 FatLookup12BitEntry(FatBuffer
, FatIndex
, &FatEntry
);
5019 // If we are setting up the FAT32 windows and have stepped into a new
5020 // bucket, finalize this one and move forward.
5024 FatIndex
> StartIndex
&&
5025 (FatIndex
- 2) % EntriesPerWindow
== 0) {
5027 CurrentWindow
->LastCluster
= FatIndex
- 1;
5029 if (CurrentRun
== FreeClusters
) {
5032 // We must be counting clusters in order to modify the
5033 // contents of the window.
5036 NT_ASSERT( FreeClusterCount
);
5038 ClustersThisRun
= FatIndex
- StartIndexOfThisRun
;
5039 CurrentWindow
->ClustersFree
+= ClustersThisRun
;
5041 if (FreeClusterCount
) {
5042 *FreeClusterCount
+= ClustersThisRun
;
5047 NT_ASSERT(CurrentRun
== AllocatedClusters
);
5051 StartIndexOfThisRun
= FatIndex
;
5052 CurrentRun
= UnknownClusters
;
5055 CurrentWindow
->ClustersFree
= 0;
5056 CurrentWindow
->FirstCluster
= FatIndex
;
5060 // If we just stepped onto a new page, grab a new pointer.
5063 if (((ULONG_PTR
)FatBuffer
& (PAGE_SIZE
- 1)) == 0) {
5065 FatUnpinBcb( IrpContext
, Bcb
);
5068 Offset
+= PAGE_SIZE
;
5070 #if (NTDDI_VERSION >= NTDDI_WIN8)
5072 // If we have exhausted all the prefetch pages, prefetch the next chunk.
5075 if (--PrefetchPages
== 0) {
5077 PrefetchPages
= FatPages
- Page
;
5079 if (PrefetchPages
> FAT_PREFETCH_PAGE_COUNT
) {
5081 PrefetchPages
= FAT_PREFETCH_PAGE_COUNT
;
5084 FatPrefetchPages( IrpContext
,
5085 Vcb
->VirtualVolumeFile
,
5091 FatReadVolumeFile( IrpContext
,
5098 FatBuffer
= (PUSHORT
)pv
;
5101 if (FatIndexBitSize
== 32) {
5105 #pragma warning( suppress: 4213 )
5107 FatEntry
= *((PULONG
)FatBuffer
)++;
5108 FatEntry
= FatEntry
& FAT32_ENTRY_MASK
;
5110 FatEntry
= *FatBuffer
;
5112 FatEntry
= FatEntry
& FAT32_ENTRY_MASK
;
5117 FatEntry
= *FatBuffer
;
5122 if (CurrentRun
== UnknownClusters
) {
5124 CurrentRun
= (FatEntry
== FAT_CLUSTER_AVAILABLE
) ?
5125 FreeClusters
: AllocatedClusters
;
5129 // Are we switching from a free run to an allocated run?
5132 if (CurrentRun
== FreeClusters
&&
5133 FatEntry
!= FAT_CLUSTER_AVAILABLE
) {
5135 ClustersThisRun
= FatIndex
- StartIndexOfThisRun
;
5137 if (FreeClusterCount
) {
5139 *FreeClusterCount
+= ClustersThisRun
;
5140 CurrentWindow
->ClustersFree
+= ClustersThisRun
;
5145 RtlClearBits( BitMap
,
5146 StartIndexOfThisRun
- StartIndex
,
5150 CurrentRun
= AllocatedClusters
;
5151 StartIndexOfThisRun
= FatIndex
;
5155 // Are we switching from an allocated run to a free run?
5158 if (CurrentRun
== AllocatedClusters
&&
5159 FatEntry
== FAT_CLUSTER_AVAILABLE
) {
5161 ClustersThisRun
= FatIndex
- StartIndexOfThisRun
;
5166 StartIndexOfThisRun
- StartIndex
,
5170 CurrentRun
= FreeClusters
;
5171 StartIndexOfThisRun
= FatIndex
;
5175 // If the entry is marked bad, add it to the bad block MCB
5178 if ((SetupWindows
|| (Vcb
->NumberOfWindows
== 1)) &&
5179 (FatInterpretClusterType( Vcb
, FatEntry
) == FatClusterBad
)) {
5182 // This cluster is marked bad.
5183 // Add it to the BadBlockMcb.
5186 Lbo
= FatGetLboFromIndex( Vcb
, FatIndex
);
5187 FatAddMcbEntry( Vcb
, &Vcb
->BadBlockMcb
, BadClusterVbo
, Lbo
, ClusterSize
);
5188 BadClusterVbo
+= ClusterSize
;
5193 // If we finished the scan, then we know about all the possible bad clusters.
5196 SetFlag( Vcb
->VcbState
, VCB_STATE_FLAG_BAD_BLOCKS_POPULATED
);
5199 // Now we have to record the final run we encountered
5202 ClustersThisRun
= FatIndex
- StartIndexOfThisRun
;
5204 if (CurrentRun
== FreeClusters
) {
5206 if (FreeClusterCount
) {
5208 *FreeClusterCount
+= ClustersThisRun
;
5209 CurrentWindow
->ClustersFree
+= ClustersThisRun
;
5214 RtlClearBits( BitMap
,
5215 StartIndexOfThisRun
- StartIndex
,
5224 StartIndexOfThisRun
- StartIndex
,
5230 // And finish the last window if we are in setup.
5235 CurrentWindow
->LastCluster
= FatIndex
- 1;
5239 // Now switch the active window if required. We've successfully gotten everything
5242 // If we were tracking the free cluster count, this means we should update the
5243 // window. This is the case of FAT12/16 initialization.
5246 if (SwitchToWindow
) {
5248 if (Vcb
->FreeClusterBitMap
.Buffer
) {
5250 ExFreePool( Vcb
->FreeClusterBitMap
.Buffer
);
5253 RtlInitializeBitMap( &Vcb
->FreeClusterBitMap
,
5255 EndIndex
- StartIndex
+ 1 );
5257 NewBitMapBuffer
= NULL
;
5259 Vcb
->CurrentWindow
= SwitchToWindow
;
5260 Vcb
->ClusterHint
= (ULONG
)-1;
5262 if (FreeClusterCount
) {
5264 NT_ASSERT( !SetupWindows
);
5266 Vcb
->CurrentWindow
->ClustersFree
= *FreeClusterCount
;
5271 // Make sure plausible things occurred ...
5274 if (!SetupWindows
&& BitMapBuffer
== NULL
) {
5276 ASSERT_CURRENT_WINDOW_GOOD( Vcb
);
5279 NT_ASSERT(Vcb
->AllocationSupport
.NumberOfFreeClusters
<= Vcb
->AllocationSupport
.NumberOfClusters
);
5284 // Unpin the last bcb and drop the temporary bitmap buffer if it exists.
5287 FatUnpinBcb( IrpContext
, Bcb
);
5289 if (NewBitMapBuffer
) {
5291 ExFreePool( NewBitMapBuffer
);