3 Copyright (c) 1990-2000 Microsoft Corporation
11 This module implements the Allocation support routines for Fat.
19 // The Bug check file id for this module
22 #define BugCheckFileId (FAT_BUG_CHECK_ALLOCSUP)
25 // Local debug trace level
28 #define Dbg (DEBUG_TRACE_ALLOCSUP)
//
//  Evaluates to the minimum of a and b.  Note that the macro arguments
//  are evaluated more than once, so they must be free of side effects.
//
#define FatMin(a, b) ((a) < (b) ? (a) : (b))
33 // This structure is used by FatLookupFatEntry to remember a pinned page
37 typedef struct _FAT_ENUMERATION_CONTEXT
{
43 } FAT_ENUMERATION_CONTEXT
, *PFAT_ENUMERATION_CONTEXT
;
46 // Local support routine prototypes
51 IN PIRP_CONTEXT IrpContext
,
54 IN OUT PULONG FatEntry
,
55 IN OUT PFAT_ENUMERATION_CONTEXT Context
60 IN PIRP_CONTEXT IrpContext
,
62 IN ULONG StartingFatIndex
,
63 IN ULONG ClusterCount
,
64 IN BOOLEAN ChainTogether
//
//  Note that the KdPrint below will ONLY fire when the assert does.  Leave it
//  alone.
//
//  Debug-build sanity check: verifies that the cached free-cluster count in
//  the current FAT window agrees with the number of clear bits actually in
//  the free cluster bitmap.  Compiles away to nothing on free builds.
//
//  NOTE(review): the #if DBG / #else / #endif bracketing and closing braces
//  were dropped by the extraction and have been restored - confirm against
//  the original file.
//

#if DBG
#define ASSERT_CURRENT_WINDOW_GOOD(VCB) {                                     \
    ULONG FreeClusterBitMapClear;                                             \
    ASSERT( (VCB)->FreeClusterBitMap.Buffer != NULL );                        \
    FreeClusterBitMapClear = RtlNumberOfClearBits(&(VCB)->FreeClusterBitMap); \
    if ((VCB)->CurrentWindow->ClustersFree != FreeClusterBitMapClear) {       \
        KdPrint(("FAT: ClustersFree %x h != FreeClusterBitMapClear %x h\n",   \
                 (VCB)->CurrentWindow->ClustersFree,                          \
                 FreeClusterBitMapClear));                                    \
    }                                                                         \
    ASSERT( (VCB)->CurrentWindow->ClustersFree == FreeClusterBitMapClear );   \
}
#else
#define ASSERT_CURRENT_WINDOW_GOOD(VCB)
#endif
94 // The following macros provide a convenient way of hiding the details
95 // of bitmap allocation schemes.
101 // FatLockFreeClusterBitMap (
//
//  Acquire the fast mutex guarding the free cluster bitmap.  The mutex is
//  taken "unsafe", so callers must already have APCs disabled (asserted
//  below); the current window is sanity-checked on debug builds.
//
//  NOTE(review): the closing brace was dropped by the extraction and has
//  been restored - confirm against the original file.
//
#define FatLockFreeClusterBitMap(VCB) {                          \
    ASSERT(KeAreApcsDisabled());                                 \
    ExAcquireFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex );  \
    ASSERT_CURRENT_WINDOW_GOOD(VCB)                              \
}
114 // FatUnlockFreeClusterBitMap (
//
//  Release the fast mutex guarding the free cluster bitmap, sanity-checking
//  the current window first on debug builds.  APCs must still be disabled,
//  matching the "unsafe" acquire.
//
//  NOTE(review): the closing brace was dropped by the extraction and has
//  been restored - confirm against the original file.
//
#define FatUnlockFreeClusterBitMap(VCB) {                        \
    ASSERT_CURRENT_WINDOW_GOOD(VCB)                              \
    ASSERT(KeAreApcsDisabled());                                 \
    ExReleaseFastMutexUnsafe( &(VCB)->FreeClusterBitMapMutex );  \
}
127 // FatIsClusterFree (
128 // IN PIRP_CONTEXT IrpContext,
//
//  Returns TRUE if the given FAT index is not allocated or reserved in the
//  free cluster bitmap.  The bitmap is biased by the two reserved FAT
//  entries (clusters 0 and 1), hence the -2.
//
#define FatIsClusterFree(IRPCONTEXT,VCB,FAT_INDEX) \
    (RtlCheckBit(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2) == 0)
140 // IN PIRP_CONTEXT IrpContext,
142 // IN ULONG FatIndex,
143 // IN ULONG ClusterCount
//
//  Mark a run of CLUSTER_COUNT clusters starting at FAT_INDEX as available
//  in the FAT.  A single cluster is written directly; a longer run goes
//  through FatSetFatRun with ChainTogether == FALSE.
//
//  NOTE(review): the "} else {" line and closing braces were dropped by the
//  extraction and have been restored - confirm against the original file.
//
#define FatFreeClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) {             \
    if ((CLUSTER_COUNT) == 1) {                                               \
        FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_AVAILABLE); \
    } else {                                                                  \
        FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),FALSE);   \
    }                                                                         \
}
157 // FatAllocateClusters (
158 // IN PIRP_CONTEXT IrpContext,
160 // IN ULONG FatIndex,
161 // IN ULONG ClusterCount
//
//  Allocate a run of CLUSTER_COUNT clusters starting at FAT_INDEX in the
//  FAT.  A single cluster is marked as the last in its chain; a longer run
//  goes through FatSetFatRun with ChainTogether == TRUE, which links each
//  entry to the next and terminates the chain.
//
//  NOTE(review): the "} else {" line and closing braces were dropped by the
//  extraction and have been restored - confirm against the original file.
//
#define FatAllocateClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) {         \
    if ((CLUSTER_COUNT) == 1) {                                               \
        FatSetFatEntry((IRPCONTEXT),(VCB),(FAT_INDEX),FAT_CLUSTER_LAST);      \
    } else {                                                                  \
        FatSetFatRun((IRPCONTEXT),(VCB),(FAT_INDEX),(CLUSTER_COUNT),TRUE);    \
    }                                                                         \
}
175 // FatUnreserveClusters (
176 // IN PIRP_CONTEXT IrpContext,
178 // IN ULONG FatIndex,
179 // IN ULONG ClusterCount
//
//  Clear the bits covering a run of clusters in the free cluster bitmap,
//  making them available for future allocation, and pull the allocation
//  hint back if the freed run starts below it.  The bitmap is biased by
//  the two reserved FAT entries, hence the -2.
//
//  NOTE(review): the closing braces were dropped by the extraction and
//  have been restored - confirm against the original file.
//
#define FatUnreserveClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) {        \
    ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap ); \
    ASSERT( (FAT_INDEX) >= 2);                                                \
    RtlClearBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT));    \
    if ((FAT_INDEX) < (VCB)->ClusterHint) {                                   \
        (VCB)->ClusterHint = (FAT_INDEX);                                     \
    }                                                                         \
}
194 // FatReserveClusters (
195 // IN PIRP_CONTEXT IrpContext,
197 // IN ULONG FatIndex,
198 // IN ULONG ClusterCount
201 // Handle wrapping the hint back to the front.
//
//  Set the bits covering a run of clusters in the free cluster bitmap,
//  marking them in use, and advance the allocation hint to the first free
//  cluster at or after the end of the run.
//
//  Handle wrapping the hint back to the front.
//
//  NOTE(review): the wrap branch ("_AfterRun = 2;") and the else/closing
//  braces were dropped by the extraction and have been restored - confirm
//  against the original file.
//
#define FatReserveClusters(IRPCONTEXT,VCB,FAT_INDEX,CLUSTER_COUNT) {          \
    ULONG _AfterRun = (FAT_INDEX) + (CLUSTER_COUNT);                          \
    ASSERT( (FAT_INDEX) + (CLUSTER_COUNT) - 2 <= (VCB)->FreeClusterBitMap.SizeOfBitMap ); \
    ASSERT( (FAT_INDEX) >= 2);                                                \
    RtlSetBits(&(VCB)->FreeClusterBitMap,(FAT_INDEX)-2,(CLUSTER_COUNT));      \
                                                                              \
    if (_AfterRun - 2 >= (VCB)->FreeClusterBitMap.SizeOfBitMap) {             \
        _AfterRun = 2;                                                        \
    }                                                                         \
    if (RtlCheckBit(&(VCB)->FreeClusterBitMap, _AfterRun - 2)) {              \
        (VCB)->ClusterHint = RtlFindClearBits( &(VCB)->FreeClusterBitMap, 1, _AfterRun - 2) + 2; \
        if (1 == (VCB)->ClusterHint) {                                        \
            (VCB)->ClusterHint = 2;                                           \
        }                                                                     \
    }                                                                         \
    else {                                                                    \
        (VCB)->ClusterHint = _AfterRun;                                       \
    }                                                                         \
}
226 // FatFindFreeClusterRun (
227 // IN PIRP_CONTEXT IrpContext,
229 // IN ULONG ClusterCount,
230 // IN ULONG AlternateClusterHint
233 // Do a special check if only one cluster is desired.
//
//  Find a run of CLUSTER_COUNT free clusters, starting the search at
//  CLUSTER_HINT.  Evaluates to the FAT index of the run, or the failure
//  value of RtlFindClearBits (+2 bias) if none exists.
//
//  Do a special check if only one cluster is desired.
//
//  NOTE(review): the "(CLUSTER_HINT) :" arm and the "(CLUSTER_COUNT),"
//  argument were dropped by the extraction and have been restored -
//  confirm against the original file.
//
#define FatFindFreeClusterRun(IRPCONTEXT,VCB,CLUSTER_COUNT,CLUSTER_HINT) ( \
    (CLUSTER_COUNT == 1) &&                                                \
    FatIsClusterFree((IRPCONTEXT), (VCB), (CLUSTER_HINT)) ?                \
    (CLUSTER_HINT) :                                                       \
    RtlFindClearBits( &(VCB)->FreeClusterBitMap,                           \
                      (CLUSTER_COUNT),                                     \
                      (CLUSTER_HINT) - 2) + 2                              \
)
246 // FAT32: Define the maximum size of the FreeClusterBitMap to be the
247 // maximum size of a FAT16 FAT. If there are more clusters on the
248 // volume than can be represented by this many bytes of bitmap, the
249 // FAT will be split into "buckets", each of which does fit.
251 // Note this count is in clusters/bits of bitmap.
//
//  64K clusters per bitmap window; volumes with more clusters than this
//  are split into multiple windows ("buckets").
//
#define MAX_CLUSTER_BITMAP_SIZE (1 << 16)
257 // Calculate the window a given cluster number is in.
//
//  Compute which bitmap window a given cluster number falls in.  Cluster
//  numbering starts at 2 (entries 0 and 1 of the FAT are reserved), so
//  the cluster number is de-biased before dividing.
//
#define FatWindowOfCluster(C) (((C) - 2) / MAX_CLUSTER_BITMAP_SIZE)
263 #pragma alloc_text(PAGE, FatAddFileAllocation)
264 #pragma alloc_text(PAGE, FatAllocateDiskSpace)
265 #pragma alloc_text(PAGE, FatDeallocateDiskSpace)
266 #pragma alloc_text(PAGE, FatExamineFatEntries)
267 #pragma alloc_text(PAGE, FatInterpretClusterType)
268 #pragma alloc_text(PAGE, FatLogOf)
269 #pragma alloc_text(PAGE, FatLookupFatEntry)
270 #pragma alloc_text(PAGE, FatLookupFileAllocation)
271 #pragma alloc_text(PAGE, FatLookupFileAllocationSize)
272 #pragma alloc_text(PAGE, FatMergeAllocation)
273 #pragma alloc_text(PAGE, FatSetFatEntry)
274 #pragma alloc_text(PAGE, FatSetFatRun)
275 #pragma alloc_text(PAGE, FatSetupAllocationSupport)
276 #pragma alloc_text(PAGE, FatSplitAllocation)
277 #pragma alloc_text(PAGE, FatTearDownAllocationSupport)
278 #pragma alloc_text(PAGE, FatTruncateFileAllocation)
291 Choose a window to allocate clusters from. Order of preference is:
293 1. First window with >50% free clusters
294 2. First empty window
295 3. Window with greatest number of free clusters.
299 Vcb - Supplies the Vcb for the volume
303 'Best window' number (index into Vcb->Windows[])
309 ULONG FirstEmpty
= -1;
310 ULONG ClustersPerWindow
= MAX_CLUSTER_BITMAP_SIZE
;
312 ASSERT( 1 != Vcb
->NumberOfWindows
);
314 for (i
= 0; i
< Vcb
->NumberOfWindows
; i
++) {
316 if (Vcb
->Windows
[i
].ClustersFree
== ClustersPerWindow
) {
318 if (-1 == FirstEmpty
) {
321 // Keep note of the first empty window on the disc
327 else if (Vcb
->Windows
[i
].ClustersFree
> MaxFree
) {
330 // This window has the most free clusters, so far
333 MaxFree
= Vcb
->Windows
[i
].ClustersFree
;
337 // If this window has >50% free clusters, then we will take it,
338 // so don't bother considering more windows.
341 if (MaxFree
>= (ClustersPerWindow
>> 1)) {
349 // If there were no windows with 50% or more freespace, then select the
350 // first empty window on the disc, if any - otherwise we'll just go with
351 // the one with the most free clusters.
354 if ((MaxFree
< (ClustersPerWindow
>> 1)) && (-1 != FirstEmpty
)) {
364 FatSetupAllocationSupport (
365 IN PIRP_CONTEXT IrpContext
,
373 This routine fills in the Allocation Support structure in the Vcb.
374 Most entries are computed using fat.h macros supplied with data from
375 the Bios Parameter Block. The free cluster count, however, requires
376 going to the Fat and actually counting free sectors. At the same time
377 // the free cluster bit map is initialized.
381 Vcb - Supplies the Vcb to fill in.
397 ULONG FatIndexBitSize
;
399 ULONG ClustersDescribableByFat
;
403 DebugTrace(+1, Dbg
, "FatSetupAllocationSupport\n", 0);
404 DebugTrace( 0, Dbg
, " Vcb = %8lx\n", Vcb
);
407 // Compute a number of fields for Vcb.AllocationSupport
410 Vcb
->AllocationSupport
.RootDirectoryLbo
= FatRootDirectoryLbo( &Vcb
->Bpb
);
411 Vcb
->AllocationSupport
.RootDirectorySize
= FatRootDirectorySize( &Vcb
->Bpb
);
413 Vcb
->AllocationSupport
.FileAreaLbo
= FatFileAreaLbo( &Vcb
->Bpb
);
415 Vcb
->AllocationSupport
.NumberOfClusters
= FatNumberOfClusters( &Vcb
->Bpb
);
417 Vcb
->AllocationSupport
.FatIndexBitSize
= FatIndexBitSize( &Vcb
->Bpb
);
419 Vcb
->AllocationSupport
.LogOfBytesPerSector
= FatLogOf(Vcb
->Bpb
.BytesPerSector
);
420 Vcb
->AllocationSupport
.LogOfBytesPerCluster
= FatLogOf(FatBytesPerCluster( &Vcb
->Bpb
));
421 Vcb
->AllocationSupport
.NumberOfFreeClusters
= 0;
424 // Deal with a bug in DOS 5 format, if the Fat is not big enough to
425 // describe all the clusters on the disk, reduce this number. We expect
426 // that fat32 volumes will not have this problem.
428 // Turns out this was not a good assumption. We have to do this always now.
431 ClustersDescribableByFat
= ( ((FatIsFat32(Vcb
)? Vcb
->Bpb
.LargeSectorsPerFat
:
432 Vcb
->Bpb
.SectorsPerFat
) *
433 Vcb
->Bpb
.BytesPerSector
* 8)
434 / FatIndexBitSize(&Vcb
->Bpb
) ) - 2;
436 if (Vcb
->AllocationSupport
.NumberOfClusters
> ClustersDescribableByFat
) {
438 Vcb
->AllocationSupport
.NumberOfClusters
= ClustersDescribableByFat
;
442 // Extend the virtual volume file to include the Fat
446 CC_FILE_SIZES FileSizes
;
448 FileSizes
.AllocationSize
.QuadPart
=
449 FileSizes
.FileSize
.QuadPart
= (FatReservedBytes( &Vcb
->Bpb
) +
450 FatBytesPerFat( &Vcb
->Bpb
));
451 FileSizes
.ValidDataLength
= FatMaxLarge
;
453 if ( Vcb
->VirtualVolumeFile
->PrivateCacheMap
== NULL
) {
455 CcInitializeCacheMap( Vcb
->VirtualVolumeFile
,
458 &FatData
.CacheManagerNoOpCallbacks
,
463 CcSetFileSizes( Vcb
->VirtualVolumeFile
, &FileSizes
);
469 if (FatIsFat32(Vcb
) &&
470 Vcb
->AllocationSupport
.NumberOfClusters
> MAX_CLUSTER_BITMAP_SIZE
) {
472 Vcb
->NumberOfWindows
= (Vcb
->AllocationSupport
.NumberOfClusters
+
473 MAX_CLUSTER_BITMAP_SIZE
- 1) /
474 MAX_CLUSTER_BITMAP_SIZE
;
477 BitMapSize
= MAX_CLUSTER_BITMAP_SIZE
;
482 Vcb
->NumberOfWindows
= 1;
484 BitMapSize
= Vcb
->AllocationSupport
.NumberOfClusters
;
488 Vcb
->Windows
= FsRtlAllocatePoolWithTag( PagedPool
,
489 Vcb
->NumberOfWindows
* sizeof(FAT_WINDOW
),
492 RtlInitializeBitMap( &Vcb
->FreeClusterBitMap
,
497 // Choose a FAT window to begin operation in.
500 if (Vcb
->NumberOfWindows
> 1) {
503 // Read the fat and count up free clusters. We bias by the two reserved
504 // entries in the FAT.
507 FatExamineFatEntries( IrpContext
, Vcb
,
509 Vcb
->AllocationSupport
.NumberOfClusters
+ 2 - 1,
516 // Pick a window to begin allocating from
519 Vcb
->CurrentWindow
= &Vcb
->Windows
[ FatSelectBestWindow( Vcb
)];
523 Vcb
->CurrentWindow
= &Vcb
->Windows
[0];
526 // Carefully bias ourselves by the two reserved entries in the FAT.
529 Vcb
->CurrentWindow
->FirstCluster
= 2;
530 Vcb
->CurrentWindow
->LastCluster
= Vcb
->AllocationSupport
.NumberOfClusters
+ 2 - 1;
534 // Now transition to the FAT window we have chosen.
537 FatExamineFatEntries( IrpContext
, Vcb
,
545 // Now set the ClusterHint to the first free bit in our favorite
546 // window (except the ClusterHint is off by two).
550 (BitIndex
= RtlFindClearBits( &Vcb
->FreeClusterBitMap
, 1, 0 )) != -1 ?
555 DebugUnwind( FatSetupAllocationSupport
);
558 // If we hit an exception, back out.
561 if (_SEH2_AbnormalTermination()) {
563 FatTearDownAllocationSupport( IrpContext
, Vcb
);
572 FatTearDownAllocationSupport (
573 IN PIRP_CONTEXT IrpContext
,
581 This routine prepares the volume for closing. Specifically, we must
582 release the free fat bit map buffer, and uninitialize the dirty fat
587 Vcb - Supplies the Vcb to fill in.
596 DebugTrace(+1, Dbg
, "FatTearDownAllocationSupport\n", 0);
597 DebugTrace( 0, Dbg
, " Vcb = %8lx\n", Vcb
);
602 // If there are FAT buckets, free them.
605 if ( Vcb
->Windows
!= NULL
) {
607 ExFreePool( Vcb
->Windows
);
612 // Free the memory associated with the free cluster bitmap.
615 if ( Vcb
->FreeClusterBitMap
.Buffer
!= NULL
) {
617 ExFreePool( Vcb
->FreeClusterBitMap
.Buffer
);
620 // NULL this field as an flag.
623 Vcb
->FreeClusterBitMap
.Buffer
= NULL
;
627 // And remove all the runs in the dirty fat Mcb
630 FatRemoveMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
, 0, 0xFFFFFFFF );
632 DebugTrace(-1, Dbg
, "FatTearDownAllocationSupport -> (VOID)\n", 0);
634 UNREFERENCED_PARAMETER( IrpContext
);
641 FatLookupFileAllocation (
642 IN PIRP_CONTEXT IrpContext
,
646 OUT PULONG ByteCount
,
647 OUT PBOOLEAN Allocated
,
648 OUT PBOOLEAN EndOnMax
,
656 This routine looks up the existing mapping of VBO to LBO for a
657 file/directory. The information it queries is either stored in the
658 mcb field of the fcb/dcb or it is stored on in the fat table and
659 needs to be retrieved and decoded, and updated in the mcb.
663 FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being queried
665 Vbo - Supplies the VBO whose LBO we want returned
667 Lbo - Receives the LBO corresponding to the input Vbo if one exists
669 // ByteCount - Receives the number of bytes within the run
670 // that correspond between the input vbo and output lbo.
672 Allocated - Receives TRUE if the Vbo does have a corresponding Lbo
675 EndOnMax - Receives TRUE if the run ends in the maximal FAT cluster,
676 which results in a fractional bytecount.
678 Index - Receives the Index of the run
687 VBO FirstVboOfCurrentRun
;
688 LBO FirstLboOfCurrentRun
;
695 ULONG BytesPerCluster
;
696 ULARGE_INTEGER BytesOnVolume
;
698 FAT_ENUMERATION_CONTEXT Context
;
702 DebugTrace(+1, Dbg
, "FatLookupFileAllocation\n", 0);
703 DebugTrace( 0, Dbg
, " FcbOrDcb = %8lx\n", FcbOrDcb
);
704 DebugTrace( 0, Dbg
, " Vbo = %8lx\n", Vbo
);
705 DebugTrace( 0, Dbg
, " Lbo = %8lx\n", Lbo
);
706 DebugTrace( 0, Dbg
, " ByteCount = %8lx\n", ByteCount
);
707 DebugTrace( 0, Dbg
, " Allocated = %8lx\n", Allocated
);
716 // Check the trivial case that the mapping is already in our
720 if ( FatLookupMcbEntry(Vcb
, &FcbOrDcb
->Mcb
, Vbo
, Lbo
, ByteCount
, Index
) ) {
724 ASSERT( ByteCount
!= 0);
727 // Detect the overflow case, trim and claim the condition.
730 if (Vbo
+ *ByteCount
== 0) {
735 DebugTrace( 0, Dbg
, "Found run in Mcb.\n", 0);
736 DebugTrace(-1, Dbg
, "FatLookupFileAllocation -> (VOID)\n", 0);
741 // Initialize the Vcb, the cluster size, LastCluster, and
742 // FirstLboOfCurrentRun (to be used as an indication of the first
743 // iteration through the following while loop).
746 BytesPerCluster
= 1 << Vcb
->AllocationSupport
.LogOfBytesPerCluster
;
748 BytesOnVolume
.QuadPart
= UInt32x32To64( Vcb
->AllocationSupport
.NumberOfClusters
, BytesPerCluster
);
751 FirstLboOfCurrentRun
= 0;
754 // Discard the case that the request extends beyond the end of
755 // allocation. Note that if the allocation size is not known
756 // AllocationSize is set to 0xffffffff.
759 if ( Vbo
>= FcbOrDcb
->Header
.AllocationSize
.LowPart
) {
763 DebugTrace( 0, Dbg
, "Vbo beyond end of file.\n", 0);
764 DebugTrace(-1, Dbg
, "FatLookupFileAllocation -> (VOID)\n", 0);
769 // The Vbo is beyond the last Mcb entry. So we adjust Current Vbo/Lbo
770 // and FatEntry to describe the beginning of the last entry in the Mcb.
771 // This is used as initialization for the following loop.
773 // If the Mcb was empty, we start at the beginning of the file with
774 // CurrentVbo set to 0 to indicate a new run.
777 if (FatLookupLastMcbEntry( Vcb
, &FcbOrDcb
->Mcb
, &CurrentVbo
, &CurrentLbo
, &Runs
)) {
779 DebugTrace( 0, Dbg
, "Current Mcb size = %8lx.\n", CurrentVbo
+ 1);
781 CurrentVbo
-= (BytesPerCluster
- 1);
782 CurrentLbo
-= (BytesPerCluster
- 1);
785 // Convert an index to a count.
792 DebugTrace( 0, Dbg
, "Mcb empty.\n", 0);
795 // Check for an FcbOrDcb that has no allocation
798 if (FcbOrDcb
->FirstClusterOfFile
== 0) {
802 DebugTrace( 0, Dbg
, "File has no allocation.\n", 0);
803 DebugTrace(-1, Dbg
, "FatLookupFileAllocation -> (VOID)\n", 0);
809 CurrentLbo
= FatGetLboFromIndex( Vcb
, FcbOrDcb
->FirstClusterOfFile
);
810 FirstVboOfCurrentRun
= CurrentVbo
;
811 FirstLboOfCurrentRun
= CurrentLbo
;
815 DebugTrace( 0, Dbg
, "First Lbo of file = %8lx\n", CurrentLbo
);
820 // Now we know that we are looking up a valid Vbo, but it is
821 // not in the Mcb, which is a monotonically increasing list of
822 // Vbo's. Thus we have to go to the Fat, and update
823 // the Mcb as we go. We use a try-finally to unpin the page
824 // of fat hanging around. Also we mark *Allocated = FALSE, so that
825 // the caller wont try to use the data if we hit an exception.
832 FatEntry
= (FAT_ENTRY
)FatGetIndexFromLbo( Vcb
, CurrentLbo
);
835 // ASSERT that CurrentVbo and CurrentLbo are now cluster aligned.
836 // The assumption here, is that only whole clusters of Vbos and Lbos
837 // are mapped in the Mcb.
840 ASSERT( ((CurrentLbo
- Vcb
->AllocationSupport
.FileAreaLbo
)
841 % BytesPerCluster
== 0) &&
842 (CurrentVbo
% BytesPerCluster
== 0) );
845 // Starting from the first Vbo after the last Mcb entry, scan through
846 // the Fat looking for our Vbo. We continue through the Fat until we
847 // hit a noncontiguity beyond the desired Vbo, or the last cluster.
850 while ( !LastCluster
) {
853 // Get the next fat entry, and update our Current variables.
857 FatLookupFatEntry( IrpContext
, Vcb
, FatEntry
, &FatEntry
, &Context
);
859 FatLookupFatEntry( IrpContext
, Vcb
, FatEntry
, (PULONG
)&FatEntry
, &Context
);
862 PriorLbo
= CurrentLbo
;
863 CurrentLbo
= FatGetLboFromIndex( Vcb
, FatEntry
);
864 CurrentVbo
+= BytesPerCluster
;
866 switch ( FatInterpretClusterType( Vcb
, FatEntry
)) {
869 // Check for a break in the Fat allocation chain.
872 case FatClusterAvailable
:
873 case FatClusterReserved
:
876 DebugTrace( 0, Dbg
, "Break in allocation chain, entry = %d\n", FatEntry
);
877 DebugTrace(-1, Dbg
, "FatLookupFileAllocation -> Fat Corrupt. Raise Status.\n", 0);
879 FatPopUpFileCorrupt( IrpContext
, FcbOrDcb
);
880 FatRaiseStatus( IrpContext
, STATUS_FILE_CORRUPT_ERROR
);
884 // If this is the last cluster, we must update the Mcb and
891 // Assert we know where the current run started. If the
892 // Mcb was empty when we were called, then FirstLboOfCurrentRun
893 // was set to the start of the file. If the Mcb contained an
894 // entry, then FirstLboOfCurrentRun was set on the first
895 // iteration through the loop. Thus if FirstLboOfCurrentRun
896 // is 0, then there was an Mcb entry and we are on our first
897 // iteration, meaning that the last cluster in the Mcb was
898 // really the last allocated cluster, but we checked Vbo
899 // against AllocationSize, and found it OK, thus AllocationSize
900 // must be too large.
902 // Note that, when we finally arrive here, CurrentVbo is actually
903 // the first Vbo beyond the file allocation and CurrentLbo is
907 DebugTrace( 0, Dbg
, "Read last cluster of file.\n", 0);
910 // Detect the case of the maximal file. Note that this really isn't
911 // a proper Vbo - those are zero-based, and this is a one-based number.
912 // The maximal file, of 2^32 - 1 bytes, has a maximum byte offset of
915 // Just so we don't get confused here.
918 if (CurrentVbo
== 0) {
926 if (FirstLboOfCurrentRun
!= 0 ) {
928 DebugTrace( 0, Dbg
, "Adding a run to the Mcb.\n", 0);
929 DebugTrace( 0, Dbg
, " Vbo = %08lx.\n", FirstVboOfCurrentRun
);
930 DebugTrace( 0, Dbg
, " Lbo = %08lx.\n", FirstLboOfCurrentRun
);
931 DebugTrace( 0, Dbg
, " Length = %08lx.\n", CurrentVbo
- FirstVboOfCurrentRun
);
933 (VOID
)FatAddMcbEntry( Vcb
,
935 FirstVboOfCurrentRun
,
936 FirstLboOfCurrentRun
,
937 CurrentVbo
- FirstVboOfCurrentRun
);
943 // Being at the end of allocation, make sure we have found
944 // the Vbo. If we haven't, seeing as we checked VBO
945 // against AllocationSize, the real disk allocation is less
946 // than that of AllocationSize. This comes about when the
947 // real allocation is not yet known, and AllocationSize
948 // contains MAXULONG.
950 // KLUDGE! - If we were called by FatLookupFileAllocationSize
951 // Vbo is set to MAXULONG - 1, and AllocationSize to the lookup
952 // hint. Thus we merrily go along looking for a match that isn't
953 // there, but in the meantime building an Mcb. If this is
954 // the case, fill in AllocationSize and return.
957 if ( Vbo
== MAXULONG
- 1 ) {
960 FcbOrDcb
->Header
.AllocationSize
.QuadPart
= CurrentVbo
;
962 DebugTrace( 0, Dbg
, "New file allocation size = %08lx.\n", CurrentVbo
);
963 try_return ( NOTHING
);
967 // We will lie ever so slightly if we really terminated on the
968 // maximal byte of a file. It is really allocated.
971 if (Vbo
>= CurrentVbo
&& !*EndOnMax
) {
974 try_return ( NOTHING
);
980 // This is a continuation in the chain. If the run has a
981 // discontiguity at this point, update the Mcb, and if we are beyond
982 // the desired Vbo, this is the end of the run, so set LastCluster
983 // and exit the loop.
989 // This is the loop check. The Vbo must not be bigger than the size of
990 // the volume, and the Vbo must not have a) wrapped and b) not been at the
991 // very last cluster in the chain, for the case of the maximal file.
994 if ( CurrentVbo
== 0 ||
995 (BytesOnVolume
.HighPart
== 0 && CurrentVbo
> BytesOnVolume
.LowPart
)) {
997 FatPopUpFileCorrupt( IrpContext
, FcbOrDcb
);
998 FatRaiseStatus( IrpContext
, STATUS_FILE_CORRUPT_ERROR
);
1001 if ( PriorLbo
+ BytesPerCluster
!= CurrentLbo
) {
1004 // Note that on the first time through the loop
1005 // (FirstLboOfCurrentRun == 0), we don't add the
1006 // run to the Mcb since it corresponds to the last
1007 // run already stored in the Mcb.
1010 if ( FirstLboOfCurrentRun
!= 0 ) {
1012 DebugTrace( 0, Dbg
, "Adding a run to the Mcb.\n", 0);
1013 DebugTrace( 0, Dbg
, " Vbo = %08lx.\n", FirstVboOfCurrentRun
);
1014 DebugTrace( 0, Dbg
, " Lbo = %08lx.\n", FirstLboOfCurrentRun
);
1015 DebugTrace( 0, Dbg
, " Length = %08lx.\n", CurrentVbo
- FirstVboOfCurrentRun
);
1017 FatAddMcbEntry( Vcb
,
1019 FirstVboOfCurrentRun
,
1020 FirstLboOfCurrentRun
,
1021 CurrentVbo
- FirstVboOfCurrentRun
);
1027 // Since we are at a run boundary, with CurrentLbo and
1028 // CurrentVbo being the first cluster of the next run,
1029 // we see if the run we just added encompasses the desired
1030 // Vbo, and if so exit. Otherwise we set up two new
1031 // First*boOfCurrentRun, and continue.
1034 if (CurrentVbo
> Vbo
) {
1040 FirstVboOfCurrentRun
= CurrentVbo
;
1041 FirstLboOfCurrentRun
= CurrentLbo
;
1048 DebugTrace(0, Dbg
, "Illegal Cluster Type.\n", FatEntry
);
1050 FatBugCheck( 0, 0, 0 );
1058 // Load up the return parameters.
1060 // On exit from the loop, Vbo still contains the desired Vbo, and
1061 // CurrentVbo is the first byte after the run that contained the
1067 *Lbo
= FirstLboOfCurrentRun
+ (Vbo
- FirstVboOfCurrentRun
);
1069 *ByteCount
= CurrentVbo
- Vbo
;
1071 if (ARGUMENT_PRESENT(Index
)) {
1074 // Note that Runs only needs to be accurate with respect to where we
1075 // ended. Since partial-lookup cases will occur without exclusive
1076 // synchronization, the Mcb itself may be much bigger by now.
1086 DebugUnwind( FatLookupFileAllocation
);
1089 // We are done reading the Fat, so unpin the last page of fat
1090 // that is hanging around
1093 FatUnpinBcb( IrpContext
, Context
.Bcb
);
1095 DebugTrace(-1, Dbg
, "FatLookupFileAllocation -> (VOID)\n", 0);
1103 FatAddFileAllocation (
1104 IN PIRP_CONTEXT IrpContext
,
1106 IN PFILE_OBJECT FileObject OPTIONAL
,
1107 IN ULONG DesiredAllocationSize
1112 Routine Description:
1114 This routine adds additional allocation to the specified file/directory.
1115 Additional allocation is added by appending clusters to the file/directory.
1117 If the file already has a sufficient allocation then this procedure
1118 is effectively a noop.
1122 FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified.
1123 This parameter must not specify the root dcb.
1125 FileObject - If supplied inform the cache manager of the change.
1127 DesiredAllocationSize - Supplies the minimum size, in bytes, that we want
1128 allocated to the file/directory.
1135 PLARGE_MCB McbToCleanup
= NULL
;
1136 PDIRENT Dirent
= NULL
;
1137 ULONG NewAllocation
;
1139 BOOLEAN UnwindWeAllocatedDiskSpace
= FALSE
;
1140 BOOLEAN UnwindAllocationSizeSet
= FALSE
;
1141 BOOLEAN UnwindCacheManagerInformed
= FALSE
;
1142 BOOLEAN UnwindWeInitializedMcb
= FALSE
;
1146 DebugTrace(+1, Dbg
, "FatAddFileAllocation\n", 0);
1147 DebugTrace( 0, Dbg
, " FcbOrDcb = %8lx\n", FcbOrDcb
);
1148 DebugTrace( 0, Dbg
, " DesiredAllocationSize = %8lx\n", DesiredAllocationSize
);
1151 // If we haven't yet set the correct AllocationSize, do so.
1154 if (FcbOrDcb
->Header
.AllocationSize
.QuadPart
== FCB_LOOKUP_ALLOCATIONSIZE_HINT
) {
1156 FatLookupFileAllocationSize( IrpContext
, FcbOrDcb
);
1160 // Check for the benign case that the desired allocation is already
1161 // within the allocation size.
1164 if (DesiredAllocationSize
<= FcbOrDcb
->Header
.AllocationSize
.LowPart
) {
1166 DebugTrace(0, Dbg
, "Desired size within current allocation.\n", 0);
1168 DebugTrace(-1, Dbg
, "FatAddFileAllocation -> (VOID)\n", 0);
1172 DebugTrace( 0, Dbg
, "InitialAllocation = %08lx.\n", FcbOrDcb
->Header
.AllocationSize
.LowPart
);
1175 // Get a chunk of disk space that will fulfill our needs. If there
1176 // was no initial allocation, start from the hint in the Vcb, otherwise
1177 // try to allocate from the cluster after the initial allocation.
1179 // If there was no initial allocation to the file, we can just use the
1180 // Mcb in the FcbOrDcb, otherwise we have to use a new one, and merge
1181 // it to the one in the FcbOrDcb.
1184 Vcb
= FcbOrDcb
->Vcb
;
1188 if (FcbOrDcb
->Header
.AllocationSize
.LowPart
== 0) {
1192 ASSERT( FcbOrDcb
->FcbCondition
== FcbGood
);
1194 FatGetDirentFromFcbOrDcb( IrpContext
,
1199 ASSERT( Bcb
!= NULL
);
1202 // Set this dirty right now since this call can fail.
1205 FatSetDirtyBcb( IrpContext
, Bcb
, Vcb
, TRUE
);
1208 FatAllocateDiskSpace( IrpContext
,
1211 &DesiredAllocationSize
,
1215 UnwindWeAllocatedDiskSpace
= TRUE
;
1216 McbToCleanup
= &FcbOrDcb
->Mcb
;
1219 // We have to update the dirent and FcbOrDcb copies of
1220 // FirstClusterOfFile since before it was 0
1223 FatLookupMcbEntry( FcbOrDcb
->Vcb
,
1230 DebugTrace( 0, Dbg
, "First Lbo of file will be %08lx.\n", FirstLboOfFile
);
1232 FcbOrDcb
->FirstClusterOfFile
= FatGetIndexFromLbo( Vcb
, FirstLboOfFile
);
1234 Dirent
->FirstClusterOfFile
= (USHORT
)FcbOrDcb
->FirstClusterOfFile
;
1236 if ( FatIsFat32(Vcb
) ) {
1238 Dirent
->FirstClusterOfFileHi
= (USHORT
)(FcbOrDcb
->FirstClusterOfFile
>> 16);
1242 // Note the size of the allocation we need to tell the cache manager about.
1245 NewAllocation
= DesiredAllocationSize
;
1249 LBO LastAllocatedLbo
;
1253 // Get the first cluster following the current allocation. It is possible
1254 // the Mcb is empty (or short, etc.) so we need to be slightly careful
1255 // about making sure we don't lie with the hint.
1258 (void)FatLookupLastMcbEntry( FcbOrDcb
->Vcb
, &FcbOrDcb
->Mcb
, &DontCare
, &LastAllocatedLbo
, NULL
);
1261 // Try to get some disk space starting from there.
1264 NewAllocation
= DesiredAllocationSize
- FcbOrDcb
->Header
.AllocationSize
.LowPart
;
1266 FsRtlInitializeLargeMcb( &NewMcb
, PagedPool
);
1267 UnwindWeInitializedMcb
= TRUE
;
1268 McbToCleanup
= &NewMcb
;
1270 FatAllocateDiskSpace( IrpContext
,
1272 (LastAllocatedLbo
!= ~0 ?
1273 FatGetIndexFromLbo(Vcb
,LastAllocatedLbo
+ 1) :
1279 UnwindWeAllocatedDiskSpace
= TRUE
;
1283 // Now that we increased the allocation of the file, mark it in the
1284 // FcbOrDcb. Carefully prepare to handle an inability to grow the cache
1288 FcbOrDcb
->Header
.AllocationSize
.LowPart
+= NewAllocation
;
1291 // Handle the maximal file case, where we may have just wrapped. Note
1292 // that this must be the precise boundary case wrap, i.e. by one byte,
1293 // so that the new allocation is actually one byte "less" as far as we're
1294 // concerned. This is important for the extension case.
1297 if (FcbOrDcb
->Header
.AllocationSize
.LowPart
== 0) {
1300 FcbOrDcb
->Header
.AllocationSize
.LowPart
= 0xffffffff;
1303 UnwindAllocationSizeSet
= TRUE
;
1306 // Inform the cache manager to increase the section size
1309 if ( ARGUMENT_PRESENT(FileObject
) && CcIsFileCached(FileObject
) ) {
1311 CcSetFileSizes( FileObject
,
1312 (PCC_FILE_SIZES
)&FcbOrDcb
->Header
.AllocationSize
);
1313 UnwindCacheManagerInformed
= TRUE
;
1317 // In the extension case, we have held off actually gluing the new
1318 // allocation onto the file. This simplifies exception cleanup since
1319 // if it was already added and the section grow failed, we'd have to
1320 // do extra work to unglue it. This way, we can assume that if we
1321 // raise the only thing we need to do is deallocate the disk space.
1323 // Merge the allocation now.
1326 if (FcbOrDcb
->Header
.AllocationSize
.LowPart
!= NewAllocation
) {
1329 // Tack the new Mcb onto the end of the FcbOrDcb one.
1332 FatMergeAllocation( IrpContext
,
1340 DebugUnwind( FatAddFileAllocation
);
1343 // Give FlushFileBuffer a clue here.
1346 SetFlag(FcbOrDcb
->FcbState
, FCB_STATE_FLUSH_FAT
);
1349 // If we were dogged trying to complete this operation, we need to
1350 // back out various things.
1353 if (_SEH2_AbnormalTermination()) {
1356 // Pull off the allocation size we tried to add to this object if
1357 // we failed to grow cache structures or Mcb structures.
1360 if (UnwindAllocationSizeSet
) {
1362 FcbOrDcb
->Header
.AllocationSize
.LowPart
-= NewAllocation
;
1365 if (UnwindCacheManagerInformed
) {
1367 CcSetFileSizes( FileObject
,
1368 (PCC_FILE_SIZES
)&FcbOrDcb
->Header
.AllocationSize
);
1372 // In the case of initial allocation, we used the Fcb's Mcb and have
1373 // to clean that up as well as the FAT chain references.
1376 if (FcbOrDcb
->Header
.AllocationSize
.LowPart
== 0) {
1378 if (Dirent
!= NULL
) {
1380 FcbOrDcb
->FirstClusterOfFile
= 0;
1381 Dirent
->FirstClusterOfFile
= 0;
1383 if ( FatIsFat32(Vcb
) ) {
1385 Dirent
->FirstClusterOfFileHi
= 0;
1391 // ... and drop the dirent Bcb if we got it. Do it now
1392 // so we can afford to take the exception if we have to.
1395 FatUnpinBcb( IrpContext
, Bcb
);
1400 // Note this can re-raise.
1403 if ( UnwindWeAllocatedDiskSpace
) {
1405 FatDeallocateDiskSpace( IrpContext
, Vcb
, McbToCleanup
);
1411 // We always want to clean up the non-initial allocation temporary Mcb,
1412 // otherwise we have the Fcb's Mcb and we just truncate it away.
1415 if (UnwindWeInitializedMcb
== TRUE
) {
1418 // Note that we already know a raise is in progress. No danger
1419 // of encountering the normal case code below and doing this again.
1422 FsRtlUninitializeLargeMcb( McbToCleanup
);
1428 FsRtlTruncateLargeMcb( McbToCleanup
, 0 );
1434 DebugTrace(-1, Dbg
, "FatAddFileAllocation -> (VOID)\n", 0);
1438 // Non-exceptional cleanup we always want to do. In handling the re-raise possibilities
1439 // during exceptions we had to make sure these two steps always happened there beforehand.
1440 // So now we handle the usual case.
1443 FatUnpinBcb( IrpContext
, Bcb
);
1445 if (UnwindWeInitializedMcb
== TRUE
) {
1447 FsRtlUninitializeLargeMcb( &NewMcb
);
//
// FatTruncateFileAllocation - shrinks the allocation of a file/directory to
// DesiredAllocationSize (rounded up to a whole cluster). A no-op when the
// file is already small enough. Truncation to zero additionally clears
// FirstClusterOfFile in both the in-memory Fcb and the on-disk dirent.
//
// NOTE(review): this extraction is missing lines relative to the original
// source (closing braces, the VOID return-type line, the FcbOrDcb parameter
// line, try/finally markers). Comments below are inserted between statements
// only; the surviving code text is untouched.
//
1453 FatTruncateFileAllocation (
1454 IN PIRP_CONTEXT IrpContext
,
1456 IN ULONG DesiredAllocationSize
1461 Routine Description:
1463 This routine truncates the allocation to the specified file/directory.
1465 If the file is already smaller than the indicated size then this procedure
1466 is effectively a noop.
1471 FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified
1472 This parameter must not specify the root dcb.
1474 DesiredAllocationSize - Supplies the maximum size, in bytes, that we want
1475 allocated to the file/directory. It is rounded
1476 up to the nearest cluster.
1480 VOID - TRUE if the operation completed and FALSE if it had to
1481 block but could not.
//
// Local state. The Unwind* variables capture pre-truncation values so the
// exception path below can restore the Fcb/dirent to a consistent state.
//
1488 LARGE_MCB RemainingMcb
;
1489 ULONG BytesPerCluster
;
1490 PDIRENT Dirent
= NULL
;
1491 BOOLEAN UpdatedDirent
= FALSE
;
1493 ULONG UnwindInitialAllocationSize
;
1494 ULONG UnwindInitialFirstClusterOfFile
;
1495 BOOLEAN UnwindWeAllocatedMcb
= FALSE
;
1499 DebugTrace(+1, Dbg
, "FatTruncateFileAllocation\n", 0);
1500 DebugTrace( 0, Dbg
, " FcbOrDcb = %8lx\n", FcbOrDcb
);
1501 DebugTrace( 0, Dbg
, " DesiredAllocationSize = %8lx\n", DesiredAllocationSize
);
1504 // If the Fcb isn't in good condition, we have no business whacking around on
1505 // the disk after "its" clusters.
1507 // Inspired by a Prefix complaint.
1510 ASSERT( FcbOrDcb
->FcbCondition
== FcbGood
);
1513 // If we haven't yet set the correct AllocationSize, do so.
1516 if (FcbOrDcb
->Header
.AllocationSize
.QuadPart
== FCB_LOOKUP_ALLOCATIONSIZE_HINT
) {
1518 FatLookupFileAllocationSize( IrpContext
, FcbOrDcb
);
1522 // Round up the Desired Allocation Size to the next cluster size
1525 Vcb
= FcbOrDcb
->Vcb
;
1527 BytesPerCluster
= 1 << Vcb
->AllocationSupport
.LogOfBytesPerCluster
;
1530 // Note if the desired allocation is zero, to distinguish this from
1531 // the wrap case below.
1534 if (DesiredAllocationSize
!= 0) {
1536 DesiredAllocationSize
= (DesiredAllocationSize
+ (BytesPerCluster
- 1)) &
1537 ~(BytesPerCluster
- 1);
1539 // Check for the benign case that the file is already smaller than
1540 // the desired truncation. Note that if it wraps, then a) it was
1541 // specifying an offset in the maximally allocatable cluster and
1542 // b) we're not asking to extend the file, either. So stop.
1545 if (DesiredAllocationSize
== 0 ||
1546 DesiredAllocationSize
>= FcbOrDcb
->Header
.AllocationSize
.LowPart
) {
1548 DebugTrace(0, Dbg
, "Desired size within current allocation.\n", 0);
1550 DebugTrace(-1, Dbg
, "FatTruncateFileAllocation -> (VOID)\n", 0);
//
// Snapshot the current allocation size and first cluster for the
// exception unwind path.
//
1556 UnwindInitialAllocationSize
= FcbOrDcb
->Header
.AllocationSize
.LowPart
;
1557 UnwindInitialFirstClusterOfFile
= FcbOrDcb
->FirstClusterOfFile
;
1560 // Update the FcbOrDcb allocation size. If it is now zero, we have the
1561 // additional task of modifying the FcbOrDcb and Dirent copies of
1562 // FirstClusterInFile.
1564 // Note that we must pin the dirent before actually deallocating the
1565 // disk space since, in unwind, it would not be possible to reallocate
1566 // deallocated disk space as someone else may have reallocated it and
1567 // may cause an exception when you try to get some more disk space.
1568 // Thus FatDeallocateDiskSpace must be the final dangerous operation.
1573 FcbOrDcb
->Header
.AllocationSize
.QuadPart
= DesiredAllocationSize
;
//
// Truncating to zero length: zero FirstClusterOfFile in the dirent
// (including the FAT32 high word) and in the Fcb, mark the dirent
// dirty, then free the whole FAT chain and empty the Mcb.
//
1579 if (DesiredAllocationSize
== 0) {
1582 // We have to update the dirent and FcbOrDcb copies of
1583 // FirstClusterOfFile since before it was 0
1586 ASSERT( FcbOrDcb
->FcbCondition
== FcbGood
);
1588 FatGetDirentFromFcbOrDcb( IrpContext
, FcbOrDcb
, &Dirent
, &Bcb
);
1590 ASSERT( Dirent
&& Bcb
);
1592 Dirent
->FirstClusterOfFile
= 0;
1594 if (FatIsFat32(Vcb
)) {
1596 Dirent
->FirstClusterOfFileHi
= 0;
1599 FcbOrDcb
->FirstClusterOfFile
= 0;
1601 FatSetDirtyBcb( IrpContext
, Bcb
, Vcb
, TRUE
);
1602 UpdatedDirent
= TRUE
;
1604 FatDeallocateDiskSpace( IrpContext
, Vcb
, &FcbOrDcb
->Mcb
);
1606 FatRemoveMcbEntry( FcbOrDcb
->Vcb
, &FcbOrDcb
->Mcb
, 0, 0xFFFFFFFF );
1611 // Split the existing allocation into two parts, one we will keep, and
1612 // one we will deallocate.
1615 FsRtlInitializeLargeMcb( &RemainingMcb
, PagedPool
);
1616 UnwindWeAllocatedMcb
= TRUE
;
//
// NOTE(review): the argument list of this FatSplitAllocation call is
// incomplete in this extraction — presumably it passes Vcb,
// &FcbOrDcb->Mcb, DesiredAllocationSize and &RemainingMcb; verify
// against the original source.
//
1618 FatSplitAllocation( IrpContext
,
1621 DesiredAllocationSize
,
1624 FatDeallocateDiskSpace( IrpContext
, Vcb
, &RemainingMcb
);
1626 FsRtlUninitializeLargeMcb( &RemainingMcb
);
1631 DebugUnwind( FatTruncateFileAllocation
);
1634 // Is this really the right backout strategy? It would be nice if we could
1635 // pretend the truncate worked if we knew that the file had gotten into
1636 // a consistent state. Leaving dangled clusters is probably quite preferable.
//
// Exception unwind: restore the allocation size; if we had already
// committed the zero-length dirent update, keep the zero-length state
// (possibly leaking clusters until chkdsk) instead of restoring.
//
1639 if ( _SEH2_AbnormalTermination() ) {
1641 FcbOrDcb
->Header
.AllocationSize
.LowPart
= UnwindInitialAllocationSize
;
1643 if ( (DesiredAllocationSize
== 0) && (Dirent
!= NULL
)) {
1645 if (UpdatedDirent
) {
1648 // If the dirent has been updated ok and marked dirty, then we
1649 // failed in deallocatediscspace, and don't know what state
1650 // the on disc fat chain is in. So we throw away the mcb,
1651 // and potentially loose a few clusters until the next
1652 // chkdsk. The operation has succeeded, but the exception
1653 // will still propogate. 5.1
1656 FatRemoveMcbEntry( Vcb
, &FcbOrDcb
->Mcb
, 0, 0xFFFFFFFF );
1657 FcbOrDcb
->Header
.AllocationSize
.QuadPart
= 0;
1661 Dirent
->FirstClusterOfFile
= (USHORT
)UnwindInitialFirstClusterOfFile
;
1663 if ( FatIsFat32(Vcb
) ) {
1665 Dirent
->FirstClusterOfFileHi
=
1666 (USHORT
)(UnwindInitialFirstClusterOfFile
>> 16);
1669 FcbOrDcb
->FirstClusterOfFile
= UnwindInitialFirstClusterOfFile
;
1673 if ( UnwindWeAllocatedMcb
) {
1675 FsRtlUninitializeLargeMcb( &RemainingMcb
);
1679 // Note that in the non zero truncation case, we will also
1680 // leak clusters. However, apart from this, the in memory and on disc
1681 // structures will agree.
1684 FatUnpinBcb( IrpContext
, Bcb
);
1687 // Give FlushFileBuffer a clue here.
1690 SetFlag(FcbOrDcb
->FcbState
, FCB_STATE_FLUSH_FAT
);
1692 DebugTrace(-1, Dbg
, "FatTruncateFileAllocation -> (VOID)\n", 0);
//
// FatLookupFileAllocationSize - walks the file's FAT chain (by asking for a
// Vbo beyond any possible allocation) so that Header.AllocationSize reflects
// the true on-disk chain length, then sanity-checks it against FileSize and
// raises STATUS_FILE_CORRUPT_ERROR on mismatch.
//
// NOTE(review): lines are missing from this extraction (return type,
// FcbOrDcb parameter, the remaining FatLookupFileAllocation arguments,
// braces). Comments only have been added; the surviving code is untouched.
//
1698 FatLookupFileAllocationSize (
1699 IN PIRP_CONTEXT IrpContext
,
1705 Routine Description:
1707 This routine retrieves the current file allocatio size for the
1708 specified file/directory.
1712 FcbOrDcb - Supplies the Fcb/Dcb of the file/directory being modified
1723 DebugTrace(+1, Dbg
, "FatLookupAllocationSize\n", 0);
1724 DebugTrace( 0, Dbg
, " FcbOrDcb = %8lx\n", FcbOrDcb
);
1727 // We call FatLookupFileAllocation with Vbo of 0xffffffff - 1.
1730 FatLookupFileAllocation( IrpContext
,
1740 // FileSize was set at Fcb creation time from the contents of the directory entry,
1741 // and we are only now looking up the real length of the allocation chain. If it
1742 // cannot be contained, this is trash. Probably more where that came from.
//
// A FileSize larger than the real allocation means the directory entry
// and the FAT chain disagree: treat the file as corrupt and raise.
//
1745 if (FcbOrDcb
->Header
.FileSize
.LowPart
> FcbOrDcb
->Header
.AllocationSize
.LowPart
) {
1747 FatPopUpFileCorrupt( IrpContext
, FcbOrDcb
);
1748 FatRaiseStatus( IrpContext
, STATUS_FILE_CORRUPT_ERROR
);
1751 DebugTrace(-1, Dbg
, "FatLookupFileAllocationSize -> (VOID)\n", 0);
//
// FatAllocateDiskSpace - allocates *ByteCount bytes (rounded up to whole
// clusters) of disk space, recording the runs in the caller-supplied Mcb and
// linking them on the FAT. The free-cluster count is debited up front under
// the FreeClusterBitMap mutex so concurrent allocators cannot oversubscribe;
// STATUS_DISK_FULL is raised if not enough clusters exist. A fast path takes
// a single contiguous run near the hint; otherwise a loop gathers multiple
// runs (switching FAT32 bitmap windows as needed), honoring
// ExactMatchRequired by giving up rather than fragmenting.
//
// NOTE(review): this extraction is missing lines (braces, else keywords,
// several argument lines, some local declarations such as ClusterCount,
// Index, Cluster, BytesFound, CurrentVbo, Wait, NextWindow, and the
// try/finally markers). Comments only have been inserted; surviving code
// text is untouched.
//
1757 FatAllocateDiskSpace (
1758 IN PIRP_CONTEXT IrpContext
,
1760 IN ULONG AbsoluteClusterHint
,
1761 IN PULONG ByteCount
,
1762 IN BOOLEAN ExactMatchRequired
,
1768 Routine Description:
1770 This procedure allocates additional disk space and builds an mcb
1771 representing the newly allocated space. If the space cannot be
1772 allocated then this procedure raises an appropriate status.
1774 Searching starts from the hint index in the Vcb unless an alternative
1775 non-zero hint is given in AlternateClusterHint. If we are using the
1776 hint field in the Vcb, it is set to the cluster following our allocation
1779 Disk space can only be allocated in cluster units so this procedure
1780 will round up any byte count to the next cluster boundary.
1782 Pictorially what is done is the following (where ! denotes the end of
1783 the fat chain (i.e., FAT_CLUSTER_LAST)):
1790 Mcb |--a--|--b--|--c--!
1793 ByteCount ----------+
1797 Vcb - Supplies the VCB being modified
1799 AbsoluteClusterHint - Supplies an alternate hint index to start the
1800 search from. If this is zero we use, and update,
1803 ByteCount - Supplies the number of bytes that we are requesting, and
1804 receives the number of bytes that we got.
1806 ExactMatchRequired - Caller should set this to TRUE if only the precise run requested
1809 Mcb - Receives the MCB describing the newly allocated disk space. The
1810 caller passes in an initialized Mcb that is filled in by this procedure.
1815 FALSE - Failed to allocate exactly as requested (=> ExactMatchRequired was TRUE)
1820 UCHAR LogOfBytesPerCluster
;
1821 ULONG BytesPerCluster
;
1822 ULONG StartingCluster
;
1824 ULONG WindowRelativeHint
;
1829 ULONG PreviousClear
;
1834 BOOLEAN Result
= TRUE
;
1838 DebugTrace(+1, Dbg
, "FatAllocateDiskSpace\n", 0);
1839 DebugTrace( 0, Dbg
, " Vcb = %8lx\n", Vcb
);
1840 DebugTrace( 0, Dbg
, " *ByteCount = %8lx\n", *ByteCount
);
1841 DebugTrace( 0, Dbg
, " Mcb = %8lx\n", Mcb
);
1842 DebugTrace( 0, Dbg
, " Hint = %8lx\n", AbsoluteClusterHint
);
// Cluster 1 is never a valid hint; clusters are 2-biased on FAT volumes.
1844 ASSERT((AbsoluteClusterHint
<= Vcb
->AllocationSupport
.NumberOfClusters
+ 2) && (1 != AbsoluteClusterHint
));
1847 // Make sure byte count is not zero
1850 if (*ByteCount
== 0) {
1852 DebugTrace(0, Dbg
, "Nothing to allocate.\n", 0);
1854 DebugTrace(-1, Dbg
, "FatAllocateDiskSpace -> (VOID)\n", 0);
1859 // Compute the cluster count based on the byte count, rounding up
1860 // to the next cluster if there is any remainder. Note that the
1861 // pathalogical case BytesCount == 0 has been eliminated above.
1864 LogOfBytesPerCluster
= Vcb
->AllocationSupport
.LogOfBytesPerCluster
;
1865 BytesPerCluster
= 1 << LogOfBytesPerCluster
;
1867 *ByteCount
= (*ByteCount
+ (BytesPerCluster
- 1))
1868 & ~(BytesPerCluster
- 1);
1871 // If ByteCount is NOW zero, then we were asked for the maximal
1872 // filesize (or at least for bytes in the last allocatable sector).
1875 if (*ByteCount
== 0) {
1877 *ByteCount
= 0xffffffff;
1878 ClusterCount
= 1 << (32 - LogOfBytesPerCluster
);
1882 ClusterCount
= (*ByteCount
>> LogOfBytesPerCluster
);
1886 // Make sure there are enough free clusters to start with, and
1887 // take them now so that nobody else takes them from us.
1890 ExAcquireResourceSharedLite(&Vcb
->ChangeBitMapResource
, TRUE
);
1891 FatLockFreeClusterBitMap( Vcb
);
1893 if (ClusterCount
<= Vcb
->AllocationSupport
.NumberOfFreeClusters
) {
// Debit the free count immediately so concurrent allocators can't
// claim the same clusters; re-credited on the unwind path below.
1895 Vcb
->AllocationSupport
.NumberOfFreeClusters
-= ClusterCount
;
1899 FatUnlockFreeClusterBitMap( Vcb
);
1900 ExReleaseResourceLite(&Vcb
->ChangeBitMapResource
);
1902 DebugTrace(0, Dbg
, "Disk Full. Raise Status.\n", 0);
1903 FatRaiseStatus( IrpContext
, STATUS_DISK_FULL
);
1907 // Did the caller supply a hint?
1910 if ((0 != AbsoluteClusterHint
) && (AbsoluteClusterHint
< (Vcb
->AllocationSupport
.NumberOfClusters
+ 2))) {
1912 if (Vcb
->NumberOfWindows
> 1) {
1915 // If we're being called upon to allocate clusters outside the
1916 // current window (which happens only via MoveFile), it's a problem.
1917 // We address this by changing the current window to be the one which
1918 // contains the alternate cluster hint. Note that if the user's
1919 // request would cross a window boundary, he doesn't really get what
1923 if (AbsoluteClusterHint
< Vcb
->CurrentWindow
->FirstCluster
||
1924 AbsoluteClusterHint
> Vcb
->CurrentWindow
->LastCluster
) {
1926 ULONG BucketNum
= FatWindowOfCluster( AbsoluteClusterHint
);
1928 ASSERT( BucketNum
< Vcb
->NumberOfWindows
);
1931 // Drop our shared lock on the ChangeBitMapResource, and pick it up again
1932 // exclusive in preparation for making the window swap.
1935 FatUnlockFreeClusterBitMap(Vcb
);
1936 ExReleaseResourceLite(&Vcb
->ChangeBitMapResource
);
1937 ExAcquireResourceExclusiveLite(&Vcb
->ChangeBitMapResource
, TRUE
);
1938 FatLockFreeClusterBitMap(Vcb
);
1940 Window
= &Vcb
->Windows
[BucketNum
];
1943 // Again, test the current window against the one we want - some other
1944 // thread could have sneaked in behind our backs and kindly set it to the one
1945 // we need, when we dropped and reacquired the ChangeBitMapResource above.
1948 if (Window
!= Vcb
->CurrentWindow
) {
// Force IRP_CONTEXT_FLAG_WAIT for the window scan; the prior
// value is saved in Wait and restored via ClearFlag below.
1952 Wait
= BooleanFlagOn(IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
1953 SetFlag(IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
1956 // Change to the new window (update Vcb->CurrentWindow) and scan it
1957 // to build up a freespace bitmap etc.
1960 FatExamineFatEntries( IrpContext
, Vcb
,
1971 ClearFlag(IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
1974 if (_SEH2_AbnormalTermination()) {
1977 // We will have raised as a result of failing to pick up the
1978 // chunk of the FAT for this window move. Release our resources
1979 // and return the cluster count to the volume.
1982 Vcb
->AllocationSupport
.NumberOfFreeClusters
+= ClusterCount
;
1984 FatUnlockFreeClusterBitMap( Vcb
);
1985 ExReleaseResourceLite(&Vcb
->ChangeBitMapResource
);
1992 // Make the hint cluster number relative to the base of the current window...
1994 // Currentwindow->Firstcluster is baised by +2 already, so we will lose the
1995 // bias already in AbsoluteClusterHint. Put it back....
1998 WindowRelativeHint
= AbsoluteClusterHint
- Vcb
->CurrentWindow
->FirstCluster
+ 2;
2003 // Only one 'window', ie fat16/12. No modification necessary.
2006 WindowRelativeHint
= AbsoluteClusterHint
;
2012 // Either no hint supplied, or it was out of range, so grab one from the Vcb
2014 // NOTE: Clusterhint in the Vcb is not guaranteed to be set (may be -1)
2017 WindowRelativeHint
= Vcb
->ClusterHint
;
2018 AbsoluteClusterHint
= 0;
2021 // Vcb hint may not have been initialized yet. Force to valid cluster.
2024 if (-1 == WindowRelativeHint
) {
2026 WindowRelativeHint
= 2;
2030 ASSERT((WindowRelativeHint
>= 2) && (WindowRelativeHint
< Vcb
->FreeClusterBitMap
.SizeOfBitMap
+ 2));
2033 // Keep track of the window we're allocating from, so we can clean
2034 // up correctly if the current window changes after we unlock the
2038 Window
= Vcb
->CurrentWindow
;
2041 // Try to find a run of free clusters large enough for us.
2044 StartingCluster
= FatFindFreeClusterRun( IrpContext
,
2047 WindowRelativeHint
);
2049 // If the above call was successful, we can just update the fat
2050 // and Mcb and exit. Otherwise we have to look for smaller free
2053 // This test is a bit funky. Note that the error return from
2054 // RtlFindClearBits is -1, and adding two to that is 1.
2057 if ((StartingCluster
!= 1) &&
2058 ((0 == AbsoluteClusterHint
) || (StartingCluster
== WindowRelativeHint
))
// Fast path: a single contiguous run satisfies the whole request.
2062 PreviousClear
= RtlNumberOfClearBits( &Vcb
->FreeClusterBitMap
);
2066 // Take the clusters we found, and unlock the bit map.
2069 FatReserveClusters(IrpContext
, Vcb
, StartingCluster
, ClusterCount
);
2071 Window
->ClustersFree
-= ClusterCount
;
// Convert the window-relative (2-biased) index to a volume cluster.
2073 StartingCluster
+= Window
->FirstCluster
;
2074 StartingCluster
-= 2;
2076 ASSERT( PreviousClear
- ClusterCount
== Window
->ClustersFree
);
2078 FatUnlockFreeClusterBitMap( Vcb
);
2081 // Note that this call will never fail since there is always
2082 // room for one entry in an empty Mcb.
2085 FatAddMcbEntry( Vcb
, Mcb
,
2087 FatGetLboFromIndex( Vcb
, StartingCluster
),
2095 FatAllocateClusters(IrpContext
, Vcb
,
2101 DebugUnwind( FatAllocateDiskSpace
);
2104 // If the allocate clusters failed, remove the run from the Mcb,
2105 // unreserve the clusters, and reset the free cluster count.
2108 if (_SEH2_AbnormalTermination()) {
2110 FatRemoveMcbEntry( Vcb
, Mcb
, 0, *ByteCount
);
2112 FatLockFreeClusterBitMap( Vcb
);
2114 // Only clear bits if the bitmap window is the same.
2116 if (Window
== Vcb
->CurrentWindow
) {
2118 // Both values (startingcluster and window->firstcluster) are
2119 // already biased by 2, so will cancel, so we need to add in the 2 again.
2121 FatUnreserveClusters( IrpContext
, Vcb
,
2122 StartingCluster
- Window
->FirstCluster
+ 2,
2126 Window
->ClustersFree
+= ClusterCount
;
2127 Vcb
->AllocationSupport
.NumberOfFreeClusters
+= ClusterCount
;
2129 FatUnlockFreeClusterBitMap( Vcb
);
2132 ExReleaseResourceLite(&Vcb
->ChangeBitMapResource
);
2138 // Note that Index is a zero-based window-relative number. When appropriate
2139 // it'll get converted into a true cluster number and put in Cluster, which
2140 // will be a volume relative true cluster number.
//
// Slow path: no single run near the hint was available. Gather the
// allocation from multiple runs (and, on FAT32, multiple bitmap
// windows), chaining each run to the previous one on the FAT.
//
2146 ULONG PriorLastCluster
;
2149 ULONG ClustersFound
= 0;
2150 ULONG ClustersRemaining
;
2152 BOOLEAN LockedBitMap
= FALSE
;
2153 BOOLEAN SelectNextContigWindow
= FALSE
;
2156 // Drop our shared lock on the ChangeBitMapResource, and pick it up again
2157 // exclusive in preparation for making a window swap.
2160 FatUnlockFreeClusterBitMap(Vcb
);
2161 ExReleaseResourceLite(&Vcb
->ChangeBitMapResource
);
2162 ExAcquireResourceExclusiveLite(&Vcb
->ChangeBitMapResource
, TRUE
);
2163 FatLockFreeClusterBitMap(Vcb
);
2164 LockedBitMap
= TRUE
;
2168 if ( ExactMatchRequired
&& (1 == Vcb
->NumberOfWindows
)) {
2171 // Give up right now, there are no more windows to search! RtlFindClearBits
2172 // searchs the whole bitmap, so we would have found any contiguous run
2176 try_leave( Result
= FALSE
);
2180 // While the request is still incomplete, look for the largest
2181 // run of free clusters, mark them taken, allocate the run in
2182 // the Mcb and Fat, and if this isn't the first time through
2183 // the loop link it to prior run on the fat. The Mcb will
2184 // coalesce automatically.
2187 ClustersRemaining
= ClusterCount
;
2189 PriorLastCluster
= 0;
2191 while (ClustersRemaining
!= 0) {
2194 // If we just entered the loop, the bit map is already locked
2197 if ( !LockedBitMap
) {
2199 FatLockFreeClusterBitMap( Vcb
);
2200 LockedBitMap
= TRUE
;
2204 // Find the largest run of free clusters. If the run is
2205 // bigger than we need, only use what we need. Note that
2206 // this will then be the last while() iteration.
2209 // 12/3/95: need to bias bitmap by 2 bits for the defrag
2210 // hooks and the below macro became impossible to do without in-line
2213 // ClustersFound = FatLongestFreeClusterRun( IrpContext, Vcb, &Index );
2217 if (!SelectNextContigWindow
) {
2219 if ( 0 != WindowRelativeHint
) {
2221 ULONG Desired
= Vcb
->FreeClusterBitMap
.SizeOfBitMap
- (WindowRelativeHint
- 2);
2224 // We will try to allocate contiguously. Try from the current hint the to
2225 // end of current window. Don't try for more than we actually need.
2228 if (Desired
> ClustersRemaining
) {
2230 Desired
= ClustersRemaining
;
2233 if (RtlAreBitsClear( &Vcb
->FreeClusterBitMap
,
2234 WindowRelativeHint
- 2,
2238 // Clusters from hint->...windowend are free. Take them.
2241 Index
= WindowRelativeHint
- 2;
2242 ClustersFound
= Desired
;
2244 if (FatIsFat32(Vcb
)) {
2247 // We're now up against the end of the current window, so indicate that we
2248 // want the next window in the sequence next time around. (If we're not up
2249 // against the end of the window, then we got what we needed and won't be
2250 // coming around again anyway).
2253 SelectNextContigWindow
= TRUE
;
2254 WindowRelativeHint
= 2;
2259 // FAT 12/16 - we've run up against the end of the volume. Clear the
2260 // hint, since we now have no idea where to look.
2263 WindowRelativeHint
= 0;
2266 PreviousClear
= RtlNumberOfClearBits( &Vcb
->FreeClusterBitMap
);
2271 if (ExactMatchRequired
) {
2274 // If our caller required an exact match, then we're hosed. Bail out now.
2277 try_leave( Result
= FALSE
);
2281 // Hint failed, drop back to pot luck
2284 WindowRelativeHint
= 0;
2288 if ((0 == WindowRelativeHint
) && (0 == ClustersFound
)) {
2290 if (ClustersRemaining
<= Vcb
->CurrentWindow
->ClustersFree
) {
2293 // The remaining allocation could be satisfied entirely from this
2294 // window. We will ask only for what we need, to try and avoid
2295 // unnecessarily fragmenting large runs of space by always using
2296 // (part of) the largest run we can find. This call will return the
2297 // first run large enough.
2300 Index
= RtlFindClearBits( &Vcb
->FreeClusterBitMap
, ClustersRemaining
, 0);
2304 ClustersFound
= ClustersRemaining
;
2308 if (0 == ClustersFound
) {
2311 // Still nothing, so just take the largest free run we can find.
2314 ClustersFound
= RtlFindLongestRunClear( &Vcb
->FreeClusterBitMap
, &Index
);
2318 PreviousClear
= RtlNumberOfClearBits( &Vcb
->FreeClusterBitMap
);
2320 if (ClustersFound
>= ClustersRemaining
) {
2322 ClustersFound
= ClustersRemaining
;
2327 // If we just ran up to the end of a window, set up a hint that
2328 // we'd like the next consecutive window after this one. (FAT32 only)
2331 if ( ((Index
+ ClustersFound
) == Vcb
->FreeClusterBitMap
.SizeOfBitMap
) &&
2335 SelectNextContigWindow
= TRUE
;
2336 WindowRelativeHint
= 2;
//
// Nothing free in this window at all: pick another window (the next
// consecutive one if we were allocating contiguously, otherwise the
// best candidate) and rescan the FAT for it.
//
2342 if (ClustersFound
== 0) {
2344 ULONG FaveWindow
= 0;
2345 BOOLEAN SelectedWindow
;
2348 // If we found no free clusters on a single-window FAT,
2349 // there was a bad problem with the free cluster count.
2352 if (1 == Vcb
->NumberOfWindows
) {
2354 FatBugCheck( 0, 5, 0 );
2358 // Switch to a new bucket. Possibly the next one if we're
2359 // currently on a roll (allocating contiguously)
2362 SelectedWindow
= FALSE
;
2364 if ( SelectNextContigWindow
) {
2368 NextWindow
= (((ULONG
)((PUCHAR
)Vcb
->CurrentWindow
- (PUCHAR
)Vcb
->Windows
)) / sizeof( FAT_WINDOW
)) + 1;
2370 if ((NextWindow
< Vcb
->NumberOfWindows
) &&
2371 ( Vcb
->Windows
[ NextWindow
].ClustersFree
> 0)
2374 FaveWindow
= NextWindow
;
2375 SelectedWindow
= TRUE
;
2379 if (ExactMatchRequired
) {
2382 // Some dope tried to allocate a run past the end of the volume...
2385 try_leave( Result
= FALSE
);
2389 // Give up on the contiguous allocation attempts
2392 WindowRelativeHint
= 0;
2395 SelectNextContigWindow
= FALSE
;
2398 if (!SelectedWindow
) {
2401 // Select a new window to begin allocating from
2404 FaveWindow
= FatSelectBestWindow( Vcb
);
2408 // By now we'd better have found a window with some free clusters
2411 if (0 == Vcb
->Windows
[ FaveWindow
].ClustersFree
) {
2413 FatBugCheck( 0, 5, 1 );
2416 Wait
= BooleanFlagOn(IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
2417 SetFlag(IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
2419 FatExamineFatEntries( IrpContext
, Vcb
,
2423 &Vcb
->Windows
[FaveWindow
],
2428 ClearFlag(IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
2432 // Now we'll just go around the loop again, having switched windows,
2436 PreviousClear
= RtlNumberOfClearBits( &Vcb
->FreeClusterBitMap
);
2438 } // if (clustersfound == 0)
2442 // Take the clusters we found, convert our index to a cluster number
2443 // and unlock the bit map.
2446 Window
= Vcb
->CurrentWindow
;
2448 FatReserveClusters( IrpContext
, Vcb
, (Index
+ 2), ClustersFound
);
2450 Cluster
= Index
+ Window
->FirstCluster
;
2452 Window
->ClustersFree
-= ClustersFound
;
2453 ASSERT( PreviousClear
- ClustersFound
== Window
->ClustersFree
);
2455 FatUnlockFreeClusterBitMap( Vcb
);
2456 LockedBitMap
= FALSE
;
2459 // Add the newly alloced run to the Mcb.
2462 BytesFound
= ClustersFound
<< LogOfBytesPerCluster
;
2464 FatAddMcbEntry( Vcb
, Mcb
,
2466 FatGetLboFromIndex( Vcb
, Cluster
),
2470 // Connect the last allocated run with this one, and allocate
2471 // this run on the Fat.
2474 if (PriorLastCluster
!= 0) {
2476 FatSetFatEntry( IrpContext
,
2479 (FAT_ENTRY
)Cluster
);
2486 FatAllocateClusters( IrpContext
, Vcb
, Cluster
, ClustersFound
);
2489 // Prepare for the next iteration.
2492 CurrentVbo
+= BytesFound
;
2493 ClustersRemaining
-= ClustersFound
;
2494 PriorLastCluster
= Cluster
+ ClustersFound
- 1;
2496 } // while (clustersremaining)
2500 DebugUnwind( FatAllocateDiskSpace
);
2502 ExReleaseResourceLite(&Vcb
->ChangeBitMapResource
);
2505 // Is there any unwinding to do?
//
// Unwind for the multi-run path: undo the last partially-committed run,
// then hand the Mcb to FatDeallocateDiskSpace to free everything that
// did commit, and finally empty the Mcb.
//
2508 if ( _SEH2_AbnormalTermination() || (FALSE
== Result
)) {
2511 // Flag to the caller that they're getting nothing
2517 // There are three places we could have taken this exception:
2518 // when switching the window (FatExamineFatEntries), adding
2519 // a found run to the Mcb (FatAddMcbEntry), or when writing
2520 // the changes to the FAT (FatSetFatEntry). In the first case
2521 // we don't have anything to unwind before deallocation, and
2522 // can detect this by seeing if we have the ClusterBitmap
2525 if (!LockedBitMap
) {
2527 FatLockFreeClusterBitMap( Vcb
);
2530 // In these cases, we have the possiblity that the FAT
2531 // window is still in place and we need to clear the bits.
2532 // If the Mcb entry isn't there (we raised trying to add
2533 // it), the effect of trying to remove it is a noop.
2536 if (Window
== Vcb
->CurrentWindow
) {
2539 // Cluster reservation works on cluster 2 based window-relative
2540 // numbers, so we must convert. The subtraction will lose the
2541 // cluster 2 base, so bias the result.
2544 FatUnreserveClusters( IrpContext
, Vcb
,
2545 (Cluster
- Window
->FirstCluster
) + 2,
2550 // Note that FatDeallocateDiskSpace will take care of adjusting
2551 // to account for the entries in the Mcb. All we have to account
2552 // for is the last run that didn't make it.
2555 Window
->ClustersFree
+= ClustersFound
;
2556 Vcb
->AllocationSupport
.NumberOfFreeClusters
+= ClustersFound
;
2558 FatUnlockFreeClusterBitMap( Vcb
);
2560 FatRemoveMcbEntry( Vcb
, Mcb
, CurrentVbo
, BytesFound
);
2565 // Just drop the mutex now - we didn't manage to do anything
2566 // that needs to be backed out.
2569 FatUnlockFreeClusterBitMap( Vcb
);
2575 // Now we have tidied up, we are ready to just send the Mcb
2576 // off to deallocate disk space
2579 FatDeallocateDiskSpace( IrpContext
, Vcb
, Mcb
);
2584 // Now finally (really), remove all the entries from the mcb
2587 FatRemoveMcbEntry( Vcb
, Mcb
, 0, 0xFFFFFFFF );
2591 DebugTrace(-1, Dbg
, "FatAllocateDiskSpace -> (VOID)\n", 0);
2593 } _SEH2_END
; // finally
//
// FatDeallocateDiskSpace - frees every run described by the input Mcb, in
// two phases: first write FAT_CLUSTER_AVAILABLE over the runs on the FAT
// (which can raise), then - under the bitmap mutex, where nothing can fail -
// clear the free-cluster bitmap bits and credit the per-window and volume
// free counts. The Mcb itself is left unchanged. On an exception the runs
// already freed on the FAT are re-allocated and re-chained.
//
// NOTE(review): this extraction is missing lines (braces, else keywords,
// the Vcb/Mcb parameter lines, locals such as RunsInMcb, McbIndex, Vbo,
// Lbo, ByteCount, ClusterCount, ClusterIndex, ClusterEnd, Window, and the
// try/finally markers). Comments only have been inserted; surviving code
// text is untouched.
//
2601 FatDeallocateDiskSpace (
2602 IN PIRP_CONTEXT IrpContext
,
2609 Routine Description:
2611 This procedure deallocates the disk space denoted by an input
2612 mcb. Note that the input MCB does not need to necessarily describe
2613 a chain that ends with a FAT_CLUSTER_LAST entry.
2615 Pictorially what is done is the following
2617 Fat |--a--|--b--|--c--|
2618 Mcb |--a--|--b--|--c--|
2622 Fat |--0--|--0--|--0--|
2623 Mcb |--a--|--b--|--c--|
2627 Vcb - Supplies the VCB being modified
2629 Mcb - Supplies the MCB describing the disk space to deallocate. Note
2630 that Mcb is unchanged by this procedure.
2649 UCHAR LogOfBytesPerCluster
;
2655 DebugTrace(+1, Dbg
, "FatDeallocateDiskSpace\n", 0);
2656 DebugTrace( 0, Dbg
, " Vcb = %8lx\n", Vcb
);
2657 DebugTrace( 0, Dbg
, " Mcb = %8lx\n", Mcb
);
2659 LogOfBytesPerCluster
= Vcb
->AllocationSupport
.LogOfBytesPerCluster
;
2661 RunsInMcb
= FsRtlNumberOfRunsInLargeMcb( Mcb
);
// Empty Mcb: nothing to free, return immediately.
2663 if ( RunsInMcb
== 0 ) {
2665 DebugTrace(-1, Dbg
, "FatDeallocateDiskSpace -> (VOID)\n", 0);
2672 // Run though the Mcb, freeing all the runs in the fat.
2674 // We do this in two steps (first update the fat, then the bitmap
2675 // (which can't fail)) to prevent other people from taking clusters
2676 // that we need to re-allocate in the event of unwind.
2679 ExAcquireResourceSharedLite(&Vcb
->ChangeBitMapResource
, TRUE
);
2681 RunsInMcb
= FsRtlNumberOfRunsInLargeMcb( Mcb
);
// Phase 1: mark each run's clusters FAT_CLUSTER_AVAILABLE on the FAT.
// This phase may raise; McbIndex then tells the unwind how far we got.
2683 for ( McbIndex
= 0; McbIndex
< RunsInMcb
; McbIndex
++ ) {
2685 FatGetNextMcbEntry( Vcb
, Mcb
, McbIndex
, &Vbo
, &Lbo
, &ByteCount
);
2688 // Assert that Fat files have no holes.
2694 // Write FAT_CLUSTER_AVAILABLE to each cluster in the run.
2697 ClusterCount
= ByteCount
>> LogOfBytesPerCluster
;
2698 ClusterIndex
= FatGetIndexFromLbo( Vcb
, Lbo
);
2700 FatFreeClusters( IrpContext
, Vcb
, ClusterIndex
, ClusterCount
);
2704 // From now on, nothing can go wrong .... (as in raise)
2707 FatLockFreeClusterBitMap( Vcb
);
// Phase 2 (cannot raise): clear bitmap bits for the portion of each run
// that overlaps the current window, and credit every window's free count.
2709 for ( McbIndex
= 0; McbIndex
< RunsInMcb
; McbIndex
++ ) {
2712 ULONG MyStart
, MyLength
, count
;
2715 ULONG PreviousClear
, i
;
2721 FatGetNextMcbEntry( Vcb
, Mcb
, McbIndex
, &Vbo
, &Lbo
, &ByteCount
);
2724 // Mark the bits clear in the FreeClusterBitMap.
2727 ClusterCount
= ByteCount
>> LogOfBytesPerCluster
;
2728 ClusterIndex
= FatGetIndexFromLbo( Vcb
, Lbo
);
2730 Window
= Vcb
->CurrentWindow
;
2733 // If we've divided the bitmap, elide bitmap manipulation for
2734 // runs that are outside the current bucket.
2737 ClusterEnd
= ClusterIndex
+ ClusterCount
- 1;
2739 if (!(ClusterIndex
> Window
->LastCluster
||
2740 ClusterEnd
< Window
->FirstCluster
)) {
2743 // The run being freed overlaps the current bucket, so we'll
2744 // have to clear some bits.
// Clip [ClusterIndex, ClusterEnd] to the current window, giving
// MyStart/MyLength: the three cases are run-spans-window,
// run-starts-before-window, and run-starts-inside-window.
2747 if (ClusterIndex
< Window
->FirstCluster
&&
2748 ClusterEnd
> Window
->LastCluster
) {
2750 MyStart
= Window
->FirstCluster
;
2751 MyLength
= Window
->LastCluster
- Window
->FirstCluster
+ 1;
2753 } else if (ClusterIndex
< Window
->FirstCluster
) {
2755 MyStart
= Window
->FirstCluster
;
2756 MyLength
= ClusterEnd
- Window
->FirstCluster
+ 1;
2761 // The range being freed starts in the bucket, and may possibly
2762 // extend beyond the bucket.
2765 MyStart
= ClusterIndex
;
2767 if (ClusterEnd
<= Window
->LastCluster
) {
2769 MyLength
= ClusterCount
;
2773 MyLength
= Window
->LastCluster
- ClusterIndex
+ 1;
2777 if (MyLength
== 0) {
2784 PreviousClear
= RtlNumberOfClearBits( &Vcb
->FreeClusterBitMap
);
2789 // Verify that the Bits are all really set.
2792 ASSERT( MyStart
+ MyLength
- Window
->FirstCluster
<= Vcb
->FreeClusterBitMap
.SizeOfBitMap
);
2794 for (i
= 0; i
< MyLength
; i
++) {
2796 ASSERT( RtlCheckBit(&Vcb
->FreeClusterBitMap
,
2797 MyStart
- Window
->FirstCluster
+ i
) == 1 );
// The +2 restores the cluster-2 bias that the FirstCluster
// subtraction removed (same convention as FatReserveClusters).
2801 FatUnreserveClusters( IrpContext
, Vcb
,
2802 MyStart
- Window
->FirstCluster
+ 2,
2807 // Adjust the ClustersFree count for each bitmap window, even the ones
2808 // that are not the current window.
2811 if (FatIsFat32(Vcb
)) {
2813 Window
= &Vcb
->Windows
[FatWindowOfCluster( ClusterIndex
)];
2817 Window
= &Vcb
->Windows
[0];
2820 MyStart
= ClusterIndex
;
// Walk the freed run window by window, crediting each window's
// ClustersFree for the portion of the run it contains.
2822 for (MyLength
= ClusterCount
; MyLength
> 0; MyLength
-= count
) {
2824 count
= FatMin(Window
->LastCluster
- MyStart
+ 1, MyLength
);
2825 Window
->ClustersFree
+= count
;
2828 // If this was not the last window this allocation spanned,
2829 // advance to the next.
2832 if (MyLength
!= count
) {
2835 MyStart
= Window
->FirstCluster
;
2840 // Deallocation is now complete. Adjust the free cluster count.
2843 Vcb
->AllocationSupport
.NumberOfFreeClusters
+= ClusterCount
;
// Debug-only consistency check between the current window's free count
// and the bitmap; prints diagnostics when they disagree.
2847 if (Vcb
->CurrentWindow
->ClustersFree
!=
2848 RtlNumberOfClearBits(&Vcb
->FreeClusterBitMap
)) {
2850 DbgPrint("%x vs %x\n", Vcb
->CurrentWindow
->ClustersFree
,
2851 RtlNumberOfClearBits(&Vcb
->FreeClusterBitMap
));
2853 DbgPrint("%x for %x\n", ClusterIndex
, ClusterCount
);
2857 FatUnlockFreeClusterBitMap( Vcb
);
2862 DebugUnwind( FatDeallocateDiskSpace
);
2865 // Is there any unwinding to do?
2868 ExReleaseResourceLite(&Vcb
->ChangeBitMapResource
);
//
// Exception unwind: re-allocate on the FAT every run phase 1 already
// freed (indexes 0..McbIndex-1) and re-link the chain; the final
// iteration (Index == McbIndex) only restores the link from the prior
// run, since that run's FatFreeClusters did not complete.
//
2870 if ( _SEH2_AbnormalTermination() ) {
2878 ULONG PriorLastIndex
;
2881 // For each entry we already deallocated, reallocate it,
2882 // chaining together as nessecary. Note that we continue
2883 // up to and including the last "for" iteration even though
2884 // the SetFatRun could not have been successful. This
2885 // allows us a convienent way to re-link the final successful
2888 // It is possible that the reason we got here will prevent us
2889 // from succeeding in this operation.
2894 for (Index
= 0; Index
<= McbIndex
; Index
++) {
2896 FatGetNextMcbEntry(Vcb
, Mcb
, Index
, &Vbo
, &Lbo
, &ByteCount
);
2898 FatIndex
= FatGetIndexFromLbo( Vcb
, Lbo
);
2899 Clusters
= ByteCount
>> LogOfBytesPerCluster
;
2902 // We must always restore the prior iteration's last
2903 // entry, pointing it to the first cluster of this run.
2906 if (PriorLastIndex
!= 0) {
2908 FatSetFatEntry( IrpContext
,
2911 (FAT_ENTRY
)FatIndex
);
2915 // If this is not the last entry (the one that failed)
2916 // then reallocate the disk space on the fat.
2919 if ( Index
< McbIndex
) {
2921 FatAllocateClusters(IrpContext
, Vcb
, FatIndex
, Clusters
);
2923 PriorLastIndex
= FatIndex
+ Clusters
- 1;
2928 DebugTrace(-1, Dbg
, "FatDeallocateDiskSpace -> (VOID)\n", 0);
2936 FatSplitAllocation (
2937 IN PIRP_CONTEXT IrpContext
,
2939 IN OUT PLARGE_MCB Mcb
,
2941 OUT PLARGE_MCB RemainingMcb
2946 Routine Description:
2948 This procedure takes a single mcb and splits its allocation into
2949 two separate allocation units. The separation must only be done
2950 on cluster boundaries, otherwise we bugcheck.
2952 On the disk this actually works by inserting a FAT_CLUSTER_LAST into
2953 the last index of the first part being split out.
2955 Pictorially what is done is the following (where ! denotes the end of
2956 the fat chain (i.e., FAT_CLUSTER_LAST)):
2959 Mcb |--a--|--b--|--c--|--d--|--e--|--f--|
2962 SplitAtVbo ---------------------+
2964 RemainingMcb (empty)
2968 Mcb |--a--|--b--|--c--!
2971 RemainingMcb |--d--|--e--|--f--|
2975 Vcb - Supplies the VCB being modified
2977 Mcb - Supplies the MCB describing the allocation being split into
2978 two parts. Upon return this Mcb now contains the first chain.
2980 SplitAtVbo - Supplies the VBO of the first byte for the second chain
2983 RemainingMcb - Receives the MCB describing the second chain of allocated
2984 disk space. The caller passes in an initialized Mcb that
2985 is filled in by this procedure STARTING AT VBO 0.
2989 VOID - TRUE if the operation completed and FALSE if it had to
2990 block but could not.
// NOTE(review): the return-value text above is stale -- the trailing
// DebugTrace below shows this routine actually returns VOID.
3002 ULONG BytesPerCluster
;
3006 DebugTrace(+1, Dbg
, "FatSplitAllocation\n", 0);
3007 DebugTrace( 0, Dbg
, " Vcb = %8lx\n", Vcb
);
3008 DebugTrace( 0, Dbg
, " Mcb = %8lx\n", Mcb
);
3009 DebugTrace( 0, Dbg
, " SplitAtVbo = %8lx\n", SplitAtVbo
);
3010 DebugTrace( 0, Dbg
, " RemainingMcb = %8lx\n", RemainingMcb
);
// Cluster size is a power of two; derive it from the stored log2.
3012 BytesPerCluster
= 1 << Vcb
->AllocationSupport
.LogOfBytesPerCluster
;
3015 // Assert that the split point is cluster aligned
3018 ASSERT( (SplitAtVbo
& (BytesPerCluster
- 1)) == 0 );
3021 // We should never be handed an empty source MCB and asked to split
3022 // at a non zero point.
3025 ASSERT( !((0 != SplitAtVbo
) && (0 == FsRtlNumberOfRunsInLargeMcb( Mcb
))));
3028 // Assert we were given an empty target Mcb.
3032 // This assert is commented out to avoid hitting in the Ea error
3033 // path. In that case we will be using the same Mcb's to split the
3034 // allocation that we used to merge them. The target Mcb will contain
3035 // the runs that the split will attempt to insert.
3038 // ASSERT( FsRtlNumberOfRunsInMcb( RemainingMcb ) == 0 );
3044 // Move the runs after SplitAtVbo from the source to the target
// Copy each run at or beyond the split point into RemainingMcb
// (rebased so the target starts at VBO 0), removing it from Mcb.
3047 SourceVbo
= SplitAtVbo
;
3050 while (FatLookupMcbEntry(Vcb
, Mcb
, SourceVbo
, &Lbo
, &ByteCount
, NULL
)) {
3052 FatAddMcbEntry( Vcb
, RemainingMcb
, TargetVbo
, Lbo
, ByteCount
);
3054 FatRemoveMcbEntry( Vcb
, Mcb
, SourceVbo
, ByteCount
);
3056 TargetVbo
+= ByteCount
;
3057 SourceVbo
+= ByteCount
;
3060 // If SourceVbo overflows, we were actually snipping off the end
3061 // of the maximal file ... and are now done.
3064 if (SourceVbo
== 0) {
3071 // Mark the last pre-split cluster as a FAT_LAST_CLUSTER
3074 if ( SplitAtVbo
!= 0 ) {
3076 FatLookupLastMcbEntry( Vcb
, Mcb
, &DontCare
, &Lbo
, NULL
);
3078 FatSetFatEntry( IrpContext
,
3080 FatGetIndexFromLbo( Vcb
, Lbo
),
3086 DebugUnwind( FatSplitAllocation
);
3089 // If we got an exception, we must glue back together the Mcbs
// NOTE(review): this extracted view is missing some lines around here
// (e.g. the SEH try/finally markers and the re-initialization of
// SourceVbo before the rebuild loop below) -- confirm against the
// full source file before drawing conclusions from this text.
3092 if ( _SEH2_AbnormalTermination() ) {
3094 TargetVbo
= SplitAtVbo
;
3097 while (FatLookupMcbEntry(Vcb
, RemainingMcb
, SourceVbo
, &Lbo
, &ByteCount
, NULL
)) {
3099 FatAddMcbEntry( Vcb
, Mcb
, TargetVbo
, Lbo
, ByteCount
);
3101 FatRemoveMcbEntry( Vcb
, RemainingMcb
, SourceVbo
, ByteCount
);
3103 TargetVbo
+= ByteCount
;
3104 SourceVbo
+= ByteCount
;
3108 DebugTrace(-1, Dbg
, "FatSplitAllocation -> (VOID)\n", 0);
3116 FatMergeAllocation (
3117 IN PIRP_CONTEXT IrpContext
,
3119 IN OUT PLARGE_MCB Mcb
,
3120 IN PLARGE_MCB SecondMcb
3125 Routine Description:
3127 This routine takes two separate allocations described by two MCBs and
3128 joins them together into one allocation.
3130 Pictorially what is done is the following (where ! denotes the end of
3131 the fat chain (i.e., FAT_CLUSTER_LAST)):
3134 Mcb |--a--|--b--|--c--!
3136 SecondMcb |--d--|--e--|--f--|
3140 Mcb |--a--|--b--|--c--|--d--|--e--|--f--|
3142 SecondMcb |--d--|--e--|--f--|
3147 Vcb - Supplies the VCB being modified
3149 Mcb - Supplies the MCB of the first allocation that is being modified.
3150 Upon return this Mcb will also describe the newly enlarged
3153 SecondMcb - Supplies the ZERO VBO BASED MCB of the second allocation
3154 that is being appended to the first allocation. This
3155 procedure leaves SecondMcb unchanged.
3159 VOID - TRUE if the operation completed and FALSE if it had to
3160 block but could not.
// NOTE(review): the return-value text above is stale -- the trailing
// DebugTrace below shows this routine actually returns VOID.
3177 DebugTrace(+1, Dbg
, "FatMergeAllocation\n", 0);
3178 DebugTrace( 0, Dbg
, " Vcb = %8lx\n", Vcb
);
3179 DebugTrace( 0, Dbg
, " Mcb = %8lx\n", Mcb
);
3180 DebugTrace( 0, Dbg
, " SecondMcb = %8lx\n", SecondMcb
);
3185 // Append the runs from SecondMcb to Mcb
// Find the last mapped byte of Mcb; SecondMcb's runs are appended
// starting at the very next VBO (SpliceVbo + 1).
3188 (void)FatLookupLastMcbEntry( Vcb
, Mcb
, &SpliceVbo
, &SpliceLbo
, NULL
);
3191 TargetVbo
= SpliceVbo
+ 1;
// NOTE(review): the initialization of SourceVbo before this loop is
// elided in this extracted view -- confirm against the full file.
3193 while (FatLookupMcbEntry(Vcb
, SecondMcb
, SourceVbo
, &Lbo
, &ByteCount
, NULL
)) {
3195 FatAddMcbEntry( Vcb
, Mcb
, TargetVbo
, Lbo
, ByteCount
);
3197 SourceVbo
+= ByteCount
;
3198 TargetVbo
+= ByteCount
;
3202 // Link the last pre-merge cluster to the first cluster of SecondMcb
3205 FatLookupMcbEntry( Vcb
, SecondMcb
, 0, &Lbo
, (PULONG
)NULL
, NULL
);
3207 FatSetFatEntry( IrpContext
,
3209 FatGetIndexFromLbo( Vcb
, SpliceLbo
),
3210 (FAT_ENTRY
)FatGetIndexFromLbo( Vcb
, Lbo
) );
3214 DebugUnwind( FatMergeAllocation
);
3217 // If we got an exception, we must remove the runs added to Mcb
// On unwind, everything between SpliceVbo+1 and the last TargetVbo
// reached is exactly what this call added -- cut it back out.
3220 if ( _SEH2_AbnormalTermination() ) {
3224 if ((CutLength
= TargetVbo
- (SpliceVbo
+ 1)) != 0) {
3226 FatRemoveMcbEntry( Vcb
, Mcb
, SpliceVbo
+ 1, CutLength
);
3230 DebugTrace(-1, Dbg
, "FatMergeAllocation -> (VOID)\n", 0);
3238 // Internal support routine
3242 FatInterpretClusterType (
3249 Routine Description:
3251 This procedure tells the caller how to interpret the input fat table
3252 entry. It will indicate if the fat cluster is available, resereved,
3253 bad, the last one, or the another fat index. This procedure can deal
3254 with both 12 and 16 bit fat.
// NOTE(review): the description above predates FAT32 -- the switch
// below also normalizes 32-bit entries (FAT32_ENTRY_MASK), so all of
// 12, 16 and 32 bit FATs are handled here.
3258 Vcb - Supplies the Vcb to examine, yields 12/16 bit info
3260 Entry - Supplies the fat entry to examine
3264 CLUSTER_TYPE - Is the type of the input Fat entry
3269 DebugTrace(+1, Dbg
, "InterpretClusterType\n", 0);
3270 DebugTrace( 0, Dbg
, " Vcb = %8lx\n", Vcb
);
3271 DebugTrace( 0, Dbg
, " Entry = %8lx\n", Entry
);
// NOTE(review): the switch's case labels and break statements are
// elided in this extracted view; the three arms below are the 32,
// 12 and 16 bit normalizations respectively.
3275 switch(Vcb
->AllocationSupport
.FatIndexBitSize
) {
// 32-bit FAT: keep only the significant entry bits.
3277 Entry
&= FAT32_ENTRY_MASK
;
// 12-bit FAT: widen entries in the special range (>= 0x0FF0) so the
// shared comparisons against the 32-bit constants below apply.
3281 ASSERT( Entry
<= 0xfff );
3282 if (Entry
>= 0x0ff0) {
3283 Entry
|= 0x0FFFF000;
// 16-bit FAT: same widening for the 16-bit special range.
3289 ASSERT( Entry
<= 0xffff );
3290 if (Entry
>= 0x0fff0) {
3291 Entry
|= 0x0FFF0000;
// Classify the now-normalized entry value.
3296 if (Entry
== FAT_CLUSTER_AVAILABLE
) {
3298 DebugTrace(-1, Dbg
, "FatInterpretClusterType -> FatClusterAvailable\n", 0);
3300 return FatClusterAvailable
;
3302 } else if (Entry
< FAT_CLUSTER_RESERVED
) {
3304 DebugTrace(-1, Dbg
, "FatInterpretClusterType -> FatClusterNext\n", 0);
3306 return FatClusterNext
;
3308 } else if (Entry
< FAT_CLUSTER_BAD
) {
3310 DebugTrace(-1, Dbg
, "FatInterpretClusterType -> FatClusterReserved\n", 0);
3312 return FatClusterReserved
;
3314 } else if (Entry
== FAT_CLUSTER_BAD
) {
3316 DebugTrace(-1, Dbg
, "FatInterpretClusterType -> FatClusterBad\n", 0);
3318 return FatClusterBad
;
3322 DebugTrace(-1, Dbg
, "FatInterpretClusterType -> FatClusterLast\n", 0);
3324 return FatClusterLast
;
3330 // Internal support routine
// FatLookupFatEntry -- the routine-name line is elided in this
// extracted view; the name follows from the matching prototype near
// the top of the file and the DebugTrace tags below.
3335 IN PIRP_CONTEXT IrpContext
,
3338 IN OUT PULONG FatEntry
,
3339 IN OUT PFAT_ENUMERATION_CONTEXT Context
3344 Routine Description:
3346 This routine takes an index into the fat and gives back the value
3347 in the Fat at this index. At any given time, for a 16 bit fat, this
3348 routine allows only one page per volume of the fat to be pinned in
3349 memory. For a 12 bit bit fat, the entire fat (max 6k) is pinned. This
3350 extra layer of caching makes the vast majority of requests very
3351 fast. The context for this caching stored in a structure in the Vcb.
3355 Vcb - Supplies the Vcb to examine, yields 12/16 bit info,
3356 fat access context, etc.
3358 FatIndex - Supplies the fat index to examine.
3360 FatEntry - Receives the fat entry pointed to by FatIndex. Note that
3361 it must point to non-paged pool.
3363 Context - This structure keeps track of a page of pinned fat between calls.
3370 DebugTrace(+1, Dbg
, "FatLookupFatEntry\n", 0);
3371 DebugTrace( 0, Dbg
, " Vcb = %8lx\n", Vcb
);
3372 DebugTrace( 0, Dbg
, " FatIndex = %4x\n", FatIndex
);
3373 DebugTrace( 0, Dbg
, " FatEntry = %8lx\n", FatEntry
);
3376 // Make sure they gave us a valid fat index.
3379 FatVerifyIndexIsValid(IrpContext
, Vcb
, FatIndex
);
3382 // Case on 12 or 16 bit fats.
3384 // In the 12 bit case (mostly floppies) we always have the whole fat
3385 // (max 6k bytes) pinned during allocation operations. This is possibly
3386 // a wee bit slower, but saves headaches over fat entries with 8 bits
3387 // on one page, and 4 bits on the next.
3389 // The 16 bit case always keeps the last used page pinned until all
3390 // operations are done and it is unpinned.
3394 // DEAL WITH 12 BIT CASE
3397 if (Vcb
->AllocationSupport
.FatIndexBitSize
== 12) {
3400 // Check to see if the fat is already pinned, otherwise pin it.
3403 if (Context
->Bcb
== NULL
) {
3405 FatReadVolumeFile( IrpContext
,
3407 FatReservedBytes( &Vcb
->Bpb
),
3408 FatBytesPerFat( &Vcb
->Bpb
),
3410 &Context
->PinnedPage
);
3414 // Load the return value.
3418 FatLookup12BitEntry( Context
->PinnedPage
, FatIndex
, FatEntry
);
3420 } else if (Vcb
->AllocationSupport
.FatIndexBitSize
== 32) {
3423 // DEAL WITH 32 BIT CASE
3426 ULONG PageEntryOffset
;
3427 ULONG OffsetIntoVolumeFile
;
3430 // Initialize two local variables that help us.
3432 OffsetIntoVolumeFile
= FatReservedBytes(&Vcb
->Bpb
) + FatIndex
* sizeof(FAT_ENTRY
);
3433 PageEntryOffset
= (OffsetIntoVolumeFile
% PAGE_SIZE
) / sizeof(FAT_ENTRY
);
3436 // Check to see if we need to read in a new page of fat
3439 if ((Context
->Bcb
== NULL
) ||
3440 (OffsetIntoVolumeFile
/ PAGE_SIZE
!= Context
->VboOfPinnedPage
/ PAGE_SIZE
)) {
3443 // The entry wasn't in the pinned page, so we must unpin the current
3444 // page (if any) and read in a new page.
3447 FatUnpinBcb( IrpContext
, Context
->Bcb
);
3449 FatReadVolumeFile( IrpContext
,
3451 OffsetIntoVolumeFile
& ~(PAGE_SIZE
- 1),
3454 &Context
->PinnedPage
);
3456 Context
->VboOfPinnedPage
= OffsetIntoVolumeFile
& ~(PAGE_SIZE
- 1);
3460 // Grab the fat entry from the pinned page, and return
// Mask off the reserved high bits of the 32-bit entry.
3463 *FatEntry
= ((PULONG
)(Context
->PinnedPage
))[PageEntryOffset
] & FAT32_ENTRY_MASK
;
3468 // DEAL WITH 16 BIT CASE
3471 ULONG PageEntryOffset
;
3472 ULONG OffsetIntoVolumeFile
;
3475 // Initialize two local variables that help us.
3478 OffsetIntoVolumeFile
= FatReservedBytes(&Vcb
->Bpb
) + FatIndex
* sizeof(USHORT
);
3479 PageEntryOffset
= (OffsetIntoVolumeFile
% PAGE_SIZE
) / sizeof(USHORT
);
3482 // Check to see if we need to read in a new page of fat
3485 if ((Context
->Bcb
== NULL
) ||
3486 (OffsetIntoVolumeFile
/ PAGE_SIZE
!= Context
->VboOfPinnedPage
/ PAGE_SIZE
)) {
3489 // The entry wasn't in the pinned page, so we must unpin the current
3490 // page (if any) and read in a new page.
3493 FatUnpinBcb( IrpContext
, Context
->Bcb
);
3495 FatReadVolumeFile( IrpContext
,
3497 OffsetIntoVolumeFile
& ~(PAGE_SIZE
- 1),
3500 &Context
->PinnedPage
);
3502 Context
->VboOfPinnedPage
= OffsetIntoVolumeFile
& ~(PAGE_SIZE
- 1);
3506 // Grab the fat entry from the pinned page, and return
3509 *FatEntry
= ((PUSHORT
)(Context
->PinnedPage
))[PageEntryOffset
];
3512 DebugTrace(-1, Dbg
, "FatLookupFatEntry -> (VOID)\n", 0);
3519 IN PIRP_CONTEXT IrpContext
,
3522 IN FAT_ENTRY FatEntry
3527 Routine Description:
// NOTE(review): this is FatSetFatEntry -- the routine-name line is
// elided in this extracted view; the name follows from the
// DebugTrace/DebugUnwind tags below.
3529 This routine takes an index into the fat and puts a value in the Fat
3530 at this index. The routine special cases 12, 16 and 32 bit fats. In
3531 all cases we go to the cache manager for a piece of the fat.
3533 We have a special form of this call for setting the DOS-style dirty bit.
3534 Unlike the dirty bit in the boot sector, we do not go to special effort
3535 to make sure that this hits the disk synchronously - if the system goes
3536 down in the window between the dirty bit being set in the boot sector
3537 and the FAT index zero dirty bit being lazy written, then life is tough.
3539 The only possible scenario is that Win9x may see what it thinks is a clean
3540 volume that really isn't (hopefully Memphis will pay attention to our dirty
3541 bit as well). The dirty bit will get out quickly, and if heavy activity is
3542 occurring, then the dirty bit should actually be there virtually all of the
3543 time since the act of cleaning the volume is the "rare" occurance.
3545 There are synchronization concerns that would crop up if we tried to make
3546 this synchronous. This thread may already own the Bcb shared for the first
3547 sector of the FAT (so we can't get it exclusive for a writethrough). This
3548 would require some more serious replumbing to work around than I want to
3549 consider at this time.
3551 We can and do, however, synchronously set the bit clean.
3553 At this point the reader should understand why the NT dirty bit is where it is.
3557 Vcb - Supplies the Vcb to examine, yields 12/16/32 bit info, etc.
3559 FatIndex - Supplies the destination fat index.
3561 FatEntry - Supplies the source fat entry.
3569 ULONG OffsetIntoVolumeFile
;
// NOTE(review): WasWait is declared ULONG but holds a flag-test
// result; harmless, though BOOLEAN would match its neighbors.
3570 ULONG WasWait
= TRUE
;
3571 BOOLEAN RegularOperation
= TRUE
;
3572 BOOLEAN CleaningOperation
= FALSE
;
3573 BOOLEAN ReleaseMutex
= FALSE
;
3577 DebugTrace(+1, Dbg
, "FatSetFatEntry\n", 0);
3578 DebugTrace( 0, Dbg
, " Vcb = %8lx\n", Vcb
);
3579 DebugTrace( 0, Dbg
, " FatIndex = %4x\n", FatIndex
);
3580 DebugTrace( 0, Dbg
, " FatEntry = %4x\n", FatEntry
);
3583 // Make sure they gave us a valid fat index if this isn't the special
3584 // clean-bit modifying call.
3587 if (FatIndex
== FAT_DIRTY_BIT_INDEX
) {
3590 // We are setting the clean bit state. Of course, we could
3591 // have corruption that would cause us to try to fiddle the
3592 // reserved index - we guard against this by having the
3593 // special entry values use the reserved high 4 bits that
3594 // we know that we'll never try to set.
3598 // We don't want to repin the FAT pages involved here. Just
3599 // let the lazy writer hit them when it can.
3602 RegularOperation
= FALSE
;
// NOTE(review): the switch statement and its break lines around the
// case labels below are elided in this extracted view.
3605 case FAT_CLEAN_VOLUME
:
3606 FatEntry
= FAT_CLEAN_ENTRY
;
3607 CleaningOperation
= TRUE
;
3610 case FAT_DIRTY_VOLUME
:
// Pick the FAT-width-specific dirty entry value.
3611 switch (Vcb
->AllocationSupport
.FatIndexBitSize
) {
3613 FatEntry
= FAT12_DIRTY_ENTRY
;
3617 FatEntry
= FAT32_DIRTY_ENTRY
;
3621 FatEntry
= FAT16_DIRTY_ENTRY
;
// Any other value at the dirty-bit index indicates corruption.
3627 FatRaiseStatus( IrpContext
, STATUS_FILE_CORRUPT_ERROR
);
3632 // Disable dirtying semantics for the duration of this operation. Force this
3633 // operation to wait for the duration.
3636 WasWait
= FlagOn( IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
3637 SetFlag( IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
| IRP_CONTEXT_FLAG_DISABLE_DIRTY
);
// Regular entries never carry bits outside FAT32_ENTRY_MASK; the
// special dirty/clean values above live in the reserved high nibble.
3641 ASSERT( !(FatEntry
& ~FAT32_ENTRY_MASK
) );
3642 FatVerifyIndexIsValid(IrpContext
, Vcb
, FatIndex
);
3649 SectorSize
= 1 << Vcb
->AllocationSupport
.LogOfBytesPerSector
;
3652 // Case on 12 or 16 bit fats.
3654 // In the 12 bit case (mostly floppies) we always have the whole fat
3655 // (max 6k bytes) pinned during allocation operations. This is possibly
3656 // a wee bit slower, but saves headaches over fat entries with 8 bits
3657 // on one page, and 4 bits on the next.
3659 // In the 16 bit case we only read the page that we need to set the fat
3664 // DEAL WITH 12 BIT CASE
3669 if (Vcb
->AllocationSupport
.FatIndexBitSize
== 12) {
3674 // Make sure we have a valid entry
3680 // We read in the entire fat. Note that using prepare write marks
3681 // the bcb pre-dirty, so we don't have to do it explicitly.
3684 OffsetIntoVolumeFile
= FatReservedBytes( &Vcb
->Bpb
) + FatIndex
* 3 / 2;
3686 FatPrepareWriteVolumeFile( IrpContext
,
3688 FatReservedBytes( &Vcb
->Bpb
),
3689 FatBytesPerFat( &Vcb
->Bpb
),
3696 // Mark the sector(s) dirty in the DirtyFatMcb. This call is
3697 // complicated somewhat for the 12 bit case since a single
3698 // entry write can span two sectors (and pages).
3700 // Get the Lbo for the sector where the entry starts, and add it to
3701 // the dirty fat Mcb.
3704 Lbo
= OffsetIntoVolumeFile
& ~(SectorSize
- 1);
3706 FatAddMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
, (VBO
) Lbo
, Lbo
, SectorSize
);
3709 // If the entry started on the last byte of the sector, it continues
3710 // to the next sector, so mark the next sector dirty as well.
3712 // Note that this entry will simply coalesce with the last entry,
3713 // so this operation cannot fail. Also if we get this far, we have
3714 // made it, so no unwinding will be needed.
3717 if ( (OffsetIntoVolumeFile
& (SectorSize
- 1)) == (SectorSize
- 1) ) {
3721 FatAddMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
, (VBO
) Lbo
, Lbo
, SectorSize
);
3725 // Store the entry into the fat; we need a little synchronization
3726 // here and can't use a spinlock since the bytes might not be
3730 FatLockFreeClusterBitMap( Vcb
);
3731 ReleaseMutex
= TRUE
;
3733 FatSet12BitEntry( PinnedFat
, FatIndex
, FatEntry
);
3735 FatUnlockFreeClusterBitMap( Vcb
);
3736 ReleaseMutex
= FALSE
;
3738 } else if (Vcb
->AllocationSupport
.FatIndexBitSize
== 32) {
3741 // DEAL WITH 32 BIT CASE
3744 PULONG PinnedFatEntry32
;
3747 // Read in a new page of fat
3750 OffsetIntoVolumeFile
= FatReservedBytes( &Vcb
->Bpb
) +
3751 FatIndex
* sizeof( FAT_ENTRY
);
3753 FatPrepareWriteVolumeFile( IrpContext
,
3755 OffsetIntoVolumeFile
,
3758 (PVOID
*)&PinnedFatEntry32
,
3762 // Mark the sector dirty in the DirtyFatMcb
3765 Lbo
= OffsetIntoVolumeFile
& ~(SectorSize
- 1);
3767 FatAddMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
, (VBO
) Lbo
, Lbo
, SectorSize
);
3770 // Store the FatEntry to the pinned page.
3772 // Preserve the reserved bits in FAT32 entries in the file heap.
3776 FatLockFreeClusterBitMap( Vcb
);
3777 ReleaseMutex
= TRUE
;
// Regular writes keep the on-disk reserved high bits; dirty-bit
// writes replace the whole entry.
3780 if (FatIndex
!= FAT_DIRTY_BIT_INDEX
) {
3782 *PinnedFatEntry32
= ((*PinnedFatEntry32
& ~FAT32_ENTRY_MASK
) | FatEntry
);
3786 *PinnedFatEntry32
= FatEntry
;
3790 FatUnlockFreeClusterBitMap( Vcb
);
3791 ReleaseMutex
= FALSE
;
3797 // DEAL WITH 16 BIT CASE
3800 PUSHORT PinnedFatEntry
;
3803 // Read in a new page of fat
3806 OffsetIntoVolumeFile
= FatReservedBytes( &Vcb
->Bpb
) +
3807 FatIndex
* sizeof(USHORT
);
3809 FatPrepareWriteVolumeFile( IrpContext
,
3811 OffsetIntoVolumeFile
,
3814 (PVOID
*)&PinnedFatEntry
,
3818 // Mark the sector dirty in the DirtyFatMcb
3821 Lbo
= OffsetIntoVolumeFile
& ~(SectorSize
- 1);
3823 FatAddMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
, (VBO
) Lbo
, Lbo
, SectorSize
);
3826 // Store the FatEntry to the pinned page.
3828 // We need extra synchronization here for broken architectures
3829 // like the ALPHA that don't support atomic 16 bit writes.
3833 FatLockFreeClusterBitMap( Vcb
);
3834 ReleaseMutex
= TRUE
;
3837 *PinnedFatEntry
= (USHORT
)FatEntry
;
3840 FatUnlockFreeClusterBitMap( Vcb
);
3841 ReleaseMutex
= FALSE
;
3847 DebugUnwind( FatSetFatEntry
);
3850 // Re-enable volume dirtying in case this was a dirty bit operation.
3853 ClearFlag( IrpContext
->Flags
, IRP_CONTEXT_FLAG_DISABLE_DIRTY
);
3856 // Make this operation asynchronous again if needed.
3861 ClearFlag( IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
3865 // If we still somehow have the Mutex, release it.
// Reaching here with ReleaseMutex set means we raised between
// lock and unlock; only an abnormal termination can cause that.
3870 ASSERT( _SEH2_AbnormalTermination() );
3872 FatUnlockFreeClusterBitMap( Vcb
);
3876 // Unpin the Bcb. For cleaning operations, we make this write-through.
3879 if (CleaningOperation
&& Bcb
) {
3881 IO_STATUS_BLOCK IgnoreStatus
;
3885 DbgDoit( IrpContext
->PinCount
-= 1 );
3886 CcUnpinRepinnedBcb( Bcb
, TRUE
, &IgnoreStatus
);
3890 FatUnpinBcb(IrpContext
, Bcb
);
3893 DebugTrace(-1, Dbg
, "FatSetFatEntry -> (VOID)\n", 0);
3901 // Internal support routine
// FatSetFatRun -- the routine-name line is elided in this extracted
// view; the name follows from the matching prototype near the top of
// the file and the DebugTrace/DebugUnwind tags below.
3906 IN PIRP_CONTEXT IrpContext
,
3908 IN ULONG StartingFatIndex
,
3909 IN ULONG ClusterCount
,
3910 IN BOOLEAN ChainTogether
3915 Routine Description:
3917 This routine sets a continuous run of clusters in the fat. If ChainTogether
3918 is TRUE, then the clusters are linked together as in normal Fat fasion,
3919 with the last cluster receiving FAT_CLUSTER_LAST. If ChainTogether is
3920 FALSE, all the entries are set to FAT_CLUSTER_AVAILABLE, effectively
3921 freeing all the clusters in the run.
3925 Vcb - Supplies the Vcb to examine, yields 12/16 bit info, etc.
3927 StartingFatIndex - Supplies the destination fat index.
3929 ClusterCount - Supplies the number of contiguous clusters to work on.
3931 ChainTogether - Tells us whether to fill the entries with links, or
3932 FAT_CLUSTER_AVAILABLE
// MAXCOUNTCLUS bounds how many FAT32 entries are updated per chunk;
// COUNTSAVEDBCBS sizes the pinned-Bcb table for the worst case (see
// the commentary in the 16-bit case below).
3942 #define MAXCOUNTCLUS 0x10000
3943 #define COUNTSAVEDBCBS ((MAXCOUNTCLUS * sizeof(FAT_ENTRY) / PAGE_SIZE) + 2)
3944 PBCB SavedBcbs
[COUNTSAVEDBCBS
][2];
3959 BOOLEAN ReleaseMutex
= FALSE
;
// Remember where we started so the FAT32 unwind below can walk back.
3961 ULONG SavedStartingFatIndex
= StartingFatIndex
;
3965 DebugTrace(+1, Dbg
, "FatSetFatRun\n", 0);
3966 DebugTrace( 0, Dbg
, " Vcb = %8lx\n", Vcb
);
3967 DebugTrace( 0, Dbg
, " StartingFatIndex = %8x\n", StartingFatIndex
);
3968 DebugTrace( 0, Dbg
, " ClusterCount = %8lx\n", ClusterCount
);
3969 DebugTrace( 0, Dbg
, " ChainTogether = %s\n", ChainTogether
? "TRUE":"FALSE");
3972 // Make sure they gave us a valid fat run.
3975 FatVerifyIndexIsValid(IrpContext
, Vcb
, StartingFatIndex
);
3976 FatVerifyIndexIsValid(IrpContext
, Vcb
, StartingFatIndex
+ ClusterCount
- 1);
3979 // Check special case
3982 if (ClusterCount
== 0) {
3984 DebugTrace(-1, Dbg
, "FatSetFatRun -> (VOID)\n", 0);
3992 SectorSize
= 1 << Vcb
->AllocationSupport
.LogOfBytesPerSector
;
3995 // Case on 12 or 16 bit fats.
3997 // In the 12 bit case (mostly floppies) we always have the whole fat
3998 // (max 6k bytes) pinned during allocation operations. This is possibly
3999 // a wee bit slower, but saves headaches over fat entries with 8 bits
4000 // on one page, and 4 bits on the next.
4002 // In the 16 bit case we only read one page at a time, as needed.
4006 // DEAL WITH 12 BIT CASE
4011 if (Vcb
->AllocationSupport
.FatIndexBitSize
== 12) {
4018 // We read in the entire fat. Note that using prepare write marks
4019 // the bcb pre-dirty, so we don't have to do it explicitly.
4022 RtlZeroMemory( &SavedBcbs
[0], 2 * sizeof(PBCB
) * 2);
4024 FatPrepareWriteVolumeFile( IrpContext
,
4026 FatReservedBytes( &Vcb
->Bpb
),
4027 FatBytesPerFat( &Vcb
->Bpb
),
4034 // Mark the affected sectors dirty. Note that FinalSectorLbo is
4035 // the Lbo of the END of the entry (Thus * 3 + 2). This makes sure
4036 // we catch the case of a dirty fat entry straddling a sector boundary.
4038 // Note that if the first AddMcbEntry succeeds, all following ones
4039 // will simply coalesce, and thus also succeed.
4042 StartSectorLbo
= (FatReservedBytes( &Vcb
->Bpb
) + StartingFatIndex
* 3 / 2)
4043 & ~(SectorSize
- 1);
4045 FinalSectorLbo
= (FatReservedBytes( &Vcb
->Bpb
) + ((StartingFatIndex
+
4046 ClusterCount
) * 3 + 2) / 2) & ~(SectorSize
- 1);
4048 for (Lbo
= StartSectorLbo
; Lbo
<= FinalSectorLbo
; Lbo
+= SectorSize
) {
4050 FatAddMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
, (VBO
) Lbo
, Lbo
, SectorSize
);
4054 // Store the entries into the fat; we need a little
4055 // synchronization here and can't use a spinlock since the bytes
4056 // might not be resident.
4059 FatLockFreeClusterBitMap( Vcb
);
4060 ReleaseMutex
= TRUE
;
// Write every entry but the last: either a link to its successor
// (chaining) or FAT_CLUSTER_AVAILABLE (freeing).
4062 for (Cluster
= StartingFatIndex
;
4063 Cluster
< StartingFatIndex
+ ClusterCount
- 1;
4066 FatSet12BitEntry( PinnedFat
,
4068 ChainTogether
? Cluster
+ 1 : FAT_CLUSTER_AVAILABLE
);
4072 // Save the last entry
4075 FatSet12BitEntry( PinnedFat
,
4078 FAT_CLUSTER_LAST
& 0xfff : FAT_CLUSTER_AVAILABLE
);
4080 FatUnlockFreeClusterBitMap( Vcb
);
4081 ReleaseMutex
= FALSE
;
4083 } else if (Vcb
->AllocationSupport
.FatIndexBitSize
== 32) {
4086 // DEAL WITH 32 BIT CASE
// The FAT32 run can span megabytes of FAT, so it is processed in
// chunks of at most MAXCOUNTCLUS entries per pass of the enclosing
// (elided) loop; see the chunk-advance logic near the end.
4091 VBO StartOffsetInVolume
;
4092 VBO FinalOffsetInVolume
;
4097 ULONG ClusterCountThisRun
;
4099 StartOffsetInVolume
= FatReservedBytes(&Vcb
->Bpb
) +
4100 StartingFatIndex
* sizeof(FAT_ENTRY
);
4102 if (ClusterCount
> MAXCOUNTCLUS
) {
4103 ClusterCountThisRun
= MAXCOUNTCLUS
;
4105 ClusterCountThisRun
= ClusterCount
;
4108 FinalOffsetInVolume
= StartOffsetInVolume
+
4109 (ClusterCountThisRun
- 1) * sizeof(FAT_ENTRY
);
4112 StartingPage
= StartOffsetInVolume
/ PAGE_SIZE
;
4116 ULONG NumberOfPages
;
4119 NumberOfPages
= (FinalOffsetInVolume
/ PAGE_SIZE
) -
4120 (StartOffsetInVolume
/ PAGE_SIZE
) + 1;
4122 RtlZeroMemory( &SavedBcbs
[0][0], (NumberOfPages
+ 1) * sizeof(PBCB
) * 2 );
// Pin (and pre-dirty) every FAT page this chunk touches up front.
4124 for ( Page
= 0, Offset
= StartOffsetInVolume
& ~(PAGE_SIZE
- 1);
4125 Page
< NumberOfPages
;
4126 Page
++, Offset
+= PAGE_SIZE
) {
4128 FatPrepareWriteVolumeFile( IrpContext
,
4132 &SavedBcbs
[Page
][0],
4133 (PVOID
*)&SavedBcbs
[Page
][1],
4139 FatEntry
= (PULONG
)((PUCHAR
)SavedBcbs
[0][1] +
4140 (StartOffsetInVolume
% PAGE_SIZE
));
4146 // Mark the run dirty
4149 StartSectorLbo
= StartOffsetInVolume
& ~(SectorSize
- 1);
4150 FinalSectorLbo
= FinalOffsetInVolume
& ~(SectorSize
- 1);
4152 for (Lbo
= StartSectorLbo
; Lbo
<= FinalSectorLbo
; Lbo
+= SectorSize
) {
4154 FatAddMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
, (VBO
)Lbo
, Lbo
, SectorSize
);
4158 // Store the entries
4160 // We need extra synchronization here for broken architectures
4161 // like the ALPHA that don't support atomic 16 bit writes.
4165 FatLockFreeClusterBitMap( Vcb
);
4166 ReleaseMutex
= TRUE
;
4169 FinalCluster
= StartingFatIndex
+ ClusterCountThisRun
- 1;
4172 for (Cluster
= StartingFatIndex
;
4173 Cluster
<= FinalCluster
;
4174 Cluster
++, FatEntry
++) {
4177 // If we just crossed a page boundary (as opposed to starting
4178 // on one), update our idea of FatEntry.
4180 if ( (((ULONG_PTR
)FatEntry
& (PAGE_SIZE
-1)) == 0) &&
4181 (Cluster
!= StartingFatIndex
) ) {
4184 FatEntry
= (PULONG
)SavedBcbs
[Page
][1];
4187 *FatEntry
= ChainTogether
? (FAT_ENTRY
)(Cluster
+ 1) :
4188 FAT_CLUSTER_AVAILABLE
;
4192 // Fix up the last entry if we were chaining together
// (second half of this condition -- ChainTogether -- is elided in
// this extracted view)
4195 if ((ClusterCount
<= MAXCOUNTCLUS
) &&
4198 *(FatEntry
-1) = FAT_CLUSTER_LAST
;
4202 FatUnlockFreeClusterBitMap( Vcb
);
4203 ReleaseMutex
= FALSE
;
// Release the Bcbs pinned for this chunk before moving on.
4212 while ( SavedBcbs
[i
][0] != NULL
) {
4214 FatUnpinBcb( IrpContext
, SavedBcbs
[i
][0] );
4215 SavedBcbs
[i
][0] = NULL
;
// If this was the final (or only) chunk we are done; otherwise
// advance to the next MAXCOUNTCLUS-sized chunk of the run.
4221 if (ClusterCount
<= MAXCOUNTCLUS
) {
4227 StartingFatIndex
+= MAXCOUNTCLUS
;
4228 ClusterCount
-= MAXCOUNTCLUS
;
4235 // DEAL WITH 16 BIT CASE
4238 VBO StartOffsetInVolume
;
4239 VBO FinalOffsetInVolume
;
4245 StartOffsetInVolume
= FatReservedBytes(&Vcb
->Bpb
) +
4246 StartingFatIndex
* sizeof(USHORT
);
4248 FinalOffsetInVolume
= StartOffsetInVolume
+
4249 (ClusterCount
- 1) * sizeof(USHORT
);
4252 StartingPage
= StartOffsetInVolume
/ PAGE_SIZE
;
4256 // Read in one page of fat at a time. We cannot read in the
4257 // all of the fat we need because of cache manager limitations.
4259 // SavedBcb was initialized to be able to hold the largest
4260 // possible number of pages in a fat plus an extra one to
4261 // accommodate the boot sector, plus one more to make sure there
4262 // is enough room for the RtlZeroMemory below that needs to mark
4263 // the first Bcb after all the ones we will use as an end marker.
4267 ULONG NumberOfPages
;
4270 NumberOfPages
= (FinalOffsetInVolume
/ PAGE_SIZE
) -
4271 (StartOffsetInVolume
/ PAGE_SIZE
) + 1;
4273 RtlZeroMemory( &SavedBcbs
[0][0], (NumberOfPages
+ 1) * sizeof(PBCB
) * 2 );
4275 for ( Page
= 0, Offset
= StartOffsetInVolume
& ~(PAGE_SIZE
- 1);
4276 Page
< NumberOfPages
;
4277 Page
++, Offset
+= PAGE_SIZE
) {
4279 FatPrepareWriteVolumeFile( IrpContext
,
4283 &SavedBcbs
[Page
][0],
4284 (PVOID
*)&SavedBcbs
[Page
][1],
4290 FatEntry
= (PUSHORT
)((PUCHAR
)SavedBcbs
[0][1] +
4291 (StartOffsetInVolume
% PAGE_SIZE
));
4297 // Mark the run dirty
4300 StartSectorLbo
= StartOffsetInVolume
& ~(SectorSize
- 1);
4301 FinalSectorLbo
= FinalOffsetInVolume
& ~(SectorSize
- 1);
4303 for (Lbo
= StartSectorLbo
; Lbo
<= FinalSectorLbo
; Lbo
+= SectorSize
) {
4305 FatAddMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
, (VBO
) Lbo
, Lbo
, SectorSize
);
4309 // Store the entries
4311 // We need extra synchronization here for broken architectures
4312 // like the ALPHA that don't support atomic 16 bit writes.
4316 FatLockFreeClusterBitMap( Vcb
);
4317 ReleaseMutex
= TRUE
;
4320 FinalCluster
= StartingFatIndex
+ ClusterCount
- 1;
4323 for (Cluster
= StartingFatIndex
;
4324 Cluster
<= FinalCluster
;
4325 Cluster
++, FatEntry
++) {
4328 // If we just crossed a page boundary (as opposed to starting
4329 // on one), update our idea of FatEntry.
4331 if ( (((ULONG_PTR
)FatEntry
& (PAGE_SIZE
-1)) == 0) &&
4332 (Cluster
!= StartingFatIndex
) ) {
4335 FatEntry
= (PUSHORT
)SavedBcbs
[Page
][1];
4338 *FatEntry
= (USHORT
) (ChainTogether
? (FAT_ENTRY
)(Cluster
+ 1) :
4339 FAT_CLUSTER_AVAILABLE
);
4343 // Fix up the last entry if we were chaining together
4346 if ( ChainTogether
) {
4348 *(FatEntry
-1) = (USHORT
)FAT_CLUSTER_LAST
;
4351 FatUnlockFreeClusterBitMap( Vcb
);
4352 ReleaseMutex
= FALSE
;
4360 DebugUnwind( FatSetFatRun
);
4363 // If we still somehow have the Mutex, release it.
4368 ASSERT( _SEH2_AbnormalTermination() );
4370 FatUnlockFreeClusterBitMap( Vcb
);
// Drop any Bcbs still pinned when we raised.
4377 while ( SavedBcbs
[i
][0] != NULL
) {
4379 FatUnpinBcb( IrpContext
, SavedBcbs
[i
][0] );
4385 // At this point nothing in this finally clause should have raised.
4386 // So, now comes the unsafe (sigh) stuff.
4389 if ( _SEH2_AbnormalTermination() &&
4390 (Vcb
->AllocationSupport
.FatIndexBitSize
== 32) ) {
4395 // This case is more complex because the FAT12 and FAT16 cases
4396 // pin all the needed FAT pages (128K max), after which it
4397 // can't fail, before changing any FAT entries. In the Fat32
4398 // case, it may not be practical to pin all the needed FAT
4399 // pages, because that could span many megabytes. So Fat32
4400 // attacks in chunks, and if a failure occurs once the first
4401 // chunk has been updated, we have to back out the updates.
4403 // The unwind consists of walking back over each FAT entry we
4404 // have changed, setting it back to the previous value. Note
4405 // that the previous value will either be FAT_CLUSTER_AVAILABLE
4406 // (if ChainTogether==TRUE) or a simple link to the successor
4407 // (if ChainTogether==FALSE).
4409 // We concede that any one of these calls could fail too; our
4410 // objective is to make this case no more likely than the case
4411 // for a file consisting of multiple disjoint runs.
4414 while ( StartingFatIndex
> SavedStartingFatIndex
) {
// NOTE(review): the condition of the ternary below is elided in
// this extracted view. Per the restore-to-previous-value comment
// above, the value written should be FAT_CLUSTER_AVAILABLE when
// ChainTogether==TRUE and the successor link when FALSE -- verify
// the ternary's arms against the full source.
4418 FatSetFatEntry( IrpContext
, Vcb
, StartingFatIndex
,
4420 StartingFatIndex
+ 1 : FAT_CLUSTER_AVAILABLE
);
4424 DebugTrace(-1, Dbg
, "FatSetFatRun -> (VOID)\n", 0);
4432 // Internal support routine
// LogOf (name per the DebugTrace tags below): base-2 log of a value
// known to be a power of two; bugchecks otherwise.
4442 Routine Description:
4444 This routine just computes the base 2 log of an integer. It is only used
4445 on objects that are know to be powers of two.
4449 Value - The value to take the base 2 log of.
4453 UCHAR - The base 2 log of Value.
4462 DebugTrace(+1, Dbg
, "LogOf\n", 0);
4463 DebugTrace( 0, Dbg
, " Value = %8lx\n", Value
);
4466 // Knock bits off until we get a one at position 0
4469 while ( (Value
& 0xfffffffe) != 0 ) {
4476 // If there was more than one bit set, the file system messed up,
4482 DebugTrace( 0, Dbg
, "Received non power of 2.\n", 0);
4484 FatBugCheck( Value
, Log
, 0 );
4487 DebugTrace(-1, Dbg
, "LogOf -> %8lx\n", Log
);
4494 FatExamineFatEntries(
4495 IN PIRP_CONTEXT IrpContext
,
4497 IN ULONG StartIndex OPTIONAL
,
4498 IN ULONG EndIndex OPTIONAL
,
4499 IN BOOLEAN SetupWindows
,
4500 IN PFAT_WINDOW SwitchToWindow OPTIONAL
,
4501 IN PULONG BitMapBuffer OPTIONAL
4505 Routine Description:
4507 This routine handles scanning a segment of the FAT into in-memory structures.
4509 There are three fundamental cases, with variations depending on the FAT type:
4511 1) During volume setup, FatSetupAllocations
4513 1a) for FAT12/16, read the FAT into our free clusterbitmap
4514 1b) for FAT32, perform the initial scan for window free cluster counts
4516 2) Switching FAT32 windows on the fly during system operation
4518 3) Reading arbitrary segments of the FAT for the purposes of the GetVolumeBitmap
4519 call (only for FAT32)
4521 There really is too much going on in here. At some point this should be
4522 substantially rewritten.
4526 Vcb - Supplies the volume involved
4528 StartIndex - Supplies the starting cluster, ignored if SwitchToWindow supplied
4530 EndIndex - Supplies the ending cluster, ignored if SwitchToWindow supplied
4532 SetupWindows - Indicates if we are doing the initial FAT32 scan
4534 SwitchToWindow - Supplies the FAT window we are examining and will switch to
4536 BitMapBuffer - Supplies a specific bitmap to fill in, if not supplied we fill
4537 in the volume free cluster bitmap if !SetupWindows
4541 None. Lots of side effects.
4545 ULONG FatIndexBitSize
;
4549 FAT_ENTRY FatEntry
= FAT_CLUSTER_AVAILABLE
;
4550 FAT_ENTRY FirstFatEntry
= FAT_CLUSTER_AVAILABLE
;
4554 ULONG EntriesPerWindow
;
4559 ULONG ClustersThisRun
;
4560 ULONG StartIndexOfThisRun
;
4562 PULONG FreeClusterCount
= NULL
;
4564 PFAT_WINDOW CurrentWindow
= NULL
;
4566 PVOID NewBitMapBuffer
= NULL
;
4567 PRTL_BITMAP BitMap
= NULL
;
4568 RTL_BITMAP PrivateBitMap
;
4579 // Now assert correct usage.
4582 FatIndexBitSize
= Vcb
->AllocationSupport
.FatIndexBitSize
;
4584 ASSERT( !(SetupWindows
&& (SwitchToWindow
|| BitMapBuffer
)));
4585 ASSERT( !(SetupWindows
&& FatIndexBitSize
!= 32));
4587 if (Vcb
->NumberOfWindows
> 1) {
4590 // FAT32: Calculate the number of FAT entries covered by a window. This is
4591 // equal to the number of bits in the freespace bitmap, the size of which
4595 EntriesPerWindow
= MAX_CLUSTER_BITMAP_SIZE
;
4599 EntriesPerWindow
= Vcb
->AllocationSupport
.NumberOfClusters
;
4603 // We will also fill in the cumulative count of free clusters for
4604 // the entire volume. If this is not appropriate, NULL it out
4608 FreeClusterCount
= &Vcb
->AllocationSupport
.NumberOfFreeClusters
;
4612 ASSERT(BitMapBuffer
== NULL
);
4615 // In this case we're just supposed to scan the fat and set up
4616 // the information regarding where the buckets fall and how many
4617 // free clusters are in each.
4619 // It is fine to monkey with the real windows, we must be able
4620 // to do this to activate the volume.
4625 CurrentWindow
= &Vcb
->Windows
[0];
4626 CurrentWindow
->FirstCluster
= StartIndex
;
4627 CurrentWindow
->ClustersFree
= 0;
4630 // We always wish to calculate total free clusters when
4631 // setting up the FAT windows.
4634 } else if (BitMapBuffer
== NULL
) {
4637 // We will be filling in the free cluster bitmap for the volume.
4638 // Careful, we can raise out of here and be hopelessly hosed if
4639 // we built this up in the main bitmap/window itself.
4641 // For simplicity's sake, we'll do the swap for everyone. FAT32
4642 // provokes the need since we can't tolerate partial results
4643 // when switching windows.
4646 ASSERT( SwitchToWindow
);
4648 CurrentWindow
= SwitchToWindow
;
4649 StartIndex
= CurrentWindow
->FirstCluster
;
4650 EndIndex
= CurrentWindow
->LastCluster
;
4652 BitMap
= &PrivateBitMap
;
4653 NewBitMapBuffer
= FsRtlAllocatePoolWithTag( PagedPool
,
4654 (EntriesPerWindow
+ 7) / 8,
4657 RtlInitializeBitMap( &PrivateBitMap
,
4659 EndIndex
- StartIndex
+ 1);
4661 if (FatIndexBitSize
== 32) {
4664 // We do not wish count total clusters here.
4667 FreeClusterCount
= NULL
;
4673 BitMap
= &PrivateBitMap
;
4674 RtlInitializeBitMap(&PrivateBitMap
,
4676 EndIndex
- StartIndex
+ 1);
4679 // We do not count total clusters here.
4682 FreeClusterCount
= NULL
;
4686 // Now, our start index better be in the file heap.
4689 ASSERT( StartIndex
>= 2 );
4692 // Pick up the initial chunk of the FAT and first entry.
4695 if (FatIndexBitSize
== 12) {
4698 // We read in the entire fat in the 12 bit case.
4701 FatReadVolumeFile( IrpContext
,
4703 FatReservedBytes( &Vcb
->Bpb
),
4704 FatBytesPerFat( &Vcb
->Bpb
),
4706 (PVOID
*)&FatBuffer
);
4708 FatLookup12BitEntry(FatBuffer
, 0, &FirstFatEntry
);
4713 // Read in one page of fat at a time. We cannot read in the
4714 // all of the fat we need because of cache manager limitations.
4717 ULONG BytesPerEntry
= FatIndexBitSize
>> 3;
4719 ULONG EntriesPerPage
= PAGE_SIZE
/ BytesPerEntry
;
4722 Page
= (FatReservedBytes(&Vcb
->Bpb
) + StartIndex
* BytesPerEntry
) / PAGE_SIZE
;
4724 Offset
= Page
* PAGE_SIZE
;
4726 FatReadVolumeFile( IrpContext
,
4733 if (FatIndexBitSize
== 32) {
4736 FatBuffer
= (PUSHORT
)((PUCHAR
)pv
+
4737 (FatReservedBytes(&Vcb
->Bpb
) + StartIndex
* BytesPerEntry
) %
4740 FirstFatEntry
= *((PULONG
)FatBuffer
);
4741 FirstFatEntry
= FirstFatEntry
& FAT32_ENTRY_MASK
;
4745 FatBuffer
= (PUSHORT
)((PUCHAR
)pv
+
4746 FatReservedBytes(&Vcb
->Bpb
) % PAGE_SIZE
) + 2;
4748 FirstFatEntry
= *FatBuffer
;
4753 CurrentRun
= (FirstFatEntry
== FAT_CLUSTER_AVAILABLE
) ?
4754 FreeClusters
: AllocatedClusters
;
4756 StartIndexOfThisRun
= StartIndex
;
4760 for (FatIndex
= StartIndex
; FatIndex
<= EndIndex
; FatIndex
++) {
4763 if (FatIndexBitSize
== 12) {
4765 FatLookup12BitEntry(FatBuffer
, FatIndex
, &FatEntry
);
4770 // If we are setting up the FAT32 windows and have stepped into a new
4771 // bucket, finalize this one and move forward.
4775 FatIndex
> StartIndex
&&
4776 (FatIndex
- 2) % EntriesPerWindow
== 0) {
4778 CurrentWindow
->LastCluster
= FatIndex
- 1;
4780 if (CurrentRun
== FreeClusters
) {
4783 // We must be counting clusters in order to modify the
4784 // contents of the window.
4787 ASSERT( FreeClusterCount
);
4790 ClustersThisRun
= FatIndex
- StartIndexOfThisRun
;
4791 CurrentWindow
->ClustersFree
+= ClustersThisRun
;
4793 if (FreeClusterCount
) {
4794 *FreeClusterCount
+= ClustersThisRun
;
4799 ASSERT(CurrentRun
== AllocatedClusters
);
4801 ClustersThisRun
= FatIndex
- StartIndexOfThisRun
;
4804 StartIndexOfThisRun
= FatIndex
;
4805 CurrentRun
= UnknownClusters
;
4808 CurrentWindow
->ClustersFree
= 0;
4809 CurrentWindow
->FirstCluster
= FatIndex
;
4813 // If we just stepped onto a new page, grab a new pointer.
4816 if (((ULONG_PTR
)FatBuffer
& (PAGE_SIZE
- 1)) == 0) {
4818 FatUnpinBcb( IrpContext
, Bcb
);
4821 Offset
+= PAGE_SIZE
;
4823 FatReadVolumeFile( IrpContext
,
4830 FatBuffer
= (PUSHORT
)pv
;
4833 if (FatIndexBitSize
== 32) {
4836 FatEntry
= *((PULONG
)FatBuffer
)++;
4838 FatEntry
= *FatBuffer
;
4841 FatEntry
= FatEntry
& FAT32_ENTRY_MASK
;
4845 FatEntry
= *FatBuffer
;
4850 if (CurrentRun
== UnknownClusters
) {
4852 CurrentRun
= (FatEntry
== FAT_CLUSTER_AVAILABLE
) ?
4853 FreeClusters
: AllocatedClusters
;
4857 // Are we switching from a free run to an allocated run?
4860 if (CurrentRun
== FreeClusters
&&
4861 FatEntry
!= FAT_CLUSTER_AVAILABLE
) {
4863 ClustersThisRun
= FatIndex
- StartIndexOfThisRun
;
4865 if (FreeClusterCount
) {
4867 *FreeClusterCount
+= ClustersThisRun
;
4868 CurrentWindow
->ClustersFree
+= ClustersThisRun
;
4873 RtlClearBits( BitMap
,
4874 StartIndexOfThisRun
- StartIndex
,
4878 CurrentRun
= AllocatedClusters
;
4879 StartIndexOfThisRun
= FatIndex
;
4883 // Are we switching from an allocated run to a free run?
4886 if (CurrentRun
== AllocatedClusters
&&
4887 FatEntry
== FAT_CLUSTER_AVAILABLE
) {
4889 ClustersThisRun
= FatIndex
- StartIndexOfThisRun
;
4894 StartIndexOfThisRun
- StartIndex
,
4898 CurrentRun
= FreeClusters
;
4899 StartIndexOfThisRun
= FatIndex
;
4904 // Now we have to record the final run we encountered
4907 ClustersThisRun
= FatIndex
- StartIndexOfThisRun
;
4909 if (CurrentRun
== FreeClusters
) {
4911 if (FreeClusterCount
) {
4913 *FreeClusterCount
+= ClustersThisRun
;
4914 CurrentWindow
->ClustersFree
+= ClustersThisRun
;
4919 RtlClearBits( BitMap
,
4920 StartIndexOfThisRun
- StartIndex
,
4929 StartIndexOfThisRun
- StartIndex
,
4935 // And finish the last window if we are in setup.
4940 CurrentWindow
->LastCluster
= FatIndex
- 1;
4944 // Now switch the active window if required. We've successfully gotten everything
4947 // If we were tracking the free cluster count, this means we should update the
4948 // window. This is the case of FAT12/16 initialization.
4951 if (SwitchToWindow
) {
4953 if (Vcb
->FreeClusterBitMap
.Buffer
) {
4955 ExFreePool( Vcb
->FreeClusterBitMap
.Buffer
);
4958 RtlInitializeBitMap( &Vcb
->FreeClusterBitMap
,
4960 EndIndex
- StartIndex
+ 1 );
4962 NewBitMapBuffer
= NULL
;
4964 Vcb
->CurrentWindow
= SwitchToWindow
;
4965 Vcb
->ClusterHint
= -1;
4967 if (FreeClusterCount
) {
4969 ASSERT( !SetupWindows
);
4970 ASSERT( FatIndexBitSize
!= 32 );
4972 Vcb
->CurrentWindow
->ClustersFree
= *FreeClusterCount
;
4977 // Make sure plausible things occurred ...
4980 if (!SetupWindows
&& BitMapBuffer
== NULL
) {
4982 ASSERT_CURRENT_WINDOW_GOOD( Vcb
);
4985 ASSERT(Vcb
->AllocationSupport
.NumberOfFreeClusters
<= Vcb
->AllocationSupport
.NumberOfClusters
);
4990 // Unpin the last bcb and drop the temporary bitmap buffer if it exists.
4993 FatUnpinBcb( IrpContext
, Bcb
);
4995 if (NewBitMapBuffer
) {
4997 ExFreePool( NewBitMapBuffer
);