3 Copyright (c) 1989-2000 Microsoft Corporation
11 This module implements the File Write routine for Write called by the
20 // The Bug check file id for this module
23 #define BugCheckFileId (FAT_BUG_CHECK_WRITE)
26 // The local debug trace level
29 #define Dbg (DEBUG_TRACE_WRITE)
32 // Macros to increment the appropriate performance counters.
35 #define CollectWriteStats(VCB,OPEN_TYPE,BYTE_COUNT) { \
36 PFILESYSTEM_STATISTICS Stats = &(VCB)->Statistics[KeGetCurrentProcessorNumber() % FatData.NumberProcessors].Common; \
37 if (((OPEN_TYPE) == UserFileOpen)) { \
38 Stats->UserFileWrites += 1; \
39 Stats->UserFileWriteBytes += (ULONG)(BYTE_COUNT); \
40 } else if (((OPEN_TYPE) == VirtualVolumeFile || ((OPEN_TYPE) == DirectoryFile))) { \
41 Stats->MetaDataWrites += 1; \
42 Stats->MetaDataWriteBytes += (ULONG)(BYTE_COUNT); \
46 BOOLEAN FatNoAsync
= FALSE
;
49 // Local support routines
52 KDEFERRED_ROUTINE FatDeferredFlushDpc
;
58 _In_opt_ PVOID DeferredContext
,
59 _In_opt_ PVOID SystemArgument1
,
60 _In_opt_ PVOID SystemArgument2
63 WORKER_THREAD_ROUTINE FatDeferredFlush
;
72 #pragma alloc_text(PAGE, FatDeferredFlush)
73 #pragma alloc_text(PAGE, FatCommonWrite)
77 _Function_class_(IRP_MJ_WRITE
)
78 _Function_class_(DRIVER_DISPATCH
)
82 _In_ PVOLUME_DEVICE_OBJECT VolumeDeviceObject
,
90 This routine implements the FSD part of the NtWriteFile API call
94 VolumeDeviceObject - Supplies the volume device object where the
95 file being Write exists
97 Irp - Supplies the Irp being processed
101 NTSTATUS - The FSD status for the IRP
108 PIRP_CONTEXT IrpContext
= NULL
;
110 BOOLEAN ModWriter
= FALSE
;
111 BOOLEAN TopLevel
= FALSE
;
113 DebugTrace(+1, Dbg
, "FatFsdWrite\n", 0);
116 // Call the common Write routine, with blocking allowed if synchronous
119 FsRtlEnterFileSystem();
122 // We are first going to do a quick check for paging file IO. Since this
123 // is a fast path, we must replicate the check for the fsdo.
126 if (!FatDeviceIsFatFsdo( IoGetCurrentIrpStackLocation(Irp
)->DeviceObject
)) {
128 Fcb
= (PFCB
)(IoGetCurrentIrpStackLocation(Irp
)->FileObject
->FsContext
);
130 if ((NodeType(Fcb
) == FAT_NTC_FCB
) &&
131 FlagOn(Fcb
->FcbState
, FCB_STATE_PAGING_FILE
)) {
134 // Do the usual STATUS_PENDING things.
137 IoMarkIrpPending( Irp
);
140 // Perform the actual IO, it will be completed when the io finishes.
143 FatPagingFileIo( Irp
, Fcb
);
145 FsRtlExitFileSystem();
147 return STATUS_PENDING
;
153 TopLevel
= FatIsIrpTopLevel( Irp
);
155 IrpContext
= FatCreateIrpContext( Irp
, CanFsdWait( Irp
) );
158 // This is a kludge for the mod writer case. The correct state
159 // of recursion is set in IrpContext, however, we muck with the
160 // actual top level Irp field to get the correct WriteThrough
164 if (IoGetTopLevelIrp() == (PIRP
)FSRTL_MOD_WRITE_TOP_LEVEL_IRP
) {
168 IoSetTopLevelIrp( Irp
);
172 // If this is an Mdl complete request, don't go through
176 if (FlagOn( IrpContext
->MinorFunction
, IRP_MN_COMPLETE
)) {
178 DebugTrace(0, Dbg
, "Calling FatCompleteMdl\n", 0 );
179 Status
= FatCompleteMdl( IrpContext
, Irp
);
183 Status
= FatCommonWrite( IrpContext
, Irp
);
186 } _SEH2_EXCEPT(FatExceptionFilter( IrpContext
, _SEH2_GetExceptionInformation() )) {
189 // We had some trouble trying to perform the requested
190 // operation, so we'll abort the I/O request with
191 // the error status that we get back from the
195 Status
= FatProcessException( IrpContext
, Irp
, _SEH2_GetExceptionCode() );
198 // NT_ASSERT( !(ModWriter && (Status == STATUS_CANT_WAIT)) );
200 NT_ASSERT( !(ModWriter
&& TopLevel
) );
202 if (ModWriter
) { IoSetTopLevelIrp((PIRP
)FSRTL_MOD_WRITE_TOP_LEVEL_IRP
); }
204 if (TopLevel
) { IoSetTopLevelIrp( NULL
); }
206 FsRtlExitFileSystem();
209 // And return to our caller
212 DebugTrace(-1, Dbg
, "FatFsdWrite -> %08lx\n", Status
);
214 UNREFERENCED_PARAMETER( VolumeDeviceObject
);
220 _Requires_lock_held_(_Global_critical_region_
)
223 IN PIRP_CONTEXT IrpContext
,
231 This is the common write routine for NtWriteFile, called from both
232 the Fsd, or from the Fsp if a request could not be completed without
233 blocking in the Fsd. This routine's actions are
234 conditionalized by the Wait input parameter, which determines whether
235 it is allowed to block or not. If a blocking condition is encountered
236 with Wait == FALSE, however, the request is posted to the Fsp, who
237 always calls with WAIT == TRUE.
241 Irp - Supplies the Irp to process
245 NTSTATUS - The return status for the operation
257 ULONG InitialFileSize
= 0;
258 ULONG InitialValidDataLength
= 0;
260 PIO_STACK_LOCATION IrpSp
;
261 PFILE_OBJECT FileObject
;
262 TYPE_OF_OPEN TypeOfOpen
;
264 BOOLEAN PostIrp
= FALSE
;
265 BOOLEAN OplockPostIrp
= FALSE
;
266 BOOLEAN ExtendingFile
= FALSE
;
267 BOOLEAN FcbOrDcbAcquired
= FALSE
;
268 BOOLEAN SwitchBackToAsync
= FALSE
;
269 BOOLEAN CalledByLazyWriter
= FALSE
;
270 BOOLEAN ExtendingValidData
= FALSE
;
271 BOOLEAN FcbAcquiredExclusive
= FALSE
;
272 BOOLEAN FcbCanDemoteToShared
= FALSE
;
273 BOOLEAN WriteFileSizeToDirent
= FALSE
;
274 BOOLEAN RecursiveWriteThrough
= FALSE
;
275 BOOLEAN UnwindOutstandingAsync
= FALSE
;
276 BOOLEAN PagingIoResourceAcquired
= FALSE
;
277 BOOLEAN SuccessfulPurge
= FALSE
;
279 BOOLEAN SynchronousIo
;
284 NTSTATUS Status
= STATUS_SUCCESS
;
286 FAT_IO_CONTEXT StackFatIoContext
;
289 // A system buffer is only used if we have to access the buffer directly
290 // from the Fsp to clear a portion or to do a synchronous I/O, or a
291 // cached transfer. It is possible that our caller may have already
292 // mapped a system buffer, in which case we must remember this so
293 // we do not unmap it on the way out.
296 PVOID SystemBuffer
= (PVOID
) NULL
;
298 LARGE_INTEGER StartingByte
;
303 // Get current Irp stack location and file object
306 IrpSp
= IoGetCurrentIrpStackLocation( Irp
);
307 FileObject
= IrpSp
->FileObject
;
310 DebugTrace(+1, Dbg
, "FatCommonWrite\n", 0);
311 DebugTrace( 0, Dbg
, "Irp = %p\n", Irp
);
312 DebugTrace( 0, Dbg
, "ByteCount = %8lx\n", IrpSp
->Parameters
.Write
.Length
);
313 DebugTrace( 0, Dbg
, "ByteOffset.LowPart = %8lx\n", IrpSp
->Parameters
.Write
.ByteOffset
.LowPart
);
314 DebugTrace( 0, Dbg
, "ByteOffset.HighPart = %8lx\n", IrpSp
->Parameters
.Write
.ByteOffset
.HighPart
);
317 // Initialize the appropriate local variables.
320 Wait
= BooleanFlagOn(IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
321 PagingIo
= BooleanFlagOn(Irp
->Flags
, IRP_PAGING_IO
);
322 NonCachedIo
= BooleanFlagOn(Irp
->Flags
,IRP_NOCACHE
);
323 SynchronousIo
= BooleanFlagOn(FileObject
->Flags
, FO_SYNCHRONOUS_IO
);
325 //NT_ASSERT( PagingIo || FileObject->WriteAccess );
328 // Extract the bytecount and do our noop/throttle checking.
331 ByteCount
= IrpSp
->Parameters
.Write
.Length
;
334 // If there is nothing to write, return immediately.
337 if (ByteCount
== 0) {
339 Irp
->IoStatus
.Information
= 0;
340 FatCompleteRequest( IrpContext
, Irp
, STATUS_SUCCESS
);
341 return STATUS_SUCCESS
;
345 // See if we have to defer the write.
349 !CcCanIWrite(FileObject
,
351 (BOOLEAN
)(Wait
&& !BooleanFlagOn(IrpContext
->Flags
, IRP_CONTEXT_FLAG_IN_FSP
)),
352 BooleanFlagOn(IrpContext
->Flags
, IRP_CONTEXT_FLAG_DEFERRED_WRITE
))) {
354 BOOLEAN Retrying
= BooleanFlagOn(IrpContext
->Flags
, IRP_CONTEXT_FLAG_DEFERRED_WRITE
);
356 FatPrePostIrp( IrpContext
, Irp
);
358 SetFlag( IrpContext
->Flags
, IRP_CONTEXT_FLAG_DEFERRED_WRITE
);
360 CcDeferWrite( FileObject
,
361 (PCC_POST_DEFERRED_WRITE
)FatAddToWorkque
,
367 return STATUS_PENDING
;
371 // Determine our starting position and type. If we are writing
372 // at EOF, then we will need additional synchronization before
373 // the IO is issued to determine where the data will go.
376 StartingByte
= IrpSp
->Parameters
.Write
.ByteOffset
;
377 StartingVbo
= StartingByte
.LowPart
;
379 WriteToEof
= ( (StartingByte
.LowPart
== FILE_WRITE_TO_END_OF_FILE
) &&
380 (StartingByte
.HighPart
== -1) );
383 // Extract the nature of the write from the file object, and case on it
386 TypeOfOpen
= FatDecodeFileObject(FileObject
, &Vcb
, &FcbOrDcb
, &Ccb
);
388 NT_ASSERT( Vcb
!= NULL
);
391 // Save callers who try to do cached IO to the raw volume from themselves.
394 if (TypeOfOpen
== UserVolumeOpen
) {
399 NT_ASSERT(!(NonCachedIo
== FALSE
&& TypeOfOpen
== VirtualVolumeFile
));
402 // Collect interesting statistics. The FLAG_USER_IO bit will indicate
403 // what type of io we're doing in the FatNonCachedIo function.
407 CollectWriteStats(Vcb
, TypeOfOpen
, ByteCount
);
409 if (TypeOfOpen
== UserFileOpen
) {
410 SetFlag(IrpContext
->Flags
, IRP_CONTEXT_FLAG_USER_IO
);
412 ClearFlag(IrpContext
->Flags
, IRP_CONTEXT_FLAG_USER_IO
);
417 // We must disallow writes to regular objects that would require us
418 // to maintain an AllocationSize of greater than 32 significant bits.
420 // If this is paging IO, this is simply a case where we need to trim.
421 // This will occur in due course.
424 if (!PagingIo
&& !WriteToEof
&& (TypeOfOpen
!= UserVolumeOpen
)) {
427 if (!FatIsIoRangeValid( Vcb
, StartingByte
, ByteCount
)) {
430 Irp
->IoStatus
.Information
= 0;
431 FatCompleteRequest( IrpContext
, Irp
, STATUS_DISK_FULL
);
433 return STATUS_DISK_FULL
;
438 // Allocate if necessary and initialize a FAT_IO_CONTEXT block for
439 // all non cached Io. For synchronous Io
440 // we use stack storage, otherwise we allocate pool.
445 if (IrpContext
->FatIoContext
== NULL
) {
449 IrpContext
->FatIoContext
=
451 FsRtlAllocatePoolWithTag( NonPagedPoolNx
,
453 FsRtlAllocatePoolWithTag( NonPagedPool
,
455 sizeof(FAT_IO_CONTEXT
),
456 TAG_FAT_IO_CONTEXT
);
460 IrpContext
->FatIoContext
= &StackFatIoContext
;
462 SetFlag( IrpContext
->Flags
, IRP_CONTEXT_STACK_IO_CONTEXT
);
466 RtlZeroMemory( IrpContext
->FatIoContext
, sizeof(FAT_IO_CONTEXT
) );
470 KeInitializeEvent( &IrpContext
->FatIoContext
->Wait
.SyncEvent
,
478 IrpContext
->FatIoContext
->Wait
.Async
.ResourceThreadId
=
479 ExGetCurrentResourceThread();
483 IrpContext
->FatIoContext
->Wait
.Async
.ResourceThreadId
=
484 ((ULONG_PTR
)IrpContext
->FatIoContext
) | 3;
487 IrpContext
->FatIoContext
->Wait
.Async
.RequestedByteCount
=
490 IrpContext
->FatIoContext
->Wait
.Async
.FileObject
= FileObject
;
496 // Check if this volume has already been shut down. If it has, fail
497 // this write request.
500 if ( FlagOn(Vcb
->VcbState
, VCB_STATE_FLAG_SHUTDOWN
) ) {
502 Irp
->IoStatus
.Information
= 0;
503 FatCompleteRequest( IrpContext
, Irp
, STATUS_TOO_LATE
);
504 return STATUS_TOO_LATE
;
508 // This case corresponds to a write of the volume file (only the first
509 // fat allowed, the other fats are written automatically in parallel).
511 // We use an Mcb keep track of dirty sectors. Actual entries are Vbos
512 // and Lbos (ie. bytes), though they are all added in sector chunks.
513 // Since Vbo == Lbo for the volume file, the Mcb entries
514 // alternate between runs of Vbo == Lbo, and holes (Lbo == 0). We use
515 // the prior to represent runs of dirty fat sectors, and the latter
516 // for runs of clean fat. Note that since the first part of the volume
517 // file (boot sector) is always clean (a hole), and an Mcb never ends in
518 // a hole, there must always be an even number of runs(entries) in the Mcb.
520 // The strategy is to find the first and last dirty run in the desired
521 // write range (which will always be a set of pages), and write from the
522 // former to the latter. This may result in writing some clean data, but
523 // will generally be more efficient than writing each run separately.
526 if (TypeOfOpen
== VirtualVolumeFile
) {
532 VBO StartingDirtyVbo
;
534 ULONG DirtyByteCount
;
535 ULONG CleanByteCount
;
539 BOOLEAN MoreDirtyRuns
= TRUE
;
541 IO_STATUS_BLOCK RaiseIosb
;
543 DebugTrace(0, Dbg
, "Type of write is Virtual Volume File\n", 0);
546 // If we can't wait we have to post this.
551 DebugTrace( 0, Dbg
, "Passing request to Fsp\n", 0 );
553 Status
= FatFsdPostRequest(IrpContext
, Irp
);
559 // If we weren't called by the Lazy Writer, then this write
560 // must be the result of a write-through or flush operation.
561 // Setting the IrpContext flag, will cause DevIoSup.c to
562 // write-through the data to the disk.
565 if (!FlagOn((ULONG_PTR
)IoGetTopLevelIrp(), FSRTL_CACHE_TOP_LEVEL_IRP
)) {
567 SetFlag( IrpContext
->Flags
, IRP_CONTEXT_FLAG_WRITE_THROUGH
);
571 // Assert an even number of entries in the Mcb, an odd number would
572 // mean that the Mcb is corrupt.
575 NT_ASSERT( (FsRtlNumberOfRunsInLargeMcb( &Vcb
->DirtyFatMcb
) & 1) == 0);
578 // We need to skip over any clean sectors at the start of the write.
580 // Also check the two cases where there are no dirty fats in the
581 // desired write range, and complete them with success.
583 // 1) There is no Mcb entry corresponding to StartingVbo, meaning
584 // we are beyond the end of the Mcb, and thus dirty fats.
586 // 2) The run at StartingVbo is clean and continues beyond the
587 // desired write range.
590 if (!FatLookupMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
,
596 || ( (DirtyLbo
== 0) && (DirtyByteCount
>= ByteCount
) ) ) {
598 DebugTrace(0, DEBUG_TRACE_DEBUG_HOOKS
,
599 "No dirty fat sectors in the write range.\n", 0);
601 FatCompleteRequest( IrpContext
, Irp
, STATUS_SUCCESS
);
602 return STATUS_SUCCESS
;
605 DirtyVbo
= (VBO
)DirtyLbo
;
608 // If the last run was a hole (clean), up DirtyVbo to the next
609 // run, which must be dirty.
614 DirtyVbo
= StartingVbo
+ DirtyByteCount
;
618 // This is where the write will start.
621 StartingDirtyVbo
= DirtyVbo
;
625 // Now start enumerating the dirty fat sectors spanning the desired
626 // write range, this first one of which is now DirtyVbo.
629 while ( MoreDirtyRuns
) {
632 // Find the next dirty run, if it is not there, the Mcb ended
633 // in a hole, or there is some other corruption of the Mcb.
636 if (!FatLookupMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
,
643 #pragma prefast( suppress:28931, "needed for debug build" )
645 DirtyVbo
= (VBO
)DirtyLbo
;
647 DebugTrace(0, Dbg
, "Last dirty fat Mcb entry was a hole: corrupt.\n", 0);
650 #pragma prefast( suppress:28159, "things are seriously wrong if we get here" )
652 FatBugCheck( 0, 0, 0 );
656 DirtyVbo
= (VBO
)DirtyLbo
;
659 // This has to correspond to a dirty run, and must start
660 // within the write range since we check it at entry to,
661 // and at the bottom of this loop.
664 NT_ASSERT((DirtyVbo
!= 0) && (DirtyVbo
< StartingVbo
+ ByteCount
));
667 // There are three ways we can know that this was the
668 // last dirty run we want to write.
670 // 1) The current dirty run extends beyond or to the
671 // desired write range.
673 // 2) On trying to find the following clean run, we
674 // discover that this is the last run in the Mcb.
676 // 3) The following clean run extend beyond the
677 // desired write range.
679 // In any of these cases we set MoreDirtyRuns = FALSE.
683 // If the run is larger than we are writing, we also
684 // must truncate the WriteLength. This is benign in
688 if (DirtyVbo
+ DirtyByteCount
>= StartingVbo
+ ByteCount
) {
690 DirtyByteCount
= StartingVbo
+ ByteCount
- DirtyVbo
;
692 MoreDirtyRuns
= FALSE
;
697 // Scan the clean hole after this dirty run. If this
698 // run was the last, prepare to exit the loop
701 if (!FatLookupMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
,
702 DirtyVbo
+ DirtyByteCount
,
707 MoreDirtyRuns
= FALSE
;
712 // Assert that we actually found a clean run.
713 // and compute the start of the next dirty run.
716 NT_ASSERT (CleanLbo
== 0);
719 // If the next dirty run starts beyond the desired
720 // write, we have found all the runs we need, so
724 if (DirtyVbo
+ DirtyByteCount
+ CleanByteCount
>=
725 StartingVbo
+ ByteCount
) {
727 MoreDirtyRuns
= FALSE
;
732 // Compute the start of the next dirty run.
735 DirtyVbo
+= DirtyByteCount
+ CleanByteCount
;
740 } // while ( MoreDirtyRuns )
743 // At this point DirtyVbo and DirtyByteCount correctly reflect the
744 // final dirty run, constrained to the desired write range.
746 // Now compute the length we finally must write.
749 WriteLength
= (DirtyVbo
+ DirtyByteCount
) - StartingDirtyVbo
;
752 // We must now assume that the write will complete with success,
753 // and initialize our expected status in RaiseIosb. It will be
754 // modified below if an error occurs.
757 RaiseIosb
.Status
= STATUS_SUCCESS
;
758 RaiseIosb
.Information
= ByteCount
;
761 // Loop through all the fats, setting up a multiple async to
762 // write them all. If there are more than FAT_MAX_PARALLEL_IOS
763 // then we do several multiple asyncs.
769 IO_RUN StackIoRuns
[2];
772 BytesPerFat
= FatBytesPerFat( &Vcb
->Bpb
);
774 if ((ULONG
)Vcb
->Bpb
.Fats
> 2) {
776 IoRuns
= FsRtlAllocatePoolWithTag( PagedPool
,
777 (ULONG
)(Vcb
->Bpb
.Fats
*sizeof(IO_RUN
)),
782 IoRuns
= StackIoRuns
;
785 for (Fat
= 0; Fat
< (ULONG
)Vcb
->Bpb
.Fats
; Fat
++) {
787 IoRuns
[Fat
].Vbo
= StartingDirtyVbo
;
788 IoRuns
[Fat
].Lbo
= Fat
* BytesPerFat
+ StartingDirtyVbo
;
789 IoRuns
[Fat
].Offset
= StartingDirtyVbo
- StartingVbo
;
790 IoRuns
[Fat
].ByteCount
= WriteLength
;
794 // Keep track of meta-data disk ios.
797 Vcb
->Statistics
[KeGetCurrentProcessorNumber() % FatData
.NumberProcessors
].Common
.MetaDataDiskWrites
+= Vcb
->Bpb
.Fats
;
801 FatMultipleAsync( IrpContext
,
804 (ULONG
)Vcb
->Bpb
.Fats
,
809 if (IoRuns
!= StackIoRuns
) {
811 ExFreePool( IoRuns
);
815 #if (NTDDI_VERSION >= NTDDI_WIN8)
818 // Account for DASD Ios
821 if (FatDiskAccountingEnabled
) {
823 PETHREAD ThreadIssuingIo
= PsGetCurrentThread();
825 PsUpdateDiskCounters( PsGetThreadProcess( ThreadIssuingIo
),
835 // Wait for all the writes to finish
838 FatWaitSync( IrpContext
);
841 // If we got an error, or verify required, remember it.
844 if (!NT_SUCCESS( Irp
->IoStatus
.Status
)) {
848 "Error %X while writing volume file.\n",
849 Irp
->IoStatus
.Status
);
851 RaiseIosb
= Irp
->IoStatus
;
856 // If the writes were a success, set the sectors clean, else
857 // raise the error status and mark the volume as needing
858 // verification. This will automatically reset the volume
861 // If not, then mark this volume as needing verification to
862 // automatically cause everything to get cleaned up.
865 Irp
->IoStatus
= RaiseIosb
;
867 if ( NT_SUCCESS( Status
= Irp
->IoStatus
.Status
)) {
869 FatRemoveMcbEntry( Vcb
, &Vcb
->DirtyFatMcb
,
875 FatNormalizeAndRaiseStatus( IrpContext
, Status
);
878 DebugTrace(-1, Dbg
, "CommonWrite -> %08lx\n", Status
);
880 FatCompleteRequest( IrpContext
, Irp
, Status
);
885 // This case corresponds to a general opened volume (DASD), ie.
889 if (TypeOfOpen
== UserVolumeOpen
) {
895 // Precalculate the volume size since we're nearly always going
896 // to be wanting to use it.
899 VolumeSize
= (LBO
) Int32x32To64( Vcb
->Bpb
.BytesPerSector
,
900 (Vcb
->Bpb
.Sectors
!= 0 ? Vcb
->Bpb
.Sectors
:
901 Vcb
->Bpb
.LargeSectors
));
903 StartingLbo
= StartingByte
.QuadPart
;
905 DebugTrace(0, Dbg
, "Type of write is User Volume.\n", 0);
908 // If this is a write on a disk-based volume that is not locked, we need to limit
909 // the sectors we allow to be written within the volume. Specifically, we only
910 // allow writes to the reserved area. Note that extended DASD can still be used
911 // to write past the end of the volume. We also allow kernel mode callers to force
912 // access via a flag in the IRP. A handle that issued a dismount can write anywhere
916 if ((Vcb
->TargetDeviceObject
->DeviceType
== FILE_DEVICE_DISK
) &&
917 !FlagOn( Vcb
->VcbState
, VCB_STATE_FLAG_LOCKED
) &&
918 !FlagOn( IrpSp
->Flags
, SL_FORCE_DIRECT_WRITE
) &&
919 !FlagOn( Ccb
->Flags
, CCB_FLAG_COMPLETE_DISMOUNT
)) {
922 // First check for a write beyond the end of the volume.
925 if (!WriteToEof
&& (StartingLbo
< VolumeSize
)) {
928 // This write is within the volume. Make sure it is not beyond the reserved section.
931 if ((StartingLbo
>= FatReservedBytes( &(Vcb
->Bpb
) )) ||
932 (ByteCount
> (FatReservedBytes( &(Vcb
->Bpb
) ) - StartingLbo
))) {
934 FatCompleteRequest( IrpContext
, Irp
, STATUS_ACCESS_DENIED
);
935 return STATUS_ACCESS_DENIED
;
941 // Verify that the volume for this handle is still valid, permitting
942 // operations to proceed on dismounted volumes via the handle which
943 // performed the dismount or sent a format unit command.
946 if (!FlagOn( Ccb
->Flags
, CCB_FLAG_COMPLETE_DISMOUNT
| CCB_FLAG_SENT_FORMAT_UNIT
)) {
948 FatQuickVerifyVcb( IrpContext
, Vcb
);
952 // If the caller previously sent a format unit command, then we will allow
953 // their read/write requests to ignore the verify flag on the device, since some
954 // devices send a media change event after format unit, but we don't want to
955 // process it yet since we're probably in the process of formatting the
959 if (FlagOn( Ccb
->Flags
, CCB_FLAG_SENT_FORMAT_UNIT
)) {
961 SetFlag( IrpContext
->Flags
, IRP_CONTEXT_FLAG_OVERRIDE_VERIFY
);
964 if (!FlagOn( Ccb
->Flags
, CCB_FLAG_DASD_PURGE_DONE
)) {
966 BOOLEAN PreviousWait
= BooleanFlagOn( IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
969 // Grab the entire volume so that even the normally unsafe action
970 // of writing to an unlocked volume won't open us to a race between
971 // the flush and purge of the FAT below.
973 // I really don't think this is particularly important to worry about,
974 // but a repro case for another bug happens to dance into this race
975 // condition pretty easily. Eh.
978 SetFlag( IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
979 FatAcquireExclusiveVolume( IrpContext
, Vcb
);
984 // If the volume isn't locked, flush and purge it.
987 if (!FlagOn(Vcb
->VcbState
, VCB_STATE_FLAG_LOCKED
)) {
989 FatFlushFat( IrpContext
, Vcb
);
990 CcPurgeCacheSection( &Vcb
->SectionObjectPointers
,
995 FatPurgeReferencedFileObjects( IrpContext
, Vcb
->RootDcb
, Flush
);
1000 FatReleaseVolume( IrpContext
, Vcb
);
1001 if (!PreviousWait
) {
1002 ClearFlag( IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
1006 SetFlag( Ccb
->Flags
, CCB_FLAG_DASD_PURGE_DONE
|
1007 CCB_FLAG_DASD_FLUSH_DONE
);
1010 if (!FlagOn( Ccb
->Flags
, CCB_FLAG_ALLOW_EXTENDED_DASD_IO
)) {
1013 // Make sure we don't try to write past end of volume,
1014 // reducing the requested byte count if necessary.
1017 if (WriteToEof
|| StartingLbo
>= VolumeSize
) {
1018 FatCompleteRequest( IrpContext
, Irp
, STATUS_SUCCESS
);
1019 return STATUS_SUCCESS
;
1022 if (ByteCount
> VolumeSize
- StartingLbo
) {
1024 ByteCount
= (ULONG
) (VolumeSize
- StartingLbo
);
1027 // For async writes we had set the byte count in the FatIoContext
1028 // above, so fix that here.
1033 IrpContext
->FatIoContext
->Wait
.Async
.RequestedByteCount
=
1040 // This has a peculiar interpretation, but just adjust the starting
1041 // byte to the end of the visible volume.
1046 StartingLbo
= VolumeSize
;
1051 // For DASD we have to probe and lock the user's buffer
1054 FatLockUserBuffer( IrpContext
, Irp
, IoReadAccess
, ByteCount
);
1057 // Set the FO_MODIFIED flag here to trigger a verify when this
1058 // handle is closed. Note that we can err on the conservative
1059 // side with no problem, i.e. if we accidentally do an extra
1060 // verify there is no problem.
1063 SetFlag( FileObject
->Flags
, FO_FILE_MODIFIED
);
1066 // Write the data and wait for the results
1069 FatSingleAsync( IrpContext
,
1075 #if (NTDDI_VERSION >= NTDDI_WIN8)
1078 // Account for DASD Ios
1081 if (FatDiskAccountingEnabled
) {
1083 PETHREAD ThreadIssuingIo
= PsGetCurrentThread();
1085 PsUpdateDiskCounters( PsGetThreadProcess( ThreadIssuingIo
),
1098 // We, nor anybody else, need the IrpContext any more.
1101 IrpContext
->FatIoContext
= NULL
;
1103 FatDeleteIrpContext( IrpContext
);
1105 DebugTrace(-1, Dbg
, "FatNonCachedIo -> STATUS_PENDING\n", 0);
1107 return STATUS_PENDING
;
1110 FatWaitSync( IrpContext
);
1113 // If the call didn't succeed, raise the error status
1115 // Also mark this volume as needing verification to automatically
1116 // cause everything to get cleaned up.
1119 if (!NT_SUCCESS( Status
= Irp
->IoStatus
.Status
)) {
1121 FatNormalizeAndRaiseStatus( IrpContext
, Status
);
1125 // Update the current file position. We assume that
1126 // open/create zeros out the CurrentByteOffset field.
1129 if (SynchronousIo
&& !PagingIo
) {
1130 FileObject
->CurrentByteOffset
.QuadPart
=
1131 StartingLbo
+ Irp
->IoStatus
.Information
;
1134 DebugTrace(-1, Dbg
, "FatCommonWrite -> %08lx\n", Status
);
1136 FatCompleteRequest( IrpContext
, Irp
, Status
);
1141 // At this point we know there is an Fcb/Dcb.
1144 NT_ASSERT( FcbOrDcb
!= NULL
);
1147 // Use a try-finally to free Fcb/Dcb and buffers on the way out.
1153 // This case corresponds to a normal user write file.
1156 if ( TypeOfOpen
== UserFileOpen
1159 ULONG ValidDataLength
;
1160 ULONG ValidDataToDisk
;
1161 ULONG ValidDataToCheck
;
1163 DebugTrace(0, Dbg
, "Type of write is user file open\n", 0);
1166 // If this is a noncached transfer and is not a paging I/O, and
1167 // the file has been opened cached, then we will do a flush here
1168 // to avoid stale data problems. Note that we must flush before
1169 // acquiring the Fcb shared since the write may try to acquire
1172 // The Purge following the flush will guarantee cache coherency.
1175 if (NonCachedIo
&& !PagingIo
&&
1176 (FileObject
->SectionObjectPointer
->DataSectionObject
!= NULL
)) {
1179 IO_STATUS_BLOCK IoStatus
= {0};
1181 IO_STATUS_BLOCK IoStatus
= {{0}};
1185 // We need the Fcb exclusive to do the CcPurgeCache
1188 if (!FatAcquireExclusiveFcb( IrpContext
, FcbOrDcb
)) {
1190 DebugTrace( 0, Dbg
, "Cannot acquire FcbOrDcb = %p shared without waiting\n", FcbOrDcb
);
1192 try_return( PostIrp
= TRUE
);
1195 FcbOrDcbAcquired
= TRUE
;
1196 FcbAcquiredExclusive
= TRUE
;
1199 // Preacquire pagingio for the flush.
1202 ExAcquireResourceExclusiveLite( FcbOrDcb
->Header
.PagingIoResource
, TRUE
);
1204 #if (NTDDI_VERSION >= NTDDI_WIN7)
1207 // Remember that we are holding the paging I/O resource.
1210 PagingIoResourceAcquired
= TRUE
;
1213 // We hold so that we will prevent a pagefault from occurring and seeing
1214 // soon-to-be stale data from the disk. We used to believe this was
1215 // something to be left to the app to synchronize; we now realize that
1216 // noncached IO on a fileserver is doomed without the filesystem forcing
1217 // the coherency issue. By only penalizing noncached coherency when
1218 // needed, this is about the best we can do.
1222 // Now perform the coherency flush and purge operation. This version of the call
1223 // will try to invalidate mapped pages to prevent data corruption.
1226 CcCoherencyFlushAndPurgeCache( FileObject
->SectionObjectPointer
,
1227 WriteToEof
? &FcbOrDcb
->Header
.FileSize
: &StartingByte
,
1232 SuccessfulPurge
= NT_SUCCESS( IoStatus
.Status
);
1236 CcFlushCache( FileObject
->SectionObjectPointer
,
1237 WriteToEof
? &FcbOrDcb
->Header
.FileSize
: &StartingByte
,
1241 if (!NT_SUCCESS( IoStatus
.Status
)) {
1243 ExReleaseResourceLite( FcbOrDcb
->Header
.PagingIoResource
);
1244 try_return( IoStatus
.Status
);
1248 // Remember that we are holding the paging I/O resource.
1251 PagingIoResourceAcquired
= TRUE
;
1254 // We hold so that we will prevent a pagefault from occurring and seeing
1255 // soon-to-be stale data from the disk. We used to believe this was
1256 // something to be left to the app to synchronize; we now realize that
1257 // noncached IO on a fileserver is doomed without the filesystem forcing
1258 // the coherency issue. By only penalizing noncached coherency when
1259 // needed, this is about the best we can do.
1262 SuccessfulPurge
= CcPurgeCacheSection( FileObject
->SectionObjectPointer
,
1263 WriteToEof
? &FcbOrDcb
->Header
.FileSize
: &StartingByte
,
1269 if (!SuccessfulPurge
&& (FcbOrDcb
->PurgeFailureModeEnableCount
> 0)) {
1272 // Purge failure mode only applies to user files.
1275 NT_ASSERT( TypeOfOpen
== UserFileOpen
);
1278 // Do not swallow the purge failure if in purge failure
1279 // mode. Someone outside the file system intends to handle
1280 // the error and prevent any application compatibility
1283 // NOTE: If the file system were not preventing a pagefault
1284 // from processing while this write is in flight, which it does
1285 // by holding the paging resource across the write, it would
1286 // need to fail the operation even if a purge succeeded. If
1287 // not a memory mapped read could bring in a stale page before
1288 // the write makes it to disk.
1291 try_return( Status
= STATUS_PURGE_FAILED
);
1295 // Indicate we're OK with the fcb being demoted to shared access
1296 // if that turns out to be possible later on after VDL extension
1299 // PagingIo must be held all the way through.
1302 FcbCanDemoteToShared
= TRUE
;
1306 // We assert that Paging Io writes will never WriteToEof.
1309 NT_ASSERT( WriteToEof
? !PagingIo
: TRUE
);
1312 // First let's acquire the Fcb shared. Shared is enough if we
1313 // are not writing beyond EOF.
1318 (VOID
)ExAcquireResourceSharedLite( FcbOrDcb
->Header
.PagingIoResource
, TRUE
);
1319 PagingIoResourceAcquired
= TRUE
;
1323 IrpContext
->FatIoContext
->Wait
.Async
.Resource
=
1324 FcbOrDcb
->Header
.PagingIoResource
;
1328 // Check to see if we collided with a MoveFile call, and if
1329 // so block until it completes.
1332 if (FcbOrDcb
->MoveFileEvent
) {
1334 (VOID
)KeWaitForSingleObject( FcbOrDcb
->MoveFileEvent
,
1344 // We may already have the Fcb due to noncached coherency
1345 // work done just above; however, we may still have to extend
1346 // valid data length. We can't demote this to shared, matching
1347 // what occurred before, until we figure that out a bit later.
1349 // We kept hold of it since our lock order is main->paging,
1350 // and paging must now held across the noncached write from
1355 // If this is async I/O, we will wait if there is an exclusive
1359 if (!Wait
&& NonCachedIo
) {
1361 if (!FcbOrDcbAcquired
&&
1362 !FatAcquireSharedFcbWaitForEx( IrpContext
, FcbOrDcb
)) {
1364 DebugTrace( 0, Dbg
, "Cannot acquire FcbOrDcb = %p shared without waiting\n", FcbOrDcb
);
1365 try_return( PostIrp
= TRUE
);
1369 // Note we will have to release this resource elsewhere. If we came
1370 // out of the noncached coherency path, we will also have to drop
1371 // the paging io resource.
1374 IrpContext
->FatIoContext
->Wait
.Async
.Resource
= FcbOrDcb
->Header
.Resource
;
1376 if (FcbCanDemoteToShared
) {
1378 IrpContext
->FatIoContext
->Wait
.Async
.Resource2
= FcbOrDcb
->Header
.PagingIoResource
;
1382 if (!FcbOrDcbAcquired
&&
1383 !FatAcquireSharedFcb( IrpContext
, FcbOrDcb
)) {
1385 DebugTrace( 0, Dbg
, "Cannot acquire FcbOrDcb = %p shared without waiting\n", FcbOrDcb
);
1386 try_return( PostIrp
= TRUE
);
1390 FcbOrDcbAcquired
= TRUE
;
1394 // Get a first tentative file size and valid data length.
1395 // We must get ValidDataLength first since it is always
1396 // increased second (in case we are unprotected) and
1397 // we don't want to capture ValidDataLength > FileSize.
1400 ValidDataToDisk
= FcbOrDcb
->ValidDataToDisk
;
1401 ValidDataLength
= FcbOrDcb
->Header
.ValidDataLength
.LowPart
;
1402 FileSize
= FcbOrDcb
->Header
.FileSize
.LowPart
;
1404 NT_ASSERT( ValidDataLength
<= FileSize
);
1407 // If are paging io, then we do not want
1408 // to write beyond end of file. If the base is beyond Eof, we will just
1409 // Noop the call. If the transfer starts before Eof, but extends
1410 // beyond, we will truncate the transfer to the last sector
1415 // Just in case this is paging io, limit write to file size.
1416 // Otherwise, in case of write through, since Mm rounds up
1417 // to a page, we might try to acquire the resource exclusive
1418 // when our top level guy only acquired it shared. Thus, =><=.
1423 if (StartingVbo
>= FileSize
) {
1425 DebugTrace( 0, Dbg
, "PagingIo started beyond EOF.\n", 0 );
1427 Irp
->IoStatus
.Information
= 0;
1429 try_return( Status
= STATUS_SUCCESS
);
1432 if (ByteCount
> FileSize
- StartingVbo
) {
1434 DebugTrace( 0, Dbg
, "PagingIo extending beyond EOF.\n", 0 );
1436 ByteCount
= FileSize
- StartingVbo
;
1441 // Determine if we were called by the lazywriter.
1445 if (FcbOrDcb
->Specific
.Fcb
.LazyWriteThread
== PsGetCurrentThread()) {
1447 CalledByLazyWriter
= TRUE
;
1449 if (FlagOn( FcbOrDcb
->Header
.Flags
, FSRTL_FLAG_USER_MAPPED_FILE
)) {
1452 // Fail if the start of this request is beyond valid data length.
1453 // Don't worry if this is an unsafe test. MM and CC won't
1454 // throw this page away if it is really dirty.
1457 if ((StartingVbo
+ ByteCount
> ValidDataLength
) &&
1458 (StartingVbo
< FileSize
)) {
1461 // It's OK if byte range is within the page containing valid data length,
1462 // since we will use ValidDataToDisk as the start point.
1465 if (StartingVbo
+ ByteCount
> ((ValidDataLength
+ PAGE_SIZE
- 1) & ~(PAGE_SIZE
- 1))) {
1468 // Don't flush this now.
1471 try_return( Status
= STATUS_FILE_LOCK_CONFLICT
);
1478 // This code detects if we are a recursive synchronous page write
1479 // on a write through file object.
1482 if (FlagOn(Irp
->Flags
, IRP_SYNCHRONOUS_PAGING_IO
) &&
1483 FlagOn(IrpContext
->Flags
, IRP_CONTEXT_FLAG_RECURSIVE_CALL
)) {
1487 TopIrp
= IoGetTopLevelIrp();
1490 // This clause determines if the top level request was
1491 // in the FastIo path. Gack. Since we don't have a
1492 // real sharing protocol for the top level IRP field ...
1493 // yet ... if someone put things other than a pure IRP in
1494 // there we best be careful.
1497 if ((ULONG_PTR
)TopIrp
> FSRTL_MAX_TOP_LEVEL_IRP_FLAG
&&
1498 NodeType(TopIrp
) == IO_TYPE_IRP
) {
1500 PIO_STACK_LOCATION IrpStack
;
1502 IrpStack
= IoGetCurrentIrpStackLocation(TopIrp
);
1505 // Finally this routine detects if the Top irp was a
1506 // cached write to this file and thus we are the writethrough.
1509 if ((IrpStack
->MajorFunction
== IRP_MJ_WRITE
) &&
1510 (IrpStack
->FileObject
->FsContext
== FileObject
->FsContext
) &&
1511 !FlagOn(TopIrp
->Flags
,IRP_NOCACHE
)) {
1513 RecursiveWriteThrough
= TRUE
;
1514 SetFlag( IrpContext
->Flags
, IRP_CONTEXT_FLAG_WRITE_THROUGH
);
1520 // Here is the deal with ValidDataLength and FileSize:
1522 // Rule 1: PagingIo is never allowed to extend file size.
1524 // Rule 2: Only the top level requestor may extend Valid
1525 // Data Length. This may be paging IO, as when a
1526 // a user maps a file, but will never be as a result
1527 // of cache lazy writer writes since they are not the
1528 // top level request.
1530 // Rule 3: If, using Rules 1 and 2, we decide we must extend
1531 // file size or valid data, we take the Fcb exclusive.
1535 // Now see if we are writing beyond valid data length, and thus
1536 // maybe beyond the file size. If so, then we must
1537 // release the Fcb and reacquire it exclusive. Note that it is
1538 // important that when not writing beyond EOF that we check it
1539 // while acquired shared and keep the FCB acquired, in case some
1540 // turkey truncates the file.
1544 // Note that the lazy writer must not be allowed to try and
1545 // acquire the resource exclusive. This is not a problem since
1546 // the lazy writer is paging IO and thus not allowed to extend
1547 // file size, and is never the top level guy, thus not able to
1548 // extend valid data length.
1551 if ( !CalledByLazyWriter
&&
1553 !RecursiveWriteThrough
&&
1556 StartingVbo
+ ByteCount
> ValidDataLength
)) {
1559 // If this was an asynchronous write, we are going to make
1560 // the request synchronous at this point, but only kinda.
1561 // At the last moment, before sending the write off to the
1562 // driver, we may shift back to async.
1564 // The modified page writer already has the resources
1565 // he requires, so this will complete in small finite
1572 SetFlag( IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
1576 NT_ASSERT( TypeOfOpen
== UserFileOpen
);
1578 SwitchBackToAsync
= TRUE
;
1583 // We need Exclusive access to the Fcb/Dcb since we will
1584 // probably have to extend valid data and/or file.
1588 // Y'know, the PagingIo case is a mapped page writer, and
1589 // MmFlushSection or the mapped page writer itself already
1590 // snatched up the main exclusive for us via the AcquireForCcFlush
1591 // or AcquireForModWrite logic (the default logic parallels FAT's
1592 // requirements since this order/model came first). Should ASSERT
1593 // this since it'll just go 1->2, and a few more unnecessary DPC
1596 // The preacquire is done to avoid inversion over the collided flush
1597 // meta-resource in Mm. The one time this is not true is at final
1598 // system shutdown time, when Mm goes off and flushes all the dirty
1599 // pages. Since the callback is defined as Wait == FALSE he can't
1600 // guarantee acquisition (though with clean process shutdown being
1601 // enforced, it really should be now). Permit this to float.
1603 // Note that since we're going to fall back on the acquisition already
1604 // done for us, don't confuse things by thinking we did the work
1610 ExReleaseResourceLite( FcbOrDcb
->Header
.PagingIoResource
);
1611 PagingIoResourceAcquired
= FALSE
;
1616 // The Fcb may already be acquired exclusive due to coherency
1617 // work performed earlier. If so, obviously no work to do.
1620 if (!FcbAcquiredExclusive
) {
1622 FatReleaseFcb( IrpContext
, FcbOrDcb
);
1623 FcbOrDcbAcquired
= FALSE
;
1625 if (!FatAcquireExclusiveFcb( IrpContext
, FcbOrDcb
)) {
1627 DebugTrace( 0, Dbg
, "Cannot acquire FcbOrDcb = %p shared without waiting\n", FcbOrDcb
);
1629 try_return( PostIrp
= TRUE
);
1632 FcbOrDcbAcquired
= TRUE
;
1635 #pragma prefast( suppress:28931, "convenient for debugging" )
1637 FcbAcquiredExclusive
= TRUE
;
1642 // Now that we have the Fcb exclusive, see if this write
1643 // qualifies for being made async again. The key point
1644 // here is that we are going to update ValidDataLength in
1645 // the Fcb before returning. We must make sure this will
1646 // not cause a problem. One thing we must do is keep out
1650 if (SwitchBackToAsync
) {
1652 if ((FcbOrDcb
->NonPaged
->SectionObjectPointers
.DataSectionObject
!= NULL
) ||
1653 (StartingVbo
+ ByteCount
> FcbOrDcb
->Header
.ValidDataLength
.LowPart
) ||
1656 RtlZeroMemory( IrpContext
->FatIoContext
, sizeof(FAT_IO_CONTEXT
) );
1658 KeInitializeEvent( &IrpContext
->FatIoContext
->Wait
.SyncEvent
,
1662 SwitchBackToAsync
= FALSE
;
1666 if (!FcbOrDcb
->NonPaged
->OutstandingAsyncEvent
) {
1668 FcbOrDcb
->NonPaged
->OutstandingAsyncEvent
=
1670 FsRtlAllocatePoolWithTag( NonPagedPoolNx
,
1672 FsRtlAllocatePoolWithTag( NonPagedPool
,
1677 KeInitializeEvent( FcbOrDcb
->NonPaged
->OutstandingAsyncEvent
,
1683 // If we are transitioning from 0 to 1, reset the event.
1686 if (ExInterlockedAddUlong( &FcbOrDcb
->NonPaged
->OutstandingAsyncWrites
,
1688 &FatData
.GeneralSpinLock
) == 0) {
1690 KeClearEvent( FcbOrDcb
->NonPaged
->OutstandingAsyncEvent
);
1693 UnwindOutstandingAsync
= TRUE
;
1695 IrpContext
->FatIoContext
->Wait
.Async
.NonPagedFcb
= FcbOrDcb
->NonPaged
;
1700 // Now that we have the Fcb exclusive, get a new batch of
1701 // filesize and ValidDataLength.
1704 ValidDataToDisk
= FcbOrDcb
->ValidDataToDisk
;
1705 ValidDataLength
= FcbOrDcb
->Header
.ValidDataLength
.LowPart
;
1706 FileSize
= FcbOrDcb
->Header
.FileSize
.LowPart
;
1709 // If this is PagingIo check again if any pruning is
1710 // required. It is important to start from basic
1711 // principles in case the file was *grown* ...
1716 if (StartingVbo
>= FileSize
) {
1717 Irp
->IoStatus
.Information
= 0;
1718 try_return( Status
= STATUS_SUCCESS
);
1721 ByteCount
= IrpSp
->Parameters
.Write
.Length
;
1723 if (ByteCount
> FileSize
- StartingVbo
) {
1724 ByteCount
= FileSize
- StartingVbo
;
1730 // Remember the final requested byte count
1733 if (NonCachedIo
&& !Wait
) {
1735 IrpContext
->FatIoContext
->Wait
.Async
.RequestedByteCount
=
1740 // Remember the initial file size and valid data length,
1741 // just in case .....
1744 InitialFileSize
= FileSize
;
1746 InitialValidDataLength
= ValidDataLength
;
1749 // Make sure the FcbOrDcb is still good
1752 FatVerifyFcb( IrpContext
, FcbOrDcb
);
1755 // Check for writing to end of File. If we are, then we have to
1756 // recalculate a number of fields.
1761 StartingVbo
= FileSize
;
1762 StartingByte
= FcbOrDcb
->Header
.FileSize
;
1765 // Since we couldn't know this information until now, perform the
1766 // necessary bounds checking that we omitted at the top because
1767 // this is a WriteToEof operation.
1771 if (!FatIsIoRangeValid( Vcb
, StartingByte
, ByteCount
)) {
1773 Irp
->IoStatus
.Information
= 0;
1774 try_return( Status
= STATUS_DISK_FULL
);
1781 // If this is a non paging write to a data stream object we have to
1782 // check for access according to the current state op/filelocks.
1784 // Note that after this point, operations will be performed on the file.
1785 // No modifying activity can occur prior to this point in the write
1789 if (!PagingIo
&& TypeOfOpen
== UserFileOpen
) {
1791 Status
= FsRtlCheckOplock( FatGetFcbOplock(FcbOrDcb
),
1797 if (Status
!= STATUS_SUCCESS
) {
1799 OplockPostIrp
= TRUE
;
1801 try_return( NOTHING
);
1805 // This oplock call can affect whether fast IO is possible.
1806 // We may have broken an oplock to no oplock held. If the
1807 // current state of the file is FastIoIsNotPossible then
1808 // recheck the fast IO state.
1811 if (FcbOrDcb
->Header
.IsFastIoPossible
== FastIoIsNotPossible
) {
1813 FcbOrDcb
->Header
.IsFastIoPossible
= FatIsFastIoPossible( FcbOrDcb
);
1817 // And finally check the regular file locks.
1820 if (!FsRtlCheckLockForWriteAccess( &FcbOrDcb
->Specific
.Fcb
.FileLock
, Irp
)) {
1822 try_return( Status
= STATUS_FILE_LOCK_CONFLICT
);
1827 // Determine if we will deal with extending the file. Note that
1828 // this implies extending valid data, and so we already have all
1829 // of the required synchronization done.
1832 if (!PagingIo
&& (StartingVbo
+ ByteCount
> FileSize
)) {
1834 ExtendingFile
= TRUE
;
1837 if ( ExtendingFile
) {
1841 // EXTENDING THE FILE
1845 // For an extending write on hotplug media, we are going to defer the metadata
1846 // updates via Cc's lazy writer. They will also be flushed when the handle is closed.
1849 if (FlagOn(Vcb
->VcbState
, VCB_STATE_FLAG_DEFERRED_FLUSH
)) {
1851 SetFlag(IrpContext
->Flags
, IRP_CONTEXT_FLAG_DISABLE_WRITE_THROUGH
);
1855 // Update our local copy of FileSize
1858 FileSize
= StartingVbo
+ ByteCount
;
1861 if (FcbOrDcb
->Header
.AllocationSize
.QuadPart
== FCB_LOOKUP_ALLOCATIONSIZE_HINT
) {
1863 FatLookupFileAllocationSize( IrpContext
, FcbOrDcb
);
1867 // If the write goes beyond the allocation size, add some
1872 if ( (FileSize
) > FcbOrDcb
->Header
.AllocationSize
.LowPart
) {
1875 BOOLEAN AllocateMinimumSize
= TRUE
;
1878 // Only do allocation chunking on writes if this is
1879 // not the first allocation added to the file.
1882 if (FcbOrDcb
->Header
.AllocationSize
.LowPart
!= 0 ) {
1884 ULONGLONG ApproximateClusterCount
;
1885 ULONGLONG TargetAllocation
;
1886 ULONGLONG AddedAllocation
;
1887 ULONGLONG Multiplier
;
1888 ULONG BytesPerCluster
;
1889 ULONG ClusterAlignedFileSize
;
1892 // We are going to try and allocate a bigger chunk than
1893 // we actually need in order to maximize FastIo usage.
1895 // The multiplier is computed as follows:
1899 // Mult = ( (-------------------------) / 32 ) + 1
1900 // (FileSize - AllocationSize)
1902 // and max out at 32.
1904 // With this formula we start winding down chunking
1905 // as we get near the disk space wall.
1907 // For instance on an empty 1 MEG floppy doing an 8K
1908 // write, the multiplier is 6, or 48K to allocate.
1909 // When this disk is half full, the multiplier is 3,
1910 // and when it is 3/4 full, the multiplier is only 1.
1912 // On a larger disk, the multiplier for a 8K read will
1913 // reach its maximum of 32 when there is at least ~8 Megs
1918 // Small write performance note, use cluster aligned
1919 // file size in above equation.
1923 // We need to carefully consider what happens when we approach
1924 // a 2^32 byte filesize. Overflows will cause problems.
1927 BytesPerCluster
= 1 << Vcb
->AllocationSupport
.LogOfBytesPerCluster
;
1930 // This can overflow if the target filesize is in the last cluster.
1931 // In this case, we can obviously skip over all of this fancy
1932 // logic and just max out the file right now.
1936 ClusterAlignedFileSize
= ((FileSize
) + (BytesPerCluster
- 1)) &
1937 ~(BytesPerCluster
- 1);
1940 if (ClusterAlignedFileSize
!= 0) {
1943 // This actually has a chance but the possibility of overflowing
1944 // the numerator is pretty unlikely, made more unlikely by moving
1945 // the divide by 32 up to scale the BytesPerCluster. However, even if it does the
1946 // effect is completely benign.
1948 // FAT32 with a 64k cluster and over 2^21 clusters would do it (and
1949 // so forth - 2^(16 - 5 + 21) == 2^32). Since this implies a partition
1950 // of 32gb and a number of clusters (and cluster size) we plan to
1951 // disallow in format for FAT32, the odds of this happening are pretty
1953 Multiplier
= ((Vcb
->AllocationSupport
.NumberOfFreeClusters
*
1954 (BytesPerCluster
>> 5)) /
1955 (ClusterAlignedFileSize
-
1956 FcbOrDcb
->Header
.AllocationSize
.LowPart
)) + 1;
1958 if (Multiplier
> 32) { Multiplier
= 32; }
1960 // These computations will never overflow a ULONGLONG because a file is capped at 4GB, and
1961 // a single write can be a max of 4GB.
1962 AddedAllocation
= Multiplier
* (ClusterAlignedFileSize
- FcbOrDcb
->Header
.AllocationSize
.LowPart
);
1964 TargetAllocation
= FcbOrDcb
->Header
.AllocationSize
.LowPart
+ AddedAllocation
;
1967 // We know that TargetAllocation is in whole clusters. Now
1968 // we check if it exceeded the maximum valid FAT file size.
1969 // If it did, we fall back to allocating up to the maximum legal size.
1972 if (TargetAllocation
> ~BytesPerCluster
+ 1) {
1974 TargetAllocation
= ~BytesPerCluster
+ 1;
1975 AddedAllocation
= TargetAllocation
- FcbOrDcb
->Header
.AllocationSize
.LowPart
;
1979 // Now do an unsafe check here to see if we should even
1980 // try to allocate this much. If not, just allocate
1981 // the minimum size we need, if so so try it, but if it
1982 // fails, just allocate the minimum size we need.
1985 ApproximateClusterCount
= (AddedAllocation
/ BytesPerCluster
);
1987 if (ApproximateClusterCount
<= Vcb
->AllocationSupport
.NumberOfFreeClusters
) {
1991 FatAddFileAllocation( IrpContext
,
1994 (ULONG
)TargetAllocation
);
1996 AllocateMinimumSize
= FALSE
;
1997 SetFlag( FcbOrDcb
->FcbState
, FCB_STATE_TRUNCATE_ON_CLOSE
);
1999 } _SEH2_EXCEPT( _SEH2_GetExceptionCode() == STATUS_DISK_FULL
?
2000 EXCEPTION_EXECUTE_HANDLER
: EXCEPTION_CONTINUE_SEARCH
) {
2002 FatResetExceptionState( IrpContext
);
2008 if ( AllocateMinimumSize
) {
2011 FatAddFileAllocation( IrpContext
,
2020 // Assert that the allocation worked
2024 NT_ASSERT( FcbOrDcb
->Header
.AllocationSize
.LowPart
>= FileSize
);
2030 // Set the new file size in the Fcb
2034 NT_ASSERT( FileSize
<= FcbOrDcb
->Header
.AllocationSize
.LowPart
);
2037 FcbOrDcb
->Header
.FileSize
.LowPart
= FileSize
;
2040 // Extend the cache map, letting Mm know the new file size.
2041 // We only have to do this if the file is cached.
2044 if (CcIsFileCached(FileObject
)) {
2045 CcSetFileSizes( FileObject
, (PCC_FILE_SIZES
)&FcbOrDcb
->Header
.AllocationSize
);
2050 // Determine if we will deal with extending valid data.
2053 if ( !CalledByLazyWriter
&&
2054 !RecursiveWriteThrough
&&
2055 (StartingVbo
+ ByteCount
> ValidDataLength
) ) {
2057 ExtendingValidData
= TRUE
;
2062 // If not extending valid data, and we otherwise believe we
2063 // could demote from exclusive to shared, do so. This will
2064 // occur when we synchronize tight for noncached coherency
2065 // but must defer the demotion until after we decide about
2066 // valid data length, which requires it exclusive. Since we
2067 // can't drop/re-pick the resources without letting a pagefault
2068 // squirt through, the resource decision was kept up in the air
2071 // Note that we've still got PagingIo exclusive in these cases.
2074 if (FcbCanDemoteToShared
) {
2076 NT_ASSERT( FcbAcquiredExclusive
&& ExIsResourceAcquiredExclusiveLite( FcbOrDcb
->Header
.Resource
));
2077 ExConvertExclusiveToSharedLite( FcbOrDcb
->Header
.Resource
);
2078 FcbAcquiredExclusive
= FALSE
;
2082 if (ValidDataToDisk
> ValidDataLength
) {
2084 ValidDataToCheck
= ValidDataToDisk
;
2088 ValidDataToCheck
= ValidDataLength
;
2094 // HANDLE THE NON-CACHED CASE
2097 if ( NonCachedIo
) {
2100 // Declare some local variables for enumeration through the
2101 // runs of the file, and an array to store parameters for
2109 DebugTrace(0, Dbg
, "Non cached write.\n", 0);
2112 // Round up to sector boundary. The end of the write interval
2113 // must, however, be beyond EOF.
2116 SectorSize
= (ULONG
)Vcb
->Bpb
.BytesPerSector
;
2118 BytesToWrite
= (ByteCount
+ (SectorSize
- 1))
2119 & ~(SectorSize
- 1);
2122 // All requests should be well formed and
2123 // make sure we don't wipe out any data
2126 if (((StartingVbo
& (SectorSize
- 1)) != 0) ||
2128 ((BytesToWrite
!= ByteCount
) &&
2129 (StartingVbo
+ ByteCount
< ValidDataLength
))) {
2133 DebugTrace( 0, Dbg
, "FatCommonWrite -> STATUS_NOT_IMPLEMENTED\n", 0);
2134 try_return( Status
= STATUS_NOT_IMPLEMENTED
);
2138 // If this noncached transfer is at least one sector beyond
2139 // the current ValidDataLength in the Fcb, then we have to
2140 // zero the sectors in between. This can happen if the user
2141 // has opened the file noncached, or if the user has mapped
2142 // the file and modified a page beyond ValidDataLength. It
2143 // *cannot* happen if the user opened the file cached, because
2144 // ValidDataLength in the Fcb is updated when he does the cached
2145 // write (we also zero data in the cache at that time), and
2146 // therefore, we will bypass this test when the data
2147 // is ultimately written through (by the Lazy Writer).
2149 // For the paging file we don't care about security (ie.
2150 // stale data), so don't bother zeroing.
2152 // We can actually get writes wholly beyond valid data length
2153 // from the LazyWriter because of paging Io decoupling.
2156 if (!CalledByLazyWriter
&&
2157 !RecursiveWriteThrough
&&
2158 (StartingVbo
> ValidDataToCheck
)) {
2160 FatZeroData( IrpContext
,
2164 StartingVbo
- ValidDataToCheck
);
2168 // Make sure we write FileSize to the dirent if we
2169 // are extending it and we are successful. (This may or
2170 // may not occur Write Through, but that is fine.)
2173 WriteFileSizeToDirent
= TRUE
;
2176 // Perform the actual IO
2179 if (SwitchBackToAsync
) {
2182 ClearFlag( IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
2185 #ifdef SYSCACHE_COMPILE
2187 #define MY_SIZE 0x1000000
2188 #define LONGMAP_COUNTER
2192 // Maintain a bitmap of IO started on this file.
2196 PULONG WriteMask
= FcbOrDcb
->WriteMask
;
2198 if (NULL
== WriteMask
) {
2200 WriteMask
= FsRtlAllocatePoolWithTag( NonPagedPoolNx
,
2201 (MY_SIZE
/PAGE_SIZE
) / 8,
2204 FcbOrDcb
->WriteMask
= WriteMask
;
2205 RtlZeroMemory(WriteMask
, (MY_SIZE
/PAGE_SIZE
) / 8);
2208 if (StartingVbo
< MY_SIZE
) {
2210 ULONG Off
= StartingVbo
;
2211 ULONG Len
= BytesToWrite
;
2213 if (Off
+ Len
> MY_SIZE
) {
2214 Len
= MY_SIZE
- Off
;
2218 WriteMask
[(Off
/PAGE_SIZE
) / 32] |=
2219 1 << (Off
/PAGE_SIZE
) % 32;
2222 if (Len
<= PAGE_SIZE
) {
2231 #ifdef LONGMAP_COUNTER
2233 // Maintain a longmap of IO started on this file, each ulong containing
2234 // the value of an ascending counter per write (gives us order information).
2236 // Unlike the old bitmask stuff, this is mostly well synchronized.
2240 PULONG WriteMask
= (PULONG
)FcbOrDcb
->WriteMask
;
2242 if (NULL
== WriteMask
) {
2244 WriteMask
= FsRtlAllocatePoolWithTag( NonPagedPoolNx
,
2245 (MY_SIZE
/PAGE_SIZE
) * sizeof(ULONG
),
2248 FcbOrDcb
->WriteMask
= WriteMask
;
2249 RtlZeroMemory(WriteMask
, (MY_SIZE
/PAGE_SIZE
) * sizeof(ULONG
));
2252 if (StartingVbo
< MY_SIZE
) {
2254 ULONG Off
= StartingVbo
;
2255 ULONG Len
= BytesToWrite
;
2256 ULONG Tick
= InterlockedIncrement( &FcbOrDcb
->WriteMaskData
);
2258 if (Off
+ Len
> MY_SIZE
) {
2259 Len
= MY_SIZE
- Off
;
2263 InterlockedExchange( WriteMask
+ Off
/PAGE_SIZE
, Tick
);
2266 if (Len
<= PAGE_SIZE
) {
2278 if (FatNonCachedIo( IrpContext
,
2284 0) == STATUS_PENDING
) {
2287 UnwindOutstandingAsync
= FALSE
;
2290 #pragma prefast( suppress:28931, "convenient for debugging" )
2293 SetFlag( IrpContext
->Flags
, IRP_CONTEXT_FLAG_WAIT
);
2295 IrpContext
->FatIoContext
= NULL
;
2299 // As a matter of fact, if we hit this we are in deep trouble
2300 // if VDL is being extended. We are no longer attached to the
2301 // IRP, and have thus lost synchronization. Note that we should
2302 // not hit this case anymore since we will not re-async vdl extension.
2305 NT_ASSERT( !ExtendingValidData
);
2307 try_return( Status
= STATUS_PENDING
);
2311 // If the call didn't succeed, raise the error status
2314 if (!NT_SUCCESS( Status
= Irp
->IoStatus
.Status
)) {
2316 FatNormalizeAndRaiseStatus( IrpContext
, Status
);
2320 ULONG NewValidDataToDisk
;
2323 // Else set the context block to reflect the entire write
2324 // Also assert we got how many bytes we asked for.
2327 NT_ASSERT( Irp
->IoStatus
.Information
== BytesToWrite
);
2329 Irp
->IoStatus
.Information
= ByteCount
;
2332 // Take this opportunity to update ValidDataToDisk.
2335 NewValidDataToDisk
= StartingVbo
+ ByteCount
;
2337 if (NewValidDataToDisk
> FileSize
) {
2338 NewValidDataToDisk
= FileSize
;
2341 if (FcbOrDcb
->ValidDataToDisk
< NewValidDataToDisk
) {
2342 FcbOrDcb
->ValidDataToDisk
= NewValidDataToDisk
;
2347 // The transfer is either complete, or the Iosb contains the
2348 // appropriate status.
2351 try_return( Status
);
2353 } // if No Intermediate Buffering
2357 // HANDLE CACHED CASE
2362 NT_ASSERT( !PagingIo
);
2365 // We delay setting up the file cache until now, in case the
2366 // caller never does any I/O to the file, and thus
2367 // FileObject->PrivateCacheMap == NULL.
2370 if ( FileObject
->PrivateCacheMap
== NULL
) {
2372 DebugTrace(0, Dbg
, "Initialize cache mapping.\n", 0);
2375 // Get the file allocation size, and if it is less than
2376 // the file size, raise file corrupt error.
2379 if (FcbOrDcb
->Header
.AllocationSize
.QuadPart
== FCB_LOOKUP_ALLOCATIONSIZE_HINT
) {
2381 FatLookupFileAllocationSize( IrpContext
, FcbOrDcb
);
2384 if ( FileSize
> FcbOrDcb
->Header
.AllocationSize
.LowPart
) {
2386 FatPopUpFileCorrupt( IrpContext
, FcbOrDcb
);
2388 FatRaiseStatus( IrpContext
, STATUS_FILE_CORRUPT_ERROR
);
2392 // Now initialize the cache map.
2395 FatInitializeCacheMap( FileObject
,
2396 (PCC_FILE_SIZES
)&FcbOrDcb
->Header
.AllocationSize
,
2398 &FatData
.CacheManagerCallbacks
,
2401 CcSetReadAheadGranularity( FileObject
, READ_AHEAD_GRANULARITY
);
2404 // Special case large floppy transfers, and make the file
2405 // object write through. For small floppy transfers,
2406 // set a timer to go off in a second and flush the file.
2410 if (!FlagOn( FileObject
->Flags
, FO_WRITE_THROUGH
) &&
2411 FlagOn(Vcb
->VcbState
, VCB_STATE_FLAG_DEFERRED_FLUSH
)) {
2413 if (((StartingByte
.LowPart
& (PAGE_SIZE
-1)) == 0) &&
2414 (ByteCount
>= PAGE_SIZE
)) {
2416 SetFlag( FileObject
->Flags
, FO_WRITE_THROUGH
);
2420 LARGE_INTEGER OneSecondFromNow
;
2421 PDEFERRED_FLUSH_CONTEXT FlushContext
;
2424 // Get pool and initialize the timer and DPC
2428 FlushContext
= FsRtlAllocatePoolWithTag( NonPagedPoolNx
,
2430 FlushContext
= FsRtlAllocatePoolWithTag( NonPagedPool
,
2432 sizeof(DEFERRED_FLUSH_CONTEXT
),
2433 TAG_DEFERRED_FLUSH_CONTEXT
);
2435 KeInitializeTimer( &FlushContext
->Timer
);
2437 KeInitializeDpc( &FlushContext
->Dpc
,
2438 FatDeferredFlushDpc
,
2443 // We have to reference the file object here.
2446 ObReferenceObject( FileObject
);
2448 FlushContext
->File
= FileObject
;
2454 OneSecondFromNow
.QuadPart
= (LONG
)-1*1000*1000*10;
2456 KeSetTimer( &FlushContext
->Timer
,
2458 &FlushContext
->Dpc
);
2464 // If this write is beyond valid data length, then we
2465 // must zero the data in between.
2468 if ( StartingVbo
> ValidDataToCheck
) {
2471 // Call the Cache Manager to zero the data.
2474 if (!FatZeroData( IrpContext
,
2478 StartingVbo
- ValidDataToCheck
)) {
2480 DebugTrace( 0, Dbg
, "Cached Write could not wait to zero\n", 0 );
2482 try_return( PostIrp
= TRUE
);
2486 WriteFileSizeToDirent
= BooleanFlagOn(IrpContext
->Flags
,
2487 IRP_CONTEXT_FLAG_WRITE_THROUGH
);
2491 // DO A NORMAL CACHED WRITE, if the MDL bit is not set,
2494 if (!FlagOn(IrpContext
->MinorFunction
, IRP_MN_MDL
)) {
2496 DebugTrace(0, Dbg
, "Cached write.\n", 0);
2499 // Get hold of the user's buffer.
2502 SystemBuffer
= FatMapUserBuffer( IrpContext
, Irp
);
2505 // Do the write, possibly writing through
2508 #if (NTDDI_VERSION >= NTDDI_WIN8)
2509 if (!CcCopyWriteEx( FileObject
,
2514 Irp
->Tail
.Overlay
.Thread
)) {
2516 if (!CcCopyWrite( FileObject
,
2523 DebugTrace( 0, Dbg
, "Cached Write could not wait\n", 0 );
2525 try_return( PostIrp
= TRUE
);
2528 Irp
->IoStatus
.Status
= STATUS_SUCCESS
;
2529 Irp
->IoStatus
.Information
= ByteCount
;
2531 try_return( Status
= STATUS_SUCCESS
);
2539 DebugTrace(0, Dbg
, "MDL write.\n", 0);
2543 CcPrepareMdlWrite( FileObject
,
2549 Status
= Irp
->IoStatus
.Status
;
2551 try_return( Status
);
2557 // These two cases correspond to a system write directory file and
2561 if (( TypeOfOpen
== DirectoryFile
) || ( TypeOfOpen
== EaFile
)
2567 if ( TypeOfOpen
== DirectoryFile
) {
2568 DebugTrace(0, Dbg
, "Type of write is directoryfile\n", 0);
2569 } else if ( TypeOfOpen
== EaFile
) {
2570 DebugTrace(0, Dbg
, "Type of write is eafile\n", 0);
2575 // Make sure the FcbOrDcb is still good
2578 FatVerifyFcb( IrpContext
, FcbOrDcb
);
2581 // Synchronize here with people deleting directories and
2582 // mucking with the internals of the EA file.
2585 if (!ExAcquireSharedStarveExclusive( FcbOrDcb
->Header
.PagingIoResource
,
2588 DebugTrace( 0, Dbg
, "Cannot acquire FcbOrDcb = %p shared without waiting\n", FcbOrDcb
);
2590 try_return( PostIrp
= TRUE
);
2593 PagingIoResourceAcquired
= TRUE
;
2597 IrpContext
->FatIoContext
->Wait
.Async
.Resource
=
2598 FcbOrDcb
->Header
.PagingIoResource
;
2602 // Check to see if we collided with a MoveFile call, and if
2603 // so block until it completes.
2606 if (FcbOrDcb
->MoveFileEvent
) {
2608 (VOID
)KeWaitForSingleObject( FcbOrDcb
->MoveFileEvent
,
2616 // If we weren't called by the Lazy Writer, then this write
2617 // must be the result of a write-through or flush operation.
2618 // Setting the IrpContext flag, will cause DevIoSup.c to
2619 // write-through the data to the disk.
2622 if (!FlagOn((ULONG_PTR
)IoGetTopLevelIrp(), FSRTL_CACHE_TOP_LEVEL_IRP
)) {
2624 SetFlag( IrpContext
->Flags
, IRP_CONTEXT_FLAG_WRITE_THROUGH
);
2628 // For the noncached case, assert that everything is sector
2633 #pragma prefast( suppress:28931, "needed for debug build" )
2635 SectorSize
= (ULONG
)Vcb
->Bpb
.BytesPerSector
;
2638 // We make several assumptions about these two types of files.
2639 // Make sure all of them are true.
2642 NT_ASSERT( NonCachedIo
&& PagingIo
);
2643 NT_ASSERT( ((StartingVbo
| ByteCount
) & (SectorSize
- 1)) == 0 );
2647 // These calls must always be within the allocation size, which is
2648 // conveniently the same as filesize, which conveniently doesn't
2649 // get reset to a hint value when we verify the volume.
2652 if (StartingVbo
>= FcbOrDcb
->Header
.FileSize
.LowPart
) {
2654 DebugTrace( 0, Dbg
, "PagingIo dirent started beyond EOF.\n", 0 );
2656 Irp
->IoStatus
.Information
= 0;
2658 try_return( Status
= STATUS_SUCCESS
);
2661 if ( StartingVbo
+ ByteCount
> FcbOrDcb
->Header
.FileSize
.LowPart
) {
2663 DebugTrace( 0, Dbg
, "PagingIo dirent extending beyond EOF.\n", 0 );
2664 ByteCount
= FcbOrDcb
->Header
.FileSize
.LowPart
- StartingVbo
;
2669 // Perform the actual IO
2672 if (FatNonCachedIo( IrpContext
,
2678 0 ) == STATUS_PENDING
) {
2680 IrpContext
->FatIoContext
= NULL
;
2684 try_return( Status
= STATUS_PENDING
);
2688 // The transfer is either complete, or the Iosb contains the
2689 // appropriate status.
2691 // Also, mark the volume as needing verification to automatically
2695 if (!NT_SUCCESS( Status
= Irp
->IoStatus
.Status
)) {
2697 FatNormalizeAndRaiseStatus( IrpContext
, Status
);
2700 try_return( Status
);
2704 // This is the case of a user who opened a directory. No writing is
2708 if ( TypeOfOpen
== UserDirectoryOpen
) {
2710 DebugTrace( 0, Dbg
, "FatCommonWrite -> STATUS_INVALID_PARAMETER\n", 0);
2712 try_return( Status
= STATUS_INVALID_PARAMETER
);
2716 // If we get this far, something really serious is wrong.
2719 DebugDump("Illegal TypeOfOpen\n", 0, FcbOrDcb
);
2722 #pragma prefast( suppress:28159, "things are seriously wrong if we get here" )
2724 FatBugCheck( TypeOfOpen
, (ULONG_PTR
) FcbOrDcb
, 0 );
2730 // If the request was not posted and there is still an Irp,
2738 ULONG ActualBytesWrote
;
2740 DebugTrace( 0, Dbg
, "Completing request with status = %08lx\n",
2743 DebugTrace( 0, Dbg
, " Information = %08lx\n",
2744 Irp
->IoStatus
.Information
);
2747 // Record the total number of bytes actually written
2750 ActualBytesWrote
= (ULONG
)Irp
->IoStatus
.Information
;
2753 // If the file was opened for Synchronous IO, update the current
2757 if (SynchronousIo
&& !PagingIo
) {
2759 FileObject
->CurrentByteOffset
.LowPart
=
2760 StartingVbo
+ (NT_ERROR( Status
) ? 0 : ActualBytesWrote
);
2764 // The following are things we only do if we were successful
2767 if ( NT_SUCCESS( Status
) ) {
2770 // If this was not PagingIo, mark that the modify
2771 // time on the dirent needs to be updated on close.
2776 SetFlag( FileObject
->Flags
, FO_FILE_MODIFIED
);
2780 // If we extended the file size and we are meant to
2781 // immediately update the dirent, do so. (This flag is
2782 // set for either Write Through or noncached, because
2783 // in either case the data and any necessary zeros are
2784 // actually written to the file.)
2787 if ( ExtendingFile
&& WriteFileSizeToDirent
) {
2789 NT_ASSERT( FileObject
->DeleteAccess
|| FileObject
->WriteAccess
);
2791 FatSetFileSizeInDirent( IrpContext
, FcbOrDcb
, NULL
);
2794 // Report that a file size has changed.
2797 FatNotifyReportChange( IrpContext
,
2800 FILE_NOTIFY_CHANGE_SIZE
,
2801 FILE_ACTION_MODIFIED
);
2804 if ( ExtendingFile
&& !WriteFileSizeToDirent
) {
2806 SetFlag( FileObject
->Flags
, FO_FILE_SIZE_CHANGED
);
2809 if ( ExtendingValidData
) {
2811 ULONG EndingVboWritten
= StartingVbo
+ ActualBytesWrote
;
2814 // Never set a ValidDataLength greater than FileSize.
2817 if ( FileSize
< EndingVboWritten
) {
2819 FcbOrDcb
->Header
.ValidDataLength
.LowPart
= FileSize
;
2823 FcbOrDcb
->Header
.ValidDataLength
.LowPart
= EndingVboWritten
;
2827 // Now, if we are noncached and the file is cached, we must
2828 // tell the cache manager about the VDL extension so that
2829 // async cached IO will not be optimized into zero-page faults
2830 // beyond where it believes VDL is.
2832 // In the cached case, since Cc did the work, it has updated
2836 if (NonCachedIo
&& CcIsFileCached(FileObject
)) {
2837 CcSetFileSizes( FileObject
, (PCC_FILE_SIZES
)&FcbOrDcb
->Header
.AllocationSize
);
2844 // Note that we have to unpin repinned Bcbs here after the above
2845 // work, but if we are going to post the request, we must do this
2846 // before the post (below).
2849 FatUnpinRepinnedBcbs( IrpContext
);
2854 // Take action if the Oplock package is not going to post the Irp.
2857 if (!OplockPostIrp
) {
2859 FatUnpinRepinnedBcbs( IrpContext
);
2861 if ( ExtendingFile
) {
2864 // We need the PagingIo resource exclusive whenever we
2865 // pull back either file size or valid data length.
2868 NT_ASSERT( FcbOrDcb
->Header
.PagingIoResource
!= NULL
);
2870 (VOID
)ExAcquireResourceExclusiveLite(FcbOrDcb
->Header
.PagingIoResource
, TRUE
);
2872 FcbOrDcb
->Header
.FileSize
.LowPart
= InitialFileSize
;
2874 NT_ASSERT( FcbOrDcb
->Header
.FileSize
.LowPart
<= FcbOrDcb
->Header
.AllocationSize
.LowPart
);
2877 // Pull back the cache map as well
2880 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
) {
2882 *CcGetFileSizePointer(FileObject
) = FcbOrDcb
->Header
.FileSize
;
2885 ExReleaseResourceLite( FcbOrDcb
->Header
.PagingIoResource
);
2888 DebugTrace( 0, Dbg
, "Passing request to Fsp\n", 0 );
2890 Status
= FatFsdPostRequest(IrpContext
, Irp
);
2897 DebugUnwind( FatCommonWrite
);
2899 if (_SEH2_AbnormalTermination()) {
2902 // Restore initial file size and valid data length
2905 if (ExtendingFile
|| ExtendingValidData
) {
2908 // We got an error, pull back the file size if we extended it.
2911 FcbOrDcb
->Header
.FileSize
.LowPart
= InitialFileSize
;
2912 FcbOrDcb
->Header
.ValidDataLength
.LowPart
= InitialValidDataLength
;
2914 NT_ASSERT( FcbOrDcb
->Header
.FileSize
.LowPart
<= FcbOrDcb
->Header
.AllocationSize
.LowPart
);
2917 // Pull back the cache map as well
2920 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
) {
2922 *CcGetFileSizePointer(FileObject
) = FcbOrDcb
->Header
.FileSize
;
2928 // Check if this needs to be backed out.
2931 if (UnwindOutstandingAsync
) {
2933 ExInterlockedAddUlong( &FcbOrDcb
->NonPaged
->OutstandingAsyncWrites
,
2935 &FatData
.GeneralSpinLock
);
2939 // If the FcbOrDcb has been acquired, release it.
2942 if (FcbOrDcbAcquired
&& Irp
) {
2944 FatReleaseFcb( NULL
, FcbOrDcb
);
2947 if (PagingIoResourceAcquired
&& Irp
) {
2949 ExReleaseResourceLite( FcbOrDcb
->Header
.PagingIoResource
);
2953 // Complete the request if we didn't post it and no exception
2955 // Note that FatCompleteRequest does the right thing if either
2956 // IrpContext or Irp are NULL
2959 if ( !PostIrp
&& !_SEH2_AbnormalTermination() ) {
2961 FatCompleteRequest( IrpContext
, Irp
, Status
);
2964 DebugTrace(-1, Dbg
, "FatCommonWrite -> %08lx\n", Status
);
2972 // Local support routine
2977 FatDeferredFlushDpc (
2979 _In_opt_ PVOID DeferredContext
,
2980 _In_opt_ PVOID SystemArgument1
,
2981 _In_opt_ PVOID SystemArgument2
2986 Routine Description:
2988 This routine is dispatched 1 second after a small write to a deferred
2989 write device that initialized the cache map. It exqueues an executive
2990 worker thread to perform the actual task of flushing the file.
2994 DeferredContext - Contains the deferred flush context.
3003 PDEFERRED_FLUSH_CONTEXT FlushContext
;
3005 UNREFERENCED_PARAMETER( SystemArgument1
);
3006 UNREFERENCED_PARAMETER( SystemArgument2
);
3007 UNREFERENCED_PARAMETER( Dpc
);
3009 FlushContext
= (PDEFERRED_FLUSH_CONTEXT
)DeferredContext
;
3015 ExInitializeWorkItem( &FlushContext
->Item
,
3020 #pragma prefast( suppress:28159, "prefast indicates this API is obsolete, but it's ok for fastfat to keep using it" )
3022 ExQueueWorkItem( &FlushContext
->Item
, CriticalWorkQueue
);
3027 // Local support routine
3033 _In_ PVOID Parameter
3038 Routine Description:
3040 This routine performs the actual task of flushing the file.
3044 DeferredContext - Contains the deferred flush context.
3061 File
= ((PDEFERRED_FLUSH_CONTEXT
)Parameter
)->File
;
3063 FatDecodeFileObject(File
, &Vcb
, &FcbOrDcb
, &Ccb
);
3064 NT_ASSERT( FcbOrDcb
!= NULL
);
3067 // Make us appear as a top level FSP request so that we will
3068 // receive any errors from the flush.
3071 IoSetTopLevelIrp( (PIRP
)FSRTL_FSP_TOP_LEVEL_IRP
);
3073 ExAcquireResourceExclusiveLite( FcbOrDcb
->Header
.Resource
, TRUE
);
3074 ExAcquireResourceSharedLite( FcbOrDcb
->Header
.PagingIoResource
, TRUE
);
3076 CcFlushCache( File
->SectionObjectPointer
, NULL
, 0, NULL
);
3078 ExReleaseResourceLite( FcbOrDcb
->Header
.PagingIoResource
);
3079 ExReleaseResourceLite( FcbOrDcb
->Header
.Resource
);
3081 IoSetTopLevelIrp( NULL
);
3083 ObDereferenceObject( File
);
3085 ExFreePool( Parameter
);