[FASTFAT] Implement delayed close
[reactos.git] / drivers / filesystems / fastfat_new / write.c
1 /*++
2
3 Copyright (c) 1989-2000 Microsoft Corporation
4
5 Module Name:
6
7 Write.c
8
9 Abstract:
10
    This module implements the File Write routine, FatFsdWrite, that is
    called by the I/O dispatch driver for IRP_MJ_WRITE requests.
13
14
15 --*/
16
17 #include "fatprocs.h"
18
19 //
20 // The Bug check file id for this module
21 //
22
23 #define BugCheckFileId (FAT_BUG_CHECK_WRITE)
24
25 //
26 // The local debug trace level
27 //
28
29 #define Dbg (DEBUG_TRACE_WRITE)
30
//
// Macro to increment the appropriate performance counters.
//
// Statistics are kept in per-processor slots (indexed by the current
// processor number modulo the number of counted processors) so concurrent
// writers do not contend on one shared counter block. Only user file
// writes and metadata writes (virtual volume file / directory file) are
// counted; other open types fall through uncounted.
//
// The body is wrapped in do { ... } while (0) so the macro expands to a
// single statement and composes safely with if/else at any call site
// (the previous bare-brace form would break "if (x) CollectWriteStats(...);
// else ..." constructs).
//

#define CollectWriteStats(VCB,OPEN_TYPE,BYTE_COUNT) do {                                                                \
    PFILESYSTEM_STATISTICS Stats = &(VCB)->Statistics[KeGetCurrentProcessorNumber() % FatData.NumberProcessors].Common; \
    if ((OPEN_TYPE) == UserFileOpen) {                                                                                  \
        Stats->UserFileWrites += 1;                                                                                     \
        Stats->UserFileWriteBytes += (ULONG)(BYTE_COUNT);                                                               \
    } else if (((OPEN_TYPE) == VirtualVolumeFile) || ((OPEN_TYPE) == DirectoryFile)) {                                  \
        Stats->MetaDataWrites += 1;                                                                                     \
        Stats->MetaDataWriteBytes += (ULONG)(BYTE_COUNT);                                                               \
    }                                                                                                                   \
} while (0)
45
//
// NOTE(review): no reader or writer of this flag appears in this module;
// presumably a debug switch consumed by the device I/O support code to
// force otherwise-asynchronous transfers down the synchronous path —
// confirm against deviosup.c.
//

BOOLEAN FatNoAsync = FALSE;

//
// Local support routines
//

//
// DPC callback for the deferred-flush timer. As a KDEFERRED_ROUTINE it
// runs at DISPATCH_LEVEL, so it cannot touch pageable data or do the
// flush itself. NOTE(review): definition is not in this chunk; it most
// likely just queues FatDeferredFlush (below) to a worker thread —
// confirm at the definition site.
//

KDEFERRED_ROUTINE FatDeferredFlushDpc;

VOID
NTAPI
FatDeferredFlushDpc (
    _In_ PKDPC Dpc,
    _In_opt_ PVOID DeferredContext,
    _In_opt_ PVOID SystemArgument1,
    _In_opt_ PVOID SystemArgument2
    );

//
// Worker-thread routine that performs the actual deferred flush work.
// It is placed in pageable code by the alloc_text pragma below, so it
// must only run at PASSIVE_LEVEL (which WORKER_THREAD_ROUTINEs do).
//

WORKER_THREAD_ROUTINE FatDeferredFlush;

VOID
NTAPI
FatDeferredFlush (
    _In_ PVOID Parameter
    );

//
// FatDeferredFlush and FatCommonWrite run at PASSIVE_LEVEL only and may
// therefore live in the pageable code section.
//

#ifdef ALLOC_PRAGMA
#pragma alloc_text(PAGE, FatDeferredFlush)
#pragma alloc_text(PAGE, FatCommonWrite)
#endif
75
76 \f
_Function_class_(IRP_MJ_WRITE)
_Function_class_(DRIVER_DISPATCH)
NTSTATUS
NTAPI
FatFsdWrite (
    _In_ PVOLUME_DEVICE_OBJECT VolumeDeviceObject,
    _Inout_ PIRP Irp
    )

/*++

Routine Description:

    This routine implements the FSD part of the NtWriteFile API call.

    Paging-file writes are fast-pathed straight to FatPagingFileIo and
    completed asynchronously.  Mdl-complete requests go to FatCompleteMdl.
    Everything else goes through FatCommonWrite, wrapped in the standard
    exception filter so expected failures complete the Irp with an error
    status instead of propagating.

Arguments:

    VolumeDeviceObject - Supplies the volume device object where the
        file being written exists

    Irp - Supplies the Irp being processed

Return Value:

    NTSTATUS - The FSD status for the IRP

--*/

{
    PFCB Fcb;
    NTSTATUS Status;
    PIRP_CONTEXT IrpContext = NULL;

    BOOLEAN ModWriter = FALSE;
    BOOLEAN TopLevel = FALSE;

    DebugTrace(+1, Dbg, "FatFsdWrite\n", 0);

    //
    // Call the common Write routine, with blocking allowed if synchronous.
    // Every exit path below must pass through FsRtlExitFileSystem.
    //

    FsRtlEnterFileSystem();

    //
    // We are first going to do a quick check for paging file IO. Since this
    // is a fast path, we must replicate the check for the fsdo here: a
    // request aimed at the file system device object itself must not be
    // treated as paging-file IO (its FsContext is not an Fcb).
    //

    if (!FatDeviceIsFatFsdo( IoGetCurrentIrpStackLocation(Irp)->DeviceObject)) {

        Fcb = (PFCB)(IoGetCurrentIrpStackLocation(Irp)->FileObject->FsContext);

        if ((NodeType(Fcb) == FAT_NTC_FCB) &&
            FlagOn(Fcb->FcbState, FCB_STATE_PAGING_FILE)) {

            //
            // Do the usual STATUS_PENDING things. The Irp must be marked
            // pending before the IO is launched, because it completes
            // asynchronously once the IO finishes.
            //

            IoMarkIrpPending( Irp );

            //
            // Perform the actual IO, it will be completed when the io finishes.
            //

            FatPagingFileIo( Irp, Fcb );

            FsRtlExitFileSystem();

            return STATUS_PENDING;
        }
    }

    _SEH2_TRY {

        TopLevel = FatIsIrpTopLevel( Irp );

        IrpContext = FatCreateIrpContext( Irp, CanFsdWait( Irp ) );

        //
        // This is a kludge for the mod writer case. The correct state
        // of recursion is set in IrpContext, however, we muck with the
        // actual top level Irp field to get the correct WriteThrough
        // behaviour. The original marker is restored below, after the
        // try/except, before we leave.
        //

        if (IoGetTopLevelIrp() == (PIRP)FSRTL_MOD_WRITE_TOP_LEVEL_IRP) {

            ModWriter = TRUE;

            IoSetTopLevelIrp( Irp );
        }

        //
        // If this is an Mdl complete request, don't go through
        // common write.
        //

        if (FlagOn( IrpContext->MinorFunction, IRP_MN_COMPLETE )) {

            DebugTrace(0, Dbg, "Calling FatCompleteMdl\n", 0 );
            Status = FatCompleteMdl( IrpContext, Irp );

        } else {

            Status = FatCommonWrite( IrpContext, Irp );
        }

    } _SEH2_EXCEPT(FatExceptionFilter( IrpContext, _SEH2_GetExceptionInformation() )) {

        //
        // We had some trouble trying to perform the requested
        // operation, so we'll abort the I/O request with
        // the error status that we get back from the
        // exception code
        //

        Status = FatProcessException( IrpContext, Irp, _SEH2_GetExceptionCode() );
    } _SEH2_END;

    // NT_ASSERT( !(ModWriter && (Status == STATUS_CANT_WAIT)) );

    //
    // Restore the top-level Irp state we may have modified above. The
    // two cases are mutually exclusive (asserted): if the mod writer
    // marker was present we could not also have become the top level.
    //

    NT_ASSERT( !(ModWriter && TopLevel) );

    if (ModWriter) { IoSetTopLevelIrp((PIRP)FSRTL_MOD_WRITE_TOP_LEVEL_IRP); }

    if (TopLevel) { IoSetTopLevelIrp( NULL ); }

    FsRtlExitFileSystem();

    //
    // And return to our caller
    //

    DebugTrace(-1, Dbg, "FatFsdWrite -> %08lx\n", Status);

    UNREFERENCED_PARAMETER( VolumeDeviceObject );

    return Status;
}
218
219 \f
220 _Requires_lock_held_(_Global_critical_region_)
221 NTSTATUS
222 FatCommonWrite (
223 IN PIRP_CONTEXT IrpContext,
224 IN PIRP Irp
225 )
226
227 /*++
228
229 Routine Description:
230
231 This is the common write routine for NtWriteFile, called from both
232 the Fsd, or from the Fsp if a request could not be completed without
233 blocking in the Fsd. This routine's actions are
234 conditionalized by the Wait input parameter, which determines whether
235 it is allowed to block or not. If a blocking condition is encountered
236 with Wait == FALSE, however, the request is posted to the Fsp, who
237 always calls with WAIT == TRUE.
238
239 Arguments:
240
241 Irp - Supplies the Irp to process
242
243 Return Value:
244
245 NTSTATUS - The return status for the operation
246
247 --*/
248
249 {
250 PVCB Vcb;
251 PFCB FcbOrDcb;
252 PCCB Ccb;
253
254 VBO StartingVbo;
255 ULONG ByteCount;
256 ULONG FileSize = 0;
257 ULONG InitialFileSize = 0;
258 ULONG InitialValidDataLength = 0;
259
260 PIO_STACK_LOCATION IrpSp;
261 PFILE_OBJECT FileObject;
262 TYPE_OF_OPEN TypeOfOpen;
263
264 BOOLEAN PostIrp = FALSE;
265 BOOLEAN OplockPostIrp = FALSE;
266 BOOLEAN ExtendingFile = FALSE;
267 BOOLEAN FcbOrDcbAcquired = FALSE;
268 BOOLEAN SwitchBackToAsync = FALSE;
269 BOOLEAN CalledByLazyWriter = FALSE;
270 BOOLEAN ExtendingValidData = FALSE;
271 BOOLEAN FcbAcquiredExclusive = FALSE;
272 BOOLEAN FcbCanDemoteToShared = FALSE;
273 BOOLEAN WriteFileSizeToDirent = FALSE;
274 BOOLEAN RecursiveWriteThrough = FALSE;
275 BOOLEAN UnwindOutstandingAsync = FALSE;
276 BOOLEAN PagingIoResourceAcquired = FALSE;
277 BOOLEAN SuccessfulPurge = FALSE;
278
279 BOOLEAN SynchronousIo;
280 BOOLEAN WriteToEof;
281 BOOLEAN PagingIo;
282 BOOLEAN NonCachedIo;
283 BOOLEAN Wait;
284 NTSTATUS Status = STATUS_SUCCESS;
285
286 FAT_IO_CONTEXT StackFatIoContext;
287
288 //
289 // A system buffer is only used if we have to access the buffer directly
290 // from the Fsp to clear a portion or to do a synchronous I/O, or a
291 // cached transfer. It is possible that our caller may have already
292 // mapped a system buffer, in which case we must remember this so
293 // we do not unmap it on the way out.
294 //
295
296 PVOID SystemBuffer = (PVOID) NULL;
297
298 LARGE_INTEGER StartingByte;
299
300 PAGED_CODE();
301
302 //
303 // Get current Irp stack location and file object
304 //
305
306 IrpSp = IoGetCurrentIrpStackLocation( Irp );
307 FileObject = IrpSp->FileObject;
308
309
310 DebugTrace(+1, Dbg, "FatCommonWrite\n", 0);
311 DebugTrace( 0, Dbg, "Irp = %p\n", Irp);
312 DebugTrace( 0, Dbg, "ByteCount = %8lx\n", IrpSp->Parameters.Write.Length);
313 DebugTrace( 0, Dbg, "ByteOffset.LowPart = %8lx\n", IrpSp->Parameters.Write.ByteOffset.LowPart);
314 DebugTrace( 0, Dbg, "ByteOffset.HighPart = %8lx\n", IrpSp->Parameters.Write.ByteOffset.HighPart);
315
316 //
317 // Initialize the appropriate local variables.
318 //
319
320 Wait = BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
321 PagingIo = BooleanFlagOn(Irp->Flags, IRP_PAGING_IO);
322 NonCachedIo = BooleanFlagOn(Irp->Flags,IRP_NOCACHE);
323 SynchronousIo = BooleanFlagOn(FileObject->Flags, FO_SYNCHRONOUS_IO);
324
325 //NT_ASSERT( PagingIo || FileObject->WriteAccess );
326
327 //
328 // Extract the bytecount and do our noop/throttle checking.
329 //
330
331 ByteCount = IrpSp->Parameters.Write.Length;
332
333 //
334 // If there is nothing to write, return immediately.
335 //
336
337 if (ByteCount == 0) {
338
339 Irp->IoStatus.Information = 0;
340 FatCompleteRequest( IrpContext, Irp, STATUS_SUCCESS );
341 return STATUS_SUCCESS;
342 }
343
344 //
345 // See if we have to defer the write.
346 //
347
348 if (!NonCachedIo &&
349 !CcCanIWrite(FileObject,
350 ByteCount,
351 (BOOLEAN)(Wait && !BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_IN_FSP)),
352 BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_DEFERRED_WRITE))) {
353
354 BOOLEAN Retrying = BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_DEFERRED_WRITE);
355
356 FatPrePostIrp( IrpContext, Irp );
357
358 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_DEFERRED_WRITE );
359
360 CcDeferWrite( FileObject,
361 (PCC_POST_DEFERRED_WRITE)FatAddToWorkque,
362 IrpContext,
363 Irp,
364 ByteCount,
365 Retrying );
366
367 return STATUS_PENDING;
368 }
369
370 //
371 // Determine our starting position and type. If we are writing
372 // at EOF, then we will need additional synchronization before
373 // the IO is issued to determine where the data will go.
374 //
375
376 StartingByte = IrpSp->Parameters.Write.ByteOffset;
377 StartingVbo = StartingByte.LowPart;
378
379 WriteToEof = ( (StartingByte.LowPart == FILE_WRITE_TO_END_OF_FILE) &&
380 (StartingByte.HighPart == -1) );
381
382 //
383 // Extract the nature of the write from the file object, and case on it
384 //
385
386 TypeOfOpen = FatDecodeFileObject(FileObject, &Vcb, &FcbOrDcb, &Ccb);
387
388 NT_ASSERT( Vcb != NULL );
389
390 //
391 // Save callers who try to do cached IO to the raw volume from themselves.
392 //
393
394 if (TypeOfOpen == UserVolumeOpen) {
395
396 NonCachedIo = TRUE;
397 }
398
399 NT_ASSERT(!(NonCachedIo == FALSE && TypeOfOpen == VirtualVolumeFile));
400
401 //
402 // Collect interesting statistics. The FLAG_USER_IO bit will indicate
403 // what type of io we're doing in the FatNonCachedIo function.
404 //
405
406 if (PagingIo) {
407 CollectWriteStats(Vcb, TypeOfOpen, ByteCount);
408
409 if (TypeOfOpen == UserFileOpen) {
410 SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_USER_IO);
411 } else {
412 ClearFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_USER_IO);
413 }
414 }
415
416 //
417 // We must disallow writes to regular objects that would require us
418 // to maintain an AllocationSize of greater than 32 significant bits.
419 //
420 // If this is paging IO, this is simply a case where we need to trim.
421 // This will occur in due course.
422 //
423
424 if (!PagingIo && !WriteToEof && (TypeOfOpen != UserVolumeOpen)) {
425
426
427 if (!FatIsIoRangeValid( Vcb, StartingByte, ByteCount)) {
428
429
430 Irp->IoStatus.Information = 0;
431 FatCompleteRequest( IrpContext, Irp, STATUS_DISK_FULL );
432
433 return STATUS_DISK_FULL;
434 }
435 }
436
437 //
438 // Allocate if necessary and initialize a FAT_IO_CONTEXT block for
439 // all non cached Io. For synchronous Io
440 // we use stack storage, otherwise we allocate pool.
441 //
442
443 if (NonCachedIo) {
444
445 if (IrpContext->FatIoContext == NULL) {
446
447 if (!Wait) {
448
449 IrpContext->FatIoContext =
450 #ifndef __REACTOS__
451 FsRtlAllocatePoolWithTag( NonPagedPoolNx,
452 #else
453 FsRtlAllocatePoolWithTag( NonPagedPool,
454 #endif
455 sizeof(FAT_IO_CONTEXT),
456 TAG_FAT_IO_CONTEXT );
457
458 } else {
459
460 IrpContext->FatIoContext = &StackFatIoContext;
461
462 SetFlag( IrpContext->Flags, IRP_CONTEXT_STACK_IO_CONTEXT );
463 }
464 }
465
466 RtlZeroMemory( IrpContext->FatIoContext, sizeof(FAT_IO_CONTEXT) );
467
468 if (Wait) {
469
470 KeInitializeEvent( &IrpContext->FatIoContext->Wait.SyncEvent,
471 NotificationEvent,
472 FALSE );
473
474 } else {
475
476 if (PagingIo) {
477
478 IrpContext->FatIoContext->Wait.Async.ResourceThreadId =
479 ExGetCurrentResourceThread();
480
481 } else {
482
483 IrpContext->FatIoContext->Wait.Async.ResourceThreadId =
484 ((ULONG_PTR)IrpContext->FatIoContext) | 3;
485 }
486
487 IrpContext->FatIoContext->Wait.Async.RequestedByteCount =
488 ByteCount;
489
490 IrpContext->FatIoContext->Wait.Async.FileObject = FileObject;
491 }
492
493 }
494
495 //
496 // Check if this volume has already been shut down. If it has, fail
497 // this write request.
498 //
499
500 if ( FlagOn(Vcb->VcbState, VCB_STATE_FLAG_SHUTDOWN) ) {
501
502 Irp->IoStatus.Information = 0;
503 FatCompleteRequest( IrpContext, Irp, STATUS_TOO_LATE );
504 return STATUS_TOO_LATE;
505 }
506
507 //
508 // This case corresponds to a write of the volume file (only the first
509 // fat allowed, the other fats are written automatically in parallel).
510 //
511 // We use an Mcb keep track of dirty sectors. Actual entries are Vbos
512 // and Lbos (ie. bytes), though they are all added in sector chunks.
513 // Since Vbo == Lbo for the volume file, the Mcb entries
514 // alternate between runs of Vbo == Lbo, and holes (Lbo == 0). We use
515 // the prior to represent runs of dirty fat sectors, and the latter
516 // for runs of clean fat. Note that since the first part of the volume
517 // file (boot sector) is always clean (a hole), and an Mcb never ends in
518 // a hole, there must always be an even number of runs(entries) in the Mcb.
519 //
520 // The strategy is to find the first and last dirty run in the desired
521 // write range (which will always be a set of pages), and write from the
522 // former to the later. The may result in writing some clean data, but
523 // will generally be more efficient than writing each runs seperately.
524 //
525
526 if (TypeOfOpen == VirtualVolumeFile) {
527
528 LBO DirtyLbo;
529 LBO CleanLbo;
530
531 VBO DirtyVbo;
532 VBO StartingDirtyVbo;
533
534 ULONG DirtyByteCount;
535 ULONG CleanByteCount;
536
537 ULONG WriteLength;
538
539 BOOLEAN MoreDirtyRuns = TRUE;
540
541 IO_STATUS_BLOCK RaiseIosb;
542
543 DebugTrace(0, Dbg, "Type of write is Virtual Volume File\n", 0);
544
545 //
546 // If we can't wait we have to post this.
547 //
548
549 if (!Wait) {
550
551 DebugTrace( 0, Dbg, "Passing request to Fsp\n", 0 );
552
553 Status = FatFsdPostRequest(IrpContext, Irp);
554
555 return Status;
556 }
557
558 //
559 // If we weren't called by the Lazy Writer, then this write
560 // must be the result of a write-through or flush operation.
561 // Setting the IrpContext flag, will cause DevIoSup.c to
562 // write-through the data to the disk.
563 //
564
565 if (!FlagOn((ULONG_PTR)IoGetTopLevelIrp(), FSRTL_CACHE_TOP_LEVEL_IRP)) {
566
567 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WRITE_THROUGH );
568 }
569
570 //
571 // Assert an even number of entries in the Mcb, an odd number would
572 // mean that the Mcb is corrupt.
573 //
574
575 NT_ASSERT( (FsRtlNumberOfRunsInLargeMcb( &Vcb->DirtyFatMcb ) & 1) == 0);
576
577 //
578 // We need to skip over any clean sectors at the start of the write.
579 //
580 // Also check the two cases where there are no dirty fats in the
581 // desired write range, and complete them with success.
582 //
583 // 1) There is no Mcb entry corresponding to StartingVbo, meaning
584 // we are beyond the end of the Mcb, and thus dirty fats.
585 //
586 // 2) The run at StartingVbo is clean and continues beyond the
587 // desired write range.
588 //
589
590 if (!FatLookupMcbEntry( Vcb, &Vcb->DirtyFatMcb,
591 StartingVbo,
592 &DirtyLbo,
593 &DirtyByteCount,
594 NULL )
595
596 || ( (DirtyLbo == 0) && (DirtyByteCount >= ByteCount) ) ) {
597
598 DebugTrace(0, DEBUG_TRACE_DEBUG_HOOKS,
599 "No dirty fat sectors in the write range.\n", 0);
600
601 FatCompleteRequest( IrpContext, Irp, STATUS_SUCCESS );
602 return STATUS_SUCCESS;
603 }
604
605 DirtyVbo = (VBO)DirtyLbo;
606
607 //
608 // If the last run was a hole (clean), up DirtyVbo to the next
609 // run, which must be dirty.
610 //
611
612 if (DirtyVbo == 0) {
613
614 DirtyVbo = StartingVbo + DirtyByteCount;
615 }
616
617 //
618 // This is where the write will start.
619 //
620
621 StartingDirtyVbo = DirtyVbo;
622
623 //
624 //
625 // Now start enumerating the dirty fat sectors spanning the desired
626 // write range, this first one of which is now DirtyVbo.
627 //
628
629 while ( MoreDirtyRuns ) {
630
631 //
632 // Find the next dirty run, if it is not there, the Mcb ended
633 // in a hole, or there is some other corruption of the Mcb.
634 //
635
636 if (!FatLookupMcbEntry( Vcb, &Vcb->DirtyFatMcb,
637 DirtyVbo,
638 &DirtyLbo,
639 &DirtyByteCount,
640 NULL )) {
641
642 #ifdef _MSC_VER
643 #pragma prefast( suppress:28931, "needed for debug build" )
644 #endif
645 DirtyVbo = (VBO)DirtyLbo;
646
647 DebugTrace(0, Dbg, "Last dirty fat Mcb entry was a hole: corrupt.\n", 0);
648
649 #ifdef _MSC_VER
650 #pragma prefast( suppress:28159, "things are seriously wrong if we get here" )
651 #endif
652 FatBugCheck( 0, 0, 0 );
653
654 } else {
655
656 DirtyVbo = (VBO)DirtyLbo;
657
658 //
659 // This has to correspond to a dirty run, and must start
660 // within the write range since we check it at entry to,
661 // and at the bottom of this loop.
662 //
663
664 NT_ASSERT((DirtyVbo != 0) && (DirtyVbo < StartingVbo + ByteCount));
665
666 //
667 // There are three ways we can know that this was the
668 // last dirty run we want to write.
669 //
670 // 1) The current dirty run extends beyond or to the
671 // desired write range.
672 //
673 // 2) On trying to find the following clean run, we
674 // discover that this is the last run in the Mcb.
675 //
676 // 3) The following clean run extend beyond the
677 // desired write range.
678 //
679 // In any of these cases we set MoreDirtyRuns = FALSE.
680 //
681
682 //
683 // If the run is larger than we are writing, we also
684 // must truncate the WriteLength. This is benign in
685 // the equals case.
686 //
687
688 if (DirtyVbo + DirtyByteCount >= StartingVbo + ByteCount) {
689
690 DirtyByteCount = StartingVbo + ByteCount - DirtyVbo;
691
692 MoreDirtyRuns = FALSE;
693
694 } else {
695
696 //
697 // Scan the clean hole after this dirty run. If this
698 // run was the last, prepare to exit the loop
699 //
700
701 if (!FatLookupMcbEntry( Vcb, &Vcb->DirtyFatMcb,
702 DirtyVbo + DirtyByteCount,
703 &CleanLbo,
704 &CleanByteCount,
705 NULL )) {
706
707 MoreDirtyRuns = FALSE;
708
709 } else {
710
711 //
712 // Assert that we actually found a clean run.
713 // and compute the start of the next dirty run.
714 //
715
716 NT_ASSERT (CleanLbo == 0);
717
718 //
719 // If the next dirty run starts beyond the desired
720 // write, we have found all the runs we need, so
721 // prepare to exit.
722 //
723
724 if (DirtyVbo + DirtyByteCount + CleanByteCount >=
725 StartingVbo + ByteCount) {
726
727 MoreDirtyRuns = FALSE;
728
729 } else {
730
731 //
732 // Compute the start of the next dirty run.
733 //
734
735 DirtyVbo += DirtyByteCount + CleanByteCount;
736 }
737 }
738 }
739 }
740 } // while ( MoreDirtyRuns )
741
742 //
743 // At this point DirtyVbo and DirtyByteCount correctly reflect the
744 // final dirty run, constrained to the desired write range.
745 //
746 // Now compute the length we finally must write.
747 //
748
749 WriteLength = (DirtyVbo + DirtyByteCount) - StartingDirtyVbo;
750
751 //
752 // We must now assume that the write will complete with success,
753 // and initialize our expected status in RaiseIosb. It will be
754 // modified below if an error occurs.
755 //
756
757 RaiseIosb.Status = STATUS_SUCCESS;
758 RaiseIosb.Information = ByteCount;
759
760 //
761 // Loop through all the fats, setting up a multiple async to
762 // write them all. If there are more than FAT_MAX_PARALLEL_IOS
763 // then we do several muilple asyncs.
764 //
765
766 {
767 ULONG Fat;
768 ULONG BytesPerFat;
769 IO_RUN StackIoRuns[2];
770 PIO_RUN IoRuns;
771
772 BytesPerFat = FatBytesPerFat( &Vcb->Bpb );
773
774 if ((ULONG)Vcb->Bpb.Fats > 2) {
775
776 IoRuns = FsRtlAllocatePoolWithTag( PagedPool,
777 (ULONG)(Vcb->Bpb.Fats*sizeof(IO_RUN)),
778 TAG_IO_RUNS );
779
780 } else {
781
782 IoRuns = StackIoRuns;
783 }
784
785 for (Fat = 0; Fat < (ULONG)Vcb->Bpb.Fats; Fat++) {
786
787 IoRuns[Fat].Vbo = StartingDirtyVbo;
788 IoRuns[Fat].Lbo = Fat * BytesPerFat + StartingDirtyVbo;
789 IoRuns[Fat].Offset = StartingDirtyVbo - StartingVbo;
790 IoRuns[Fat].ByteCount = WriteLength;
791 }
792
793 //
794 // Keep track of meta-data disk ios.
795 //
796
797 Vcb->Statistics[KeGetCurrentProcessorNumber() % FatData.NumberProcessors].Common.MetaDataDiskWrites += Vcb->Bpb.Fats;
798
799 _SEH2_TRY {
800
801 FatMultipleAsync( IrpContext,
802 Vcb,
803 Irp,
804 (ULONG)Vcb->Bpb.Fats,
805 IoRuns );
806
807 } _SEH2_FINALLY {
808
809 if (IoRuns != StackIoRuns) {
810
811 ExFreePool( IoRuns );
812 }
813 } _SEH2_END;
814
815 #if (NTDDI_VERSION >= NTDDI_WIN8)
816
817 //
818 // Account for DASD Ios
819 //
820
821 if (FatDiskAccountingEnabled) {
822
823 PETHREAD ThreadIssuingIo = PsGetCurrentThread();
824
825 PsUpdateDiskCounters( PsGetThreadProcess( ThreadIssuingIo ),
826 0,
827 WriteLength,
828 0,
829 1,
830 0 );
831 }
832
833 #endif
834 //
835 // Wait for all the writes to finish
836 //
837
838 FatWaitSync( IrpContext );
839
840 //
841 // If we got an error, or verify required, remember it.
842 //
843
844 if (!NT_SUCCESS( Irp->IoStatus.Status )) {
845
846 DebugTrace( 0,
847 Dbg,
848 "Error %X while writing volume file.\n",
849 Irp->IoStatus.Status );
850
851 RaiseIosb = Irp->IoStatus;
852 }
853 }
854
855 //
856 // If the writes were a success, set the sectors clean, else
857 // raise the error status and mark the volume as needing
858 // verification. This will automatically reset the volume
859 // structures.
860 //
861 // If not, then mark this volume as needing verification to
862 // automatically cause everything to get cleaned up.
863 //
864
865 Irp->IoStatus = RaiseIosb;
866
867 if ( NT_SUCCESS( Status = Irp->IoStatus.Status )) {
868
869 FatRemoveMcbEntry( Vcb, &Vcb->DirtyFatMcb,
870 StartingDirtyVbo,
871 WriteLength );
872
873 } else {
874
875 FatNormalizeAndRaiseStatus( IrpContext, Status );
876 }
877
878 DebugTrace(-1, Dbg, "CommonWrite -> %08lx\n", Status );
879
880 FatCompleteRequest( IrpContext, Irp, Status );
881 return Status;
882 }
883 \f
884 //
885 // This case corresponds to a general opened volume (DASD), ie.
886 // open ("a:").
887 //
888
889 if (TypeOfOpen == UserVolumeOpen) {
890
891 LBO StartingLbo;
892 LBO VolumeSize;
893
894 //
895 // Precalculate the volume size since we're nearly always going
896 // to be wanting to use it.
897 //
898
899 VolumeSize = (LBO) Int32x32To64( Vcb->Bpb.BytesPerSector,
900 (Vcb->Bpb.Sectors != 0 ? Vcb->Bpb.Sectors :
901 Vcb->Bpb.LargeSectors));
902
903 StartingLbo = StartingByte.QuadPart;
904
905 DebugTrace(0, Dbg, "Type of write is User Volume.\n", 0);
906
907 //
908 // If this is a write on a disk-based volume that is not locked, we need to limit
909 // the sectors we allow to be written within the volume. Specifically, we only
910 // allow writes to the reserved area. Note that extended DASD can still be used
911 // to write past the end of the volume. We also allow kernel mode callers to force
912 // access via a flag in the IRP. A handle that issued a dismount can write anywhere
913 // as well.
914 //
915
916 if ((Vcb->TargetDeviceObject->DeviceType == FILE_DEVICE_DISK) &&
917 !FlagOn( Vcb->VcbState, VCB_STATE_FLAG_LOCKED ) &&
918 !FlagOn( IrpSp->Flags, SL_FORCE_DIRECT_WRITE ) &&
919 !FlagOn( Ccb->Flags, CCB_FLAG_COMPLETE_DISMOUNT )) {
920
921 //
922 // First check for a write beyond the end of the volume.
923 //
924
925 if (!WriteToEof && (StartingLbo < VolumeSize)) {
926
927 //
928 // This write is within the volume. Make sure it is not beyond the reserved section.
929 //
930
931 if ((StartingLbo >= FatReservedBytes( &(Vcb->Bpb) )) ||
932 (ByteCount > (FatReservedBytes( &(Vcb->Bpb) ) - StartingLbo))) {
933
934 FatCompleteRequest( IrpContext, Irp, STATUS_ACCESS_DENIED );
935 return STATUS_ACCESS_DENIED;
936 }
937 }
938 }
939
940 //
941 // Verify that the volume for this handle is still valid, permitting
942 // operations to proceed on dismounted volumes via the handle which
943 // performed the dismount or sent a format unit command.
944 //
945
946 if (!FlagOn( Ccb->Flags, CCB_FLAG_COMPLETE_DISMOUNT | CCB_FLAG_SENT_FORMAT_UNIT )) {
947
948 FatQuickVerifyVcb( IrpContext, Vcb );
949 }
950
951 //
952 // If the caller previously sent a format unit command, then we will allow
953 // their read/write requests to ignore the verify flag on the device, since some
954 // devices send a media change event after format unit, but we don't want to
955 // process it yet since we're probably in the process of formatting the
956 // media.
957 //
958
959 if (FlagOn( Ccb->Flags, CCB_FLAG_SENT_FORMAT_UNIT )) {
960
961 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_OVERRIDE_VERIFY );
962 }
963
964 if (!FlagOn( Ccb->Flags, CCB_FLAG_DASD_PURGE_DONE )) {
965
966 BOOLEAN PreviousWait = BooleanFlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT );
967
968 //
969 // Grab the entire volume so that even the normally unsafe action
970 // of writing to an unlocked volume won't open us to a race between
971 // the flush and purge of the FAT below.
972 //
973 // I really don't think this is particularly important to worry about,
974 // but a repro case for another bug happens to dance into this race
975 // condition pretty easily. Eh.
976 //
977
978 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT );
979 FatAcquireExclusiveVolume( IrpContext, Vcb );
980
981 _SEH2_TRY {
982
983 //
984 // If the volume isn't locked, flush and purge it.
985 //
986
987 if (!FlagOn(Vcb->VcbState, VCB_STATE_FLAG_LOCKED)) {
988
989 FatFlushFat( IrpContext, Vcb );
990 CcPurgeCacheSection( &Vcb->SectionObjectPointers,
991 NULL,
992 0,
993 FALSE );
994
995 FatPurgeReferencedFileObjects( IrpContext, Vcb->RootDcb, Flush );
996 }
997
998 } _SEH2_FINALLY {
999
1000 FatReleaseVolume( IrpContext, Vcb );
1001 if (!PreviousWait) {
1002 ClearFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT );
1003 }
1004 } _SEH2_END;
1005
1006 SetFlag( Ccb->Flags, CCB_FLAG_DASD_PURGE_DONE |
1007 CCB_FLAG_DASD_FLUSH_DONE );
1008 }
1009
1010 if (!FlagOn( Ccb->Flags, CCB_FLAG_ALLOW_EXTENDED_DASD_IO )) {
1011
1012 //
1013 // Make sure we don't try to write past end of volume,
1014 // reducing the requested byte count if necessary.
1015 //
1016
1017 if (WriteToEof || StartingLbo >= VolumeSize) {
1018 FatCompleteRequest( IrpContext, Irp, STATUS_SUCCESS );
1019 return STATUS_SUCCESS;
1020 }
1021
1022 if (ByteCount > VolumeSize - StartingLbo) {
1023
1024 ByteCount = (ULONG) (VolumeSize - StartingLbo);
1025
1026 //
1027 // For async writes we had set the byte count in the FatIoContext
1028 // above, so fix that here.
1029 //
1030
1031 if (!Wait) {
1032
1033 IrpContext->FatIoContext->Wait.Async.RequestedByteCount =
1034 ByteCount;
1035 }
1036 }
1037 } else {
1038
1039 //
1040 // This has a peculiar interpretation, but just adjust the starting
1041 // byte to the end of the visible volume.
1042 //
1043
1044 if (WriteToEof) {
1045
1046 StartingLbo = VolumeSize;
1047 }
1048 }
1049
1050 //
1051 // For DASD we have to probe and lock the user's buffer
1052 //
1053
1054 FatLockUserBuffer( IrpContext, Irp, IoReadAccess, ByteCount );
1055
1056 //
1057 // Set the FO_MODIFIED flag here to trigger a verify when this
1058 // handle is closed. Note that we can err on the conservative
1059 // side with no problem, i.e. if we accidently do an extra
1060 // verify there is no problem.
1061 //
1062
1063 SetFlag( FileObject->Flags, FO_FILE_MODIFIED );
1064
1065 //
1066 // Write the data and wait for the results
1067 //
1068
1069 FatSingleAsync( IrpContext,
1070 Vcb,
1071 StartingLbo,
1072 ByteCount,
1073 Irp );
1074
1075 #if (NTDDI_VERSION >= NTDDI_WIN8)
1076
1077 //
1078 // Account for DASD Ios
1079 //
1080
1081 if (FatDiskAccountingEnabled) {
1082
1083 PETHREAD ThreadIssuingIo = PsGetCurrentThread();
1084
1085 PsUpdateDiskCounters( PsGetThreadProcess( ThreadIssuingIo ),
1086 0,
1087 ByteCount,
1088 0,
1089 1,
1090 0 );
1091 }
1092
1093 #endif
1094
1095 if (!Wait) {
1096
1097 //
1098 // We, nor anybody else, need the IrpContext any more.
1099 //
1100
1101 IrpContext->FatIoContext = NULL;
1102
1103 FatDeleteIrpContext( IrpContext );
1104
1105 DebugTrace(-1, Dbg, "FatNonCachedIo -> STATUS_PENDING\n", 0);
1106
1107 return STATUS_PENDING;
1108 }
1109
1110 FatWaitSync( IrpContext );
1111
1112 //
1113 // If the call didn't succeed, raise the error status
1114 //
1115 // Also mark this volume as needing verification to automatically
1116 // cause everything to get cleaned up.
1117 //
1118
1119 if (!NT_SUCCESS( Status = Irp->IoStatus.Status )) {
1120
1121 FatNormalizeAndRaiseStatus( IrpContext, Status );
1122 }
1123
1124 //
1125 // Update the current file position. We assume that
1126 // open/create zeros out the CurrentByteOffset field.
1127 //
1128
1129 if (SynchronousIo && !PagingIo) {
1130 FileObject->CurrentByteOffset.QuadPart =
1131 StartingLbo + Irp->IoStatus.Information;
1132 }
1133
1134 DebugTrace(-1, Dbg, "FatCommonWrite -> %08lx\n", Status );
1135
1136 FatCompleteRequest( IrpContext, Irp, Status );
1137 return Status;
1138 }
1139 \f
1140 //
1141 // At this point we know there is an Fcb/Dcb.
1142 //
1143
1144 NT_ASSERT( FcbOrDcb != NULL );
1145
1146 //
1147 // Use a try-finally to free Fcb/Dcb and buffers on the way out.
1148 //
1149
1150 _SEH2_TRY {
1151
1152 //
1153 // This case corresponds to a normal user write file.
1154 //
1155
1156 if ( TypeOfOpen == UserFileOpen
1157 ) {
1158
1159 ULONG ValidDataLength;
1160 ULONG ValidDataToDisk;
1161 ULONG ValidDataToCheck;
1162
1163 DebugTrace(0, Dbg, "Type of write is user file open\n", 0);
1164
1165 //
1166 // If this is a noncached transfer and is not a paging I/O, and
1167 // the file has been opened cached, then we will do a flush here
1168 // to avoid stale data problems. Note that we must flush before
1169 // acquiring the Fcb shared since the write may try to acquire
1170 // it exclusive.
1171 //
1172 // The Purge following the flush will guarentee cache coherency.
1173 //
1174
1175 if (NonCachedIo && !PagingIo &&
1176 (FileObject->SectionObjectPointer->DataSectionObject != NULL)) {
1177
1178 #ifndef __REACTOS__
1179 IO_STATUS_BLOCK IoStatus = {0};
1180 #else
1181 IO_STATUS_BLOCK IoStatus = {{0}};
1182 #endif
1183
1184 //
1185 // We need the Fcb exclsuive to do the CcPurgeCache
1186 //
1187
1188 if (!FatAcquireExclusiveFcb( IrpContext, FcbOrDcb )) {
1189
1190 DebugTrace( 0, Dbg, "Cannot acquire FcbOrDcb = %p shared without waiting\n", FcbOrDcb );
1191
1192 try_return( PostIrp = TRUE );
1193 }
1194
1195 FcbOrDcbAcquired = TRUE;
1196 FcbAcquiredExclusive = TRUE;
1197
1198 //
1199 // Preacquire pagingio for the flush.
1200 //
1201
1202 ExAcquireResourceExclusiveLite( FcbOrDcb->Header.PagingIoResource, TRUE );
1203
1204 #if (NTDDI_VERSION >= NTDDI_WIN7)
1205
1206 //
1207 // Remember that we are holding the paging I/O resource.
1208 //
1209
1210 PagingIoResourceAcquired = TRUE;
1211
1212 //
1213 // We hold so that we will prevent a pagefault from occuring and seeing
1214 // soon-to-be stale data from the disk. We used to believe this was
1215 // something to be left to the app to synchronize; we now realize that
1216 // noncached IO on a fileserver is doomed without the filesystem forcing
1217 // the coherency issue. By only penalizing noncached coherency when
1218 // needed, this is about the best we can do.
1219 //
1220
1221 //
1222 // Now perform the coherency flush and purge operation. This version of the call
1223 // will try to invalidate mapped pages to prevent data corruption.
1224 //
1225
1226 CcCoherencyFlushAndPurgeCache( FileObject->SectionObjectPointer,
1227 WriteToEof ? &FcbOrDcb->Header.FileSize : &StartingByte,
1228 ByteCount,
1229 &IoStatus,
1230 0 );
1231
1232 SuccessfulPurge = NT_SUCCESS( IoStatus.Status );
1233
1234 #else
1235
1236 CcFlushCache( FileObject->SectionObjectPointer,
1237 WriteToEof ? &FcbOrDcb->Header.FileSize : &StartingByte,
1238 ByteCount,
1239 &IoStatus );
1240
1241 if (!NT_SUCCESS( IoStatus.Status )) {
1242
1243 ExReleaseResourceLite( FcbOrDcb->Header.PagingIoResource );
1244 try_return( IoStatus.Status );
1245 }
1246
1247 //
1248 // Remember that we are holding the paging I/O resource.
1249 //
1250
1251 PagingIoResourceAcquired = TRUE;
1252
1253 //
1254 // We hold so that we will prevent a pagefault from occurring and seeing
1255 // soon-to-be stale data from the disk. We used to believe this was
1256 // something to be left to the app to synchronize; we now realize that
1257 // noncached IO on a fileserver is doomed without the filesystem forcing
1258 // the coherency issue. By only penalizing noncached coherency when
1259 // needed, this is about the best we can do.
1260 //
1261
1262 SuccessfulPurge = CcPurgeCacheSection( FileObject->SectionObjectPointer,
1263 WriteToEof ? &FcbOrDcb->Header.FileSize : &StartingByte,
1264 ByteCount,
1265 FALSE );
1266
1267 #endif
1268
1269 if (!SuccessfulPurge && (FcbOrDcb->PurgeFailureModeEnableCount > 0)) {
1270
1271 //
1272 // Purge failure mode only applies to user files.
1273 //
1274
1275 NT_ASSERT( TypeOfOpen == UserFileOpen );
1276
1277 //
1278 // Do not swallow the purge failure if in purge failure
1279 // mode. Someone outside the file system intends to handle
1280 // the error and prevent any application compatibility
1281 // issue.
1282 //
1283 // NOTE: If the file system were not preventing a pagefault
1284 // from processing while this write is in flight, which it does
1285 // by holding the paging resource across the write, it would
1286 // need to fail the operation even if a purge succeeded. If
1287 // not a memory mapped read could bring in a stale page before
1288 // the write makes it to disk.
1289 //
1290
1291 try_return( Status = STATUS_PURGE_FAILED );
1292 }
1293
1294 //
1295 // Indicate we're OK with the fcb being demoted to shared access
1296 // if that turns out to be possible later on after VDL extension
1297 // is checked for.
1298 //
1299 // PagingIo must be held all the way through.
1300 //
1301
1302 FcbCanDemoteToShared = TRUE;
1303 }
1304
1305 //
1306 // We assert that Paging Io writes will never WriteToEof.
1307 //
1308
1309 NT_ASSERT( WriteToEof ? !PagingIo : TRUE );
1310
1311 //
1312 // First let's acquire the Fcb shared. Shared is enough if we
1313 // are not writing beyond EOF.
1314 //
1315
1316 if ( PagingIo ) {
1317
1318 (VOID)ExAcquireResourceSharedLite( FcbOrDcb->Header.PagingIoResource, TRUE );
1319 PagingIoResourceAcquired = TRUE;
1320
1321 if (!Wait) {
1322
1323 IrpContext->FatIoContext->Wait.Async.Resource =
1324 FcbOrDcb->Header.PagingIoResource;
1325 }
1326
1327 //
1328 // Check to see if we collided with a MoveFile call, and if
1329 // so block until it completes.
1330 //
1331
1332 if (FcbOrDcb->MoveFileEvent) {
1333
1334 (VOID)KeWaitForSingleObject( FcbOrDcb->MoveFileEvent,
1335 Executive,
1336 KernelMode,
1337 FALSE,
1338 NULL );
1339 }
1340
1341 } else {
1342
1343 //
1344 // We may already have the Fcb due to noncached coherency
1345 // work done just above; however, we may still have to extend
1346 // valid data length. We can't demote this to shared, matching
1347 // what occurred before, until we figure that out a bit later.
1348 //
1349 // We kept ahold of it since our lockorder is main->paging,
1350 // and paging must now be held across the noncached write from
1351 // the purge on.
1352 //
1353
1354 //
1355 // If this is async I/O, we will wait if there is an exclusive
1356 // waiter.
1357 //
1358
1359 if (!Wait && NonCachedIo) {
1360
1361 if (!FcbOrDcbAcquired &&
1362 !FatAcquireSharedFcbWaitForEx( IrpContext, FcbOrDcb )) {
1363
1364 DebugTrace( 0, Dbg, "Cannot acquire FcbOrDcb = %p shared without waiting\n", FcbOrDcb );
1365 try_return( PostIrp = TRUE );
1366 }
1367
1368 //
1369 // Note we will have to release this resource elsewhere. If we came
1370 // out of the noncached coherency path, we will also have to drop
1371 // the paging io resource.
1372 //
1373
1374 IrpContext->FatIoContext->Wait.Async.Resource = FcbOrDcb->Header.Resource;
1375
1376 if (FcbCanDemoteToShared) {
1377
1378 IrpContext->FatIoContext->Wait.Async.Resource2 = FcbOrDcb->Header.PagingIoResource;
1379 }
1380 } else {
1381
1382 if (!FcbOrDcbAcquired &&
1383 !FatAcquireSharedFcb( IrpContext, FcbOrDcb )) {
1384
1385 DebugTrace( 0, Dbg, "Cannot acquire FcbOrDcb = %p shared without waiting\n", FcbOrDcb );
1386 try_return( PostIrp = TRUE );
1387 }
1388 }
1389
1390 FcbOrDcbAcquired = TRUE;
1391 }
1392
1393 //
1394 // Get a first tentative file size and valid data length.
1395 // We must get ValidDataLength first since it is always
1396 // increased second (in case we are unprotected) and
1397 // we don't want to capture ValidDataLength > FileSize.
1398 //
1399
1400 ValidDataToDisk = FcbOrDcb->ValidDataToDisk;
1401 ValidDataLength = FcbOrDcb->Header.ValidDataLength.LowPart;
1402 FileSize = FcbOrDcb->Header.FileSize.LowPart;
1403
1404 NT_ASSERT( ValidDataLength <= FileSize );
1405
1406 //
1407 // If we are paging io, then we do not want
1408 // to write beyond end of file. If the base is beyond Eof, we will just
1409 // Noop the call. If the transfer starts before Eof, but extends
1410 // beyond, we will truncate the transfer to the last sector
1411 // boundary.
1412 //
1413
1414 //
1415 // Just in case this is paging io, limit write to file size.
1416 // Otherwise, in case of write through, since Mm rounds up
1417 // to a page, we might try to acquire the resource exclusive
1418 // when our top level guy only acquired it shared. Thus, =><=.
1419 //
1420
1421 if ( PagingIo ) {
1422
1423 if (StartingVbo >= FileSize) {
1424
1425 DebugTrace( 0, Dbg, "PagingIo started beyond EOF.\n", 0 );
1426
1427 Irp->IoStatus.Information = 0;
1428
1429 try_return( Status = STATUS_SUCCESS );
1430 }
1431
1432 if (ByteCount > FileSize - StartingVbo) {
1433
1434 DebugTrace( 0, Dbg, "PagingIo extending beyond EOF.\n", 0 );
1435
1436 ByteCount = FileSize - StartingVbo;
1437 }
1438 }
1439
1440 //
1441 // Determine if we were called by the lazywriter.
1442 // (see resrcsup.c)
1443 //
1444
1445 if (FcbOrDcb->Specific.Fcb.LazyWriteThread == PsGetCurrentThread()) {
1446
1447 CalledByLazyWriter = TRUE;
1448
1449 if (FlagOn( FcbOrDcb->Header.Flags, FSRTL_FLAG_USER_MAPPED_FILE )) {
1450
1451 //
1452 // Fail if the start of this request is beyond valid data length.
1453 // Don't worry if this is an unsafe test. MM and CC won't
1454 // throw this page away if it is really dirty.
1455 //
1456
1457 if ((StartingVbo + ByteCount > ValidDataLength) &&
1458 (StartingVbo < FileSize)) {
1459
1460 //
1461 // It's OK if byte range is within the page containing valid data length,
1462 // since we will use ValidDataToDisk as the start point.
1463 //
1464
1465 if (StartingVbo + ByteCount > ((ValidDataLength + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))) {
1466
1467 //
1468 // Don't flush this now.
1469 //
1470
1471 try_return( Status = STATUS_FILE_LOCK_CONFLICT );
1472 }
1473 }
1474 }
1475 }
1476
1477 //
1478 // This code detects if we are a recursive synchronous page write
1479 // on a write through file object.
1480 //
1481
1482 if (FlagOn(Irp->Flags, IRP_SYNCHRONOUS_PAGING_IO) &&
1483 FlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_RECURSIVE_CALL)) {
1484
1485 PIRP TopIrp;
1486
1487 TopIrp = IoGetTopLevelIrp();
1488
1489 //
1490 // This clause determines if the top level request was
1491 // in the FastIo path. Gack. Since we don't have a
1492 // real sharing protocol for the top level IRP field ...
1493 // yet ... if someone put things other than a pure IRP in
1494 // there we best be careful.
1495 //
1496
1497 if ((ULONG_PTR)TopIrp > FSRTL_MAX_TOP_LEVEL_IRP_FLAG &&
1498 NodeType(TopIrp) == IO_TYPE_IRP) {
1499
1500 PIO_STACK_LOCATION IrpStack;
1501
1502 IrpStack = IoGetCurrentIrpStackLocation(TopIrp);
1503
1504 //
1505 // Finally this routine detects if the Top irp was a
1506 // cached write to this file and thus we are the writethrough.
1507 //
1508
1509 if ((IrpStack->MajorFunction == IRP_MJ_WRITE) &&
1510 (IrpStack->FileObject->FsContext == FileObject->FsContext) &&
1511 !FlagOn(TopIrp->Flags,IRP_NOCACHE)) {
1512
1513 RecursiveWriteThrough = TRUE;
1514 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WRITE_THROUGH );
1515 }
1516 }
1517 }
1518
1519 //
1520 // Here is the deal with ValidDataLength and FileSize:
1521 //
1522 // Rule 1: PagingIo is never allowed to extend file size.
1523 //
1524 // Rule 2: Only the top level requestor may extend Valid
1525 // Data Length. This may be paging IO, as when a
1526 // a user maps a file, but will never be as a result
1527 // of cache lazy writer writes since they are not the
1528 // top level request.
1529 //
1530 // Rule 3: If, using Rules 1 and 2, we decide we must extend
1531 // file size or valid data, we take the Fcb exclusive.
1532 //
1533
1534 //
1535 // Now see if we are writing beyond valid data length, and thus
1536 // maybe beyond the file size. If so, then we must
1537 // release the Fcb and reacquire it exclusive. Note that it is
1538 // important that when not writing beyond EOF that we check it
1539 // while acquired shared and keep the FCB acquired, in case some
1540 // turkey truncates the file.
1541 //
1542
1543 //
1544 // Note that the lazy writer must not be allowed to try and
1545 // acquire the resource exclusive. This is not a problem since
1546 // the lazy writer is paging IO and thus not allowed to extend
1547 // file size, and is never the top level guy, thus not able to
1548 // extend valid data length.
1549 //
1550
1551 if ( !CalledByLazyWriter &&
1552
1553 !RecursiveWriteThrough &&
1554
1555 (WriteToEof ||
1556 StartingVbo + ByteCount > ValidDataLength)) {
1557
1558 //
1559 // If this was an asynchronous write, we are going to make
1560 // the request synchronous at this point, but only kinda.
1561 // At the last moment, before sending the write off to the
1562 // driver, we may shift back to async.
1563 //
1564 // The modified page writer already has the resources
1565 // he requires, so this will complete in small finite
1566 // time.
1567 //
1568
1569 if (!Wait) {
1570
1571 Wait = TRUE;
1572 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT );
1573
1574 if (NonCachedIo) {
1575
1576 NT_ASSERT( TypeOfOpen == UserFileOpen );
1577
1578 SwitchBackToAsync = TRUE;
1579 }
1580 }
1581
1582 //
1583 // We need Exclusive access to the Fcb/Dcb since we will
1584 // probably have to extend valid data and/or file.
1585 //
1586
1587 //
1588 // Y'know, the PagingIo case is a mapped page writer, and
1589 // MmFlushSection or the mapped page writer itself already
1590 // snatched up the main exclusive for us via the AcquireForCcFlush
1591 // or AcquireForModWrite logic (the default logic parallels FAT's
1592 // requirements since this order/model came first). Should ASSERT
1593 // this since it'll just go 1->2, and a few more unnecessary DPC
1594 // transitions.
1595 //
1596 // The preacquire is done to avoid inversion over the collided flush
1597 // meta-resource in Mm. The one time this is not true is at final
1598 // system shutdown time, when Mm goes off and flushes all the dirty
1599 // pages. Since the callback is defined as Wait == FALSE he can't
1600 // guarantee acquisition (though with clean process shutdown being
1601 // enforced, it really should be now). Permit this to float.
1602 //
1603 // Note that since we're going to fall back on the acquisition already
1604 // done for us, don't confuse things by thinking we did the work
1605 // for it.
1606 //
1607
1608 if ( PagingIo ) {
1609
1610 ExReleaseResourceLite( FcbOrDcb->Header.PagingIoResource );
1611 PagingIoResourceAcquired = FALSE;
1612
1613 } else {
1614
1615 //
1616 // The Fcb may already be acquired exclusive due to coherency
1617 // work performed earlier. If so, obviously no work to do.
1618 //
1619
1620 if (!FcbAcquiredExclusive) {
1621
1622 FatReleaseFcb( IrpContext, FcbOrDcb );
1623 FcbOrDcbAcquired = FALSE;
1624
1625 if (!FatAcquireExclusiveFcb( IrpContext, FcbOrDcb )) {
1626
1627 DebugTrace( 0, Dbg, "Cannot acquire FcbOrDcb = %p shared without waiting\n", FcbOrDcb );
1628
1629 try_return( PostIrp = TRUE );
1630 }
1631
1632 FcbOrDcbAcquired = TRUE;
1633
1634 #ifdef _MSC_VER
1635 #pragma prefast( suppress:28931, "convenient for debugging" )
1636 #endif
1637 FcbAcquiredExclusive = TRUE;
1638 }
1639 }
1640
1641 //
1642 // Now that we have the Fcb exclusive, see if this write
1643 // qualifies for being made async again. The key point
1644 // here is that we are going to update ValidDataLength in
1645 // the Fcb before returning. We must make sure this will
1646 // not cause a problem. One thing we must do is keep out
1647 // the FastIo path.
1648 //
1649
1650 if (SwitchBackToAsync) {
1651
1652 if ((FcbOrDcb->NonPaged->SectionObjectPointers.DataSectionObject != NULL) ||
1653 (StartingVbo + ByteCount > FcbOrDcb->Header.ValidDataLength.LowPart) ||
1654 FatNoAsync) {
1655
1656 RtlZeroMemory( IrpContext->FatIoContext, sizeof(FAT_IO_CONTEXT) );
1657
1658 KeInitializeEvent( &IrpContext->FatIoContext->Wait.SyncEvent,
1659 NotificationEvent,
1660 FALSE );
1661
1662 SwitchBackToAsync = FALSE;
1663
1664 } else {
1665
1666 if (!FcbOrDcb->NonPaged->OutstandingAsyncEvent) {
1667
1668 FcbOrDcb->NonPaged->OutstandingAsyncEvent =
1669 #ifndef __REACTOS__
1670 FsRtlAllocatePoolWithTag( NonPagedPoolNx,
1671 #else
1672 FsRtlAllocatePoolWithTag( NonPagedPool,
1673 #endif
1674 sizeof(KEVENT),
1675 TAG_EVENT );
1676
1677 KeInitializeEvent( FcbOrDcb->NonPaged->OutstandingAsyncEvent,
1678 NotificationEvent,
1679 FALSE );
1680 }
1681
1682 //
1683 // If we are transitioning from 0 to 1, reset the event.
1684 //
1685
1686 if (ExInterlockedAddUlong( &FcbOrDcb->NonPaged->OutstandingAsyncWrites,
1687 1,
1688 &FatData.GeneralSpinLock ) == 0) {
1689
1690 KeClearEvent( FcbOrDcb->NonPaged->OutstandingAsyncEvent );
1691 }
1692
1693 UnwindOutstandingAsync = TRUE;
1694
1695 IrpContext->FatIoContext->Wait.Async.NonPagedFcb = FcbOrDcb->NonPaged;
1696 }
1697 }
1698
1699 //
1700 // Now that we have the Fcb exclusive, get a new batch of
1701 // filesize and ValidDataLength.
1702 //
1703
1704 ValidDataToDisk = FcbOrDcb->ValidDataToDisk;
1705 ValidDataLength = FcbOrDcb->Header.ValidDataLength.LowPart;
1706 FileSize = FcbOrDcb->Header.FileSize.LowPart;
1707
1708 //
1709 // If this is PagingIo check again if any pruning is
1710 // required. It is important to start from basic
1711 // principles in case the file was *grown* ...
1712 //
1713
1714 if ( PagingIo ) {
1715
1716 if (StartingVbo >= FileSize) {
1717 Irp->IoStatus.Information = 0;
1718 try_return( Status = STATUS_SUCCESS );
1719 }
1720
1721 ByteCount = IrpSp->Parameters.Write.Length;
1722
1723 if (ByteCount > FileSize - StartingVbo) {
1724 ByteCount = FileSize - StartingVbo;
1725 }
1726 }
1727 }
1728
1729 //
1730 // Remember the final requested byte count
1731 //
1732
1733 if (NonCachedIo && !Wait) {
1734
1735 IrpContext->FatIoContext->Wait.Async.RequestedByteCount =
1736 ByteCount;
1737 }
1738
1739 //
1740 // Remember the initial file size and valid data length,
1741 // just in case .....
1742 //
1743
1744 InitialFileSize = FileSize;
1745
1746 InitialValidDataLength = ValidDataLength;
1747
1748 //
1749 // Make sure the FcbOrDcb is still good
1750 //
1751
1752 FatVerifyFcb( IrpContext, FcbOrDcb );
1753
1754 //
1755 // Check for writing to end of File. If we are, then we have to
1756 // recalculate a number of fields.
1757 //
1758
1759 if ( WriteToEof ) {
1760
1761 StartingVbo = FileSize;
1762 StartingByte = FcbOrDcb->Header.FileSize;
1763
1764 //
1765 // Since we couldn't know this information until now, perform the
1766 // necessary bounds checking that we omitted at the top because
1767 // this is a WriteToEof operation.
1768 //
1769
1770
1771 if (!FatIsIoRangeValid( Vcb, StartingByte, ByteCount)) {
1772
1773 Irp->IoStatus.Information = 0;
1774 try_return( Status = STATUS_DISK_FULL );
1775 }
1776
1777
1778 }
1779
1780 //
1781 // If this is a non paging write to a data stream object we have to
1782 // check for access according to the current state op/filelocks.
1783 //
1784 // Note that after this point, operations will be performed on the file.
1785 // No modifying activity can occur prior to this point in the write
1786 // path.
1787 //
1788
1789 if (!PagingIo && TypeOfOpen == UserFileOpen) {
1790
1791 Status = FsRtlCheckOplock( FatGetFcbOplock(FcbOrDcb),
1792 Irp,
1793 IrpContext,
1794 FatOplockComplete,
1795 FatPrePostIrp );
1796
1797 if (Status != STATUS_SUCCESS) {
1798
1799 OplockPostIrp = TRUE;
1800 PostIrp = TRUE;
1801 try_return( NOTHING );
1802 }
1803
1804 //
1805 // This oplock call can affect whether fast IO is possible.
1806 // We may have broken an oplock to no oplock held. If the
1807 // current state of the file is FastIoIsNotPossible then
1808 // recheck the fast IO state.
1809 //
1810
1811 if (FcbOrDcb->Header.IsFastIoPossible == FastIoIsNotPossible) {
1812
1813 FcbOrDcb->Header.IsFastIoPossible = FatIsFastIoPossible( FcbOrDcb );
1814 }
1815
1816 //
1817 // And finally check the regular file locks.
1818 //
1819
1820 if (!FsRtlCheckLockForWriteAccess( &FcbOrDcb->Specific.Fcb.FileLock, Irp )) {
1821
1822 try_return( Status = STATUS_FILE_LOCK_CONFLICT );
1823 }
1824 }
1825
1826 //
1827 // Determine if we will deal with extending the file. Note that
1828 // this implies extending valid data, and so we already have all
1829 // of the required synchronization done.
1830 //
1831
1832 if (!PagingIo && (StartingVbo + ByteCount > FileSize)) {
1833
1834 ExtendingFile = TRUE;
1835 }
1836
1837 if ( ExtendingFile ) {
1838
1839
1840 //
1841 // EXTENDING THE FILE
1842 //
1843
1844 //
1845 // For an extending write on hotplug media, we are going to defer the metadata
1846 // updates via Cc's lazy writer. They will also be flushed when the handle is closed.
1847 //
1848
1849 if (FlagOn(Vcb->VcbState, VCB_STATE_FLAG_DEFERRED_FLUSH)) {
1850
1851 SetFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_DISABLE_WRITE_THROUGH);
1852 }
1853
1854 //
1855 // Update our local copy of FileSize
1856 //
1857
1858 FileSize = StartingVbo + ByteCount;
1859
1860
1861 if (FcbOrDcb->Header.AllocationSize.QuadPart == FCB_LOOKUP_ALLOCATIONSIZE_HINT) {
1862
1863 FatLookupFileAllocationSize( IrpContext, FcbOrDcb );
1864 }
1865
1866 //
1867 // If the write goes beyond the allocation size, add some
1868 // file allocation.
1869 //
1870
1871
1872 if ( (FileSize) > FcbOrDcb->Header.AllocationSize.LowPart ) {
1873
1874
1875 BOOLEAN AllocateMinimumSize = TRUE;
1876
1877 //
1878 // Only do allocation chunking on writes if this is
1879 // not the first allocation added to the file.
1880 //
1881
1882 if (FcbOrDcb->Header.AllocationSize.LowPart != 0 ) {
1883
1884 ULONGLONG ApproximateClusterCount;
1885 ULONGLONG TargetAllocation;
1886 ULONGLONG AddedAllocation;
1887 ULONGLONG Multiplier;
1888 ULONG BytesPerCluster;
1889 ULONG ClusterAlignedFileSize;
1890
1891 //
1892 // We are going to try and allocate a bigger chunk than
1893 // we actually need in order to maximize FastIo usage.
1894 //
1895 // The multiplier is computed as follows:
1896 //
1897 //
1898 // (FreeDiskSpace )
1899 // Mult = ( (-------------------------) / 32 ) + 1
1900 // (FileSize - AllocationSize)
1901 //
1902 // and max out at 32.
1903 //
1904 // With this formula we start winding down chunking
1905 // as we get near the disk space wall.
1906 //
1907 // For instance on an empty 1 MEG floppy doing an 8K
1908 // write, the multiplier is 6, or 48K to allocate.
1909 // When this disk is half full, the multiplier is 3,
1910 // and when it is 3/4 full, the multiplier is only 1.
1911 //
1912 // On a larger disk, the multiplier for a 8K read will
1913 // reach its maximum of 32 when there is at least ~8 Megs
1914 // available.
1915 //
1916
1917 //
1918 // Small write performance note, use cluster aligned
1919 // file size in above equation.
1920 //
1921
1922 //
1923 // We need to carefully consider what happens when we approach
1924 // a 2^32 byte filesize. Overflows will cause problems.
1925 //
1926
1927 BytesPerCluster = 1 << Vcb->AllocationSupport.LogOfBytesPerCluster;
1928
1929 //
1930 // This can overflow if the target filesize is in the last cluster.
1931 // In this case, we can obviously skip over all of this fancy
1932 // logic and just max out the file right now.
1933 //
1934
1935
1936 ClusterAlignedFileSize = ((FileSize) + (BytesPerCluster - 1)) &
1937 ~(BytesPerCluster - 1);
1938
1939
1940 if (ClusterAlignedFileSize != 0) {
1941
1942 //
1943 // This actually has a chance but the possibility of overflowing
1944 // the numerator is pretty unlikely, made more unlikely by moving
1945 // the divide by 32 up to scale the BytesPerCluster. However, even if it does the
1946 // effect is completely benign.
1947 //
1948 // FAT32 with a 64k cluster and over 2^21 clusters would do it (and
1949 // so forth - 2^(16 - 5 + 21) == 2^32). Since this implies a partition
1950 // of 32gb and a number of clusters (and cluster size) we plan to
1951 // disallow in format for FAT32, the odds of this happening are pretty
1952 // low anyway.
1953 Multiplier = ((Vcb->AllocationSupport.NumberOfFreeClusters *
1954 (BytesPerCluster >> 5)) /
1955 (ClusterAlignedFileSize -
1956 FcbOrDcb->Header.AllocationSize.LowPart)) + 1;
1957
1958 if (Multiplier > 32) { Multiplier = 32; }
1959
1960 // These computations will never overflow a ULONGLONG because a file is capped at 4GB, and
1961 // a single write can be a max of 4GB.
1962 AddedAllocation = Multiplier * (ClusterAlignedFileSize - FcbOrDcb->Header.AllocationSize.LowPart);
1963
1964 TargetAllocation = FcbOrDcb->Header.AllocationSize.LowPart + AddedAllocation;
1965
1966 //
1967 // We know that TargetAllocation is in whole clusters. Now
1968 // we check if it exceeded the maximum valid FAT file size.
1969 // If it did, we fall back to allocating up to the maximum legal size.
1970 //
1971
1972 if (TargetAllocation > ~BytesPerCluster + 1) {
1973
1974 TargetAllocation = ~BytesPerCluster + 1;
1975 AddedAllocation = TargetAllocation - FcbOrDcb->Header.AllocationSize.LowPart;
1976 }
1977
1978 //
1979 // Now do an unsafe check here to see if we should even
1980 // try to allocate this much. If not, just allocate
1981 // the minimum size we need, if so so try it, but if it
1982 // fails, just allocate the minimum size we need.
1983 //
1984
1985 ApproximateClusterCount = (AddedAllocation / BytesPerCluster);
1986
1987 if (ApproximateClusterCount <= Vcb->AllocationSupport.NumberOfFreeClusters) {
1988
1989 _SEH2_TRY {
1990
1991 FatAddFileAllocation( IrpContext,
1992 FcbOrDcb,
1993 FileObject,
1994 (ULONG)TargetAllocation );
1995
1996 AllocateMinimumSize = FALSE;
1997 SetFlag( FcbOrDcb->FcbState, FCB_STATE_TRUNCATE_ON_CLOSE );
1998
1999 } _SEH2_EXCEPT( _SEH2_GetExceptionCode() == STATUS_DISK_FULL ?
2000 EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH ) {
2001
2002 FatResetExceptionState( IrpContext );
2003 } _SEH2_END;
2004 }
2005 }
2006 }
2007
2008 if ( AllocateMinimumSize ) {
2009
2010
2011 FatAddFileAllocation( IrpContext,
2012 FcbOrDcb,
2013 FileObject,
2014 FileSize );
2015
2016
2017 }
2018
2019 //
2020 // Assert that the allocation worked
2021 //
2022
2023
2024 NT_ASSERT( FcbOrDcb->Header.AllocationSize.LowPart >= FileSize );
2025
2026
2027 }
2028
2029 //
2030 // Set the new file size in the Fcb
2031 //
2032
2033
2034 NT_ASSERT( FileSize <= FcbOrDcb->Header.AllocationSize.LowPart );
2035
2036
2037 FcbOrDcb->Header.FileSize.LowPart = FileSize;
2038
2039 //
2040 // Extend the cache map, letting mm knows the new file size.
2041 // We only have to do this if the file is cached.
2042 //
2043
2044 if (CcIsFileCached(FileObject)) {
2045 CcSetFileSizes( FileObject, (PCC_FILE_SIZES)&FcbOrDcb->Header.AllocationSize );
2046 }
2047 }
2048
2049 //
2050 // Determine if we will deal with extending valid data.
2051 //
2052
2053 if ( !CalledByLazyWriter &&
2054 !RecursiveWriteThrough &&
2055 (StartingVbo + ByteCount > ValidDataLength) ) {
2056
2057 ExtendingValidData = TRUE;
2058
2059 } else {
2060
2061 //
2062 // If not extending valid data, and we otherwise believe we
2063 // could demote from exclusive to shared, do so. This will
2064 // occur when we synchronize tight for noncached coherency
2065 // but must defer the demotion until after we decide about
2066 // valid data length, which requires it exclusive. Since we
2067 // can't drop/re-pick the resources without letting a pagefault
2068 // squirt through, the resource decision was kept up in the air
2069 // until now.
2070 //
2071 // Note that we've still got PagingIo exclusive in these cases.
2072 //
2073
2074 if (FcbCanDemoteToShared) {
2075
2076 NT_ASSERT( FcbAcquiredExclusive && ExIsResourceAcquiredExclusiveLite( FcbOrDcb->Header.Resource ));
2077 ExConvertExclusiveToSharedLite( FcbOrDcb->Header.Resource );
2078 FcbAcquiredExclusive = FALSE;
2079 }
2080 }
2081
2082 if (ValidDataToDisk > ValidDataLength) {
2083
2084 ValidDataToCheck = ValidDataToDisk;
2085
2086 } else {
2087
2088 ValidDataToCheck = ValidDataLength;
2089 }
2090
2091
2092 \f
2093 //
2094 // HANDLE THE NON-CACHED CASE
2095 //
2096
2097 if ( NonCachedIo ) {
2098
2099 //
2100 // Declare some local variables for enumeration through the
2101 // runs of the file, and an array to store parameters for
2102 // parallel I/Os
2103 //
2104
2105 ULONG SectorSize;
2106
2107 ULONG BytesToWrite;
2108
2109 DebugTrace(0, Dbg, "Non cached write.\n", 0);
2110
2111 //
2112 // Round up to sector boundary. The end of the write interval
2113 // must, however, be beyond EOF.
2114 //
2115
2116 SectorSize = (ULONG)Vcb->Bpb.BytesPerSector;
2117
2118 BytesToWrite = (ByteCount + (SectorSize - 1))
2119 & ~(SectorSize - 1);
2120
2121 //
2122 // All requests should be well formed and
2123 // make sure we don't wipe out any data
2124 //
2125
2126 if (((StartingVbo & (SectorSize - 1)) != 0) ||
2127
2128 ((BytesToWrite != ByteCount) &&
2129 (StartingVbo + ByteCount < ValidDataLength))) {
2130
2131 NT_ASSERT( FALSE );
2132
2133 DebugTrace( 0, Dbg, "FatCommonWrite -> STATUS_NOT_IMPLEMENTED\n", 0);
2134 try_return( Status = STATUS_NOT_IMPLEMENTED );
2135 }
2136
2137 //
2138 // If this noncached transfer is at least one sector beyond
2139 // the current ValidDataLength in the Fcb, then we have to
2140 // zero the sectors in between. This can happen if the user
2141 // has opened the file noncached, or if the user has mapped
2142 // the file and modified a page beyond ValidDataLength. It
2143 // *cannot* happen if the user opened the file cached, because
2144 // ValidDataLength in the Fcb is updated when he does the cached
2145 // write (we also zero data in the cache at that time), and
2146 // therefore, we will bypass this test when the data
2147 // is ultimately written through (by the Lazy Writer).
2148 //
2149 // For the paging file we don't care about security (ie.
2150 // stale data), so don't bother zeroing.
2151 //
2152 // We can actually get writes wholly beyond valid data length
2153 // from the LazyWriter because of paging Io decoupling.
2154 //
2155
2156 if (!CalledByLazyWriter &&
2157 !RecursiveWriteThrough &&
2158 (StartingVbo > ValidDataToCheck)) {
2159
2160 FatZeroData( IrpContext,
2161 Vcb,
2162 FileObject,
2163 ValidDataToCheck,
2164 StartingVbo - ValidDataToCheck );
2165 }
2166
2167 //
2168 // Make sure we write FileSize to the dirent if we
2169 // are extending it and we are successful. (This may or
2170 // may not occur Write Through, but that is fine.)
2171 //
2172
2173 WriteFileSizeToDirent = TRUE;
2174
2175 //
2176 // Perform the actual IO
2177 //
2178
2179 if (SwitchBackToAsync) {
2180
2181 Wait = FALSE;
2182 ClearFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT );
2183 }
2184
2185 #ifdef SYSCACHE_COMPILE
2186
2187 #define MY_SIZE 0x1000000
2188 #define LONGMAP_COUNTER
2189
2190 #ifdef BITMAP
2191 //
2192 // Maintain a bitmap of IO started on this file.
2193 //
2194
2195 {
2196 PULONG WriteMask = FcbOrDcb->WriteMask;
2197
2198 if (NULL == WriteMask) {
2199
2200 WriteMask = FsRtlAllocatePoolWithTag( NonPagedPoolNx,
2201 (MY_SIZE/PAGE_SIZE) / 8,
2202 'wtaF' );
2203
2204 FcbOrDcb->WriteMask = WriteMask;
2205 RtlZeroMemory(WriteMask, (MY_SIZE/PAGE_SIZE) / 8);
2206 }
2207
2208 if (StartingVbo < MY_SIZE) {
2209
2210 ULONG Off = StartingVbo;
2211 ULONG Len = BytesToWrite;
2212
2213 if (Off + Len > MY_SIZE) {
2214 Len = MY_SIZE - Off;
2215 }
2216
2217 while (Len != 0) {
2218 WriteMask[(Off/PAGE_SIZE) / 32] |=
2219 1 << (Off/PAGE_SIZE) % 32;
2220
2221 Off += PAGE_SIZE;
2222 if (Len <= PAGE_SIZE) {
2223 break;
2224 }
2225 Len -= PAGE_SIZE;
2226 }
2227 }
2228 }
2229 #endif
2230
2231 #ifdef LONGMAP_COUNTER
2232 //
2233 // Maintain a longmap of IO started on this file, each ulong containing
2234 // the value of an ascending counter per write (gives us order information).
2235 //
2236 // Unlike the old bitmask stuff, this is mostly well synchronized.
2237 //
2238
2239 {
2240 PULONG WriteMask = (PULONG)FcbOrDcb->WriteMask;
2241
2242 if (NULL == WriteMask) {
2243
2244 WriteMask = FsRtlAllocatePoolWithTag( NonPagedPoolNx,
2245 (MY_SIZE/PAGE_SIZE) * sizeof(ULONG),
2246 'wtaF' );
2247
2248 FcbOrDcb->WriteMask = WriteMask;
2249 RtlZeroMemory(WriteMask, (MY_SIZE/PAGE_SIZE) * sizeof(ULONG));
2250 }
2251
2252 if (StartingVbo < MY_SIZE) {
2253
2254 ULONG Off = StartingVbo;
2255 ULONG Len = BytesToWrite;
2256 ULONG Tick = InterlockedIncrement( &FcbOrDcb->WriteMaskData );
2257
2258 if (Off + Len > MY_SIZE) {
2259 Len = MY_SIZE - Off;
2260 }
2261
2262 while (Len != 0) {
2263 InterlockedExchange( WriteMask + Off/PAGE_SIZE, Tick );
2264
2265 Off += PAGE_SIZE;
2266 if (Len <= PAGE_SIZE) {
2267 break;
2268 }
2269 Len -= PAGE_SIZE;
2270 }
2271 }
2272 }
2273 #endif
2274
2275 #endif
2276
2277
2278 if (FatNonCachedIo( IrpContext,
2279 Irp,
2280 FcbOrDcb,
2281 StartingVbo,
2282 BytesToWrite,
2283 BytesToWrite,
2284 0) == STATUS_PENDING) {
2285
2286
2287 UnwindOutstandingAsync = FALSE;
2288
2289 #ifdef _MSC_VER
2290 #pragma prefast( suppress:28931, "convenient for debugging" )
2291 #endif
2292 Wait = TRUE;
2293 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT );
2294
2295 IrpContext->FatIoContext = NULL;
2296 Irp = NULL;
2297
2298 //
2299 // As a matter of fact, if we hit this we are in deep trouble
2300 // if VDL is being extended. We are no longer attached to the
2301 // IRP, and have thus lost synchronization. Note that we should
2302 // not hit this case anymore since we will not re-async vdl extension.
2303 //
2304
2305 NT_ASSERT( !ExtendingValidData );
2306
2307 try_return( Status = STATUS_PENDING );
2308 }
2309
2310 //
2311 // If the call didn't succeed, raise the error status
2312 //
2313
2314 if (!NT_SUCCESS( Status = Irp->IoStatus.Status )) {
2315
2316 FatNormalizeAndRaiseStatus( IrpContext, Status );
2317
2318 } else {
2319
2320 ULONG NewValidDataToDisk;
2321
2322 //
2323 // Else set the context block to reflect the entire write
2324 // Also assert we got how many bytes we asked for.
2325 //
2326
2327 NT_ASSERT( Irp->IoStatus.Information == BytesToWrite );
2328
2329 Irp->IoStatus.Information = ByteCount;
2330
2331 //
2332 // Take this opportunity to update ValidDataToDisk.
2333 //
2334
2335 NewValidDataToDisk = StartingVbo + ByteCount;
2336
2337 if (NewValidDataToDisk > FileSize) {
2338 NewValidDataToDisk = FileSize;
2339 }
2340
2341 if (FcbOrDcb->ValidDataToDisk < NewValidDataToDisk) {
2342 FcbOrDcb->ValidDataToDisk = NewValidDataToDisk;
2343 }
2344 }
2345
2346 //
2347 // The transfer is either complete, or the Iosb contains the
2348 // appropriate status.
2349 //
2350
2351 try_return( Status );
2352
2353 } // if No Intermediate Buffering
2354
2355 \f
2356 //
2357 // HANDLE CACHED CASE
2358 //
2359
2360 else {
2361
2362 NT_ASSERT( !PagingIo );
2363
2364 //
2365 // We delay setting up the file cache until now, in case the
2366 // caller never does any I/O to the file, and thus
2367 // FileObject->PrivateCacheMap == NULL.
2368 //
2369
2370 if ( FileObject->PrivateCacheMap == NULL ) {
2371
2372 DebugTrace(0, Dbg, "Initialize cache mapping.\n", 0);
2373
2374 //
2375 // Get the file allocation size, and if it is less than
2376 // the file size, raise file corrupt error.
2377 //
2378
2379 if (FcbOrDcb->Header.AllocationSize.QuadPart == FCB_LOOKUP_ALLOCATIONSIZE_HINT) {
2380
2381 FatLookupFileAllocationSize( IrpContext, FcbOrDcb );
2382 }
2383
2384 if ( FileSize > FcbOrDcb->Header.AllocationSize.LowPart ) {
2385
2386 FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
2387
2388 FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
2389 }
2390
2391 //
2392 // Now initialize the cache map.
2393 //
2394
2395 FatInitializeCacheMap( FileObject,
2396 (PCC_FILE_SIZES)&FcbOrDcb->Header.AllocationSize,
2397 FALSE,
2398 &FatData.CacheManagerCallbacks,
2399 FcbOrDcb );
2400
2401 CcSetReadAheadGranularity( FileObject, READ_AHEAD_GRANULARITY );
2402
2403 //
2404 // Special case large floppy transfers, and make the file
2405 // object write through. For small floppy transfers,
2406 // set a timer to go off in a second and flush the file.
2407 //
2408 //
2409
2410 if (!FlagOn( FileObject->Flags, FO_WRITE_THROUGH ) &&
2411 FlagOn(Vcb->VcbState, VCB_STATE_FLAG_DEFERRED_FLUSH)) {
2412
2413 if (((StartingByte.LowPart & (PAGE_SIZE-1)) == 0) &&
2414 (ByteCount >= PAGE_SIZE)) {
2415
2416 SetFlag( FileObject->Flags, FO_WRITE_THROUGH );
2417
2418 } else {
2419
2420 LARGE_INTEGER OneSecondFromNow;
2421 PDEFERRED_FLUSH_CONTEXT FlushContext;
2422
2423 //
2424 // Get pool and initialize the timer and DPC
2425 //
2426
2427 #ifndef __REACTOS__
2428 FlushContext = FsRtlAllocatePoolWithTag( NonPagedPoolNx,
2429 #else
2430 FlushContext = FsRtlAllocatePoolWithTag( NonPagedPool,
2431 #endif
2432 sizeof(DEFERRED_FLUSH_CONTEXT),
2433 TAG_DEFERRED_FLUSH_CONTEXT );
2434
2435 KeInitializeTimer( &FlushContext->Timer );
2436
2437 KeInitializeDpc( &FlushContext->Dpc,
2438 FatDeferredFlushDpc,
2439 FlushContext );
2440
2441
2442 //
2443 // We have to reference the file object here.
2444 //
2445
2446 ObReferenceObject( FileObject );
2447
2448 FlushContext->File = FileObject;
2449
2450 //
2451 // Let'er rip!
2452 //
2453
2454 OneSecondFromNow.QuadPart = (LONG)-1*1000*1000*10;
2455
2456 KeSetTimer( &FlushContext->Timer,
2457 OneSecondFromNow,
2458 &FlushContext->Dpc );
2459 }
2460 }
2461 }
2462
2463 //
2464 // If this write is beyond valid data length, then we
2465 // must zero the data in between.
2466 //
2467
2468 if ( StartingVbo > ValidDataToCheck ) {
2469
2470 //
2471 // Call the Cache Manager to zero the data.
2472 //
2473
2474 if (!FatZeroData( IrpContext,
2475 Vcb,
2476 FileObject,
2477 ValidDataToCheck,
2478 StartingVbo - ValidDataToCheck )) {
2479
2480 DebugTrace( 0, Dbg, "Cached Write could not wait to zero\n", 0 );
2481
2482 try_return( PostIrp = TRUE );
2483 }
2484 }
2485
2486 WriteFileSizeToDirent = BooleanFlagOn(IrpContext->Flags,
2487 IRP_CONTEXT_FLAG_WRITE_THROUGH);
2488
2489 \f
2490 //
2491 // DO A NORMAL CACHED WRITE, if the MDL bit is not set,
2492 //
2493
2494 if (!FlagOn(IrpContext->MinorFunction, IRP_MN_MDL)) {
2495
2496 DebugTrace(0, Dbg, "Cached write.\n", 0);
2497
2498 //
2499 // Get hold of the user's buffer.
2500 //
2501
2502 SystemBuffer = FatMapUserBuffer( IrpContext, Irp );
2503
2504 //
2505 // Do the write, possibly writing through
2506 //
2507
2508 #if (NTDDI_VERSION >= NTDDI_WIN8)
2509 if (!CcCopyWriteEx( FileObject,
2510 &StartingByte,
2511 ByteCount,
2512 Wait,
2513 SystemBuffer,
2514 Irp->Tail.Overlay.Thread )) {
2515 #else
2516 if (!CcCopyWrite( FileObject,
2517 &StartingByte,
2518 ByteCount,
2519 Wait,
2520 SystemBuffer )) {
2521 #endif
2522
2523 DebugTrace( 0, Dbg, "Cached Write could not wait\n", 0 );
2524
2525 try_return( PostIrp = TRUE );
2526 }
2527
2528 Irp->IoStatus.Status = STATUS_SUCCESS;
2529 Irp->IoStatus.Information = ByteCount;
2530
2531 try_return( Status = STATUS_SUCCESS );
2532
2533 } else {
2534
2535 //
2536 // DO AN MDL WRITE
2537 //
2538
2539 DebugTrace(0, Dbg, "MDL write.\n", 0);
2540
2541 NT_ASSERT( Wait );
2542
2543 CcPrepareMdlWrite( FileObject,
2544 &StartingByte,
2545 ByteCount,
2546 &Irp->MdlAddress,
2547 &Irp->IoStatus );
2548
2549 Status = Irp->IoStatus.Status;
2550
2551 try_return( Status );
2552 }
2553 }
2554 }
2555 \f
2556 //
2557 // These two cases correspond to a system write directory file and
2558 // ea file.
2559 //
2560
2561 if (( TypeOfOpen == DirectoryFile ) || ( TypeOfOpen == EaFile)
2562 ) {
2563
2564 ULONG SectorSize;
2565
2566 #if FASTFATDBG
2567 if ( TypeOfOpen == DirectoryFile ) {
2568 DebugTrace(0, Dbg, "Type of write is directoryfile\n", 0);
2569 } else if ( TypeOfOpen == EaFile) {
2570 DebugTrace(0, Dbg, "Type of write is eafile\n", 0);
2571 }
2572 #endif
2573
2574 //
2575 // Make sure the FcbOrDcb is still good
2576 //
2577
2578 FatVerifyFcb( IrpContext, FcbOrDcb );
2579
2580 //
2581 // Synchronize here with people deleting directories and
2582 // mucking with the internals of the EA file.
2583 //
2584
2585 if (!ExAcquireSharedStarveExclusive( FcbOrDcb->Header.PagingIoResource,
2586 Wait )) {
2587
2588 DebugTrace( 0, Dbg, "Cannot acquire FcbOrDcb = %p shared without waiting\n", FcbOrDcb );
2589
2590 try_return( PostIrp = TRUE );
2591 }
2592
2593 PagingIoResourceAcquired = TRUE;
2594
2595 if (!Wait) {
2596
2597 IrpContext->FatIoContext->Wait.Async.Resource =
2598 FcbOrDcb->Header.PagingIoResource;
2599 }
2600
2601 //
2602 // Check to see if we collided with a MoveFile call, and if
2603 // so block until it completes.
2604 //
2605
2606 if (FcbOrDcb->MoveFileEvent) {
2607
2608 (VOID)KeWaitForSingleObject( FcbOrDcb->MoveFileEvent,
2609 Executive,
2610 KernelMode,
2611 FALSE,
2612 NULL );
2613 }
2614
2615 //
2616 // If we weren't called by the Lazy Writer, then this write
2617 // must be the result of a write-through or flush operation.
2618 // Setting the IrpContext flag, will cause DevIoSup.c to
2619 // write-through the data to the disk.
2620 //
2621
2622 if (!FlagOn((ULONG_PTR)IoGetTopLevelIrp(), FSRTL_CACHE_TOP_LEVEL_IRP)) {
2623
2624 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_WRITE_THROUGH );
2625 }
2626
2627 //
2628 // For the noncached case, assert that everything is sector
2629 // aligned.
2630 //
2631
2632 #ifdef _MSC_VER
2633 #pragma prefast( suppress:28931, "needed for debug build" )
2634 #endif
2635 SectorSize = (ULONG)Vcb->Bpb.BytesPerSector;
2636
2637 //
2638 // We make several assumptions about these two types of files.
2639 // Make sure all of them are true.
2640 //
2641
2642 NT_ASSERT( NonCachedIo && PagingIo );
2643 NT_ASSERT( ((StartingVbo | ByteCount) & (SectorSize - 1)) == 0 );
2644
2645
2646 //
2647 // These calls must always be within the allocation size, which is
2648 // conveniently the same as filesize, which conveniently doesn't
2649 // get reset to a hint value when we verify the volume.
2650 //
2651
2652 if (StartingVbo >= FcbOrDcb->Header.FileSize.LowPart) {
2653
2654 DebugTrace( 0, Dbg, "PagingIo dirent started beyond EOF.\n", 0 );
2655
2656 Irp->IoStatus.Information = 0;
2657
2658 try_return( Status = STATUS_SUCCESS );
2659 }
2660
2661 if ( StartingVbo + ByteCount > FcbOrDcb->Header.FileSize.LowPart ) {
2662
2663 DebugTrace( 0, Dbg, "PagingIo dirent extending beyond EOF.\n", 0 );
2664 ByteCount = FcbOrDcb->Header.FileSize.LowPart - StartingVbo;
2665 }
2666
2667
2668 //
2669 // Perform the actual IO
2670 //
2671
2672 if (FatNonCachedIo( IrpContext,
2673 Irp,
2674 FcbOrDcb,
2675 StartingVbo,
2676 ByteCount,
2677 ByteCount,
2678 0 ) == STATUS_PENDING) {
2679
2680 IrpContext->FatIoContext = NULL;
2681
2682 Irp = NULL;
2683
2684 try_return( Status = STATUS_PENDING );
2685 }
2686
2687 //
2688 // The transfer is either complete, or the Iosb contains the
2689 // appropriate status.
2690 //
2691 // Also, mark the volume as needing verification to automatically
2692 // clean up stuff.
2693 //
2694
2695 if (!NT_SUCCESS( Status = Irp->IoStatus.Status )) {
2696
2697 FatNormalizeAndRaiseStatus( IrpContext, Status );
2698 }
2699
2700 try_return( Status );
2701 }
2702
2703 //
2704 // This is the case of a user who opened a directory. No writing is
2705 // allowed.
2706 //
2707
2708 if ( TypeOfOpen == UserDirectoryOpen ) {
2709
2710 DebugTrace( 0, Dbg, "FatCommonWrite -> STATUS_INVALID_PARAMETER\n", 0);
2711
2712 try_return( Status = STATUS_INVALID_PARAMETER );
2713 }
2714
2715 //
2716 // If we get this far, something really serious is wrong.
2717 //
2718
2719 DebugDump("Illegal TypeOfOpen\n", 0, FcbOrDcb );
2720
2721 #ifdef _MSC_VER
2722 #pragma prefast( suppress:28159, "things are seriously wrong if we get here" )
2723 #endif
2724 FatBugCheck( TypeOfOpen, (ULONG_PTR) FcbOrDcb, 0 );
2725
2726 try_exit: NOTHING;
2727 \f
2728
2729 //
2730 // If the request was not posted and there is still an Irp,
2731 // deal with it.
2732 //
2733
2734 if (Irp) {
2735
2736 if ( !PostIrp ) {
2737
2738 ULONG ActualBytesWrote;
2739
2740 DebugTrace( 0, Dbg, "Completing request with status = %08lx\n",
2741 Status);
2742
2743 DebugTrace( 0, Dbg, " Information = %08lx\n",
2744 Irp->IoStatus.Information);
2745
2746 //
2747 // Record the total number of bytes actually written
2748 //
2749
2750 ActualBytesWrote = (ULONG)Irp->IoStatus.Information;
2751
2752 //
2753 // If the file was opened for Synchronous IO, update the current
2754 // file position.
2755 //
2756
2757 if (SynchronousIo && !PagingIo) {
2758
2759 FileObject->CurrentByteOffset.LowPart =
2760 StartingVbo + (NT_ERROR( Status ) ? 0 : ActualBytesWrote);
2761 }
2762
2763 //
2764 // The following are things we only do if we were successful
2765 //
2766
2767 if ( NT_SUCCESS( Status ) ) {
2768
2769 //
2770 // If this was not PagingIo, mark that the modify
2771 // time on the dirent needs to be updated on close.
2772 //
2773
2774 if ( !PagingIo ) {
2775
2776 SetFlag( FileObject->Flags, FO_FILE_MODIFIED );
2777 }
2778
2779 //
2780 // If we extended the file size and we are meant to
2781 // immediately update the dirent, do so. (This flag is
2782 // set for either Write Through or noncached, because
2783 // in either case the data and any necessary zeros are
2784 // actually written to the file.)
2785 //
2786
2787 if ( ExtendingFile && WriteFileSizeToDirent ) {
2788
2789 NT_ASSERT( FileObject->DeleteAccess || FileObject->WriteAccess );
2790
2791 FatSetFileSizeInDirent( IrpContext, FcbOrDcb, NULL );
2792
2793 //
2794 // Report that a file size has changed.
2795 //
2796
2797 FatNotifyReportChange( IrpContext,
2798 Vcb,
2799 FcbOrDcb,
2800 FILE_NOTIFY_CHANGE_SIZE,
2801 FILE_ACTION_MODIFIED );
2802 }
2803
2804 if ( ExtendingFile && !WriteFileSizeToDirent ) {
2805
2806 SetFlag( FileObject->Flags, FO_FILE_SIZE_CHANGED );
2807 }
2808
2809 if ( ExtendingValidData ) {
2810
2811 ULONG EndingVboWritten = StartingVbo + ActualBytesWrote;
2812
2813 //
2814 // Never set a ValidDataLength greater than FileSize.
2815 //
2816
2817 if ( FileSize < EndingVboWritten ) {
2818
2819 FcbOrDcb->Header.ValidDataLength.LowPart = FileSize;
2820
2821 } else {
2822
2823 FcbOrDcb->Header.ValidDataLength.LowPart = EndingVboWritten;
2824 }
2825
2826 //
2827 // Now, if we are noncached and the file is cached, we must
2828 // tell the cache manager about the VDL extension so that
2829 // async cached IO will not be optimized into zero-page faults
2830 // beyond where it believes VDL is.
2831 //
2832 // In the cached case, since Cc did the work, it has updated
2833 // itself already.
2834 //
2835
2836 if (NonCachedIo && CcIsFileCached(FileObject)) {
2837 CcSetFileSizes( FileObject, (PCC_FILE_SIZES)&FcbOrDcb->Header.AllocationSize );
2838 }
2839 }
2840
2841 }
2842
2843 //
2844 // Note that we have to unpin repinned Bcbs here after the above
2845 // work, but if we are going to post the request, we must do this
2846 // before the post (below).
2847 //
2848
2849 FatUnpinRepinnedBcbs( IrpContext );
2850
2851 } else {
2852
2853 //
2854 // Take action if the Oplock package is not going to post the Irp.
2855 //
2856
2857 if (!OplockPostIrp) {
2858
2859 FatUnpinRepinnedBcbs( IrpContext );
2860
2861 if ( ExtendingFile ) {
2862
2863 //
2864 // We need the PagingIo resource exclusive whenever we
2865 // pull back either file size or valid data length.
2866 //
2867
2868 NT_ASSERT( FcbOrDcb->Header.PagingIoResource != NULL );
2869
2870 (VOID)ExAcquireResourceExclusiveLite(FcbOrDcb->Header.PagingIoResource, TRUE);
2871
2872 FcbOrDcb->Header.FileSize.LowPart = InitialFileSize;
2873
2874 NT_ASSERT( FcbOrDcb->Header.FileSize.LowPart <= FcbOrDcb->Header.AllocationSize.LowPart );
2875
2876 //
2877 // Pull back the cache map as well
2878 //
2879
2880 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL) {
2881
2882 *CcGetFileSizePointer(FileObject) = FcbOrDcb->Header.FileSize;
2883 }
2884
2885 ExReleaseResourceLite( FcbOrDcb->Header.PagingIoResource );
2886 }
2887
2888 DebugTrace( 0, Dbg, "Passing request to Fsp\n", 0 );
2889
2890 Status = FatFsdPostRequest(IrpContext, Irp);
2891 }
2892 }
2893 }
2894
2895 } _SEH2_FINALLY {
2896
2897 DebugUnwind( FatCommonWrite );
2898
2899 if (_SEH2_AbnormalTermination()) {
2900
2901 //
2902 // Restore initial file size and valid data length
2903 //
2904
2905 if (ExtendingFile || ExtendingValidData) {
2906
2907 //
2908 // We got an error, pull back the file size if we extended it.
2909 //
2910
2911 FcbOrDcb->Header.FileSize.LowPart = InitialFileSize;
2912 FcbOrDcb->Header.ValidDataLength.LowPart = InitialValidDataLength;
2913
2914 NT_ASSERT( FcbOrDcb->Header.FileSize.LowPart <= FcbOrDcb->Header.AllocationSize.LowPart );
2915
2916 //
2917 // Pull back the cache map as well
2918 //
2919
2920 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL) {
2921
2922 *CcGetFileSizePointer(FileObject) = FcbOrDcb->Header.FileSize;
2923 }
2924 }
2925 }
2926
2927 //
2928 // Check if this needs to be backed out.
2929 //
2930
2931 if (UnwindOutstandingAsync) {
2932
2933 ExInterlockedAddUlong( &FcbOrDcb->NonPaged->OutstandingAsyncWrites,
2934 0xffffffff,
2935 &FatData.GeneralSpinLock );
2936 }
2937
2938 //
2939 // If the FcbOrDcb has been acquired, release it.
2940 //
2941
2942 if (FcbOrDcbAcquired && Irp) {
2943
2944 FatReleaseFcb( NULL, FcbOrDcb );
2945 }
2946
2947 if (PagingIoResourceAcquired && Irp) {
2948
2949 ExReleaseResourceLite( FcbOrDcb->Header.PagingIoResource );
2950 }
2951
2952 //
2953 // Complete the request if we didn't post it and no exception
2954 //
2955 // Note that FatCompleteRequest does the right thing if either
2956 // IrpContext or Irp are NULL
2957 //
2958
2959 if ( !PostIrp && !_SEH2_AbnormalTermination() ) {
2960
2961 FatCompleteRequest( IrpContext, Irp, Status );
2962 }
2963
2964 DebugTrace(-1, Dbg, "FatCommonWrite -> %08lx\n", Status );
2965 } _SEH2_END;
2966
2967 return Status;
2968 }
2969
2970 \f
2971 //
2972 // Local support routine
2973 //
2974
2975 VOID
2976 NTAPI
2977 FatDeferredFlushDpc (
2978 _In_ PKDPC Dpc,
2979 _In_opt_ PVOID DeferredContext,
2980 _In_opt_ PVOID SystemArgument1,
2981 _In_opt_ PVOID SystemArgument2
2982 )
2983
2984 /*++
2985
2986 Routine Description:
2987
2988 This routine is dispatched 1 second after a small write to a deferred
2989 write device that initialized the cache map. It exqueues an executive
2990 worker thread to perform the actual task of flushing the file.
2991
2992 Arguments:
2993
2994 DeferredContext - Contains the deferred flush context.
2995
2996 Return Value:
2997
2998 None.
2999
3000 --*/
3001
3002 {
3003 PDEFERRED_FLUSH_CONTEXT FlushContext;
3004
3005 UNREFERENCED_PARAMETER( SystemArgument1 );
3006 UNREFERENCED_PARAMETER( SystemArgument2 );
3007 UNREFERENCED_PARAMETER( Dpc );
3008
3009 FlushContext = (PDEFERRED_FLUSH_CONTEXT)DeferredContext;
3010
3011 //
3012 // Send it off
3013 //
3014
3015 ExInitializeWorkItem( &FlushContext->Item,
3016 FatDeferredFlush,
3017 FlushContext );
3018
3019 #ifdef _MSC_VER
3020 #pragma prefast( suppress:28159, "prefast indicates this API is obsolete, but it's ok for fastfat to keep using it" )
3021 #endif
3022 ExQueueWorkItem( &FlushContext->Item, CriticalWorkQueue );
3023 }
3024
3025 \f
3026 //
3027 // Local support routine
3028 //
3029
3030 VOID
3031 NTAPI
3032 FatDeferredFlush (
3033 _In_ PVOID Parameter
3034 )
3035
3036 /*++
3037
3038 Routine Description:
3039
3040 This routine performs the actual task of flushing the file.
3041
3042 Arguments:
3043
3044 DeferredContext - Contains the deferred flush context.
3045
3046 Return Value:
3047
3048 None.
3049
3050 --*/
3051
3052 {
3053
3054 PFILE_OBJECT File;
3055 PVCB Vcb;
3056 PFCB FcbOrDcb;
3057 PCCB Ccb;
3058
3059 PAGED_CODE();
3060
3061 File = ((PDEFERRED_FLUSH_CONTEXT)Parameter)->File;
3062
3063 FatDecodeFileObject(File, &Vcb, &FcbOrDcb, &Ccb);
3064 NT_ASSERT( FcbOrDcb != NULL );
3065
3066 //
3067 // Make us appear as a top level FSP request so that we will
3068 // receive any errors from the flush.
3069 //
3070
3071 IoSetTopLevelIrp( (PIRP)FSRTL_FSP_TOP_LEVEL_IRP );
3072
3073 ExAcquireResourceExclusiveLite( FcbOrDcb->Header.Resource, TRUE );
3074 ExAcquireResourceSharedLite( FcbOrDcb->Header.PagingIoResource, TRUE );
3075
3076 CcFlushCache( File->SectionObjectPointer, NULL, 0, NULL );
3077
3078 ExReleaseResourceLite( FcbOrDcb->Header.PagingIoResource );
3079 ExReleaseResourceLite( FcbOrDcb->Header.Resource );
3080
3081 IoSetTopLevelIrp( NULL );
3082
3083 ObDereferenceObject( File );
3084
3085 ExFreePool( Parameter );
3086
3087 }
3088
3089