1 /*++
2
3 Copyright (c) 1989-2000 Microsoft Corporation
4
5 Module Name:
6
7 DevIoSup.c
8
9 Abstract:
10
11 This module implements the low level disk read/write support for Fat.
12
13
14 --*/
15
16 #include "fatprocs.h"
17
18 //
19 // The Bug check file id for this module
20 //
21
22 #define BugCheckFileId (FAT_BUG_CHECK_DEVIOSUP)
23
24 //
25 // Local debug trace level
26 //
27
28 #define Dbg (DEBUG_TRACE_DEVIOSUP)
29
30 #define CollectDiskIoStats(VCB,FUNCTION,IS_USER_IO,COUNT) { \
31 PFILESYSTEM_STATISTICS Stats = &(VCB)->Statistics[KeGetCurrentProcessorNumber() % FatData.NumberProcessors].Common; \
32 if (IS_USER_IO) { \
33 if ((FUNCTION) == IRP_MJ_WRITE) { \
34 Stats->UserDiskWrites += (COUNT); \
35 } else { \
36 Stats->UserDiskReads += (COUNT); \
37 } \
38 } else { \
39 if ((FUNCTION) == IRP_MJ_WRITE) { \
40 Stats->MetaDataDiskWrites += (COUNT); \
41 } else { \
42 Stats->MetaDataDiskReads += (COUNT); \
43 } \
44 } \
45 }
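//
// CollectDiskIoStats selects the per-processor FILESYSTEM_STATISTICS slot for
// the volume and bumps either the user or the metadata disk read/write
// counters by COUNT.  A typical call site (see FatNonCachedIo below) looks
// like:
//
//     CollectDiskIoStats( FcbOrDcb->Vcb,
//                         IrpContext->MajorFunction,
//                         FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_USER_IO ),
//                         1 );
//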
46
47 typedef struct _FAT_SYNC_CONTEXT {
48
49 //
50 // Io status block for the request
51 //
52
53 IO_STATUS_BLOCK Iosb;
54
55 //
56 // Event to be signaled when the request completes
57 //
58
59 KEVENT Event;
60
61 } FAT_SYNC_CONTEXT, *PFAT_SYNC_CONTEXT;
62
63
64 //
65 // Completion Routine declarations
66 //
67
68 IO_COMPLETION_ROUTINE FatMultiSyncCompletionRoutine;
69
70 NTSTATUS
71 NTAPI
72 FatMultiSyncCompletionRoutine (
73 _In_ PDEVICE_OBJECT DeviceObject,
74 _In_ PIRP Irp,
75 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
76 );
77
78 IO_COMPLETION_ROUTINE FatMultiAsyncCompletionRoutine;
79
80 NTSTATUS
81 NTAPI
82 FatMultiAsyncCompletionRoutine (
83 _In_ PDEVICE_OBJECT DeviceObject,
84 _In_ PIRP Irp,
85 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
86 );
87
88 IO_COMPLETION_ROUTINE FatSpecialSyncCompletionRoutine;
89
90 NTSTATUS
91 NTAPI
92 FatSpecialSyncCompletionRoutine (
93 _In_ PDEVICE_OBJECT DeviceObject,
94 _In_ PIRP Irp,
95 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
96 );
97
98 IO_COMPLETION_ROUTINE FatSingleSyncCompletionRoutine;
99
100 NTSTATUS
101 NTAPI
102 FatSingleSyncCompletionRoutine (
103 _In_ PDEVICE_OBJECT DeviceObject,
104 _In_ PIRP Irp,
105 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
106 );
107
108 IO_COMPLETION_ROUTINE FatSingleAsyncCompletionRoutine;
109
110 NTSTATUS
111 NTAPI
112 FatSingleAsyncCompletionRoutine (
113 _In_ PDEVICE_OBJECT DeviceObject,
114 _In_ PIRP Irp,
115 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
116 );
117
118 IO_COMPLETION_ROUTINE FatPagingFileCompletionRoutine;
119
120 NTSTATUS
121 NTAPI
122 FatPagingFileCompletionRoutine (
123 _In_ PDEVICE_OBJECT DeviceObject,
124 _In_ PIRP Irp,
125 _In_reads_opt_(_Inexpressible_("varies")) PVOID MasterIrp
126 );
127
128 IO_COMPLETION_ROUTINE FatPagingFileCompletionRoutineCatch;
129
130 NTSTATUS
131 NTAPI
132 FatPagingFileCompletionRoutineCatch (
133 _In_ PDEVICE_OBJECT DeviceObject,
134 _In_ PIRP Irp,
135 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
136 );
137
138 VOID
139 FatSingleNonAlignedSync (
140 IN PIRP_CONTEXT IrpContext,
141 IN PVCB Vcb,
142 IN PUCHAR Buffer,
143 IN LBO Lbo,
144 IN ULONG ByteCount,
145 IN PIRP Irp
146 );
147
148 //
149 // The following macro decides whether to send a request directly to
150 // the device driver, or to other routines. It was meant to
151 // replace IoCallDriver as transparently as possible. It must only be
152 // called with a read or write Irp.
153 //
154 // NTSTATUS
155 // FatLowLevelReadWrite (
156 // PIRP_CONTEXT IrpContext,
157 // PDEVICE_OBJECT DeviceObject,
158 // PIRP Irp,
159 // PVCB Vcb
160 // );
161 //
162
163 #define FatLowLevelReadWrite(IRPCONTEXT,DO,IRP,VCB) ( \
164 IoCallDriver((DO),(IRP)) \
165 )
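//
// Note: today this wrapper simply forwards to IoCallDriver and ignores the
// IrpContext and Vcb arguments; callers still pass them so the call sites
// match the documented prototype above if the wrapper ever grows logic.
//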
166
167 //
168 // The following macro handles completion-time zeroing of buffers.
169 //
170
171 #define FatDoCompletionZero( I, C ) \
172 if ((C)->ZeroMdl) { \
173 NT_ASSERT( (C)->ZeroMdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA | \
174 MDL_SOURCE_IS_NONPAGED_POOL));\
175 if (NT_SUCCESS((I)->IoStatus.Status)) { \
176 RtlZeroMemory( (C)->ZeroMdl->MappedSystemVa, \
177 (C)->ZeroMdl->ByteCount ); \
178 } \
179 IoFreeMdl((C)->ZeroMdl); \
180 (C)->ZeroMdl = NULL; \
181 }
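//
// FatDoCompletionZero runs in the completion routines below.  If a ZeroMdl
// was attached to the context (FatNonCachedIo builds and pre-maps one when
// the transfer is rounded up past the caller's UserByteCount), a successful
// request has the rounding tail zeroed so stale data beyond the caller's
// range is never exposed, and the Mdl is then freed unconditionally.
//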
182
183 #if (NTDDI_VERSION >= NTDDI_WIN8)
184 #define FatUpdateIOCountersPCW(IsAWrite,Count) \
185 FsRtlUpdateDiskCounters( ((IsAWrite) ? 0 : (Count) ), \
186 ((IsAWrite) ? (Count) : 0) )
187 #else
188 #define FatUpdateIOCountersPCW(IsAWrite,Count)
189 #endif
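//
// On Win8+ targets FatUpdateIOCountersPCW feeds FsRtlUpdateDiskCounters
// (bytes read first, bytes written second); on older targets it compiles
// away, which is why call sites can invoke it unconditionally under
// FatDiskAccountingEnabled.
//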
190
191 #ifdef ALLOC_PRAGMA
192 #pragma alloc_text(PAGE, FatMultipleAsync)
193 #pragma alloc_text(PAGE, FatSingleAsync)
194 #pragma alloc_text(PAGE, FatSingleNonAlignedSync)
195 #pragma alloc_text(PAGE, FatWaitSync)
196 #pragma alloc_text(PAGE, FatLockUserBuffer)
197 #pragma alloc_text(PAGE, FatBufferUserBuffer)
198 #pragma alloc_text(PAGE, FatMapUserBuffer)
199 #pragma alloc_text(PAGE, FatNonCachedIo)
200 #pragma alloc_text(PAGE, FatNonCachedNonAlignedRead)
201 #pragma alloc_text(PAGE, FatPerformDevIoCtrl)
202 #endif
203
204 typedef struct FAT_PAGING_FILE_CONTEXT {
205 KEVENT Event;
206 PMDL RestoreMdl;
207 } FAT_PAGING_FILE_CONTEXT, *PFAT_PAGING_FILE_CONTEXT;
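//
// FAT_PAGING_FILE_CONTEXT lives on the stack of FatPagingFileIo for the legs
// where the master Irp itself is re-used with the reserve MDL: the routine
// parks the master Irp's original MDL in RestoreMdl and waits on Event,
// which the catch completion routine signals when the request comes back,
// before moving on to the next run.
//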
208
209 \f
210 VOID
211 FatPagingFileIo (
212 IN PIRP Irp,
213 IN PFCB Fcb
214 )
215
216 /*++
217
218 Routine Description:
219
220 This routine performs the non-cached disk io described in its parameters.
221 This routine never blocks, and should only be used with the paging
222 file since no completion processing is performed.
223
224 Arguments:
225
226 Irp - Supplies the requesting Irp.
227
228 Fcb - Supplies the file to act on.
229
230 Return Value:
231
232 None.
233
234 --*/
235
236 {
237 //
238 // Declare some local variables for enumeration through the
239 // runs of the file.
240 //
241
242 VBO Vbo;
243 ULONG ByteCount;
244
245 PMDL Mdl;
246 LBO NextLbo;
247 VBO NextVbo = 0;
248 ULONG NextByteCount;
249 ULONG RemainingByteCount;
250 BOOLEAN MustSucceed;
251
252 ULONG FirstIndex;
253 ULONG CurrentIndex;
254 ULONG LastIndex;
255
256 LBO LastLbo;
257 ULONG LastByteCount;
258
259 BOOLEAN MdlIsReserve = FALSE;
260 BOOLEAN IrpIsMaster = FALSE;
261 FAT_PAGING_FILE_CONTEXT Context;
262 LONG IrpCount;
263
264 PIRP AssocIrp;
265 PIO_STACK_LOCATION IrpSp;
266 PIO_STACK_LOCATION NextIrpSp;
267 ULONG BufferOffset;
268 PDEVICE_OBJECT DeviceObject;
269
270 #ifndef __REACTOS__
271 BOOLEAN IsAWrite = FALSE;
272 #endif
273
274 DebugTrace(+1, Dbg, "FatPagingFileIo\n", 0);
275 DebugTrace( 0, Dbg, "Irp = %p\n", Irp );
276 DebugTrace( 0, Dbg, "Fcb = %p\n", Fcb );
277
278 NT_ASSERT( FlagOn( Fcb->FcbState, FCB_STATE_PAGING_FILE ));
279
280 //
281 // Initialize some locals.
282 //
283
284 BufferOffset = 0;
285 DeviceObject = Fcb->Vcb->TargetDeviceObject;
286 IrpSp = IoGetCurrentIrpStackLocation( Irp );
287
288 Vbo = IrpSp->Parameters.Read.ByteOffset.LowPart;
289 ByteCount = IrpSp->Parameters.Read.Length;
290 #ifndef __REACTOS__
291 IsAWrite = (IrpSp->MajorFunction == IRP_MJ_WRITE);
292 #endif
293
294 MustSucceed = FatLookupMcbEntry( Fcb->Vcb, &Fcb->Mcb,
295 Vbo,
296 &NextLbo,
297 &NextByteCount,
298 &FirstIndex);
299
300 //
301 // If this run isn't present, something is very wrong.
302 //
303
304 if (!MustSucceed) {
305
306 #ifdef _MSC_VER
307 #pragma prefast( suppress:28159, "things are seriously wrong if we get here" )
308 #endif
309 FatBugCheck( Vbo, ByteCount, 0 );
310 }
311
312 #if (NTDDI_VERSION >= NTDDI_WIN8)
313
314 //
315 // Charge the IO to paging file to current thread
316 //
317
318 if (FatDiskAccountingEnabled) {
319
320 PETHREAD ThreadIssuingIo = PsGetCurrentThread();
321 BOOLEAN IsWriteOperation = FALSE;
322
323 if (IrpSp->MajorFunction == IRP_MJ_WRITE) {
324 IsWriteOperation = TRUE;
325 }
326
327 PsUpdateDiskCounters( PsGetThreadProcess( ThreadIssuingIo ),
328 (IsWriteOperation ? 0 : ByteCount ), // bytes to read
329 (IsWriteOperation ? ByteCount : 0), // bytes to write
330 (IsWriteOperation ? 0 : 1), // # of reads
331 (IsWriteOperation ? 1 : 0), // # of writes
332 0 );
333 }
334 #endif
335
336 // See if the write covers a single valid run, and if so pass
337 // it on.
338 //
339
340 if ( NextByteCount >= ByteCount ) {
341
342 DebugTrace( 0, Dbg, "Passing Irp on to Disk Driver\n", 0 );
343
344 //
345 // Setup the next IRP stack location for the disk driver beneath us.
346 //
347
348 NextIrpSp = IoGetNextIrpStackLocation( Irp );
349
350 NextIrpSp->MajorFunction = IrpSp->MajorFunction;
351 NextIrpSp->Parameters.Read.Length = ByteCount;
352 NextIrpSp->Parameters.Read.ByteOffset.QuadPart = NextLbo;
353
354 //
355 // Since this is Paging file IO, we'll just ignore the verify bit.
356 //
357
358 SetFlag( NextIrpSp->Flags, SL_OVERRIDE_VERIFY_VOLUME );
359
360 //
361 // Set up the completion routine address in our stack frame.
362 // This is only invoked on error or cancel, and just copies
363 // the error Status into master irp's iosb.
364 //
365 // If the error implies a media problem, it also enqueues a
366 // worker item to write out the dirty bit so that the next
367 // time we run we will do an autochk /r
368 //
369
370 IoSetCompletionRoutine( Irp,
371 &FatPagingFileCompletionRoutine,
372 Irp,
373 FALSE,
374 TRUE,
375 TRUE );
376
377 //
378 // Issue the read/write request
379 //
380 // If IoCallDriver returns an error, it has completed the Irp
381 // and the error will be dealt with as a normal IO error.
382 //
383
384 (VOID)IoCallDriver( DeviceObject, Irp );
385
386 //
387 // We just issued an IO to the storage stack, update the counters indicating so.
388 //
389
390 if (FatDiskAccountingEnabled) {
391
392 FatUpdateIOCountersPCW( IsAWrite, ByteCount );
393 }
394
395 DebugTrace(-1, Dbg, "FatPagingFileIo -> VOID\n", 0);
396 return;
397 }
398
399 //
400 // Find out how many runs there are.
401 //
402
403 MustSucceed = FatLookupMcbEntry( Fcb->Vcb, &Fcb->Mcb,
404 Vbo + ByteCount - 1,
405 &LastLbo,
406 &LastByteCount,
407 &LastIndex);
408
409 //
410 // If this run isn't present, something is very wrong.
411 //
412
413 if (!MustSucceed) {
414
415 #ifdef _MSC_VER
416 #pragma prefast( suppress:28159, "things are seriously wrong if we get here" )
417 #endif
418 FatBugCheck( Vbo + ByteCount - 1, 1, 0 );
419 }
420
421 CurrentIndex = FirstIndex;
422
423 //
424 // Now set up the Irp->IoStatus. It will be modified by the
425 // multi-completion routine in case of error or verify required.
426 //
427
428 Irp->IoStatus.Status = STATUS_SUCCESS;
429 Irp->IoStatus.Information = ByteCount;
430
431 //
432 // Loop while there are still byte writes to satisfy. The way we'll work this
433 // is to hope for the best - one associated IRP per run, which will let us be
434 // completely async after launching all the IO.
435 //
436 // IrpCount will indicate the remaining number of associated Irps to launch.
437 //
438 // All we have to do is make sure IrpCount doesn't hit zero before we're building
439 // the very last Irp. If it is positive when we're done, it means we have to
440 // wait for the rest of the associated Irps to come back before we complete the
441 // master by hand.
442 //
443 // This will keep the master from completing early.
444 //
445
446 Irp->AssociatedIrp.IrpCount = IrpCount = LastIndex - FirstIndex + 1;
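//
// For example, a transfer spanning three runs starts here with
// IrpCount == 3; each true associated Irp launched below drops the local
// IrpCount by one, and if any count remains when the loop exits (because we
// re-used the master Irp or bailed on an error) the tail of this routine
// drains the outstanding associated Irps and completes the master by hand.
//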
447
448 while (CurrentIndex <= LastIndex) {
449
450 //
451 // Reset this for unwinding purposes
452 //
453
454 AssocIrp = NULL;
455
456 //
457 // If next run is larger than we need, "ya get what ya need".
458 //
459
460 if (NextByteCount > ByteCount) {
461 NextByteCount = ByteCount;
462 }
463
464 RemainingByteCount = 0;
465
466 //
467 // Allocate and build a partial Mdl for the request.
468 //
469
470 Mdl = IoAllocateMdl( (PCHAR)Irp->UserBuffer + BufferOffset,
471 NextByteCount,
472 FALSE,
473 FALSE,
474 AssocIrp );
475
476 if (Mdl == NULL) {
477
478 //
479 // Pick up the reserve MDL
480 //
481
482 KeWaitForSingleObject( &FatReserveEvent, Executive, KernelMode, FALSE, NULL );
483
484 Mdl = FatReserveMdl;
485 MdlIsReserve = TRUE;
486
487 //
488 // Trim to fit the size of the reserve MDL.
489 //
490
491 if (NextByteCount > FAT_RESERVE_MDL_SIZE * PAGE_SIZE) {
492
493 RemainingByteCount = NextByteCount - FAT_RESERVE_MDL_SIZE * PAGE_SIZE;
494 NextByteCount = FAT_RESERVE_MDL_SIZE * PAGE_SIZE;
495 }
496 }
497
498 IoBuildPartialMdl( Irp->MdlAddress,
499 Mdl,
500 (PCHAR)Irp->UserBuffer + BufferOffset,
501 NextByteCount );
502
503 //
504 // Now that we have properly bounded this piece of the transfer, it is
505 // time to read/write it. We can simplify life slightly by always
506 // re-using the master IRP for cases where we use the reserve MDL,
507 // since we'll always be synchronous for those and can use a single
508 // completion context on our local stack.
509 //
510 // We also must prevent ourselves from issuing an associated IRP that would
511 // complete the master UNLESS this is the very last IRP we'll issue.
512 //
513 // This logic looks a bit complicated, but is hopefully understandable.
514 //
515
516 if (!MdlIsReserve &&
517 (IrpCount != 1 ||
518 (CurrentIndex == LastIndex &&
519 RemainingByteCount == 0))) {
520
521 AssocIrp = IoMakeAssociatedIrp( Irp, (CCHAR)(DeviceObject->StackSize + 1) );
522 }
523
524 if (AssocIrp == NULL) {
525
526 AssocIrp = Irp;
527 IrpIsMaster = TRUE;
528
529 //
530 // We need to drain the associated Irps so we can reliably figure out if
531 // the master Irp is showing a failed status, in which case we bail out
532 // immediately - as opposed to putting the value in the status field in
533 // jeopardy due to our re-use of the master Irp.
534 //
535
536 while (Irp->AssociatedIrp.IrpCount != IrpCount) {
537
538 KeDelayExecutionThread (KernelMode, FALSE, &Fat30Milliseconds);
539 }
540
541 //
542 // Note that since we failed to launch this associated Irp, the completion
543 // code at the bottom will take care of completing the master Irp.
544 //
545
546 if (!NT_SUCCESS(Irp->IoStatus.Status)) {
547
548 NT_ASSERT( IrpCount );
549 break;
550 }
551
552 } else {
553
554 //
555 // Indicate we used an associated Irp.
556 //
557
558 IrpCount -= 1;
559 }
560
561 //
562 // With an associated IRP, we must take over the first stack location so
563 // we can have one to put the completion routine on. When re-using the
564 // master IRP, it's already there.
565 //
566
567 if (!IrpIsMaster) {
568
569 //
570 // Get the first IRP stack location in the associated Irp
571 //
572
573 IoSetNextIrpStackLocation( AssocIrp );
574 NextIrpSp = IoGetCurrentIrpStackLocation( AssocIrp );
575
576 //
577 // Setup the Stack location to describe our read.
578 //
579
580 NextIrpSp->MajorFunction = IrpSp->MajorFunction;
581 NextIrpSp->Parameters.Read.Length = NextByteCount;
582 NextIrpSp->Parameters.Read.ByteOffset.QuadPart = Vbo;
583
584 //
585 // We also need the VolumeDeviceObject in the Irp stack in case
586 // we take the failure path.
587 //
588
589 NextIrpSp->DeviceObject = IrpSp->DeviceObject;
590
591 } else {
592
593 //
594 // Save the MDL in the IRP and prepare the stack
595 // context for the completion routine.
596 //
597
598 KeInitializeEvent( &Context.Event, SynchronizationEvent, FALSE );
599 Context.RestoreMdl = Irp->MdlAddress;
600 }
601
602 //
603 // And drop our Mdl into the Irp.
604 //
605
606 AssocIrp->MdlAddress = Mdl;
607
608 //
609 // Set up the completion routine address in our stack frame.
610 // For true associated IRPs, this is only invoked on error or
611 // cancel, and just copies the error Status into master irp's
612 // iosb.
613 //
614 // If the error implies a media problem, it also enqueues a
615 // worker item to write out the dirty bit so that the next
616 // time we run we will do an autochk /r
617 //
618
619 if (IrpIsMaster) {
620
621 IoSetCompletionRoutine( AssocIrp,
622 FatPagingFileCompletionRoutineCatch,
623 &Context,
624 TRUE,
625 TRUE,
626 TRUE );
627
628 } else {
629
630 IoSetCompletionRoutine( AssocIrp,
631 FatPagingFileCompletionRoutine,
632 Irp,
633 FALSE,
634 TRUE,
635 TRUE );
636 }
637
638 //
639 // Setup the next IRP stack location for the disk driver beneath us.
640 //
641
642 NextIrpSp = IoGetNextIrpStackLocation( AssocIrp );
643
644 //
645 // Since this is paging file IO, we'll just ignore the verify bit.
646 //
647
648 SetFlag( NextIrpSp->Flags, SL_OVERRIDE_VERIFY_VOLUME );
649
650 //
651 // Setup the Stack location to do a read from the disk driver.
652 //
653
654 NextIrpSp->MajorFunction = IrpSp->MajorFunction;
655 NextIrpSp->Parameters.Read.Length = NextByteCount;
656 NextIrpSp->Parameters.Read.ByteOffset.QuadPart = NextLbo;
657
658 (VOID)IoCallDriver( DeviceObject, AssocIrp );
659
660 //
661 // We just issued an IO to the storage stack, update the counters indicating so.
662 //
663
664 if (FatDiskAccountingEnabled) {
665
666 FatUpdateIOCountersPCW( IsAWrite, (ULONG64)NextByteCount );
667 }
668
669 //
670 // Wait for the Irp in the catch case and drop the flags.
671 //
672
673 if (IrpIsMaster) {
674
675 KeWaitForSingleObject( &Context.Event, Executive, KernelMode, FALSE, NULL );
676 IrpIsMaster = MdlIsReserve = FALSE;
677
678 //
679 // If the Irp is showing a failed status, there is no point in continuing.
680 // In doing so, we get to avoid squirreling away the failed status in case
681 // we were to re-use the master irp again.
682 //
683 // Note that since we re-used the master, we must not have issued the "last"
684 // associated Irp, and thus the completion code at the bottom will take care
685 // of that for us.
686 //
687
688 if (!NT_SUCCESS(Irp->IoStatus.Status)) {
689
690 NT_ASSERT( IrpCount );
691 break;
692 }
693 }
694
695 //
696 // Now adjust everything for the next pass through the loop.
697 //
698
699 Vbo += NextByteCount;
700 BufferOffset += NextByteCount;
701 ByteCount -= NextByteCount;
702
703 //
704 // Try to lookup the next run, if we are not done and we got
705 // all the way through the current run.
706 //
707
708 if (RemainingByteCount) {
709
710 //
711 // Advance the Lbo/Vbo if we have more to do in the current run.
712 //
713
714 NextLbo += NextByteCount;
715 NextVbo += NextByteCount;
716
717 NextByteCount = RemainingByteCount;
718
719 } else {
720
721 CurrentIndex += 1;
722
723 if ( CurrentIndex <= LastIndex ) {
724
725 NT_ASSERT( ByteCount != 0 );
726
727 FatGetNextMcbEntry( Fcb->Vcb, &Fcb->Mcb,
728 CurrentIndex,
729 &NextVbo,
730 &NextLbo,
731 &NextByteCount );
732
733 NT_ASSERT( NextVbo == Vbo );
734 }
735 }
736 } // while ( CurrentIndex <= LastIndex )
737
738 //
739 // If we didn't get enough associated Irps going to make this asynchronous, we
740 // twiddle our thumbs and wait for those we did launch to complete.
741 //
742
743 if (IrpCount) {
744
745 while (Irp->AssociatedIrp.IrpCount != IrpCount) {
746
747 KeDelayExecutionThread (KernelMode, FALSE, &Fat30Milliseconds);
748 }
749
750 IoCompleteRequest( Irp, IO_DISK_INCREMENT );
751 }
752
753 DebugTrace(-1, Dbg, "FatPagingFileIo -> VOID\n", 0);
754 return;
755 }
756
757 #if (NTDDI_VERSION >= NTDDI_WIN8)
758
759 VOID
760 FatUpdateDiskStats (
761 IN PIRP_CONTEXT IrpContext,
762 IN PIRP Irp,
763 IN ULONG ByteCount
764 )
765 /*++
766
767 Routine Description:
768
769 Charge the appropriate process for the IO this IRP will cause.
770
771 Arguments:
772
773 IrpContext - The Irp Context
774
775 Irp - Supplies the requesting Irp.
776
777 ByteCount - The length of the operation.
778
779 Return Value:
780
781 None.
782
783 --*/
784
785 {
786 PETHREAD OriginatingThread = NULL;
787 ULONG NumReads = 0;
788 ULONG NumWrites = 0;
789 ULONGLONG BytesToRead = 0;
790 ULONGLONG BytesToWrite = 0;
791
792 //
793 // Here we attempt to charge the IO back to the originating process.
794 // - These checks are intended to cover the following cases:
795 // o Buffered sync reads
796 // o Unbuffered sync read
797 // o Inline metadata reads
798 // o memory mapped reads (in-line faulting of data)
799 //
800
801 if (IrpContext->MajorFunction == IRP_MJ_READ) {
802
803 NumReads++;
804 BytesToRead = ByteCount;
805
806 if ((Irp->Tail.Overlay.Thread != NULL) &&
807 !IoIsSystemThread( Irp->Tail.Overlay.Thread )) {
808
809 OriginatingThread = Irp->Tail.Overlay.Thread;
810
811 } else if (!IoIsSystemThread( PsGetCurrentThread() )) {
812
813 OriginatingThread = PsGetCurrentThread();
814
815 //
816 // We couldn't find a non-system entity, so this should be charged to system.
817 // Do so only if we are top level.
818 // If we are not top-level then the read was initiated by someone like Cc (read ahead)
819 // who should have already accounted for this IO.
820 //
821
822 } else if (IoIsSystemThread( PsGetCurrentThread() ) &&
823 (IoGetTopLevelIrp() == Irp)) {
824
825 OriginatingThread = PsGetCurrentThread();
826 }
827
828 //
829 // Charge the write to Originating process.
830 // Intended to cover the following writes:
831 // - Unbuffered sync write
832 // - unbuffered async write
833 //
834 // If we are not top-level, then it should already have been accounted for
835 // somewhere else (Cc).
836 //
837
838 } else if (IrpContext->MajorFunction == IRP_MJ_WRITE) {
839
840 NumWrites++;
841 BytesToWrite = ByteCount;
842
843 if (IoGetTopLevelIrp() == Irp) {
844
845 if ((Irp->Tail.Overlay.Thread != NULL) &&
846 !IoIsSystemThread( Irp->Tail.Overlay.Thread )) {
847
848 OriginatingThread = Irp->Tail.Overlay.Thread;
849
850 } else {
851
852 OriginatingThread = PsGetCurrentThread();
853 }
854
855 //
856 // For mapped page writes
857 //
858
859 } else if (IoGetTopLevelIrp() == (PIRP)FSRTL_MOD_WRITE_TOP_LEVEL_IRP) {
860
861 OriginatingThread = PsGetCurrentThread();
862 }
863 }
864
865 if (OriginatingThread != NULL) {
866
867 PsUpdateDiskCounters( PsGetThreadProcess( OriginatingThread ),
868 BytesToRead,
869 BytesToWrite,
870 NumReads,
871 NumWrites,
872 0 );
873 }
874 }
875
876 #endif
877
878 \f
879
880 _Requires_lock_held_(_Global_critical_region_)
881 NTSTATUS
882 FatNonCachedIo (
883 IN PIRP_CONTEXT IrpContext,
884 IN PIRP Irp,
885 IN PFCB FcbOrDcb,
886 IN ULONG StartingVbo,
887 IN ULONG ByteCount,
888 IN ULONG UserByteCount,
889 IN ULONG StreamFlags
890 )
891 /*++
892
893 Routine Description:
894
895 This routine performs the non-cached disk io described in its parameters.
896 The choice of a single run is made if possible, otherwise multiple runs
897 are executed.
898
899 Arguments:
900
901 IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.
902
903 Irp - Supplies the requesting Irp.
904
905 FcbOrDcb - Supplies the file to act on.
906
907 StartingVbo - The starting point for the operation.
908
909 ByteCount - The length of the operation.
910
911 UserByteCount - The last byte the user can see, rest to be zeroed.
912
913 StreamFlags - flag to indicate special attributes for a NonCachedIo.
914
915 Return Value:
916
917 STATUS_PENDING if the request was issued asynchronously, otherwise the final Irp->IoStatus.Status.
918
919 --*/
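//
// In outline: the routine locks the caller's buffer, builds and pre-maps a
// ZeroMdl for any rounding tail past UserByteCount, and then either hands a
// single-run transfer straight to FatSingleAsync or collects the runs into
// an IoRuns array (on the stack when small enough) for FatMultipleAsync.
// When IRP_CONTEXT_FLAG_WAIT is set the routine blocks in FatWaitSync and
// returns the final status; otherwise it returns STATUS_PENDING.
//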
920
921 {
922
923 //
924 // Declare some local variables for enumeration through the
925 // runs of the file, and an array to store parameters for
926 // parallel I/Os
927 //
928
929 BOOLEAN Wait;
930
931 LBO NextLbo;
932 VBO NextVbo;
933 ULONG NextByteCount;
934 BOOLEAN NextIsAllocated;
935
936 LBO LastLbo;
937 ULONG LastByteCount;
938 BOOLEAN LastIsAllocated;
939
940 BOOLEAN EndOnMax;
941
942 ULONG FirstIndex;
943 ULONG CurrentIndex;
944 ULONG LastIndex;
945
946 ULONG NextRun;
947 ULONG BufferOffset;
948 ULONG OriginalByteCount;
949
950
951
952 IO_RUN StackIoRuns[FAT_MAX_IO_RUNS_ON_STACK];
953 PIO_RUN IoRuns;
954
955
956 PAGED_CODE();
957
958 UNREFERENCED_PARAMETER( StreamFlags );
959
960 DebugTrace(+1, Dbg, "FatNonCachedIo\n", 0);
961 DebugTrace( 0, Dbg, "Irp = %p\n", Irp );
962 DebugTrace( 0, Dbg, "MajorFunction = %08lx\n", IrpContext->MajorFunction );
963 DebugTrace( 0, Dbg, "FcbOrDcb = %p\n", FcbOrDcb );
964 DebugTrace( 0, Dbg, "StartingVbo = %08lx\n", StartingVbo );
965 DebugTrace( 0, Dbg, "ByteCount = %08lx\n", ByteCount );
966
967 if (!FlagOn(Irp->Flags, IRP_PAGING_IO)) {
968
969 PFILE_SYSTEM_STATISTICS Stats =
970 &FcbOrDcb->Vcb->Statistics[KeGetCurrentProcessorNumber() % FatData.NumberProcessors];
971
972 if (IrpContext->MajorFunction == IRP_MJ_READ) {
973 Stats->Fat.NonCachedReads += 1;
974 Stats->Fat.NonCachedReadBytes += ByteCount;
975 } else {
976 Stats->Fat.NonCachedWrites += 1;
977 Stats->Fat.NonCachedWriteBytes += ByteCount;
978 }
979 }
980
981 //
982 // Initialize some locals.
983 //
984
985 NextRun = 0;
986 BufferOffset = 0;
987 OriginalByteCount = ByteCount;
988
989 Wait = BooleanFlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT);
990
991 #if (NTDDI_VERSION >= NTDDI_WIN8)
992
993 //
994 // Disk IO accounting
995 //
996
997 if (FatDiskAccountingEnabled) {
998
999 FatUpdateDiskStats( IrpContext,
1000 Irp,
1001 ByteCount );
1002 }
1003 #endif
1004
1005 //
1006 // For nonbuffered I/O, we need the buffer locked in all
1007 // cases.
1008 //
1009 // This call may raise. If this call succeeds and a subsequent
1010 // condition is raised, the buffers are unlocked automatically
1011 // by the I/O system when the request is completed, via the
1012 // Irp->MdlAddress field.
1013 //
1014
1015 FatLockUserBuffer( IrpContext,
1016 Irp,
1017 (IrpContext->MajorFunction == IRP_MJ_READ) ?
1018 IoWriteAccess : IoReadAccess,
1019 ByteCount );
1020
1021
1022
1023 //
1024 // No zeroing for trailing sectors if requested.
1025 // Otherwise setup the required zeroing for read requests.
1026 //
1027
1028
1029 if (UserByteCount != ByteCount) {
1030
1031
1032 PMDL Mdl;
1033
1034 NT_ASSERT( ByteCount > UserByteCount );
1035 _Analysis_assume_(ByteCount > UserByteCount);
1036
1037 Mdl = IoAllocateMdl( (PUCHAR) Irp->UserBuffer + UserByteCount,
1038 ByteCount - UserByteCount,
1039 FALSE,
1040 FALSE,
1041 NULL );
1042
1043 if (Mdl == NULL) {
1044
1045 FatRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
1046 }
1047
1048 IoBuildPartialMdl( Irp->MdlAddress,
1049 Mdl,
1050 (PUCHAR) Irp->UserBuffer + UserByteCount,
1051 ByteCount - UserByteCount );
1052
1053 IrpContext->FatIoContext->ZeroMdl = Mdl;
1054
1055 //
1056 // Map the MDL now so we can't fail at IO completion time. Note
1057 // that this will be only a single page.
1058 //
1059
1060 #ifndef __REACTOS__
1061 if (MmGetSystemAddressForMdlSafe( Mdl, NormalPagePriority | MdlMappingNoExecute ) == NULL) {
1062 #else
1063 if (MmGetSystemAddressForMdlSafe( Mdl, NormalPagePriority ) == NULL) {
1064 #endif
1065
1066 FatRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
1067 }
1068 }
1069
1070
1071 //
1072 // Try to lookup the first run. If there is just a single run,
1073 // we may just be able to pass it on.
1074 //
1075
1076 FatLookupFileAllocation( IrpContext,
1077 FcbOrDcb,
1078 StartingVbo,
1079 &NextLbo,
1080 &NextByteCount,
1081 &NextIsAllocated,
1082 &EndOnMax,
1083 &FirstIndex );
1084
1085 //
1086 // We just added the allocation, thus there must be at least
1087 // one entry in the mcb corresponding to our write, ie.
1088 // NextIsAllocated must be true. If not, the pre-existing file
1089 // must have an allocation error.
1090 //
1091
1092 if ( !NextIsAllocated ) {
1093
1094 FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
1095
1096 FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
1097 }
1098
1099 NT_ASSERT( NextByteCount != 0 );
1100
1101 //
1102 // If the request was not aligned correctly, read in the first
1103 // part first.
1104 //
1105
1106
1107 //
1108 // See if the write covers a single valid run, and if so pass
1109 // it on. We must bias this by the byte that is lost at the
1110 // end of the maximal file.
1111 //
1112
1113 if ( NextByteCount >= ByteCount - (EndOnMax ? 1 : 0)) {
1114
1115 if (FlagOn(Irp->Flags, IRP_PAGING_IO)) {
1116 CollectDiskIoStats(FcbOrDcb->Vcb, IrpContext->MajorFunction,
1117 FlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_USER_IO), 1);
1118 } else {
1119
1120 PFILE_SYSTEM_STATISTICS Stats =
1121 &FcbOrDcb->Vcb->Statistics[KeGetCurrentProcessorNumber() % FatData.NumberProcessors];
1122
1123 if (IrpContext->MajorFunction == IRP_MJ_READ) {
1124 Stats->Fat.NonCachedDiskReads += 1;
1125 } else {
1126 Stats->Fat.NonCachedDiskWrites += 1;
1127 }
1128 }
1129
1130 DebugTrace( 0, Dbg, "Passing 1 Irp on to Disk Driver\n", 0 );
1131
1132 FatSingleAsync( IrpContext,
1133 FcbOrDcb->Vcb,
1134 NextLbo,
1135 ByteCount,
1136 Irp );
1137
1138 } else {
1139
1140 //
1141 // If we can't wait, and there are more runs than we can handle,
1142 // we will have to post this request.
1143 //
1144
1145 FatLookupFileAllocation( IrpContext,
1146 FcbOrDcb,
1147 StartingVbo + ByteCount - 1,
1148 &LastLbo,
1149 &LastByteCount,
1150 &LastIsAllocated,
1151 &EndOnMax,
1152 &LastIndex );
1153
1154 //
1155 // Since we already added the allocation for the whole
1156 // write, assert that we find runs until ByteCount == 0
1157 // Otherwise this file is corrupt.
1158 //
1159
1160 if ( !LastIsAllocated ) {
1161
1162 FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
1163
1164 FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
1165 }
1166
1167 if (LastIndex - FirstIndex + 1 > FAT_MAX_IO_RUNS_ON_STACK) {
1168
1169 IoRuns = FsRtlAllocatePoolWithTag( PagedPool,
1170 (LastIndex - FirstIndex + 1) * sizeof(IO_RUN),
1171 TAG_IO_RUNS );
1172
1173 } else {
1174
1175 IoRuns = StackIoRuns;
1176 }
1177
1178 NT_ASSERT( LastIndex != FirstIndex );
1179
1180 CurrentIndex = FirstIndex;
1181
1182 //
1183 // Loop while there are still byte writes to satisfy.
1184 //
1185
1186 while (CurrentIndex <= LastIndex) {
1187
1188
1189 NT_ASSERT( NextByteCount != 0);
1190 NT_ASSERT( ByteCount != 0);
1191
1192 //
1193 // If next run is larger than we need, "ya get what you need".
1194 //
1195
1196 if (NextByteCount > ByteCount) {
1197 NextByteCount = ByteCount;
1198 }
1199
1200 //
1201 // Now that we have properly bounded this piece of the
1202 // transfer, it is time to write it.
1203 //
1204 // We remember each piece of a parallel run by saving the
1205 // essential information in the IoRuns array. The transfers
1206 // are started up in parallel below.
1207 //
1208
1209 IoRuns[NextRun].Vbo = StartingVbo;
1210 IoRuns[NextRun].Lbo = NextLbo;
1211 IoRuns[NextRun].Offset = BufferOffset;
1212 IoRuns[NextRun].ByteCount = NextByteCount;
1213 NextRun += 1;
1214
1215 //
1216 // Now adjust everything for the next pass through the loop.
1217 //
1218
1219 StartingVbo += NextByteCount;
1220 BufferOffset += NextByteCount;
1221 ByteCount -= NextByteCount;
1222
1223 //
1224 // Try to lookup the next run (if we are not done).
1225 //
1226
1227 CurrentIndex += 1;
1228
1229 if ( CurrentIndex <= LastIndex ) {
1230
1231 NT_ASSERT( ByteCount != 0 );
1232
1233 FatGetNextMcbEntry( FcbOrDcb->Vcb, &FcbOrDcb->Mcb,
1234 CurrentIndex,
1235 &NextVbo,
1236 &NextLbo,
1237 &NextByteCount );
1238
1239
1240 NT_ASSERT(NextVbo == StartingVbo);
1241
1242
1243 }
1244
1245 } // while ( CurrentIndex <= LastIndex )
1246
1247 //
1248 // Now set up the Irp->IoStatus. It will be modified by the
1249 // multi-completion routine in case of error or verify required.
1250 //
1251
1252 Irp->IoStatus.Status = STATUS_SUCCESS;
1253 Irp->IoStatus.Information = OriginalByteCount;
1254
1255 if (FlagOn(Irp->Flags, IRP_PAGING_IO)) {
1256 CollectDiskIoStats(FcbOrDcb->Vcb, IrpContext->MajorFunction,
1257 FlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_USER_IO), NextRun);
1258 }
1259
1260 //
1261 // OK, now do the I/O.
1262 //
1263
1264 _SEH2_TRY {
1265
1266 DebugTrace( 0, Dbg, "Passing Multiple Irps on to Disk Driver\n", 0 );
1267
1268 FatMultipleAsync( IrpContext,
1269 FcbOrDcb->Vcb,
1270 Irp,
1271 NextRun,
1272 IoRuns );
1273
1274 } _SEH2_FINALLY {
1275
1276 if (IoRuns != StackIoRuns) {
1277
1278 ExFreePool( IoRuns );
1279 }
1280 } _SEH2_END;
1281 }
1282
1283 if (!Wait) {
1284
1285 DebugTrace(-1, Dbg, "FatNonCachedIo -> STATUS_PENDING\n", 0);
1286 return STATUS_PENDING;
1287 }
1288
1289 FatWaitSync( IrpContext );
1290
1291
1292 DebugTrace(-1, Dbg, "FatNonCachedIo -> 0x%08lx\n", Irp->IoStatus.Status);
1293 return Irp->IoStatus.Status;
1294 }
1295
1296 \f
1297 _Requires_lock_held_(_Global_critical_region_)
1298 VOID
1299 FatNonCachedNonAlignedRead (
1300 IN PIRP_CONTEXT IrpContext,
1301 IN PIRP Irp,
1302 IN PFCB FcbOrDcb,
1303 IN ULONG StartingVbo,
1304 IN ULONG ByteCount
1305 )
1306
1307 /*++
1308
1309 Routine Description:
1310
1311 This routine performs the non-cached disk io described in its parameters.
1312 This routine differs from the above in that the range does not have to be
1313 sector aligned. This is accomplished with the use of intermediate buffers.
1314
1315 Arguments:
1316
1317 IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.
1318
1319 Irp - Supplies the requesting Irp.
1320
1321 FcbOrDcb - Supplies the file to act on.
1322
1323 StartingVbo - The starting point for the operation.
1324
1325 ByteCount - The length of the operation.
1326
1327 Return Value:
1328
1329 None.
1330
1331 --*/
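//
// In outline: the misaligned head and tail of the range are read a whole
// sector at a time into the single-sector DiskBuffer with
// FatSingleNonAlignedSync and the interesting bytes are copied out to the
// user's buffer; the sector-aligned middle, if any, is then described by a
// partial MDL and handed to FatNonCachedIo as a normal aligned transfer.
//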
1332
1333 {
1334 //
1335 // Declare some local variables for enumeration through the
1336 // runs of the file, and an array to store parameters for
1337 // parallel I/Os
1338 //
1339
1340 LBO NextLbo;
1341 ULONG NextByteCount;
1342 BOOLEAN NextIsAllocated;
1343
1344 ULONG SectorSize;
1345 ULONG BytesToCopy;
1346 ULONG OriginalByteCount;
1347 ULONG OriginalStartingVbo;
1348
1349 BOOLEAN EndOnMax;
1350
1351 PUCHAR UserBuffer;
1352 PUCHAR DiskBuffer = NULL;
1353
1354 PMDL Mdl;
1355 PMDL SavedMdl;
1356 PVOID SavedUserBuffer;
1357
1358 PAGED_CODE();
1359
1360 DebugTrace(+1, Dbg, "FatNonCachedNonAlignedRead\n", 0);
1361 DebugTrace( 0, Dbg, "Irp = %p\n", Irp );
1362 DebugTrace( 0, Dbg, "MajorFunction = %08lx\n", IrpContext->MajorFunction );
1363 DebugTrace( 0, Dbg, "FcbOrDcb = %p\n", FcbOrDcb );
1364 DebugTrace( 0, Dbg, "StartingVbo = %08lx\n", StartingVbo );
1365 DebugTrace( 0, Dbg, "ByteCount = %08lx\n", ByteCount );
1366
1367 //
1368 // Initialize some locals.
1369 //
1370
1371 OriginalByteCount = ByteCount;
1372 OriginalStartingVbo = StartingVbo;
1373 SectorSize = FcbOrDcb->Vcb->Bpb.BytesPerSector;
1374
1375 NT_ASSERT( FlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT) );
1376
1377 //
1378 // For nonbuffered I/O, we need the buffer locked in all
1379 // cases.
1380 //
1381 // This call may raise. If this call succeeds and a subsequent
1382 // condition is raised, the buffers are unlocked automatically
1383 // by the I/O system when the request is completed, via the
1384 // Irp->MdlAddress field.
1385 //
1386
1387 FatLockUserBuffer( IrpContext,
1388 Irp,
1389 IoWriteAccess,
1390 ByteCount );
1391
1392 UserBuffer = FatMapUserBuffer( IrpContext, Irp );
1393
1394 //
1395 // Allocate the local buffer
1396 //
1397
1398 #ifndef __REACTOS__
1399 DiskBuffer = FsRtlAllocatePoolWithTag( NonPagedPoolNxCacheAligned,
1400 #else
1401 DiskBuffer = FsRtlAllocatePoolWithTag( NonPagedPoolCacheAligned,
1402 #endif
1403 (ULONG) ROUND_TO_PAGES( SectorSize ),
1404 TAG_IO_BUFFER );
1405
1406 //
1407 // We use a try block here to ensure the buffer is freed, and to
1408 // fill in the correct byte count in the Iosb.Information field.
1409 //
1410
1411 _SEH2_TRY {
1412
1413 //
1414 // If the beginning of the request was not aligned correctly, read in
1415 // the first part first.
1416 //
1417
1418 if ( StartingVbo & (SectorSize - 1) ) {
1419
1420 VBO Hole;
1421
1422 //
1423 // Try to lookup the first run.
1424 //
1425
1426 FatLookupFileAllocation( IrpContext,
1427 FcbOrDcb,
1428 StartingVbo,
1429 &NextLbo,
1430 &NextByteCount,
1431 &NextIsAllocated,
1432 &EndOnMax,
1433 NULL );
1434
1435 //
1436 // We just added the allocation, thus there must be at least
1437 // one entry in the mcb corresponding to our write, ie.
1438 // NextIsAllocated must be true. If not, the pre-existing file
1439 // must have an allocation error.
1440 //
1441
1442 if ( !NextIsAllocated ) {
1443
1444 FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
1445
1446 FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
1447 }
1448
1449 FatSingleNonAlignedSync( IrpContext,
1450 FcbOrDcb->Vcb,
1451 DiskBuffer,
1452 NextLbo & ~((LONG)SectorSize - 1),
1453 SectorSize,
1454 Irp );
1455
1456 if (!NT_SUCCESS( Irp->IoStatus.Status )) {
1457
1458 try_return( NOTHING );
1459 }
1460
1461 //
1462 // Now copy the part of the first sector that we want to the user
1463 // buffer.
1464 //
1465
1466 Hole = StartingVbo & (SectorSize - 1);
1467
1468 BytesToCopy = ByteCount >= SectorSize - Hole ?
1469 SectorSize - Hole : ByteCount;
1470
1471 RtlCopyMemory( UserBuffer, DiskBuffer + Hole, BytesToCopy );
1472
1473 StartingVbo += BytesToCopy;
1474 ByteCount -= BytesToCopy;
1475
1476 if ( ByteCount == 0 ) {
1477
1478 try_return( NOTHING );
1479 }
1480 }
1481
1482 NT_ASSERT( (StartingVbo & (SectorSize - 1)) == 0 );
1483
1484 //
1485 // If there is a tail part that is not sector aligned, read it.
1486 //
1487
1488 if ( ByteCount & (SectorSize - 1) ) {
1489
1490 VBO LastSectorVbo;
1491
1492 LastSectorVbo = StartingVbo + (ByteCount & ~(SectorSize - 1));
1493
1494 //
1495 // Try to lookup the last part of the requested range.
1496 //
1497
1498 FatLookupFileAllocation( IrpContext,
1499 FcbOrDcb,
1500 LastSectorVbo,
1501 &NextLbo,
1502 &NextByteCount,
1503 &NextIsAllocated,
1504 &EndOnMax,
1505 NULL );
1506
1507 //
1508 // We just added the allocation, thus there must be at least
1509 // one entry in the mcb corresponding to our write, ie.
1510 // NextIsAllocated must be true. If not, the pre-existing file
1511 // must have an allocation error.
1512 //
1513
1514 if ( !NextIsAllocated ) {
1515
1516 FatPopUpFileCorrupt( IrpContext, FcbOrDcb );
1517
1518 FatRaiseStatus( IrpContext, STATUS_FILE_CORRUPT_ERROR );
1519 }
1520
1521 FatSingleNonAlignedSync( IrpContext,
1522 FcbOrDcb->Vcb,
1523 DiskBuffer,
1524 NextLbo,
1525 SectorSize,
1526 Irp );
1527
1528 if (!NT_SUCCESS( Irp->IoStatus.Status )) {
1529
1530 try_return( NOTHING );
1531 }
1532
1533 //
1534 // Now copy over the part of this last sector that we need.
1535 //
1536
1537 BytesToCopy = ByteCount & (SectorSize - 1);
1538
1539 UserBuffer += LastSectorVbo - OriginalStartingVbo;
1540
1541 RtlCopyMemory( UserBuffer, DiskBuffer, BytesToCopy );
1542
1543 ByteCount -= BytesToCopy;
1544
1545 if ( ByteCount == 0 ) {
1546
1547 try_return( NOTHING );
1548 }
1549 }
1550
1551 NT_ASSERT( ((StartingVbo | ByteCount) & (SectorSize - 1)) == 0 );
1552
1553 //
1554 // Now build a Mdl describing the sector aligned balance of the transfer,
1555 // and put it in the Irp, and read that part.
1556 //
1557
1558 SavedMdl = Irp->MdlAddress;
1559 Irp->MdlAddress = NULL;
1560
1561 SavedUserBuffer = Irp->UserBuffer;
1562
1563 Irp->UserBuffer = (PUCHAR)MmGetMdlVirtualAddress( SavedMdl ) +
1564 (StartingVbo - OriginalStartingVbo);
1565
1566 Mdl = IoAllocateMdl( Irp->UserBuffer,
1567 ByteCount,
1568 FALSE,
1569 FALSE,
1570 Irp );
1571
1572 if (Mdl == NULL) {
1573
1574 Irp->MdlAddress = SavedMdl;
1575 Irp->UserBuffer = SavedUserBuffer;
1576 FatRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
1577 }
1578
1579 IoBuildPartialMdl( SavedMdl,
1580 Mdl,
1581 Irp->UserBuffer,
1582 ByteCount );
1583
1584 //
1585 // Try to read in the pages.
1586 //
1587
1588 _SEH2_TRY {
1589
1590 FatNonCachedIo( IrpContext,
1591 Irp,
1592 FcbOrDcb,
1593 StartingVbo,
1594 ByteCount,
1595 ByteCount,
1596 0 );
1597
1598 } _SEH2_FINALLY {
1599
1600 IoFreeMdl( Irp->MdlAddress );
1601
1602 Irp->MdlAddress = SavedMdl;
1603 Irp->UserBuffer = SavedUserBuffer;
1604 } _SEH2_END;
1605
1606 try_exit: NOTHING;
1607
1608 } _SEH2_FINALLY {
1609
1610 ExFreePool( DiskBuffer );
1611
1612 if ( !_SEH2_AbnormalTermination() && NT_SUCCESS(Irp->IoStatus.Status) ) {
1613
1614 Irp->IoStatus.Information = OriginalByteCount;
1615
1616 //
1617 // We now flush the user's buffer to memory.
1618 //
1619
1620 KeFlushIoBuffers( Irp->MdlAddress, TRUE, FALSE );
1621 }
1622 } _SEH2_END;
1623
1624 DebugTrace(-1, Dbg, "FatNonCachedNonAlignedRead -> VOID\n", 0);
1625 return;
1626 }
1627
1628 \f
1629 VOID
1630 FatMultipleAsync (
1631 IN PIRP_CONTEXT IrpContext,
1632 IN PVCB Vcb,
1633 IN PIRP MasterIrp,
1634 IN ULONG MultipleIrpCount,
1635 IN PIO_RUN IoRuns
1636 )
1637
1638 /*++
1639
1640 Routine Description:
1641
1642 This routine first does the initial setup required of a Master IRP that is
1643 going to be completed using associated IRPs. This routine should not
1644 be used if only one async request is needed; instead, the single read/write
1645 async routines should be called.
1646
1647 A context parameter is initialized, to serve as a communications area
1648 between here and the common completion routine. This initialization
1649 includes allocation of a spinlock. The spinlock is deallocated in the
1650 FatWaitSync routine, so it is essential that the caller ensure that
1651 FatWaitSync is always called, under all circumstances, following a call
1652 to this routine.
1653
1654 Next this routine reads or writes one or more contiguous sectors from
1655 a device asynchronously, and is used if there are multiple reads for a
1656 master IRP. A completion routine is used to synchronize with the
1657 completion of all of the I/O requests started by calls to this routine.
1658
1659 Also, prior to calling this routine the caller must initialize the
1660 IoStatus field in the Context, with the correct success status and byte
1661 count which are expected if all of the parallel transfers complete
1662 successfully. After return this status will be unchanged if all requests
1663 were, in fact, successful. However, if one or more errors occur, the
1664 IoStatus will be modified to reflect the error status and byte count
1665 from the first run (by Vbo) which encountered an error. I/O status
1666 from all subsequent runs will not be indicated.
1667
1668 Arguments:
1669
1670 IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.
1671
1672 Vcb - Supplies the device to be read
1673
1674 MasterIrp - Supplies the master Irp.
1675
1676 MultipleIrpCount - Supplies the number of multiple async requests
1677 that will be issued against the master irp.
1678
1679 IoRuns - Supplies an array containing the Vbo, Lbo, BufferOffset, and
1680 ByteCount for all the runs to be executed in parallel.
1681
1682 Return Value:
1683
1684 None.
1685
1686 --*/
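//
// The synchronous call pattern used elsewhere in this module (see
// FatNonCachedIo) is, in sketch form:
//
//     Irp->IoStatus.Status = STATUS_SUCCESS;
//     Irp->IoStatus.Information = OriginalByteCount;
//     FatMultipleAsync( IrpContext, Vcb, Irp, RunCount, IoRuns );
//     FatWaitSync( IrpContext );           // required after this routine
//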
1687
1688 {
1689 PIRP Irp;
1690 PIO_STACK_LOCATION IrpSp;
1691 PMDL Mdl;
1692 BOOLEAN Wait;
1693 PFAT_IO_CONTEXT Context;
1694 #ifndef __REACTOS__
1695 BOOLEAN IsAWrite = FALSE;
1696 ULONG Length = 0;
1697 #endif
1698
1699 ULONG UnwindRunCount = 0;
1700
1701 BOOLEAN ExceptionExpected = TRUE;
1702
1703 PAGED_CODE();
1704
1705 DebugTrace(+1, Dbg, "FatMultipleAsync\n", 0);
1706 DebugTrace( 0, Dbg, "MajorFunction = %08lx\n", IrpContext->MajorFunction );
1707 DebugTrace( 0, Dbg, "Vcb = %p\n", Vcb );
1708 DebugTrace( 0, Dbg, "MasterIrp = %p\n", MasterIrp );
1709 DebugTrace( 0, Dbg, "MultipleIrpCount = %08lx\n", MultipleIrpCount );
1710 DebugTrace( 0, Dbg, "IoRuns = %08lx\n", IoRuns );
1711
1712 //
1713 // If this I/O originated during FatVerifyVolume, bypass the
1714 // verify logic.
1715 //
1716
1717 if (Vcb->VerifyThread == KeGetCurrentThread()) {
1718
1719 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_OVERRIDE_VERIFY );
1720 }
1721
1722 //
1723 // Set up things according to whether this is truly async.
1724 //
1725
1726 Wait = BooleanFlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT );
1727
1728 Context = IrpContext->FatIoContext;
1729
1730 //
1731 // Finish initializing Context, for use in Read/Write Multiple Asynch.
1732 //
1733
1734 Context->MasterIrp = MasterIrp;
1735
1736 IrpSp = IoGetCurrentIrpStackLocation( MasterIrp );
1737 #ifndef __REACTOS__
1738 IsAWrite = (IrpSp->MajorFunction == IRP_MJ_WRITE);
1739 Length = IrpSp->Parameters.Read.Length;
1740 #endif
1741
1742 _SEH2_TRY {
1743
1744 //
1745 // Iterate through the runs, doing everything that can fail
1746 //
1747
1748 for ( UnwindRunCount = 0;
1749 UnwindRunCount < MultipleIrpCount;
1750 UnwindRunCount++ ) {
1751
1752 //
1753 // Create an associated IRP, making sure there is one stack entry for
1754 // us, as well.
1755 //
1756
1757 IoRuns[UnwindRunCount].SavedIrp = 0;
1758
1759 Irp = IoMakeAssociatedIrp( MasterIrp,
1760 (CCHAR)(Vcb->TargetDeviceObject->StackSize + 1) );
1761
1762 if (Irp == NULL) {
1763
1764 FatRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
1765 }
1766
1767 IoRuns[UnwindRunCount].SavedIrp = Irp;
1768
1769 //
1770 // Allocate and build a partial Mdl for the request.
1771 //
1772
1773 Mdl = IoAllocateMdl( (PCHAR)MasterIrp->UserBuffer +
1774 IoRuns[UnwindRunCount].Offset,
1775 IoRuns[UnwindRunCount].ByteCount,
1776 FALSE,
1777 FALSE,
1778 Irp );
1779
1780 if (Mdl == NULL) {
1781
1782 FatRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
1783 }
1784
1785 //
1786 // Sanity Check
1787 //
1788
1789 NT_ASSERT( Mdl == Irp->MdlAddress );
1790
1791 IoBuildPartialMdl( MasterIrp->MdlAddress,
1792 Mdl,
1793 (PCHAR)MasterIrp->UserBuffer +
1794 IoRuns[UnwindRunCount].Offset,
1795 IoRuns[UnwindRunCount].ByteCount );
1796
1797 //
1798 // Get the first IRP stack location in the associated Irp
1799 //
1800
1801 IoSetNextIrpStackLocation( Irp );
1802 IrpSp = IoGetCurrentIrpStackLocation( Irp );
1803
1804 //
1805 // Setup the Stack location to describe our read.
1806 //
1807
1808 IrpSp->MajorFunction = IrpContext->MajorFunction;
1809 IrpSp->Parameters.Read.Length = IoRuns[UnwindRunCount].ByteCount;
1810 IrpSp->Parameters.Read.ByteOffset.QuadPart = IoRuns[UnwindRunCount].Vbo;
1811
1812 //
1813 // Set up the completion routine address in our stack frame.
1814 //
1815
1816 IoSetCompletionRoutine( Irp,
1817 Wait ?
1818 &FatMultiSyncCompletionRoutine :
1819 &FatMultiAsyncCompletionRoutine,
1820 Context,
1821 TRUE,
1822 TRUE,
1823 TRUE );
1824
1825 //
1826 // Setup the next IRP stack location in the associated Irp for the disk
1827 // driver beneath us.
1828 //
1829
1830 IrpSp = IoGetNextIrpStackLocation( Irp );
1831
1832 //
1833 // Setup the Stack location to do a read from the disk driver.
1834 //
1835
1836 IrpSp->MajorFunction = IrpContext->MajorFunction;
1837 IrpSp->Parameters.Read.Length = IoRuns[UnwindRunCount].ByteCount;
1838 IrpSp->Parameters.Read.ByteOffset.QuadPart = IoRuns[UnwindRunCount].Lbo;
1839
1840 //
1841 // If this Irp is the result of a WriteThough operation,
1842 // tell the device to write it through.
1843 //
1844
1845 if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WRITE_THROUGH )) {
1846
1847 SetFlag( IrpSp->Flags, SL_WRITE_THROUGH );
1848 }
1849
1850 //
1851 // If this I/O requires override verify, bypass the verify logic.
1852 //
1853
1854 if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_OVERRIDE_VERIFY )) {
1855
1856 SetFlag( IrpSp->Flags, SL_OVERRIDE_VERIFY_VOLUME );
1857 }
1858 }
1859
1860 //
1861 // Now we no longer expect an exception. If the driver raises, we
1862 // must bugcheck, because we do not know how to recover from that
1863 // case.
1864 //
1865
1866 ExceptionExpected = FALSE;
1867
1868 //
1869 // We only need to set the associated IRP count in the master irp to
1870 // make it a master IRP. But we set the count to one more than our
1871 // caller requested, because we do not want the I/O system to complete
1872 // the I/O. We also set our own count.
1873 //
1874
1875 Context->IrpCount = MultipleIrpCount;
1876 MasterIrp->AssociatedIrp.IrpCount = MultipleIrpCount;
1877
1878 if (Wait) {
1879
1880 MasterIrp->AssociatedIrp.IrpCount += 1;
1881 }
1882 else if (FlagOn( Context->Wait.Async.ResourceThreadId, 3 )) {
1883
1884 //
1885 // For async requests if we acquired locks, transition the lock owners to an
1886 // object, since when we return this thread could go away before request
1887 // completion, and the resource package may try to boost priority.
1888 //
1889
1890 if (Context->Wait.Async.Resource != NULL) {
1891
1892 ExSetResourceOwnerPointer( Context->Wait.Async.Resource,
1893 (PVOID)Context->Wait.Async.ResourceThreadId );
1894 }
1895
1896 if (Context->Wait.Async.Resource2 != NULL) {
1897
1898 ExSetResourceOwnerPointer( Context->Wait.Async.Resource2,
1899 (PVOID)Context->Wait.Async.ResourceThreadId );
1900 }
1901 }
1902
1903 //
1904 // Back up a copy of the IrpContext flags for later use in async completion.
1905 //
1906
1907 Context->IrpContextFlags = IrpContext->Flags;
1908
1909 //
1910 // Now that all the dangerous work is done, issue the read requests
1911 //
1912
1913 for (UnwindRunCount = 0;
1914 UnwindRunCount < MultipleIrpCount;
1915 UnwindRunCount++) {
1916
1917 Irp = IoRuns[UnwindRunCount].SavedIrp;
1918
1919 DebugDoit( FatIoCallDriverCount += 1);
1920
1921 //
1922 // If IoCallDriver returns an error, it has completed the Irp
1923 // and the error will be caught by our completion routines
1924 // and dealt with as a normal IO error.
1925 //
1926
1927 (VOID)FatLowLevelReadWrite( IrpContext,
1928 Vcb->TargetDeviceObject,
1929 Irp,
1930 Vcb );
1931 }
1932
1933 //
1934 // We just issued an IO to the storage stack, update the counters indicating so.
1935 //
1936
1937 if (FatDiskAccountingEnabled) {
1938
1939 FatUpdateIOCountersPCW( IsAWrite, Length );
1940 }
1941
1942 } _SEH2_FINALLY {
1943
1944 ULONG i;
1945
1946 DebugUnwind( FatMultipleAsync );
1947
1948 //
1949 // Only allocating the spinlock, making the associated Irps
1950 // and allocating the Mdls can fail.
1951 //
1952
1953 if ( _SEH2_AbnormalTermination() ) {
1954
1955 //
1956 // If the driver raised, we are hosed. He is not supposed to raise,
1957 // and it is impossible for us to figure out how to clean up.
1958 //
1959
1960 if (!ExceptionExpected) {
1961 NT_ASSERT( ExceptionExpected );
1962 #ifdef _MSC_VER
1963 #pragma prefast( suppress:28159, "things are seriously wrong if we get here" )
1964 #endif
1965 FatBugCheck( 0, 0, 0 );
1966 }
1967
1968 //
1969 // Unwind
1970 //
1971
1972 for (i = 0; i <= UnwindRunCount; i++) {
1973
1974 if ( (Irp = IoRuns[i].SavedIrp) != NULL ) {
1975
1976 if ( Irp->MdlAddress != NULL ) {
1977
1978 IoFreeMdl( Irp->MdlAddress );
1979 }
1980
1981 IoFreeIrp( Irp );
1982 }
1983 }
1984 }
1985
1986 //
1987 // And return to our caller
1988 //
1989
1990 DebugTrace(-1, Dbg, "FatMultipleAsync -> VOID\n", 0);
1991 } _SEH2_END;
1992
1993 return;
1994 }
1995
1996 \f
1997 VOID
1998 FatSingleAsync (
1999 IN PIRP_CONTEXT IrpContext,
2000 IN PVCB Vcb,
2001 IN LBO Lbo,
2002 IN ULONG ByteCount,
2003 IN PIRP Irp
2004 )
2005
2006 /*++
2007
2008 Routine Description:
2009
2010 This routine reads or writes one or more contiguous sectors from a device
2011 asynchronously, and is used if there is only one read necessary to
2012 complete the IRP. It implements the read by simply filling
2013 in the next stack frame in the Irp, and passing it on. The transfer
2014 occurs to the single buffer originally specified in the user request.
2015
2016 Arguments:
2017
2018 IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.
2019
2020 Vcb - Supplies the device to read
2021
2022 Lbo - Supplies the starting Logical Byte Offset to begin reading from
2023
2024 ByteCount - Supplies the number of bytes to read from the device
2025
2026 Irp - Supplies the master Irp to associate with the async
2027 request.
2028
2029 Return Value:
2030
2031 None.
2032
2033 --*/
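//
// FatNonCachedIo uses this routine for the single-run case: it issues the
// transfer here and, when IRP_CONTEXT_FLAG_WAIT is set, blocks in
// FatWaitSync; that same flag selects the sync versus async completion
// routine below.
//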
2034
2035 {
2036 PIO_STACK_LOCATION IrpSp;
2037 PFAT_IO_CONTEXT Context;
2038 #ifndef __REACTOS__
2039 BOOLEAN IsAWrite = FALSE;
2040 #endif
2041
2042 PAGED_CODE();
2043
2044 DebugTrace(+1, Dbg, "FatSingleAsync\n", 0);
2045 DebugTrace( 0, Dbg, "MajorFunction = %08lx\n", IrpContext->MajorFunction );
2046 DebugTrace( 0, Dbg, "Vcb = %p\n", Vcb );
2047 DebugTrace( 0, Dbg, "Lbo = %08lx\n", Lbo);
2048 DebugTrace( 0, Dbg, "ByteCount = %08lx\n", ByteCount);
2049 DebugTrace( 0, Dbg, "Irp = %p\n", Irp );
2050
2051 //
2052 // If this I/O originated during FatVerifyVolume, bypass the
2053 // verify logic.
2054 //
2055
2056 if (Vcb->VerifyThread == KeGetCurrentThread()) {
2057
2058 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_OVERRIDE_VERIFY );
2059 }
2060
2061 //
2062 // Set up the completion routine address in our stack frame.
2063 //
2064
2065 IoSetCompletionRoutine( Irp,
2066 FlagOn(IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT) ?
2067 &FatSingleSyncCompletionRoutine :
2068 &FatSingleAsyncCompletionRoutine,
2069 IrpContext->FatIoContext,
2070 TRUE,
2071 TRUE,
2072 TRUE );
2073
2074 //
2075 // Setup the next IRP stack location in the associated Irp for the disk
2076 // driver beneath us.
2077 //
2078
2079 IrpSp = IoGetNextIrpStackLocation( Irp );
2080
2081 //
2082 // Setup the Stack location to do a read from the disk driver.
2083 //
2084
2085 IrpSp->MajorFunction = IrpContext->MajorFunction;
2086 IrpSp->Parameters.Read.Length = ByteCount;
2087 IrpSp->Parameters.Read.ByteOffset.QuadPart = Lbo;
2088
2089 #ifndef __REACTOS__
2090 IsAWrite = (IrpSp->MajorFunction == IRP_MJ_WRITE);
2091 #endif
2092
2093 //
2094 // If this Irp is the result of a WriteThough operation,
2095 // tell the device to write it through.
2096 //
2097
2098 if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WRITE_THROUGH )) {
2099
2100 SetFlag( IrpSp->Flags, SL_WRITE_THROUGH );
2101 }
2102
2103 //
2104 // If this I/O requires override verify, bypass the verify logic.
2105 //
2106
2107 if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_OVERRIDE_VERIFY )) {
2108
2109 SetFlag( IrpSp->Flags, SL_OVERRIDE_VERIFY_VOLUME );
2110 }
2111
2112 //
2113 // For async requests if we acquired locks, transition the lock owners to an
2114 // object, since when we return this thread could go away before request
2115 // completion, and the resource package may try to boost priority.
2116 //
2117
2118 if (!FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_WAIT ) &&
2119 FlagOn( IrpContext->FatIoContext->Wait.Async.ResourceThreadId, 3 )) {
2120
2121 Context = IrpContext->FatIoContext;
2122
2123 if (Context->Wait.Async.Resource != NULL) {
2124
2125 ExSetResourceOwnerPointer( Context->Wait.Async.Resource,
2126 (PVOID)Context->Wait.Async.ResourceThreadId );
2127 }
2128
2129 if (Context->Wait.Async.Resource2 != NULL) {
2130
2131 ExSetResourceOwnerPointer( Context->Wait.Async.Resource2,
2132 (PVOID)Context->Wait.Async.ResourceThreadId );
2133 }
2134 }
2135
2136 //
2137 // Back up a copy of the IrpContext flags for later use in async completion.
2138 //
2139
2140 IrpContext->FatIoContext->IrpContextFlags = IrpContext->Flags;
2141
2142 //
2143 // Issue the read request
2144 //
2145
2146 DebugDoit( FatIoCallDriverCount += 1);
2147
2148 //
2149 // If IoCallDriver returns an error, it has completed the Irp
2150 // and the error will be caught by our completion routines
2151 // and dealt with as a normal IO error.
2152 //
2153
2154 (VOID)FatLowLevelReadWrite( IrpContext,
2155 Vcb->TargetDeviceObject,
2156 Irp,
2157 Vcb );
2158
2159 //
2160 // We just issued an IO to the storage stack, update the counters indicating so.
2161 //
2162
2163 if (FatDiskAccountingEnabled) {
2164
2165 FatUpdateIOCountersPCW( IsAWrite, ByteCount );
2166 }
2167
2168 //
2169 // And return to our caller
2170 //
2171
2172 DebugTrace(-1, Dbg, "FatSingleAsync -> VOID\n", 0);
2173
2174 return;
2175 }
2176
2177 \f
2178 VOID
2179 FatSingleNonAlignedSync (
2180 IN PIRP_CONTEXT IrpContext,
2181 IN PVCB Vcb,
2182 IN PUCHAR Buffer,
2183 IN LBO Lbo,
2184 IN ULONG ByteCount,
2185 IN PIRP Irp
2186 )
2187
2188 /*++
2189
2190 Routine Description:
2191
2192 This routine reads or writes one or more contiguous sectors from a device
2193 Synchronously, and does so to a buffer that must come from non paged
2194 pool. It saves a pointer to the Irp's original Mdl, and creates a new
2195 one describing the given buffer. It implements the read by simply filling
2196 in the next stack frame in the Irp, and passing it on. The transfer
2197 occurs to the non-paged buffer supplied by the caller.
2198
2199 Arguments:
2200
2201 IrpContext->MajorFunction - Supplies either IRP_MJ_READ or IRP_MJ_WRITE.
2202
2203 Vcb - Supplies the device to read
2204
2205 Buffer - Supplies a buffer from non-paged pool.
2206
2207 Lbo - Supplies the starting Logical Byte Offset to begin reading from
2208
2209 ByteCount - Supplies the number of bytes to read from the device
2210
2211 Irp - Supplies the master Irp to associate with the async
2212 request.
2213
2214 Return Value:
2215
2216 None.
2217
2218 --*/
2219
2220 {
2221 PIO_STACK_LOCATION IrpSp;
2222
2223 PMDL Mdl;
2224 PMDL SavedMdl;
2225 #ifndef __REACTOS__
2226 BOOLEAN IsAWrite = FALSE;
2227 #endif
2228
2229 PAGED_CODE();
2230
2231 DebugTrace(+1, Dbg, "FatSingleNonAlignedSync\n", 0);
2232 DebugTrace( 0, Dbg, "MajorFunction = %08lx\n", IrpContext->MajorFunction );
2233 DebugTrace( 0, Dbg, "Vcb = %p\n", Vcb );
2234 DebugTrace( 0, Dbg, "Buffer = %p\n", Buffer );
2235 DebugTrace( 0, Dbg, "Lbo = %08lx\n", Lbo);
2236 DebugTrace( 0, Dbg, "ByteCount = %08lx\n", ByteCount);
2237 DebugTrace( 0, Dbg, "Irp = %p\n", Irp );
2238
2239 //
2240 // Create a new Mdl describing the buffer, saving the current one in the
2241 // Irp
2242 //
2243
2244 SavedMdl = Irp->MdlAddress;
2245
2246 Irp->MdlAddress = 0;
2247
2248 Mdl = IoAllocateMdl( Buffer,
2249 ByteCount,
2250 FALSE,
2251 FALSE,
2252 Irp );
2253
2254 if (Mdl == NULL) {
2255
2256 Irp->MdlAddress = SavedMdl;
2257
2258 FatRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
2259 }
2260
2261 //
2262 // Lock the new Mdl in memory.
2263 //
2264
2265 _SEH2_TRY {
2266
2267 MmProbeAndLockPages( Mdl, KernelMode, IoWriteAccess );
2268
2269 } _SEH2_FINALLY {
2270
2271 if ( _SEH2_AbnormalTermination() ) {
2272
2273 IoFreeMdl( Mdl );
2274 Irp->MdlAddress = SavedMdl;
2275 }
2276 } _SEH2_END;
2277
2278 //
2279 // Set up the completion routine address in our stack frame.
2280 //
2281
2282 IoSetCompletionRoutine( Irp,
2283 &FatSingleSyncCompletionRoutine,
2284 IrpContext->FatIoContext,
2285 TRUE,
2286 TRUE,
2287 TRUE );
2288
2289 //
2290 // Setup the next IRP stack location in the associated Irp for the disk
2291 // driver beneath us.
2292 //
2293
2294 IrpSp = IoGetNextIrpStackLocation( Irp );
2295
2296 //
2297 // Setup the Stack location to do a read from the disk driver.
2298 //
2299
2300 IrpSp->MajorFunction = IrpContext->MajorFunction;
2301 IrpSp->Parameters.Read.Length = ByteCount;
2302 IrpSp->Parameters.Read.ByteOffset.QuadPart = Lbo;
2303
2304 #ifndef __REACTOS__
2305 IsAWrite = (IrpSp->MajorFunction == IRP_MJ_WRITE);
2306 #endif
2307
2308 //
2309 // If this I/O originated during FatVerifyVolume, bypass the
2310 // verify logic.
2311 //
2312
2313 if (Vcb->VerifyThread == KeGetCurrentThread()) {
2314
2315 SetFlag( IrpContext->Flags, IRP_CONTEXT_FLAG_OVERRIDE_VERIFY );
2316 }
2317
2318 //
2319 // If this I/O requires override verify, bypass the verify logic.
2320 //
2321
2322 if (FlagOn( IrpContext->Flags, IRP_CONTEXT_FLAG_OVERRIDE_VERIFY )) {
2323
2324 SetFlag( IrpSp->Flags, SL_OVERRIDE_VERIFY_VOLUME );
2325 }
2326
2327 //
2328 // Issue the read request
2329 //
2330
2331 DebugDoit( FatIoCallDriverCount += 1);
2332
2333 //
2334 // If IoCallDriver returns an error, it has completed the Irp
2335 // and the error will be caught by our completion routines
2336 // and dealt with as a normal IO error.
2337 //
2338
2339 _SEH2_TRY {
2340
2341 (VOID)FatLowLevelReadWrite( IrpContext,
2342 Vcb->TargetDeviceObject,
2343 Irp,
2344 Vcb );
2345
2346 FatWaitSync( IrpContext );
2347
2348 } _SEH2_FINALLY {
2349
2350 MmUnlockPages( Mdl );
2351 IoFreeMdl( Mdl );
2352 Irp->MdlAddress = SavedMdl;
2353 } _SEH2_END;
2354
2355 //
2356 // We just issued an IO to the storage stack, so update the counters accordingly.
2357 //
2358
2359 if (FatDiskAccountingEnabled) {
2360
2361 FatUpdateIOCountersPCW( IsAWrite, ByteCount );
2362 }
2363
2364 //
2365 // And return to our caller
2366 //
2367
2368 DebugTrace(-1, Dbg, "FatSingleNonAlignedSync -> VOID\n", 0);
2369
2370 return;
2371 }
2372
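//
//  A condensed sketch of the Mdl-swap skeleton FatSingleNonAlignedSync uses:
//  park the caller's Mdl, attach a temporary Mdl describing the non-paged
//  staging buffer, and restore the original Mdl once the transfer is over.
//  The helper name is hypothetical and the try/finally protection shown in
//  the real routine is trimmed here.
//
#if 0
VOID
SketchSwapMdlForStagingBuffer (
    IN PIRP Irp,
    IN PUCHAR StagingBuffer,
    IN ULONG ByteCount
    )
{
    PMDL SavedMdl = Irp->MdlAddress;
    PMDL TempMdl;

    Irp->MdlAddress = NULL;

    //  Describe the staging buffer and hang the new Mdl off the Irp.

    TempMdl = IoAllocateMdl( StagingBuffer, ByteCount, FALSE, FALSE, Irp );

    if (TempMdl == NULL) {

        Irp->MdlAddress = SavedMdl;
        return;
    }

    MmProbeAndLockPages( TempMdl, KernelMode, IoWriteAccess );

    //  ... send the Irp down and wait for its completion here ...

    MmUnlockPages( TempMdl );
    IoFreeMdl( TempMdl );
    Irp->MdlAddress = SavedMdl;
}
#endif
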
2373 \f
2374 VOID
2375 FatWaitSync (
2376 IN PIRP_CONTEXT IrpContext
2377 )
2378
2379 /*++
2380
2381 Routine Description:
2382
2383 This routine waits for one or more previously started I/O requests
2384 from the above routines, by simply waiting on the event.
2385
2386 Arguments:
2387
2388 Return Value:
2389
2390 None
2391
2392 --*/
2393
2394 {
2395 PAGED_CODE();
2396
2397 DebugTrace(+1, Dbg, "FatWaitSync, Context = %p\n", IrpContext->FatIoContext );
2398
2399 KeWaitForSingleObject( &IrpContext->FatIoContext->Wait.SyncEvent,
2400 Executive, KernelMode, FALSE, NULL );
2401
2402 KeClearEvent( &IrpContext->FatIoContext->Wait.SyncEvent );
2403
2404 DebugTrace(-1, Dbg, "FatWaitSync -> VOID\n", 0 );
2405 }
2406
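//
//  The async start routines in this module signal
//  IrpContext->FatIoContext->Wait.SyncEvent from their completion routines,
//  so a synchronous caller simply issues the I/O and then calls FatWaitSync.
//  A sketch of that shape, assuming the single-run routine takes its usual
//  (IrpContext, Vcb, Lbo, ByteCount, Irp) arguments:
//
#if 0
NTSTATUS
SketchSingleSynchronousRun (
    IN PIRP_CONTEXT IrpContext,
    IN PVCB Vcb,
    IN LBO Lbo,
    IN ULONG ByteCount,
    IN PIRP Irp
    )
{
    //  Kick off the transfer; the completion routine sets the sync event.

    FatSingleAsync( IrpContext, Vcb, Lbo, ByteCount, Irp );

    //  Block until the completion routine has fired, then pick up the result.

    FatWaitSync( IrpContext );

    return Irp->IoStatus.Status;
}
#endif
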
2407 \f
2408 //
2409 // Internal Support Routine
2410 //
2411
2412 NTSTATUS
2413 NTAPI
2414 FatMultiSyncCompletionRoutine (
2415 _In_ PDEVICE_OBJECT DeviceObject,
2416 _In_ PIRP Irp,
2417 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
2418 )
2419
2420 /*++
2421
2422 Routine Description:
2423
2424 This is the completion routine for all reads and writes started via
2425 FatRead/WriteMultipleAsynch. It must synchronize its operation for
2426 multiprocessor environments with itself on all other processors, via
2427 a spin lock found via the Context parameter.
2428
2429 The completion routine has the following responsibilities:
2430
2431 If the individual request was completed with an error, then
2432 this completion routine must see if this is the first error
2433 (essentially by Vbo), and if so it must correctly reduce the
2434 byte count and remember the error status in the Context.
2435
2436 If the IrpCount goes to 1, then it sets the event in the Context
2437 parameter to signal the caller that all of the asynch requests
2438 are done.
2439
2440 Arguments:
2441
2442 DeviceObject - Pointer to the file system device object.
2443
2444 Irp - Pointer to the associated Irp which is being completed. (This
2445 Irp will no longer be accessible after this routine returns.)
2446
2447 Contxt - The context parameter which was specified for all of
2448 the multiple asynch I/O requests for this MasterIrp.
2449
2450 Return Value:
2451
2452 The routine returns STATUS_MORE_PROCESSING_REQUIRED so that we can
2453 immediately complete the Master Irp without being in a race condition
2454 with the IoCompleteRequest thread trying to decrement the IrpCount in
2455 the Master Irp.
2456
2457 --*/
2458
2459 {
2460
2461 PFAT_IO_CONTEXT Context = Contxt;
2462 PIRP MasterIrp = Context->MasterIrp;
2463
2464 DebugTrace(+1, Dbg, "FatMultiSyncCompletionRoutine, Context = %p\n", Context );
2465
2466 //
2467 // If we got an error (or verify required), remember it in the Irp
2468 //
2469
2470 if (!NT_SUCCESS( Irp->IoStatus.Status )) {
2471
2472 #if DBG
2473 if(!( NT_SUCCESS( FatBreakOnInterestingIoCompletion ) || Irp->IoStatus.Status != FatBreakOnInterestingIoCompletion )) {
2474 DbgBreakPoint();
2475 }
2476 #endif
2477
2478 #ifdef SYSCACHE_COMPILE
2479 DbgPrint( "FAT SYSCACHE: MultiSync (IRP %08x for Master %08x) -> %08x\n", Irp, MasterIrp, Irp->IoStatus );
2480 #endif
2481
2482 MasterIrp->IoStatus = Irp->IoStatus;
2483 }
2484
2485 NT_ASSERT( !(NT_SUCCESS( Irp->IoStatus.Status ) && Irp->IoStatus.Information == 0 ));
2486
2487 //
2488 // We must do this here since IoCompleteRequest won't get a chance
2489 // on this associated Irp.
2490 //
2491
2492 IoFreeMdl( Irp->MdlAddress );
2493 IoFreeIrp( Irp );
2494
2495 if (InterlockedDecrement(&Context->IrpCount) == 0) {
2496
2497 FatDoCompletionZero( MasterIrp, Context );
2498 KeSetEvent( &Context->Wait.SyncEvent, 0, FALSE );
2499 }
2500
2501 DebugTrace(-1, Dbg, "FatMultiSyncCompletionRoutine -> SUCCESS\n", 0 );
2502
2503 UNREFERENCED_PARAMETER( DeviceObject );
2504
2505 return STATUS_MORE_PROCESSING_REQUIRED;
2506 }
2507
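//
//  The multi-run completion routines rely on a simple fan-in: each associated
//  Irp decrements a shared count, and only the Irp that drives the count to
//  zero signals the waiter (or finishes the master Irp).  A stripped-down
//  sketch of that pattern, with hypothetical names:
//
#if 0
typedef struct _SKETCH_FAN_IN {

    LONG IrpCount;          //  primed to the number of associated Irps
    KEVENT DoneEvent;       //  signalled exactly once, by the last completer

} SKETCH_FAN_IN, *PSKETCH_FAN_IN;

VOID
SketchFanInComplete (
    IN PSKETCH_FAN_IN FanIn
    )
{
    if (InterlockedDecrement( &FanIn->IrpCount ) == 0) {

        KeSetEvent( &FanIn->DoneEvent, 0, FALSE );
    }
}
#endif
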
2508 \f
2509 //
2510 // Internal Support Routine
2511 //
2512
2513 NTSTATUS
2514 NTAPI
2515 FatMultiAsyncCompletionRoutine (
2516 _In_ PDEVICE_OBJECT DeviceObject,
2517 _In_ PIRP Irp,
2518 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
2519 )
2520
2521 /*++
2522
2523 Routine Description:
2524
2525 This is the completion routine for all reads and writes started via
2526 FatRead/WriteMultipleAsynch. It must synchronize its operation for
2527 multiprocessor environments with itself on all other processors, via
2528 a spin lock found via the Context parameter.
2529
2530 The completion routine has the following responsibilities:
2531
2532 If the individual request was completed with an error, then
2533 this completion routine must see if this is the first error
2534 (essentially by Vbo), and if so it must correctly reduce the
2535 byte count and remember the error status in the Context.
2536
2537 If the IrpCount goes to 1, then it sets the event in the Context
2538 parameter to signal the caller that all of the asynch requests
2539 are done.
2540
2541 Arguments:
2542
2543 DeviceObject - Pointer to the file system device object.
2544
2545 Irp - Pointer to the associated Irp which is being completed. (This
2546 Irp will no longer be accessible after this routine returns.)
2547
2548 Contxt - The context parameter which was specified for all of
2549 the multiple asynch I/O requests for this MasterIrp.
2550
2551 Return Value:
2552
2553 The routine returns STATUS_MORE_PROCESSING_REQUIRED so that we can
2554 immediately complete the Master Irp without being in a race condition
2555 with the IoCompleteRequest thread trying to decrement the IrpCount in
2556 the Master Irp.
2557
2558 --*/
2559
2560 {
2561 NTSTATUS Status = STATUS_SUCCESS;
2562 PFAT_IO_CONTEXT Context = Contxt;
2563 PIRP MasterIrp = Context->MasterIrp;
2564 BOOLEAN PostRequest = FALSE;
2565
2566 DebugTrace(+1, Dbg, "FatMultiAsyncCompletionRoutine, Context = %p\n", Context );
2567
2568 //
2569 // If we got an error (or verify required), remember it in the Irp
2570 //
2571
2572 if (!NT_SUCCESS( Irp->IoStatus.Status )) {
2573
2574 #if DBG
2575 if (!( NT_SUCCESS( FatBreakOnInterestingIoCompletion ) || Irp->IoStatus.Status != FatBreakOnInterestingIoCompletion )) {
2576 DbgBreakPoint();
2577 }
2578 #endif
2579
2580 #ifdef SYSCACHE_COMPILE
2581 DbgPrint( "FAT SYSCACHE: MultiAsync (IRP %08x for Master %08x) -> %08x\n", Irp, MasterIrp, Irp->IoStatus );
2582 #endif
2583
2584 MasterIrp->IoStatus = Irp->IoStatus;
2585
2586 }
2587
2588 NT_ASSERT( !(NT_SUCCESS( Irp->IoStatus.Status ) && Irp->IoStatus.Information == 0 ));
2589
2590 if (InterlockedDecrement(&Context->IrpCount) == 0) {
2591
2592 FatDoCompletionZero( MasterIrp, Context );
2593
2594 if (NT_SUCCESS(MasterIrp->IoStatus.Status)) {
2595
2596 MasterIrp->IoStatus.Information =
2597 Context->Wait.Async.RequestedByteCount;
2598
2599 NT_ASSERT(MasterIrp->IoStatus.Information != 0);
2600
2601 //
2602 // Now if this wasn't PagingIo, set either the read or write bit.
2603 //
2604
2605 if (!FlagOn(MasterIrp->Flags, IRP_PAGING_IO)) {
2606
2607 SetFlag( Context->Wait.Async.FileObject->Flags,
2608 IoGetCurrentIrpStackLocation(MasterIrp)->MajorFunction == IRP_MJ_READ ?
2609 FO_FILE_FAST_IO_READ : FO_FILE_MODIFIED );
2610 }
2611
2612 } else {
2613
2614 //
2615 // Post STATUS_VERIFY_REQUIRED failures. Only post top level IRPs, because recursive I/Os
2616 // cannot process volume verification.
2617 //
2618
2619 if (!FlagOn(Context->IrpContextFlags, IRP_CONTEXT_FLAG_RECURSIVE_CALL) &&
2620 (MasterIrp->IoStatus.Status == STATUS_VERIFY_REQUIRED)) {
2621 PostRequest = TRUE;
2622 }
2623
2624 }
2625
2626 //
2627 // If this was a special async write, decrement the count. Set the
2628 // event if this was the final outstanding I/O for the file. We will
2629 // also want to queue an APC to deal with any error conditions.
2630 //
2631 _Analysis_assume_(!(Context->Wait.Async.NonPagedFcb) &&
2632 (ExInterlockedAddUlong( &Context->Wait.Async.NonPagedFcb->OutstandingAsyncWrites,
2633 0xffffffff,
2634 &FatData.GeneralSpinLock ) != 1));
2635 if ((Context->Wait.Async.NonPagedFcb) &&
2636 (ExInterlockedAddUlong( &Context->Wait.Async.NonPagedFcb->OutstandingAsyncWrites,
2637 0xffffffff,
2638 &FatData.GeneralSpinLock ) == 1)) {
2639
2640 KeSetEvent( Context->Wait.Async.NonPagedFcb->OutstandingAsyncEvent, 0, FALSE );
2641 }
2642
2643 //
2644 // Now release the resources.
2645 //
2646
2647 if (Context->Wait.Async.Resource != NULL) {
2648
2649 ExReleaseResourceForThreadLite( Context->Wait.Async.Resource,
2650 Context->Wait.Async.ResourceThreadId );
2651 }
2652
2653 if (Context->Wait.Async.Resource2 != NULL) {
2654
2655 ExReleaseResourceForThreadLite( Context->Wait.Async.Resource2,
2656 Context->Wait.Async.ResourceThreadId );
2657 }
2658
2659 //
2660 // Mark the master Irp pending
2661 //
2662
2663 IoMarkIrpPending( MasterIrp );
2664
2665 //
2666 // and finally, free the context record.
2667 //
2668
2669 ExFreePool( Context );
2670
2671 if (PostRequest) {
2672
2673 PIRP_CONTEXT IrpContext = NULL;
2674
2675 _SEH2_TRY {
2676
2677 IrpContext = FatCreateIrpContext(Irp, TRUE );
2678 ClearFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_RECURSIVE_CALL);
2679 FatFsdPostRequest( IrpContext, Irp );
2680 Status = STATUS_MORE_PROCESSING_REQUIRED;
2681
2682 } _SEH2_EXCEPT( FatExceptionFilter(NULL, _SEH2_GetExceptionInformation()) ) {
2683
2684 //
2685 // If we failed to post the IRP, we just have to return the failure
2686 // to the user. :(
2687 //
2688
2689 NOTHING;
2690 } _SEH2_END;
2691 }
2692 }
2693
2694 DebugTrace(-1, Dbg, "FatMultiAsyncCompletionRoutine -> SUCCESS\n", 0 );
2695
2696 UNREFERENCED_PARAMETER( DeviceObject );
2697
2698 return Status;
2699 }
2700
2701 \f
2702 NTSTATUS
2703 FatPagingFileErrorHandler (
2704 IN PIRP Irp,
2705 IN PKEVENT Event OPTIONAL
2706 )
2707
2708 /*++
2709
2710 Routine Description:
2711
2712 This routine attempts to guarantee that the media is marked dirty
2713 with the surface test bit if a paging file IO fails.
2714
2715 The work done here has several basic problems
2716
2717 1) when paging file writes start failing, this is a good sign
2718 that the rest of the system is about to fall down around us
2719
2720 2) it has no forward progress guarantee
2721
2722 With Whistler, it is actually quite intentional that we're rejiggering
2723 the paging file write path to make forward progress at all times. This
2724 means that the cases where it *does* fail, we're truly seeing media errors
2725 and this is probably going to mean the paging file is going to stop working
2726 very soon.
2727
2728 It'd be nice to make this guarantee progress. It would need
2729
2730 1) a guaranteed worker thread which can only be used by items which
2731 will make forward progress (i.e., not block out this one)
2732
2733 2) the virtual volume file's pages containing the boot sector and
2734 1st FAT entry would have to be pinned resident and have a guaranteed
2735 mapping address
2736
2737 3) mark volume would have to have a stashed irp/mdl and roll the write
2738 irp, or use a generalized mechanism to guarantee issue of the irp
2739
2740 4) the lower stack would have to guarantee progress
2741
2742 Of these, 1 and 4 may actually exist shortly.
2743
2744 Arguments:
2745
2746 Irp - Pointer to the associated Irp which is being failed.
2747
2748 Event - Pointer to optional event to be signalled instead of completing
2749 the IRP
2750
2751 Return Value:
2752
2753 Returns STATUS_MORE_PROCESSING_REQUIRED if we managed to queue off the workitem,
2754 STATUS_SUCCESS otherwise.
2755
2756 --*/
2757
2758 {
2759 NTSTATUS Status;
2760
2761 //
2762 // If this was a media error, we want to chkdsk /r the next time we boot.
2763 //
2764
2765 if (FsRtlIsTotalDeviceFailure(Irp->IoStatus.Status)) {
2766
2767 Status = STATUS_SUCCESS;
2768
2769 } else {
2770
2771 PCLEAN_AND_DIRTY_VOLUME_PACKET Packet;
2772
2773 //
2774 // We are going to try to mark the volume as needing recovery.
2775 // If we can't get pool, oh well....
2776 //
2777
2778 #ifndef __REACTOS__
2779 Packet = ExAllocatePoolWithTag(NonPagedPoolNx, sizeof(CLEAN_AND_DIRTY_VOLUME_PACKET), ' taF');
2780 #else
2781 Packet = ExAllocatePoolWithTag(NonPagedPool, sizeof(CLEAN_AND_DIRTY_VOLUME_PACKET), ' taF');
2782 #endif
2783
2784 if ( Packet ) {
2785
2786 Packet->Vcb = &((PVOLUME_DEVICE_OBJECT)IoGetCurrentIrpStackLocation(Irp)->DeviceObject)->Vcb;
2787 Packet->Irp = Irp;
2788 Packet->Event = Event;
2789
2790 ExInitializeWorkItem( &Packet->Item,
2791 &FatFspMarkVolumeDirtyWithRecover,
2792 Packet );
2793
2794 #ifdef _MSC_VER
2795 #pragma prefast( suppress:28159, "prefast indicates this is obsolete, but it is ok for fastfat to use it" )
2796 #endif
2797 ExQueueWorkItem( &Packet->Item, CriticalWorkQueue );
2798
2799 Status = STATUS_MORE_PROCESSING_REQUIRED;
2800
2801 } else {
2802
2803 Status = STATUS_SUCCESS;
2804 }
2805 }
2806
2807 return Status;
2808 }
2809
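//
//  The deferral above follows the standard executive work item pattern:
//  bundle the parameters into a small non-paged packet and hand it to a
//  system worker thread.  The packet layout and worker below are hypothetical
//  stand-ins for CLEAN_AND_DIRTY_VOLUME_PACKET and
//  FatFspMarkVolumeDirtyWithRecover.
//
#if 0
typedef struct _SKETCH_PACKET {

    WORK_QUEUE_ITEM Item;
    PVCB Vcb;
    PIRP Irp;

} SKETCH_PACKET, *PSKETCH_PACKET;

VOID
SketchWorker (
    IN PVOID Parameter
    )
{
    PSKETCH_PACKET Packet = (PSKETCH_PACKET)Parameter;

    //  ... perform the deferred work at PASSIVE_LEVEL, then complete Packet->Irp ...

    ExFreePool( Packet );
}

BOOLEAN
SketchQueueDeferredWork (
    IN PVCB Vcb,
    IN PIRP Irp
    )
{
    PSKETCH_PACKET Packet;

    Packet = ExAllocatePoolWithTag( NonPagedPool, sizeof(SKETCH_PACKET), ' taF' );

    if (Packet == NULL) {

        return FALSE;
    }

    Packet->Vcb = Vcb;
    Packet->Irp = Irp;

    ExInitializeWorkItem( &Packet->Item, &SketchWorker, Packet );
    ExQueueWorkItem( &Packet->Item, CriticalWorkQueue );

    return TRUE;
}
#endif
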
2810 \f
2811 //
2812 // Internal Support Routine
2813 //
2814
2815 NTSTATUS
2816 NTAPI
2817 FatPagingFileCompletionRoutineCatch (
2818 _In_ PDEVICE_OBJECT DeviceObject,
2819 _In_ PIRP Irp,
2820 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
2821 )
2822
2823 /*++
2824
2825 Routine Description:
2826
2827 This is the completion routine for all reads and writes started via
2828 FatPagingFileIo that reuse the master irp (that we have to catch
2829 on the way back). It is always invoked.
2830
2831 The completion routine has the following responsibility:
2832
2833 If the error implies a media problem, it enqueues a
2834 worker item to write out the dirty bit so that the next
2835 time we run we will do an autochk /r.  Forward progress is not
2836 guaranteed at the moment.
2837
2838 Clean up the Mdl used for this partial request.
2839
2840 Note that if the Irp is failing, the error code is already where
2841 we want it.
2842
2843 Arguments:
2844
2845 DeviceObject - Pointer to the file system device object.
2846
2847 Irp - Pointer to the associated Irp which is being completed. (This
2848 Irp will no longer be accessible after this routine returns.)
2849
2850 MasterIrp - Pointer to the master Irp.
2851
2852 Return Value:
2853
2854 Always returns STATUS_MORE_PROCESSING_REQUIRED.
2855
2856 --*/
2857
2858 {
2859 PFAT_PAGING_FILE_CONTEXT Context = (PFAT_PAGING_FILE_CONTEXT) Contxt;
2860
2861 UNREFERENCED_PARAMETER( DeviceObject );
2862
2863 DebugTrace(+1, Dbg, "FatPagingFileCompletionRoutineCatch, Context = %p\n", Context );
2864
2865 //
2866 // Cleanup the existing Mdl, perhaps by returning the reserve.
2867 //
2868
2869 if (Irp->MdlAddress == FatReserveMdl) {
2870
2871 MmPrepareMdlForReuse( Irp->MdlAddress );
2872 KeSetEvent( &FatReserveEvent, 0, FALSE );
2873
2874 } else {
2875
2876 IoFreeMdl( Irp->MdlAddress );
2877 }
2878
2879 //
2880 // Restore the original Mdl.
2881 //
2882
2883 Irp->MdlAddress = Context->RestoreMdl;
2884
2885 DebugTrace(-1, Dbg, "FatPagingFileCompletionRoutine => (done)\n", 0 );
2886
2887 //
2888 // If the IRP is succeeding or the failure handler did not post off the
2889 // completion, we're done and should set the event to let the master
2890 // know the IRP is his again.
2891 //
2892
2893 if (NT_SUCCESS( Irp->IoStatus.Status ) ||
2894 FatPagingFileErrorHandler( Irp, &Context->Event ) == STATUS_SUCCESS) {
2895
2896 KeSetEvent( &Context->Event, 0, FALSE );
2897 }
2898
2899 return STATUS_MORE_PROCESSING_REQUIRED;
2900
2901 }
2902
2903 \f
2904 //
2905 // Internal Support Routine
2906 //
2907
2908 NTSTATUS
2909 NTAPI
2910 FatPagingFileCompletionRoutine (
2911 _In_ PDEVICE_OBJECT DeviceObject,
2912 _In_ PIRP Irp,
2913 _In_reads_opt_(_Inexpressible_("varies")) PVOID MasterIrp
2914 )
2915
2916 /*++
2917
2918 Routine Description:
2919
2920 This is the completion routine for all reads and writes started via
2921 FatPagingFileIo. It should only be invoked on error or cancel.
2922
2923 The completion routine has the following responsibility:
2924
2925 Since the individual request was completed with an error,
2926 this completion routine must stuff it into the master irp.
2927
2928 If the error implies a media problem, it also enqueues a
2929 worker item to write out the dirty bit so that the next
2930 time we run we will do an autochk /r.
2931
2932 Arguments:
2933
2934 DeviceObject - Pointer to the file system device object.
2935
2936 Irp - Pointer to the associated Irp which is being completed. (This
2937 Irp will no longer be accessible after this routine returns.)
2938
2939 MasterIrp - Pointer to the master Irp.
2940
2941 Return Value:
2942
2943 Always returns STATUS_SUCCESS.
2944
2945 --*/
2946
2947 {
2948 DebugTrace(+1, Dbg, "FatPagingFileCompletionRoutine, MasterIrp = %p\n", MasterIrp );
2949
2950 //
2951 // If we got an error (or verify required), remember it in the Irp
2952 //
2953
2954 NT_ASSERT( !NT_SUCCESS( Irp->IoStatus.Status ));
2955
2956 //
2957 // If we were invoked with an associated Irp, copy the error over.
2958 //
2959
2960 if (Irp != MasterIrp) {
2961
2962 ((PIRP)MasterIrp)->IoStatus = Irp->IoStatus;
2963 }
2964
2965 DebugTrace(-1, Dbg, "FatPagingFileCompletionRoutine => (done)\n", 0 );
2966
2967 UNREFERENCED_PARAMETER( DeviceObject );
2968
2969 return FatPagingFileErrorHandler( Irp, NULL );
2970 }
2971
2972 \f
2973 //
2974 // Internal Support Routine
2975 //
2976
2977 NTSTATUS
2978 NTAPI
2979 FatSpecialSyncCompletionRoutine (
2980 _In_ PDEVICE_OBJECT DeviceObject,
2981 _In_ PIRP Irp,
2982 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
2983 )
2984
2985 /*++
2986
2987 Routine Description:
2988
2989 This is the completion routine for a special set of sub irps
2990 that have to work at APC level.
2991
2992 The completion routine has the following responsibilities:
2993
2994 It sets the event passed as the context to signal that the
2995 request is done.
2996
2997 By doing this, the caller will be released before final APC
2998 completion with knowledge that the IRP is finished. Final
2999 completion will occur at an indeterminate time after this
3000 occurs, and by using this completion routine the caller expects
3001 to not have any output or status returned. A junk user Iosb
3002 should be used to capture the status without forcing Io to take
3003 an exception on NULL.
3004
3005 Arguments:
3006
3007 DeviceObject - Pointer to the file system device object.
3008
3009 Irp - Pointer to the Irp for this request. (This Irp will no longer
3010 be accessible after this routine returns.)
3011
3012 Contxt - The context parameter which was specified in the call to
3013 FatRead/WriteSingleAsynch.
3014
3015 Return Value:
3016
3017 Currently always returns STATUS_SUCCESS.
3018
3019 --*/
3020
3021 {
3022 PFAT_SYNC_CONTEXT SyncContext = (PFAT_SYNC_CONTEXT)Contxt;
3023
3026 DebugTrace(+1, Dbg, "FatSpecialSyncCompletionRoutine, Context = %p\n", Contxt );
3027
3028 SyncContext->Iosb = Irp->IoStatus;
3029
3030 KeSetEvent( &SyncContext->Event, 0, FALSE );
3031
3032 DebugTrace(-1, Dbg, "FatSpecialSyncCompletionRoutine -> STATUS_SUCCESS\n", 0 );
3033
3034 UNREFERENCED_PARAMETER( DeviceObject );
3035
3036 return STATUS_SUCCESS;
3037 }
3038
3039 \f
3040 //
3041 // Internal Support Routine
3042 //
3043
3044 NTSTATUS
3045 NTAPI
3046 FatSingleSyncCompletionRoutine (
3047 _In_ PDEVICE_OBJECT DeviceObject,
3048 _In_ PIRP Irp,
3049 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
3050 )
3051
3052 /*++
3053
3054 Routine Description:
3055
3056 This is the completion routine for all reads and writes started via
3057 FatRead/WriteSingleAsynch.
3058
3059 The completion routine has the following responsibilities:
3060
3061 Copy the I/O status from the Irp to the Context, since the Irp
3062 will no longer be accessible.
3063
3064 It sets the event in the Context parameter to signal the caller
3065 that all of the asynch requests are done.
3066
3067 Arguments:
3068
3069 DeviceObject - Pointer to the file system device object.
3070
3071 Irp - Pointer to the Irp for this request. (This Irp will no longer
3072 be accessible after this routine returns.)
3073
3074 Contxt - The context parameter which was specified in the call to
3075 FatRead/WriteSingleAsynch.
3076
3077 Return Value:
3078
3079 Currently always returns STATUS_SUCCESS.
3080
3081 --*/
3082
3083 {
3084 PFAT_IO_CONTEXT Context = Contxt;
3085
3086 DebugTrace(+1, Dbg, "FatSingleSyncCompletionRoutine, Context = %p\n", Context );
3087
3088 FatDoCompletionZero( Irp, Context );
3089
3090 if (!NT_SUCCESS( Irp->IoStatus.Status )) {
3091
3092 #if DBG
3093 if(!( NT_SUCCESS( FatBreakOnInterestingIoCompletion ) || Irp->IoStatus.Status != FatBreakOnInterestingIoCompletion )) {
3094 DbgBreakPoint();
3095 }
3096 #endif
3097
3098 }
3099
3100 NT_ASSERT( !(NT_SUCCESS( Irp->IoStatus.Status ) && Irp->IoStatus.Information == 0 ));
3101
3102 KeSetEvent( &Context->Wait.SyncEvent, 0, FALSE );
3103
3104 DebugTrace(-1, Dbg, "FatSingleSyncCompletionRoutine -> STATUS_MORE_PROCESSING_REQUIRED\n", 0 );
3105
3106 UNREFERENCED_PARAMETER( DeviceObject );
3107
3108 return STATUS_MORE_PROCESSING_REQUIRED;
3109 }
3110
3111 \f
3112 //
3113 // Internal Support Routine
3114 //
3115
3116 NTSTATUS
3117 NTAPI
3118 FatSingleAsyncCompletionRoutine (
3119 _In_ PDEVICE_OBJECT DeviceObject,
3120 _In_ PIRP Irp,
3121 _In_reads_opt_(_Inexpressible_("varies")) PVOID Contxt
3122 )
3123
3124 /*++
3125
3126 Routine Description:
3127
3128 This is the completion routine for all reads and writes started via
3129 FatRead/WriteSingleAsynch.
3130
3131 The completion routine has the following responsibilities:
3132
3133 Copy the I/O status from the Irp to the Context, since the Irp
3134 will no longer be accessible.
3135
3136 It sets the event in the Context parameter to signal the caller
3137 that all of the asynch requests are done.
3138
3139 Arguments:
3140
3141 DeviceObject - Pointer to the file system device object.
3142
3143 Irp - Pointer to the Irp for this request. (This Irp will no longer
3144 be accessible after this routine returns.)
3145
3146 Contxt - The context parameter which was specified in the call to
3147 FatRead/WriteSingleAsynch.
3148
3149 Return Value:
3150
3151 Currently always returns STATUS_SUCCESS.
3152
3153 --*/
3154
3155 {
3156 NTSTATUS Status = STATUS_SUCCESS;
3157
3158 PFAT_IO_CONTEXT Context = Contxt;
3159 BOOLEAN PostRequest = FALSE;
3160
3161 DebugTrace(+1, Dbg, "FatSingleAsyncCompletionRoutine, Context = %p\n", Context );
3162
3163 //
3164 // Fill in the information field correctly if this worked.
3165 //
3166
3167 FatDoCompletionZero( Irp, Context );
3168
3169 if (NT_SUCCESS(Irp->IoStatus.Status)) {
3170
3171 NT_ASSERT( Irp->IoStatus.Information != 0 );
3172 Irp->IoStatus.Information = Context->Wait.Async.RequestedByteCount;
3173 NT_ASSERT( Irp->IoStatus.Information != 0 );
3174
3175 //
3176 // Now if this wasn't PagingIo, set either the read or write bit.
3177 //
3178
3179 if (!FlagOn(Irp->Flags, IRP_PAGING_IO)) {
3180
3181 SetFlag( Context->Wait.Async.FileObject->Flags,
3182 IoGetCurrentIrpStackLocation(Irp)->MajorFunction == IRP_MJ_READ ?
3183 FO_FILE_FAST_IO_READ : FO_FILE_MODIFIED );
3184 }
3185
3186 } else {
3187
3188 #if DBG
3189 if(!( NT_SUCCESS( FatBreakOnInterestingIoCompletion ) || Irp->IoStatus.Status != FatBreakOnInterestingIoCompletion )) {
3190 DbgBreakPoint();
3191 }
3192 #endif
3193
3194 #ifdef SYSCACHE_COMPILE
3195 DbgPrint( "FAT SYSCACHE: SingleAsync (IRP %08x) -> %08x\n", Irp, Irp->IoStatus );
3196 #endif
3197
3198 //
3199 // Post STATUS_VERIFY_REQUIRED failures. Only post top level IRPs, because recursive I/Os
3200 // cannot process volume verification.
3201 //
3202
3203 if (!FlagOn(Context->IrpContextFlags, IRP_CONTEXT_FLAG_RECURSIVE_CALL) &&
3204 (Irp->IoStatus.Status == STATUS_VERIFY_REQUIRED)) {
3205 PostRequest = TRUE;
3206 }
3207
3208 }
3209
3210 //
3211 // If this was a special async write, decrement the count. Set the
3212 // event if this was the final outstanding I/O for the file. We will
3213 // also want to queue an APC to deal with any error conditions.
3214 //
3215 _Analysis_assume_(!(Context->Wait.Async.NonPagedFcb) &&
3216 (ExInterlockedAddUlong( &Context->Wait.Async.NonPagedFcb->OutstandingAsyncWrites,
3217 0xffffffff,
3218 &FatData.GeneralSpinLock ) != 1));
3219
3220 if ((Context->Wait.Async.NonPagedFcb) &&
3221 (ExInterlockedAddUlong( &Context->Wait.Async.NonPagedFcb->OutstandingAsyncWrites,
3222 0xffffffff,
3223 &FatData.GeneralSpinLock ) == 1)) {
3224
3225 KeSetEvent( Context->Wait.Async.NonPagedFcb->OutstandingAsyncEvent, 0, FALSE );
3226 }
3227
3228 //
3229 // Now release the resources
3230 //
3231
3232 if (Context->Wait.Async.Resource != NULL) {
3233
3234 ExReleaseResourceForThreadLite( Context->Wait.Async.Resource,
3235 Context->Wait.Async.ResourceThreadId );
3236 }
3237
3238 if (Context->Wait.Async.Resource2 != NULL) {
3239
3240 ExReleaseResourceForThreadLite( Context->Wait.Async.Resource2,
3241 Context->Wait.Async.ResourceThreadId );
3242 }
3243
3244 //
3245 // Mark the Irp pending
3246 //
3247
3248 IoMarkIrpPending( Irp );
3249
3250 //
3251 // and finally, free the context record.
3252 //
3253
3254 ExFreePool( Context );
3255
3256 if (PostRequest) {
3257
3258 PIRP_CONTEXT IrpContext = NULL;
3259
3260 _SEH2_TRY {
3261
3262 IrpContext = FatCreateIrpContext(Irp, TRUE );
3263 ClearFlag(IrpContext->Flags, IRP_CONTEXT_FLAG_RECURSIVE_CALL);
3264 FatFsdPostRequest( IrpContext, Irp );
3265 Status = STATUS_MORE_PROCESSING_REQUIRED;
3266
3267 } _SEH2_EXCEPT( FatExceptionFilter(NULL, _SEH2_GetExceptionInformation()) ) {
3268
3269 //
3270 // If we failed to post the IRP, we just have to return the failure
3271 // to the user. :(
3272 //
3273
3274 NOTHING;
3275 } _SEH2_END;
3276 }
3277
3278
3279 DebugTrace(-1, Dbg, "FatSingleAsyncCompletionRoutine -> STATUS_MORE_PROCESSING_REQUIRED\n", 0 );
3280
3281 UNREFERENCED_PARAMETER( DeviceObject );
3282
3283 return Status;
3284 }
3285
3286 \f
3287 VOID
3288 FatLockUserBuffer (
3289 IN PIRP_CONTEXT IrpContext,
3290 IN OUT PIRP Irp,
3291 IN LOCK_OPERATION Operation,
3292 IN ULONG BufferLength
3293 )
3294
3295 /*++
3296
3297 Routine Description:
3298
3299 This routine locks the specified buffer for the specified type of
3300 access. The file system requires this routine since it does not
3301 ask the I/O system to lock its buffers for direct I/O. This routine
3302 may only be called from the Fsd while still in the user context.
3303
3304 Note that this is the *input/output* buffer.
3305
3306 Arguments:
3307
3308 Irp - Pointer to the Irp for which the buffer is to be locked.
3309
3310 Operation - IoWriteAccess for read operations, or IoReadAccess for
3311 write operations.
3312
3313 BufferLength - Length of user buffer.
3314
3315 Return Value:
3316
3317 None
3318
3319 --*/
3320
3321 {
3322 PMDL Mdl = NULL;
3323
3324 PAGED_CODE();
3325
3326 if (Irp->MdlAddress == NULL) {
3327
3328 //
3329 // Allocate the Mdl, and Raise if we fail.
3330 //
3331
3332 Mdl = IoAllocateMdl( Irp->UserBuffer, BufferLength, FALSE, FALSE, Irp );
3333
3334 if (Mdl == NULL) {
3335
3336 FatRaiseStatus( IrpContext, STATUS_INSUFFICIENT_RESOURCES );
3337 }
3338
3339 //
3340 // Now probe the buffer described by the Irp. If we get an exception,
3341 // deallocate the Mdl and return the appropriate "expected" status.
3342 //
3343
3344 _SEH2_TRY {
3345
3346 MmProbeAndLockPages( Mdl,
3347 Irp->RequestorMode,
3348 Operation );
3349
3350 } _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
3351
3352 NTSTATUS Status;
3353
3354 Status = _SEH2_GetExceptionCode();
3355
3356 IoFreeMdl( Mdl );
3357 Irp->MdlAddress = NULL;
3358
3359 FatRaiseStatus( IrpContext,
3360 FsRtlIsNtstatusExpected(Status) ? Status : STATUS_INVALID_USER_BUFFER );
3361 } _SEH2_END;
3362 }
3363
3364 UNREFERENCED_PARAMETER( IrpContext );
3365 }
3366
3367 \f
3368 PVOID
3369 FatMapUserBuffer (
3370 IN PIRP_CONTEXT IrpContext,
3371 IN OUT PIRP Irp
3372 )
3373
3374 /*++
3375
3376 Routine Description:
3377
3378 This routine conditionally maps the user buffer for the current I/O
3379 request in the specified mode. If the buffer is already mapped, it
3380 just returns its address.
3381
3382 Note that this is the *input/output* buffer.
3383
3384 Arguments:
3385
3386 Irp - Pointer to the Irp for the request.
3387
3388 Return Value:
3389
3390 Mapped address
3391
3392 --*/
3393
3394 {
3395 UNREFERENCED_PARAMETER( IrpContext );
3396
3397 PAGED_CODE();
3398
3399 //
3400 // If there is no Mdl, then we must be in the Fsd, and we can simply
3401 // return the UserBuffer field from the Irp.
3402 //
3403
3404 if (Irp->MdlAddress == NULL) {
3405
3406 return Irp->UserBuffer;
3407
3408 } else {
3409
3410 #ifndef __REACTOS__
3411 PVOID Address = MmGetSystemAddressForMdlSafe( Irp->MdlAddress, NormalPagePriority | MdlMappingNoExecute );
3412 #else
3413 PVOID Address = MmGetSystemAddressForMdlSafe( Irp->MdlAddress, NormalPagePriority );
3414 #endif
3415
3416 if (Address == NULL) {
3417
3418 ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
3419 }
3420
3421 return Address;
3422 }
3423 }
3424
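//
//  FatLockUserBuffer and FatMapUserBuffer are normally used together from a
//  dispatch routine, while still in the requestor's context: first pin the
//  caller's buffer behind an Mdl, then obtain a system-space address for it.
//  A sketch of that pairing (the helper name is hypothetical):
//
#if 0
VOID
SketchPinAndMapUserBuffer (
    IN PIRP_CONTEXT IrpContext,
    IN PIRP Irp,
    IN ULONG BufferLength
    )
{
    PVOID SystemBuffer;

    //  For a read the driver will be writing into the buffer, so ask for
    //  IoWriteAccess.

    FatLockUserBuffer( IrpContext, Irp, IoWriteAccess, BufferLength );

    //  An Mdl is now attached to the Irp, so this returns a system address.

    SystemBuffer = FatMapUserBuffer( IrpContext, Irp );

    //  ... transfer data through SystemBuffer ...

    UNREFERENCED_PARAMETER( SystemBuffer );
}
#endif
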
3425 \f
3426 PVOID
3427 FatBufferUserBuffer (
3428 IN PIRP_CONTEXT IrpContext,
3429 IN OUT PIRP Irp,
3430 IN ULONG BufferLength
3431 )
3432
3433 /*++
3434
3435 Routine Description:
3436
3437 This routine conditionally buffers the user buffer for the current I/O
3438 request. If the buffer is already buffered, it just returns its address.
3439
3440 Note that this is the *input* buffer.
3441
3442 Arguments:
3443
3444 Irp - Pointer to the Irp for the request.
3445
3446 BufferLength - Length of user buffer.
3447
3448 Return Value:
3449
3450 Buffered address.
3451
3452 --*/
3453
3454 {
3455 PUCHAR UserBuffer;
3456
3457 UNREFERENCED_PARAMETER( IrpContext );
3458
3459 PAGED_CODE();
3460
3461 //
3462 // Handle the no buffer case.
3463 //
3464
3465 if (BufferLength == 0) {
3466
3467 return NULL;
3468 }
3469
3470 //
3471 // If there is no system buffer we must have been supplied an Mdl
3472 // describing the user's input buffer, which we will now snapshot.
3473 //
3474
3475 if (Irp->AssociatedIrp.SystemBuffer == NULL) {
3476
3477 UserBuffer = FatMapUserBuffer( IrpContext, Irp );
3478
3479 #ifndef __REACTOS__
3480 Irp->AssociatedIrp.SystemBuffer = FsRtlAllocatePoolWithQuotaTag( NonPagedPoolNx,
3481 #else
3482 Irp->AssociatedIrp.SystemBuffer = FsRtlAllocatePoolWithQuotaTag( NonPagedPool,
3483 #endif
3484 BufferLength,
3485 TAG_IO_USER_BUFFER );
3486
3487 //
3488 // Set the flags so that the completion code knows to deallocate the
3489 // buffer.
3490 //
3491
3492 Irp->Flags |= (IRP_BUFFERED_IO | IRP_DEALLOCATE_BUFFER);
3493
3494 _SEH2_TRY {
3495
3496 RtlCopyMemory( Irp->AssociatedIrp.SystemBuffer,
3497 UserBuffer,
3498 BufferLength );
3499
3500 } _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER) {
3501
3502 NTSTATUS Status;
3503
3504 Status = _SEH2_GetExceptionCode();
3505 FatRaiseStatus( IrpContext,
3506 FsRtlIsNtstatusExpected(Status) ? Status : STATUS_INVALID_USER_BUFFER );
3507 } _SEH2_END;
3508 }
3509
3510 return Irp->AssociatedIrp.SystemBuffer;
3511 }
3512
3513 \f
3514 NTSTATUS
3515 FatToggleMediaEjectDisable (
3516 IN PIRP_CONTEXT IrpContext,
3517 IN PVCB Vcb,
3518 IN BOOLEAN PreventRemoval
3519 )
3520
3521 /*++
3522
3523 Routine Description:
3524
3525 The routine either enables or disables the eject button on removable
3526 media.
3527
3528 Arguments:
3529
3530 Vcb - Describes the volume to operate on
3531
3532 PreventRemoval - TRUE if we should disable the media eject button. FALSE
3533 if we want to enable it.
3534
3535 Return Value:
3536
3537 Status of the operation.
3538
3539 --*/
3540
3541 {
3542 PIRP Irp;
3543 KIRQL SavedIrql;
3544 NTSTATUS Status;
3545 FAT_SYNC_CONTEXT SyncContext;
3546 PREVENT_MEDIA_REMOVAL Prevent;
3547
3548 UNREFERENCED_PARAMETER( IrpContext );
3549
3550 //
3551 // If PreventRemoval is the same as VCB_STATE_FLAG_REMOVAL_PREVENTED,
3552 // no-op this call, otherwise toggle the state of the flag.
3553 //
3554
3555 KeAcquireSpinLock( &FatData.GeneralSpinLock, &SavedIrql );
3556
3557 if ((PreventRemoval ^
3558 BooleanFlagOn(Vcb->VcbState, VCB_STATE_FLAG_REMOVAL_PREVENTED)) == 0) {
3559
3560 KeReleaseSpinLock( &FatData.GeneralSpinLock, SavedIrql );
3561
3562 return STATUS_SUCCESS;
3563
3564 } else {
3565
3566 Vcb->VcbState ^= VCB_STATE_FLAG_REMOVAL_PREVENTED;
3567
3568 KeReleaseSpinLock( &FatData.GeneralSpinLock, SavedIrql );
3569 }
3570
3571 Prevent.PreventMediaRemoval = PreventRemoval;
3572
3573 KeInitializeEvent( &SyncContext.Event, NotificationEvent, FALSE );
3574
3575 //
3576 // We build this IRP using a junk Iosb that will receive the final
3577 // completion status since we won't be around for it.
3578 //
3579 // We fill in the UserIosb manually below, so passing NULL for
3580 // the final parameter is ok in this special case.
3581 //
3582 #ifdef _MSC_VER
3583 #pragma warning(suppress: 6387)
3584 #endif
3585 Irp = IoBuildDeviceIoControlRequest( IOCTL_DISK_MEDIA_REMOVAL,
3586 Vcb->TargetDeviceObject,
3587 &Prevent,
3588 sizeof(PREVENT_MEDIA_REMOVAL),
3589 NULL,
3590 0,
3591 FALSE,
3592 NULL,
3593 NULL );
3594
3595 if ( Irp != NULL ) {
3596
3597 //
3598 // Use our special completion routine which will remove the requirement that
3599 // the caller must be below APC level. All it tells us is that the Irp got
3600 // back, but will not tell us if it was successful or not.  We don't care,
3601 // and there is of course no fallback if the attempt to prevent removal
3602 // doesn't work for some mysterious reason.
3603 //
3604 // Normally, all IO is done at passive level. However, MM needs to be able
3605 // to issue IO with fast mutexes locked down, which raises us to APC. The
3606 // overlying IRP is set up to complete in yet another magical fashion even
3607 // though APCs are disabled, and any IRPs we issue in these cases have to do
3608 // the same. Marking media dirty (and toggling eject state) is one.
3609 //
3610
3611 Irp->UserIosb = &Irp->IoStatus;
3612
3613 IoSetCompletionRoutine( Irp,
3614 FatSpecialSyncCompletionRoutine,
3615 &SyncContext,
3616 TRUE,
3617 TRUE,
3618 TRUE );
3619
3620 Status = IoCallDriver( Vcb->TargetDeviceObject, Irp );
3621
3622 if (Status == STATUS_PENDING) {
3623
3624 (VOID) KeWaitForSingleObject( &SyncContext.Event,
3625 Executive,
3626 KernelMode,
3627 FALSE,
3628 NULL );
3629
3630 Status = SyncContext.Iosb.Status;
3631 }
3632
3633 return Status;
3634 }
3635
3636 return STATUS_INSUFFICIENT_RESOURCES;
3637 }
3638
3639 \f
3640 NTSTATUS
3641 FatPerformDevIoCtrl (
3642 IN PIRP_CONTEXT IrpContext,
3643 IN ULONG IoControlCode,
3644 IN PDEVICE_OBJECT Device,
3645 IN PVOID InputBuffer OPTIONAL,
3646 IN ULONG InputBufferLength,
3647 OUT PVOID OutputBuffer OPTIONAL,
3648 IN ULONG OutputBufferLength,
3649 IN BOOLEAN InternalDeviceIoControl,
3650 IN BOOLEAN OverrideVerify,
3651 OUT PIO_STATUS_BLOCK Iosb OPTIONAL
3652 )
3653
3654 /*++
3655
3656 Routine Description:
3657
3658 This routine is called to perform DevIoCtrl functions internally within
3659 the filesystem. We take the status from the driver and return it to our
3660 caller.
3661
3662 Arguments:
3663
3664 IoControlCode - Code to send to driver.
3665
3666 Device - This is the device to send the request to.
3667
3668 OutputBuffer - Pointer to output buffer.
3669
3670 OutputBufferLength - Length of output buffer above.
3671
3672 InternalDeviceIoControl - Indicates if this is an internal or external
3673 Io control code.
3674
3675 OverrideVerify - Indicates if we should tell the driver not to return
3676 STATUS_VERIFY_REQUIRED for mount and verify.
3677
3678 Iosb - If specified, we return the results of the operation here.
3679
3680 Return Value:
3681
3682 NTSTATUS - Status returned by next lower driver.
3683
3684 --*/
3685
3686 {
3687 NTSTATUS Status;
3688 PIRP Irp;
3689 KEVENT Event;
3690 IO_STATUS_BLOCK LocalIosb;
3691 PIO_STATUS_BLOCK IosbToUse = &LocalIosb;
3692
3693 PAGED_CODE();
3694
3695 UNREFERENCED_PARAMETER( IrpContext );
3696
3697 //
3698 // Check if the user gave us an Iosb.
3699 //
3700
3701 if (ARGUMENT_PRESENT( Iosb )) {
3702
3703 IosbToUse = Iosb;
3704 }
3705
3706 IosbToUse->Status = 0;
3707 IosbToUse->Information = 0;
3708
3709 KeInitializeEvent( &Event, NotificationEvent, FALSE );
3710
3711 Irp = IoBuildDeviceIoControlRequest( IoControlCode,
3712 Device,
3713 InputBuffer,
3714 InputBufferLength,
3715 OutputBuffer,
3716 OutputBufferLength,
3717 InternalDeviceIoControl,
3718 &Event,
3719 IosbToUse );
3720
3721 if (Irp == NULL) {
3722
3723 return STATUS_INSUFFICIENT_RESOURCES;
3724 }
3725
3726 if (OverrideVerify) {
3727
3728 SetFlag( IoGetNextIrpStackLocation( Irp )->Flags, SL_OVERRIDE_VERIFY_VOLUME );
3729 }
3730
3731 Status = IoCallDriver( Device, Irp );
3732
3733 //
3734 // We check for device not ready by first checking Status
3735 // and then if status pending was returned, the Iosb status
3736 // value.
3737 //
3738
3739 if (Status == STATUS_PENDING) {
3740
3741 (VOID) KeWaitForSingleObject( &Event,
3742 Executive,
3743 KernelMode,
3744 FALSE,
3745 (PLARGE_INTEGER)NULL );
3746
3747 Status = IosbToUse->Status;
3748 }
3749
3750 return Status;
3751 }
3752
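//
//  Example call shape for FatPerformDevIoCtrl: a no-data verify request sent
//  to the target device with the verify logic overridden.  The IOCTL chosen
//  here is only illustrative; any device control with the appropriate
//  buffers is issued the same way.
//
#if 0
Status = FatPerformDevIoCtrl( IrpContext,
                              IOCTL_DISK_CHECK_VERIFY,
                              Vcb->TargetDeviceObject,
                              NULL,
                              0,
                              NULL,
                              0,
                              FALSE,
                              TRUE,
                              NULL );
#endif
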
3753 PMDL
3754 FatBuildZeroMdl (
3755 __in PIRP_CONTEXT IrpContext,
3756 __in ULONG Length
3757 )
3758 /*++
3759
3760 Routine Description:
3761
3762 Create an efficient MDL that describes a given length of zeros.  We'll only
3763 use a one-page buffer and make an MDL that maps all the pages back to the single
3764 physical page.  We'll fall back to a smaller buffer, down to one page, if memory
3765 is tight.  The caller should check Mdl->ByteCount to see the true size.
3766
3767 Arguments:
3768
3769 Length - The desired length of the zero buffer.  We may return less than this.
3770
3771 Return Value:
3772
3773 An MDL if successful, or NULL if not.
3774
3775 --*/
3776
3777 {
3778 PMDL ZeroMdl;
3779 ULONG SavedByteCount;
3780 PPFN_NUMBER Page;
3781 ULONG i;
3782
3783 UNREFERENCED_PARAMETER( IrpContext );
3784
3785 //
3786 // Spin down trying to get an MDL which can describe our operation.
3787 //
3788
3789 while (TRUE) {
3790
3791 ZeroMdl = IoAllocateMdl( FatData.ZeroPage, Length, FALSE, FALSE, NULL );
3792
3793 //
3794 // Throttle ourselves to what we've physically allocated. Note that
3795 // we could have started with an odd multiple of this number. If we
3796 // tried for exactly that size and failed, we're toast.
3797 //
3798
3799 if (ZeroMdl || (Length <= PAGE_SIZE)) {
3800
3801 break;
3802 }
3803
3804 //
3805 // Fall back by half and round down to a page multiple.
3806 //
3807
3808 ASSERT( IrpContext->Vcb->Bpb.BytesPerSector <= PAGE_SIZE );
3809 Length = BlockAlignTruncate( Length / 2, PAGE_SIZE );
3810 if (Length < PAGE_SIZE) {
3811 Length = PAGE_SIZE;
3812 }
3813 }
3814
3815 if (ZeroMdl == NULL) {
3816 return NULL;
3817 }
3818
3819 //
3820 // If we have throttled all the way down, stop and just build a
3821 // simple MDL describing our previous allocation.
3822 //
3823
3824 if (Length == PAGE_SIZE) {
3825
3826 MmBuildMdlForNonPagedPool( ZeroMdl );
3827 return ZeroMdl;
3828 }
3829
3830 //
3831 // Now we will temporarily lock the allocated pages
3832 // only, and then replicate the page frame numbers through
3833 // the entire Mdl to keep writing the same pages of zeros.
3834 //
3835 // It would be nice if Mm exported a way for us to not have
3836 // to pull the Mdl apart and rebuild it ourselves, but this
3837 // is so bizarre a purpose as to be tolerable.
3838 //
3839
3840 SavedByteCount = ZeroMdl->ByteCount;
3841 ZeroMdl->ByteCount = PAGE_SIZE;
3842 MmBuildMdlForNonPagedPool( ZeroMdl );
3843
3844 ZeroMdl->MdlFlags &= ~MDL_SOURCE_IS_NONPAGED_POOL;
3845 ZeroMdl->MdlFlags |= MDL_PAGES_LOCKED;
3846 ZeroMdl->MappedSystemVa = NULL;
3847 ZeroMdl->StartVa = NULL;
3848 ZeroMdl->ByteCount = SavedByteCount;
3849 Page = MmGetMdlPfnArray( ZeroMdl );
3850 for (i = 1; i < (ADDRESS_AND_SIZE_TO_SPAN_PAGES( 0, SavedByteCount )); i++) {
3851 *(Page + i) = *(Page);
3852 }
3853
3854
3855 return ZeroMdl;
3856 }
3857
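//
//  Example call shape for FatBuildZeroMdl: the routine may trim the request,
//  so the caller loops on the Mdl's ByteCount.  SketchWriteZeroRun is a
//  hypothetical stand-in for whatever actually issues the write.
//
#if 0
ZeroMdl = FatBuildZeroMdl( IrpContext, ByteCount );

if (ZeroMdl != NULL) {

    while (ByteCount != 0) {

        ULONG RunBytes = ByteCount < ZeroMdl->ByteCount ?
                         ByteCount : ZeroMdl->ByteCount;

        SketchWriteZeroRun( IrpContext, Vcb, Lbo, RunBytes, ZeroMdl );

        Lbo += RunBytes;
        ByteCount -= RunBytes;
    }

    IoFreeMdl( ZeroMdl );
}
#endif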
3858