[NTOSKRNL] Implement per-file dirty page threshold.
[reactos.git] / ntoskrnl / cc / copy.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/copy.c
5 * PURPOSE: Implements cache managers copy interface
6 *
7 * PROGRAMMERS: Some people?
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* INCLUDES ******************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 /* GLOBALS *******************************************************************/
18
19 static PFN_NUMBER CcZeroPage = 0;
20
21 #define MAX_ZERO_LENGTH (256 * 1024)
22
23 typedef enum _CC_COPY_OPERATION
24 {
25 CcOperationRead,
26 CcOperationWrite,
27 CcOperationZero
28 } CC_COPY_OPERATION;
29
30 ULONG CcRosTraceLevel = 0;
31 ULONG CcFastMdlReadWait;
32 ULONG CcFastMdlReadNotPossible;
33 ULONG CcFastReadNotPossible;
34 ULONG CcFastReadWait;
35 ULONG CcFastReadNoWait;
36 ULONG CcFastReadResourceMiss;
37
38 extern KEVENT iLazyWriterNotify;
39
40 /* FUNCTIONS *****************************************************************/
41
42 VOID
43 NTAPI
44 MiZeroPhysicalPage (
45 IN PFN_NUMBER PageFrameIndex
46 );
47
/* One-time initialization of CcZeroPage: grabs a single physical page from
 * the system memory consumer and zeroes it. CcZeroData later points every
 * MDL PFN entry at this page for non-cached zero writes. Bugchecks on
 * allocation failure since the cache manager cannot operate without it. */
VOID
NTAPI
CcInitCacheZeroPage (
    VOID)
{
    NTSTATUS Status;

    MI_SET_USAGE(MI_USAGE_CACHE);
    //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
    Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
    if (!NT_SUCCESS(Status))
    {
        DbgPrint("Can't allocate CcZeroPage.\n");
        KeBugCheck(CACHE_MANAGER);
    }
    /* The page comes from the consumer uninitialized; clear it once here */
    MiZeroPhysicalPage(CcZeroPage);
}
65
/* Fault one VACB's worth of file data into the cache mapping.
 * Issues a paging read (IoPageRead) for up to VACB_MAPPING_GRANULARITY bytes
 * at Vacb->FileOffset into Vacb->BaseAddress, then zero-fills any tail past
 * the end of the section so the whole mapping is defined.
 * Returns STATUS_SUCCESS (STATUS_END_OF_FILE is treated as success) or the
 * failing I/O status. */
NTSTATUS
NTAPI
CcReadVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size, Pages;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    /* Clamp the read to the remaining section size, at most one VACB */
    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }

    Pages = BYTES_TO_PAGES(Size);
    ASSERT(Pages * PAGE_SIZE <= VACB_MAPPING_GRANULARITY);

    /* Describe the whole-page destination range for the paging read */
    Mdl = IoAllocateMdl(Vacb->BaseAddress, Pages * PAGE_SIZE, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        /* IoWriteAccess: the device will write INTO these pages */
        MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        /* A probe failure on a cache mapping is fatal corruption */
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        Mdl->MdlFlags |= MDL_IO_PAGE_READ;
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            /* Synchronous semantics: wait for the paging I/O to finish */
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    /* A short read at end of file is expected and not an error */
    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageRead failed, Status %x\n", Status);
        return Status;
    }

    /* Zero the portion of the mapping beyond the file data */
    if (Size < VACB_MAPPING_GRANULARITY)
    {
        RtlZeroMemory((char*)Vacb->BaseAddress + Size,
                      VACB_MAPPING_GRANULARITY - Size);
    }

    return STATUS_SUCCESS;
}
133
/* Flush one dirty VACB back to the file with a synchronous paging write.
 * The Dirty flag is cleared up front (before the write) and restored only
 * if the write fails, so concurrent re-dirtying during the I/O is not lost.
 * Returns STATUS_SUCCESS (STATUS_END_OF_FILE counts as success) or the
 * failing I/O status. */
NTSTATUS
NTAPI
CcWriteVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    Vacb->Dirty = FALSE;
    /* Clamp the write to the remaining section size, at most one VACB */
    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }
    //
    // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
    // MmGlobalPageDirectory and the real system PDE directory. What a mess...
    //
    {
        ULONG i = 0;
        do
        {
            /* Touch each page's PFN to force the PDE synchronization */
            MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
        } while (++i < (Size >> PAGE_SHIFT));
    }

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        /* IoReadAccess: the device will read FROM these pages */
        MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        /* A probe failure on a cache mapping is fatal corruption */
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            /* Wait for the paging write to complete */
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }
    IoFreeMdl(Mdl);
    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageWrite failed, Status %x\n", Status);
        /* Write failed: the data is still dirty, mark it so again */
        Vacb->Dirty = TRUE;
        return Status;
    }

    return STATUS_SUCCESS;
}
202
203 NTSTATUS
204 ReadWriteOrZero(
205 _Inout_ PVOID BaseAddress,
206 _Inout_opt_ PVOID Buffer,
207 _In_ ULONG Length,
208 _In_ CC_COPY_OPERATION Operation)
209 {
210 NTSTATUS Status = STATUS_SUCCESS;
211
212 if (Operation == CcOperationZero)
213 {
214 /* Zero */
215 RtlZeroMemory(BaseAddress, Length);
216 }
217 else
218 {
219 _SEH2_TRY
220 {
221 if (Operation == CcOperationWrite)
222 RtlCopyMemory(BaseAddress, Buffer, Length);
223 else
224 RtlCopyMemory(Buffer, BaseAddress, Length);
225 }
226 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
227 {
228 Status = _SEH2_GetExceptionCode();
229 }
230 _SEH2_END;
231 }
232 return Status;
233 }
234
/* Common engine behind CcCopyRead, CcCopyWrite and CcZeroData: walks the
 * file's VACBs covering [FileOffset, FileOffset + Length) and performs the
 * requested Operation chunk by chunk, faulting data in as needed.
 * Returns FALSE only when Wait is FALSE and a required VACB is not yet
 * valid; raises via ExRaiseStatus on VACB acquisition or I/O failure, and
 * raises STATUS_INVALID_USER_BUFFER when the caller's buffer faults.
 * On success fills IoStatus with STATUS_SUCCESS and the byte count. */
BOOLEAN
CcCopyData (
    _In_ PFILE_OBJECT FileObject,
    _In_ LONGLONG FileOffset,
    _Inout_ PVOID Buffer,
    _In_ LONGLONG Length,
    _In_ CC_COPY_OPERATION Operation,
    _In_ BOOLEAN Wait,
    _Out_ PIO_STATUS_BLOCK IoStatus)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    ULONG BytesCopied;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PLIST_ENTRY ListEntry;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    CurrentOffset = FileOffset;
    BytesCopied = 0;

    if (!Wait)
    {
        /* test if the requested data is available */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        /* FIXME: this loop doesn't take into account areas that don't have
         * a VACB in the list yet */
        ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
        {
            Vacb = CONTAINING_RECORD(ListEntry,
                                     ROS_VACB,
                                     CacheMapVacbListEntry);
            ListEntry = ListEntry->Flink;
            /* An invalid VACB overlapping the request would need blocking
             * I/O to fill — which we may not do without Wait */
            if (!Vacb->Valid &&
                DoRangesIntersect(Vacb->FileOffset.QuadPart,
                                  VACB_MAPPING_GRANULARITY,
                                  CurrentOffset, Length))
            {
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
                /* data not available */
                return FALSE;
            }
            /* List is offset-ordered: nothing past the request can overlap */
            if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
                break;
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
    }

    /* Handle a leading partial chunk so the main loop starts VACB-aligned */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid)
        {
            /* Partial chunk always needs the backing data faulted in first */
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,
                                 Buffer,
                                 PartialLength,
                                 Operation);

        /* Release valid; mark dirty for write/zero operations */
        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        /* Zeroing consumes no caller buffer, so only advance it otherwise */
        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    /* Main loop: whole VACB-aligned chunks (last one may be short) */
    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        /* Fault in the data unless the whole chunk will be overwritten
         * (a full-VACB write/zero makes the old contents irrelevant) */
        if (!Valid &&
            (Operation == CcOperationRead ||
             PartialLength < VACB_MAPPING_GRANULARITY))
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);

        /* Release valid; mark dirty for write/zero operations */
        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }
    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = BytesCopied;
    return TRUE;
}
367
368 /*
369 * @unimplemented
370 */
371 BOOLEAN
372 NTAPI
373 CcCanIWrite (
374 IN PFILE_OBJECT FileObject,
375 IN ULONG BytesToWrite,
376 IN BOOLEAN Wait,
377 IN BOOLEAN Retrying)
378 {
379 KIRQL OldIrql;
380 ULONG DirtyPages;
381 PLIST_ENTRY ListEntry;
382 PFSRTL_COMMON_FCB_HEADER Fcb;
383 PROS_SHARED_CACHE_MAP SharedCacheMap;
384
385 CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
386 FileObject, BytesToWrite, Wait, Retrying);
387
388 /* We cannot write if dirty pages count is above threshold */
389 if (CcTotalDirtyPages > CcDirtyPageThreshold)
390 {
391 return FALSE;
392 }
393
394 /* We cannot write if dirty pages count will bring use above
395 * XXX: Might not be accurate
396 */
397 if (CcTotalDirtyPages + (BytesToWrite / PAGE_SIZE) > CcDirtyPageThreshold)
398 {
399 return FALSE;
400 }
401
402 /* Is there a limit per file object? */
403 Fcb = FileObject->FsContext;
404 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
405 if (!BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES) ||
406 SharedCacheMap->DirtyPageThreshold == 0)
407 {
408 /* Nope, so that's fine, allow write operation */
409 return TRUE;
410 }
411
412 /* There's a limit, start counting dirty pages */
413 DirtyPages = 0;
414 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
415 for (ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
416 ListEntry != &SharedCacheMap->CacheMapVacbListHead;
417 ListEntry = ListEntry->Flink)
418 {
419 PROS_VACB Vacb;
420
421 Vacb = CONTAINING_RECORD(ListEntry,
422 ROS_VACB,
423 CacheMapVacbListEntry);
424 if (Vacb->Dirty)
425 {
426 DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
427 }
428 }
429 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
430
431 /* Is dirty page count above local threshold? */
432 if (DirtyPages > SharedCacheMap->DirtyPageThreshold)
433 {
434 return FALSE;
435 }
436
437 /* We cannot write if dirty pages count will bring use above
438 * XXX: Might not be accurate
439 */
440 if (DirtyPages + (BytesToWrite / PAGE_SIZE) > SharedCacheMap->DirtyPageThreshold)
441 {
442 return FALSE;
443 }
444
445 return TRUE;
446 }
447
448 /*
449 * @implemented
450 */
451 BOOLEAN
452 NTAPI
453 CcCopyRead (
454 IN PFILE_OBJECT FileObject,
455 IN PLARGE_INTEGER FileOffset,
456 IN ULONG Length,
457 IN BOOLEAN Wait,
458 OUT PVOID Buffer,
459 OUT PIO_STATUS_BLOCK IoStatus)
460 {
461 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
462 FileObject, FileOffset->QuadPart, Length, Wait);
463
464 DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
465 "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
466 FileObject, FileOffset->QuadPart, Length, Wait,
467 Buffer, IoStatus);
468
469 return CcCopyData(FileObject,
470 FileOffset->QuadPart,
471 Buffer,
472 Length,
473 CcOperationRead,
474 Wait,
475 IoStatus);
476 }
477
478 /*
479 * @implemented
480 */
481 BOOLEAN
482 NTAPI
483 CcCopyWrite (
484 IN PFILE_OBJECT FileObject,
485 IN PLARGE_INTEGER FileOffset,
486 IN ULONG Length,
487 IN BOOLEAN Wait,
488 IN PVOID Buffer)
489 {
490 IO_STATUS_BLOCK IoStatus;
491
492 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
493 FileObject, FileOffset->QuadPart, Length, Wait, Buffer);
494
495 DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
496 "Length %lu, Wait %u, Buffer 0x%p)\n",
497 FileObject, FileOffset->QuadPart, Length, Wait, Buffer);
498
499 return CcCopyData(FileObject,
500 FileOffset->QuadPart,
501 Buffer,
502 Length,
503 CcOperationWrite,
504 Wait,
505 &IoStatus);
506 }
507
508 /*
509 * @implemented
510 */
511 VOID
512 NTAPI
513 CcDeferWrite (
514 IN PFILE_OBJECT FileObject,
515 IN PCC_POST_DEFERRED_WRITE PostRoutine,
516 IN PVOID Context1,
517 IN PVOID Context2,
518 IN ULONG BytesToWrite,
519 IN BOOLEAN Retrying)
520 {
521 PROS_DEFERRED_WRITE_CONTEXT Context;
522
523 CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
524 FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);
525
526 /* Try to allocate a context for queueing the write operation */
527 Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(ROS_DEFERRED_WRITE_CONTEXT), 'CcDw');
528 /* If it failed, immediately execute the operation! */
529 if (Context == NULL)
530 {
531 PostRoutine(Context1, Context2);
532 return;
533 }
534
535 /* Otherwise, initialize the context */
536 Context->FileObject = FileObject;
537 Context->PostRoutine = PostRoutine;
538 Context->Context1 = Context1;
539 Context->Context2 = Context2;
540 Context->BytesToWrite = BytesToWrite;
541 Context->Retrying = Retrying;
542
543 /* And queue it */
544 if (Retrying)
545 {
546 /* To the top, if that's a retry */
547 ExInterlockedInsertHeadList(&CcDeferredWrites,
548 &Context->CcDeferredWritesEntry,
549 &CcDeferredWriteSpinLock);
550 }
551 else
552 {
553 /* To the bottom, if that's a first time */
554 ExInterlockedInsertTailList(&CcDeferredWrites,
555 &Context->CcDeferredWritesEntry,
556 &CcDeferredWriteSpinLock);
557 }
558 }
559
560 /*
561 * @unimplemented
562 */
563 VOID
564 NTAPI
565 CcFastCopyRead (
566 IN PFILE_OBJECT FileObject,
567 IN ULONG FileOffset,
568 IN ULONG Length,
569 IN ULONG PageCount,
570 OUT PVOID Buffer,
571 OUT PIO_STATUS_BLOCK IoStatus)
572 {
573 LARGE_INTEGER LargeFileOffset;
574 BOOLEAN Success;
575
576 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
577 FileObject, FileOffset, Length, PageCount, Buffer);
578
579 DBG_UNREFERENCED_PARAMETER(PageCount);
580
581 LargeFileOffset.QuadPart = FileOffset;
582 Success = CcCopyRead(FileObject,
583 &LargeFileOffset,
584 Length,
585 TRUE,
586 Buffer,
587 IoStatus);
588 ASSERT(Success == TRUE);
589 }
590
591 /*
592 * @unimplemented
593 */
594 VOID
595 NTAPI
596 CcFastCopyWrite (
597 IN PFILE_OBJECT FileObject,
598 IN ULONG FileOffset,
599 IN ULONG Length,
600 IN PVOID Buffer)
601 {
602 LARGE_INTEGER LargeFileOffset;
603 BOOLEAN Success;
604
605 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
606 FileObject, FileOffset, Length, Buffer);
607
608 LargeFileOffset.QuadPart = FileOffset;
609 Success = CcCopyWrite(FileObject,
610 &LargeFileOffset,
611 Length,
612 TRUE,
613 Buffer);
614 ASSERT(Success == TRUE);
615 }
616
617 /*
618 * @implemented
619 */
620 NTSTATUS
621 NTAPI
622 CcWaitForCurrentLazyWriterActivity (
623 VOID)
624 {
625 NTSTATUS Status;
626
627 /* Lazy writer is done when its event is set */
628 Status = KeWaitForSingleObject(&iLazyWriterNotify,
629 Executive,
630 KernelMode,
631 FALSE,
632 NULL);
633 if (!NT_SUCCESS(Status))
634 {
635 return Status;
636 }
637
638 return STATUS_SUCCESS;
639 }
640
641 /*
642 * @implemented
643 */
644 BOOLEAN
645 NTAPI
646 CcZeroData (
647 IN PFILE_OBJECT FileObject,
648 IN PLARGE_INTEGER StartOffset,
649 IN PLARGE_INTEGER EndOffset,
650 IN BOOLEAN Wait)
651 {
652 NTSTATUS Status;
653 LARGE_INTEGER WriteOffset;
654 LONGLONG Length;
655 ULONG CurrentLength;
656 PMDL Mdl;
657 ULONG i;
658 IO_STATUS_BLOCK Iosb;
659 KEVENT Event;
660
661 CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
662 FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);
663
664 DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
665 "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
666 Wait);
667
668 Length = EndOffset->QuadPart - StartOffset->QuadPart;
669 WriteOffset.QuadPart = StartOffset->QuadPart;
670
671 if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
672 {
673 /* File is not cached */
674
675 Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));
676
677 while (Length > 0)
678 {
679 if (Length + WriteOffset.QuadPart % PAGE_SIZE > MAX_ZERO_LENGTH)
680 {
681 CurrentLength = MAX_ZERO_LENGTH - WriteOffset.QuadPart % PAGE_SIZE;
682 }
683 else
684 {
685 CurrentLength = Length;
686 }
687 MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
688 Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
689 for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
690 {
691 ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
692 }
693 KeInitializeEvent(&Event, NotificationEvent, FALSE);
694 Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
695 if (Status == STATUS_PENDING)
696 {
697 KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
698 Status = Iosb.Status;
699 }
700 if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
701 {
702 MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
703 }
704 if (!NT_SUCCESS(Status))
705 {
706 return FALSE;
707 }
708 WriteOffset.QuadPart += CurrentLength;
709 Length -= CurrentLength;
710 }
711 }
712 else
713 {
714 IO_STATUS_BLOCK IoStatus;
715
716 return CcCopyData(FileObject,
717 WriteOffset.QuadPart,
718 NULL,
719 Length,
720 CcOperationZero,
721 Wait,
722 &IoStatus);
723 }
724
725 return TRUE;
726 }