[NTOSKRNL] Implement CcPostDeferredWrites() that executes deferred writes.
[reactos.git] / ntoskrnl / cc / copy.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/copy.c
5 * PURPOSE: Implements cache managers copy interface
6 *
7 * PROGRAMMERS: Some people?
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* INCLUDES ******************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
/* GLOBALS *******************************************************************/

/* PFN of a permanently zeroed page, used by CcZeroData() for non-cached
 * writes; allocated once in CcInitCacheZeroPage() */
static PFN_NUMBER CcZeroPage = 0;

/* Upper bound (bytes) on a single zeroing write issued by CcZeroData() */
#define MAX_ZERO_LENGTH    (256 * 1024)

/* Selects the behavior of the common copy engine (CcCopyData /
 * ReadWriteOrZero) */
typedef enum _CC_COPY_OPERATION
{
    CcOperationRead,
    CcOperationWrite,
    CcOperationZero
} CC_COPY_OPERATION;

ULONG CcRosTraceLevel = 0;
/* Fast I/O statistics counters (exported for the performance counters) */
ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
ULONG CcFastReadWait;
ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;
37
38 /* FUNCTIONS *****************************************************************/
39
/* Mm-internal helper (no public header): fills the physical page with
 * the given PFN with zeroes */
VOID
NTAPI
MiZeroPhysicalPage (
    IN PFN_NUMBER PageFrameIndex
);
45
/*
 * Allocates and zeroes the single page backing CcZeroPage.
 * Called once during cache manager initialization; bugchecks on failure
 * because CcZeroData() cannot operate without it.
 */
VOID
NTAPI
CcInitCacheZeroPage (
    VOID)
{
    NTSTATUS Status;

    MI_SET_USAGE(MI_USAGE_CACHE);
    //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
    Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
    if (!NT_SUCCESS(Status))
    {
        DbgPrint("Can't allocate CcZeroPage.\n");
        KeBugCheck(CACHE_MANAGER);
    }
    MiZeroPhysicalPage(CcZeroPage);
}
63
/*
 * Fills the given VACB with data read from its backing file.
 * Reads whole pages up to the section size; any tail of the mapping
 * window past the end of the section is zeroed. STATUS_END_OF_FILE from
 * the paging read is treated as success.
 */
NTSTATUS
NTAPI
CcReadVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size, Pages;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    /* Bytes of file data left from this VACB's offset, capped at one
     * mapping window */
    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }

    Pages = BYTES_TO_PAGES(Size);
    ASSERT(Pages * PAGE_SIZE <= VACB_MAPPING_GRANULARITY);

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Pages * PAGE_SIZE, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        /* A fault here means the cache mapping itself is broken: fatal */
        MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        Mdl->MdlFlags |= MDL_IO_PAGE_READ;
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            /* Paging I/O may complete asynchronously; wait for the final
             * status in IoStatus */
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageRead failed, Status %x\n", Status);
        return Status;
    }

    if (Size < VACB_MAPPING_GRANULARITY)
    {
        /* Zero the part of the window that lies beyond the section size */
        RtlZeroMemory((char*)Vacb->BaseAddress + Size,
                      VACB_MAPPING_GRANULARITY - Size);
    }

    return STATUS_SUCCESS;
}
131
/*
 * Writes the contents of the given VACB back to its file, synchronously.
 * Only the bytes up to the section size are written.
 */
NTSTATUS
NTAPI
CcWriteVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    /* Bytes of file data backed by this VACB, capped at one mapping window */
    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }
    //
    // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
    // MmGlobalPageDirectory and the real system PDE directory. What a mess...
    //
    {
        ULONG i = 0;
        do
        {
            /* Touch each page so its PDE is synchronized before the MDL
             * probe below (see the comment above); the PFN result is
             * intentionally discarded */
            MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
        } while (++i < (Size >> PAGE_SHIFT));
    }

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        /* A fault here means the cache mapping itself is broken: fatal */
        MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            /* Wait for the paging write to complete and pick up its status */
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }
    IoFreeMdl(Mdl);
    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageWrite failed, Status %x\n", Status);
        return Status;
    }

    return STATUS_SUCCESS;
}
198
199 NTSTATUS
200 ReadWriteOrZero(
201 _Inout_ PVOID BaseAddress,
202 _Inout_opt_ PVOID Buffer,
203 _In_ ULONG Length,
204 _In_ CC_COPY_OPERATION Operation)
205 {
206 NTSTATUS Status = STATUS_SUCCESS;
207
208 if (Operation == CcOperationZero)
209 {
210 /* Zero */
211 RtlZeroMemory(BaseAddress, Length);
212 }
213 else
214 {
215 _SEH2_TRY
216 {
217 if (Operation == CcOperationWrite)
218 RtlCopyMemory(BaseAddress, Buffer, Length);
219 else
220 RtlCopyMemory(Buffer, BaseAddress, Length);
221 }
222 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
223 {
224 Status = _SEH2_GetExceptionCode();
225 }
226 _SEH2_END;
227 }
228 return Status;
229 }
230
/*
 * Common engine behind CcCopyRead/CcCopyWrite/CcZeroData.
 * Walks [FileOffset, FileOffset + Length) one VACB window at a time,
 * faulting data in where required, and applies Operation to each window.
 * Hard failures are raised via ExRaiseStatus(); FALSE is returned only
 * when Wait == FALSE and the requested range is not fully resident.
 * On success, IoStatus receives STATUS_SUCCESS and the byte count.
 */
BOOLEAN
CcCopyData (
    _In_ PFILE_OBJECT FileObject,
    _In_ LONGLONG FileOffset,
    _Inout_ PVOID Buffer,
    _In_ LONGLONG Length,
    _In_ CC_COPY_OPERATION Operation,
    _In_ BOOLEAN Wait,
    _Out_ PIO_STATUS_BLOCK IoStatus)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    ULONG BytesCopied;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PLIST_ENTRY ListEntry;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    CurrentOffset = FileOffset;
    BytesCopied = 0;

    if (!Wait)
    {
        /* test if the requested data is available */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        /* FIXME: this loop doesn't take into account areas that don't have
         * a VACB in the list yet */
        ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
        {
            Vacb = CONTAINING_RECORD(ListEntry,
                                     ROS_VACB,
                                     CacheMapVacbListEntry);
            ListEntry = ListEntry->Flink;
            /* Any invalid VACB overlapping the requested range means a
             * blocking read would be needed: refuse without waiting */
            if (!Vacb->Valid &&
                DoRangesIntersect(Vacb->FileOffset.QuadPart,
                                  VACB_MAPPING_GRANULARITY,
                                  CurrentOffset, Length))
            {
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
                /* data not available */
                return FALSE;
            }
            /* List is offset-ordered: everything past the range is done */
            if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
                break;
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
    }

    /* Handle a leading partial window so the main loop below always
     * starts on a VACB boundary */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid)
        {
            /* Partial window: existing file data must be faulted in even
             * for a write, to preserve the untouched bytes */
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,
                                 Buffer,
                                 PartialLength,
                                 Operation);

        /* Mark valid; mark dirty for anything that modified the cache */
        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        /* Zeroing has no caller buffer to advance */
        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        /* A full-window write/zero overwrites everything, so the backing
         * read is only needed for reads and trailing partial windows */
        if (!Valid &&
            (Operation == CcOperationRead ||
             PartialLength < VACB_MAPPING_GRANULARITY))
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);

        /* Mark valid; mark dirty for anything that modified the cache */
        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        /* Zeroing has no caller buffer to advance */
        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }
    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = BytesCopied;
    return TRUE;
}
363
/*
 * Drains the deferred-write queue as far as the current dirty-page
 * situation allows. Each entry that CcCanIWrite() now accepts is removed
 * from the list and either has its waiter's event signaled (synchronous
 * CcDeferWrite callers) or has its posted write routine invoked and its
 * context freed. Stops at the first entry that still cannot proceed.
 */
VOID
CcPostDeferredWrites(VOID)
{
    ULONG WrittenBytes;

    /* We'll try to write as much as we can */
    WrittenBytes = 0;
    while (TRUE)
    {
        KIRQL OldIrql;
        PLIST_ENTRY ListEntry;
        PDEFERRED_WRITE DeferredWrite;

        DeferredWrite = NULL;

        /* Lock our deferred writes list */
        KeAcquireSpinLock(&CcDeferredWriteSpinLock, &OldIrql);
        for (ListEntry = CcDeferredWrites.Flink;
             ListEntry != &CcDeferredWrites;
             ListEntry = ListEntry->Flink)
        {
            /* Extract an entry */
            DeferredWrite = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);

            /* Compute the modified bytes, based on what we already wrote */
            WrittenBytes += DeferredWrite->BytesToWrite;
            /* We overflowed, give up */
            if (WrittenBytes < DeferredWrite->BytesToWrite)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Check we can write */
            if (CcCanIWrite(DeferredWrite->FileObject, WrittenBytes, FALSE, TRUE))
            {
                /* We can, so remove it from the list and stop looking for entry */
                RemoveEntryList(&DeferredWrite->DeferredWriteLinks);
                break;
            }

            /* If we don't accept modified pages, stop here */
            if (!DeferredWrite->LimitModifiedPages)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Reset count as nothing was written yet */
            WrittenBytes -= DeferredWrite->BytesToWrite;
            DeferredWrite = NULL;
        }
        KeReleaseSpinLock(&CcDeferredWriteSpinLock, OldIrql);

        /* Nothing to write found, give up */
        if (DeferredWrite == NULL)
        {
            break;
        }

        /* If we have an event, set it and quit.
         * The waiting CcDeferWrite caller owns the context in that case. */
        if (DeferredWrite->Event)
        {
            KeSetEvent(DeferredWrite->Event, IO_NO_INCREMENT, FALSE);
        }
        /* Otherwise, call the write routine and free the context */
        else
        {
            DeferredWrite->PostRoutine(DeferredWrite->Context1, DeferredWrite->Context2);
            ExFreePoolWithTag(DeferredWrite, 'CcDw');
        }
    }
}
437
438 /*
439 * @unimplemented
440 */
441 BOOLEAN
442 NTAPI
443 CcCanIWrite (
444 IN PFILE_OBJECT FileObject,
445 IN ULONG BytesToWrite,
446 IN BOOLEAN Wait,
447 IN BOOLEAN Retrying)
448 {
449 PFSRTL_COMMON_FCB_HEADER Fcb;
450 PROS_SHARED_CACHE_MAP SharedCacheMap;
451
452 CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
453 FileObject, BytesToWrite, Wait, Retrying);
454
455 /* We cannot write if dirty pages count is above threshold */
456 if (CcTotalDirtyPages > CcDirtyPageThreshold)
457 {
458 return FALSE;
459 }
460
461 /* We cannot write if dirty pages count will bring use above
462 * XXX: Might not be accurate
463 */
464 if (CcTotalDirtyPages + (BytesToWrite / PAGE_SIZE) > CcDirtyPageThreshold)
465 {
466 return FALSE;
467 }
468
469 /* Is there a limit per file object? */
470 Fcb = FileObject->FsContext;
471 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
472 if (!BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES) ||
473 SharedCacheMap->DirtyPageThreshold == 0)
474 {
475 /* Nope, so that's fine, allow write operation */
476 return TRUE;
477 }
478
479 /* Is dirty page count above local threshold? */
480 if (SharedCacheMap->DirtyPages > SharedCacheMap->DirtyPageThreshold)
481 {
482 return FALSE;
483 }
484
485 /* We cannot write if dirty pages count will bring use above
486 * XXX: Might not be accurate
487 */
488 if (SharedCacheMap->DirtyPages + (BytesToWrite / PAGE_SIZE) > SharedCacheMap->DirtyPageThreshold)
489 {
490 return FALSE;
491 }
492
493 return TRUE;
494 }
495
496 /*
497 * @implemented
498 */
499 BOOLEAN
500 NTAPI
501 CcCopyRead (
502 IN PFILE_OBJECT FileObject,
503 IN PLARGE_INTEGER FileOffset,
504 IN ULONG Length,
505 IN BOOLEAN Wait,
506 OUT PVOID Buffer,
507 OUT PIO_STATUS_BLOCK IoStatus)
508 {
509 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
510 FileObject, FileOffset->QuadPart, Length, Wait);
511
512 DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
513 "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
514 FileObject, FileOffset->QuadPart, Length, Wait,
515 Buffer, IoStatus);
516
517 return CcCopyData(FileObject,
518 FileOffset->QuadPart,
519 Buffer,
520 Length,
521 CcOperationRead,
522 Wait,
523 IoStatus);
524 }
525
526 /*
527 * @implemented
528 */
529 BOOLEAN
530 NTAPI
531 CcCopyWrite (
532 IN PFILE_OBJECT FileObject,
533 IN PLARGE_INTEGER FileOffset,
534 IN ULONG Length,
535 IN BOOLEAN Wait,
536 IN PVOID Buffer)
537 {
538 IO_STATUS_BLOCK IoStatus;
539
540 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
541 FileObject, FileOffset->QuadPart, Length, Wait, Buffer);
542
543 DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
544 "Length %lu, Wait %u, Buffer 0x%p)\n",
545 FileObject, FileOffset->QuadPart, Length, Wait, Buffer);
546
547 return CcCopyData(FileObject,
548 FileOffset->QuadPart,
549 Buffer,
550 Length,
551 CcOperationWrite,
552 Wait,
553 &IoStatus);
554 }
555
/*
 * @implemented
 *
 * Queues a write that CcCanIWrite() refused, to be executed later by
 * CcPostDeferredWrites() once enough dirty pages have been flushed.
 * If no context can be allocated, the write is executed immediately
 * instead of being lost.
 */
VOID
NTAPI
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying)
{
    PDEFERRED_WRITE Context;

    CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
        FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);

    /* Try to allocate a context for queueing the write operation */
    Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(DEFERRED_WRITE), 'CcDw');
    /* If it failed, immediately execute the operation! */
    if (Context == NULL)
    {
        PostRoutine(Context1, Context2);
        return;
    }

    /* Otherwise, initialize the context.
     * Event stays NULL, so CcPostDeferredWrites() will call PostRoutine
     * and free this context itself. */
    RtlZeroMemory(Context, sizeof(DEFERRED_WRITE));
    Context->NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context->NodeByteSize = sizeof(DEFERRED_WRITE);
    Context->FileObject = FileObject;
    Context->PostRoutine = PostRoutine;
    Context->Context1 = Context1;
    Context->Context2 = Context2;
    Context->BytesToWrite = BytesToWrite;

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Try to execute the posted writes */
    CcPostDeferredWrites();

    /* FIXME: lock master */
    if (!LazyWriter.ScanActive)
    {
        /* Make sure the lazy writer will eventually flush dirty pages and
         * unblock the queued write */
        CcScheduleLazyWriteScan(FALSE);
    }
}
618
619 /*
620 * @unimplemented
621 */
622 VOID
623 NTAPI
624 CcFastCopyRead (
625 IN PFILE_OBJECT FileObject,
626 IN ULONG FileOffset,
627 IN ULONG Length,
628 IN ULONG PageCount,
629 OUT PVOID Buffer,
630 OUT PIO_STATUS_BLOCK IoStatus)
631 {
632 LARGE_INTEGER LargeFileOffset;
633 BOOLEAN Success;
634
635 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
636 FileObject, FileOffset, Length, PageCount, Buffer);
637
638 DBG_UNREFERENCED_PARAMETER(PageCount);
639
640 LargeFileOffset.QuadPart = FileOffset;
641 Success = CcCopyRead(FileObject,
642 &LargeFileOffset,
643 Length,
644 TRUE,
645 Buffer,
646 IoStatus);
647 ASSERT(Success == TRUE);
648 }
649
650 /*
651 * @unimplemented
652 */
653 VOID
654 NTAPI
655 CcFastCopyWrite (
656 IN PFILE_OBJECT FileObject,
657 IN ULONG FileOffset,
658 IN ULONG Length,
659 IN PVOID Buffer)
660 {
661 LARGE_INTEGER LargeFileOffset;
662 BOOLEAN Success;
663
664 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
665 FileObject, FileOffset, Length, Buffer);
666
667 LargeFileOffset.QuadPart = FileOffset;
668 Success = CcCopyWrite(FileObject,
669 &LargeFileOffset,
670 Length,
671 TRUE,
672 Buffer);
673 ASSERT(Success == TRUE);
674 }
675
/*
 * @implemented
 *
 * Zeroes [StartOffset, EndOffset) of FileObject. For cached files this
 * goes through the common copy engine; for non-cached files it issues
 * paging writes whose MDL points every page at the shared CcZeroPage.
 */
BOOLEAN
NTAPI
CcZeroData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER StartOffset,
    IN PLARGE_INTEGER EndOffset,
    IN BOOLEAN Wait)
{
    NTSTATUS Status;
    LARGE_INTEGER WriteOffset;
    LONGLONG Length;
    ULONG CurrentLength;
    PMDL Mdl;
    ULONG i;
    IO_STATUS_BLOCK Iosb;
    KEVENT Event;

    CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
        FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
           Wait);

    Length = EndOffset->QuadPart - StartOffset->QuadPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
    {
        /* File is not cached */

        /* Stack-allocate an MDL big enough for the largest chunk; it is
         * reused for every iteration below */
        Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));

        while (Length > 0)
        {
            /* Chunk size capped so that offset-within-page + length never
             * exceeds MAX_ZERO_LENGTH */
            if (Length + WriteOffset.QuadPart % PAGE_SIZE > MAX_ZERO_LENGTH)
            {
                CurrentLength = MAX_ZERO_LENGTH - WriteOffset.QuadPart % PAGE_SIZE;
            }
            else
            {
                CurrentLength = Length;
            }
            MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
            Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
            /* Point every page of the MDL at the shared zero page, so the
             * write sources zeroes without any buffer allocation.
             * NOTE(review): the element count divides by sizeof(ULONG)
             * while the array is indexed as PPFN_NUMBER — presumably these
             * sizes match on the supported targets; verify for 64-bit. */
            for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
            {
                ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
            }
            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
            if (Status == STATUS_PENDING)
            {
                /* Wait for the paging write to complete */
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                /* The I/O stack may have mapped the MDL; undo it before reuse */
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status))
            {
                return FALSE;
            }
            WriteOffset.QuadPart += CurrentLength;
            Length -= CurrentLength;
        }
    }
    else
    {
        IO_STATUS_BLOCK IoStatus;

        /* Cached file: let the common copy engine zero through the cache */
        return CcCopyData(FileObject,
                          WriteOffset.QuadPart,
                          NULL,
                          Length,
                          CcOperationZero,
                          Wait,
                          &IoStatus);
    }

    return TRUE;
}