[NTOSKRNL] Add wait support in CcCanIWrite()
[reactos.git] / ntoskrnl / cc / copy.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/copy.c
5 * PURPOSE: Implements cache managers copy interface
6 *
7 * PROGRAMMERS: Some people?
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* INCLUDES ******************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 /* GLOBALS *******************************************************************/
18
19 static PFN_NUMBER CcZeroPage = 0;
20
21 #define MAX_ZERO_LENGTH (256 * 1024)
22
/* Direction selector for the shared copy engine (CcCopyData /
 * ReadWriteOrZero). */
typedef enum _CC_COPY_OPERATION
{
    CcOperationRead,   /* copy from the cache mapping into the caller's buffer */
    CcOperationWrite,  /* copy from the caller's buffer into the cache mapping */
    CcOperationZero    /* zero-fill the cached range (no caller buffer) */
} CC_COPY_OPERATION;
29
30 ULONG CcRosTraceLevel = 0;
31 ULONG CcFastMdlReadWait;
32 ULONG CcFastMdlReadNotPossible;
33 ULONG CcFastReadNotPossible;
34 ULONG CcFastReadWait;
35 ULONG CcFastReadNoWait;
36 ULONG CcFastReadResourceMiss;
37
38 /* FUNCTIONS *****************************************************************/
39
/* Provided by Mm: zero-fills the physical page identified by
 * PageFrameIndex. Forward-declared here for CcInitCacheZeroPage(). */
VOID
NTAPI
MiZeroPhysicalPage (
    IN PFN_NUMBER PageFrameIndex
);
45
46 VOID
47 NTAPI
48 CcInitCacheZeroPage (
49 VOID)
50 {
51 NTSTATUS Status;
52
53 MI_SET_USAGE(MI_USAGE_CACHE);
54 //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
55 Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
56 if (!NT_SUCCESS(Status))
57 {
58 DbgPrint("Can't allocate CcZeroPage.\n");
59 KeBugCheck(CACHE_MANAGER);
60 }
61 MiZeroPhysicalPage(CcZeroPage);
62 }
63
/* Brings a VACB's contents in from its backing file.
 * Reads the valid portion of the VACB window (clamped to the section
 * end) into the VACB's system mapping via a paging read, then zero-fills
 * the tail of the mapping past the valid data.
 * Returns STATUS_SUCCESS (STATUS_END_OF_FILE from the paging read is
 * treated as success), or a failure status. */
NTSTATUS
NTAPI
CcReadVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size, Pages;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    /* Valid bytes in this window: distance from the VACB's file offset
     * to the end of the section, capped at one mapping granule */
    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }

    Pages = BYTES_TO_PAGES(Size);
    ASSERT(Pages * PAGE_SIZE <= VACB_MAPPING_GRANULARITY);

    /* Describe the (page-rounded) destination with an MDL for paging I/O */
    Mdl = IoAllocateMdl(Vacb->BaseAddress, Pages * PAGE_SIZE, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        /* Pin the destination pages; a fault here means the cache
         * mapping itself is broken, which is fatal */
        MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        Mdl->MdlFlags |= MDL_IO_PAGE_READ;
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            /* Read was queued; block until it completes */
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    /* Reading past EOF is acceptable: the tail is zeroed below */
    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageRead failed, Status %x\n", Status);
        return Status;
    }

    if (Size < VACB_MAPPING_GRANULARITY)
    {
        /* Zero the remainder of the mapping so no stale data is exposed */
        RtlZeroMemory((char*)Vacb->BaseAddress + Size,
                      VACB_MAPPING_GRANULARITY - Size);
    }

    return STATUS_SUCCESS;
}
131
/* Flushes a VACB's contents back to its backing file with a synchronous
 * paging write. Only the valid portion of the window (clamped to the
 * section end) is written.
 * Returns STATUS_SUCCESS (STATUS_END_OF_FILE is tolerated), or the
 * failure status of the paging write. */
NTSTATUS
NTAPI
CcWriteVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    /* Bytes to flush: distance from the VACB's file offset to the end of
     * the section, capped at one mapping granule */
    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }
    //
    // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
    // MmGlobalPageDirectory and the real system PDE directory. What a mess...
    //
    {
        /* Touch each page so its PDE is faulted into the current address
         * space before the MDL is built (see note above) */
        ULONG i = 0;
        do
        {
            MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
        } while (++i < (Size >> PAGE_SHIFT));
    }

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        /* Pin the source pages; a fault here means the cache mapping
         * itself is broken, which is fatal */
        MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    } _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            /* Write was queued; block until it completes */
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }
    IoFreeMdl(Mdl);
    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageWrite failed, Status %x\n", Status);
        return Status;
    }

    return STATUS_SUCCESS;
}
198
199 NTSTATUS
200 ReadWriteOrZero(
201 _Inout_ PVOID BaseAddress,
202 _Inout_opt_ PVOID Buffer,
203 _In_ ULONG Length,
204 _In_ CC_COPY_OPERATION Operation)
205 {
206 NTSTATUS Status = STATUS_SUCCESS;
207
208 if (Operation == CcOperationZero)
209 {
210 /* Zero */
211 RtlZeroMemory(BaseAddress, Length);
212 }
213 else
214 {
215 _SEH2_TRY
216 {
217 if (Operation == CcOperationWrite)
218 RtlCopyMemory(BaseAddress, Buffer, Length);
219 else
220 RtlCopyMemory(Buffer, BaseAddress, Length);
221 }
222 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
223 {
224 Status = _SEH2_GetExceptionCode();
225 }
226 _SEH2_END;
227 }
228 return Status;
229 }
230
/* Core engine shared by CcCopyRead, CcCopyWrite and CcZeroData: walks
 * the cached range [FileOffset, FileOffset + Length) one VACB at a time
 * and applies Operation (read, write or zero) to each piece.
 * When Wait is FALSE, first verifies that every intersecting VACB is
 * already valid and returns FALSE without blocking otherwise.
 * On completion returns TRUE and fills IoStatus with STATUS_SUCCESS and
 * the number of bytes processed; hard failures are raised via
 * ExRaiseStatus(). */
BOOLEAN
CcCopyData (
    _In_ PFILE_OBJECT FileObject,
    _In_ LONGLONG FileOffset,
    _Inout_ PVOID Buffer,
    _In_ LONGLONG Length,
    _In_ CC_COPY_OPERATION Operation,
    _In_ BOOLEAN Wait,
    _Out_ PIO_STATUS_BLOCK IoStatus)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    ULONG BytesCopied;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PLIST_ENTRY ListEntry;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    CurrentOffset = FileOffset;
    BytesCopied = 0;

    if (!Wait)
    {
        /* test if the requested data is available */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        /* FIXME: this loop doesn't take into account areas that don't have
         * a VACB in the list yet */
        ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
        {
            Vacb = CONTAINING_RECORD(ListEntry,
                                     ROS_VACB,
                                     CacheMapVacbListEntry);
            ListEntry = ListEntry->Flink;
            /* An invalid VACB overlapping the request would require a
             * blocking read -- not allowed without Wait */
            if (!Vacb->Valid &&
                DoRangesIntersect(Vacb->FileOffset.QuadPart,
                                  VACB_MAPPING_GRANULARITY,
                                  CurrentOffset, Length))
            {
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
                /* data not available */
                return FALSE;
            }
            /* Presumably the list is ordered by file offset, so nothing
             * past the request end can intersect it -- TODO confirm */
            if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
                break;
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
    }

    /* Handle a leading partial VACB first, so the main loop below always
     * starts on a VACB boundary */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid)
        {
            /* Only part of the VACB will be touched, so the rest must be
             * faulted in first */
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,
                                 Buffer,
                                 PartialLength,
                                 Operation);

        /* Release as valid; mark dirty for anything that modified it */
        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        /* Zeroing has no user buffer to advance */
        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    /* Process the rest of the range, one (possibly trailing-partial)
     * VACB at a time */
    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        /* Reads always need backing data; writes/zeroes need it only
         * when they cover just part of the VACB (the rest must survive) */
        if (!Valid &&
            (Operation == CcOperationRead ||
             PartialLength < VACB_MAPPING_GRANULARITY))
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }
    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = BytesCopied;
    return TRUE;
}
363
/* Drains the CcDeferredWrites list: repeatedly picks the first queued
 * entry whose byte count now fits under the dirty-page thresholds (per
 * CcCanIWrite) and either signals its waiter's event (entries queued by
 * CcCanIWrite, whose context lives on the waiter's stack) or runs the
 * posted write routine and frees the context (entries queued by
 * CcDeferWrite). Stops when no further entry can be satisfied. */
VOID
CcPostDeferredWrites(VOID)
{
    ULONG WrittenBytes;

    /* We'll try to write as much as we can */
    WrittenBytes = 0;
    while (TRUE)
    {
        KIRQL OldIrql;
        PLIST_ENTRY ListEntry;
        PDEFERRED_WRITE DeferredWrite;

        DeferredWrite = NULL;

        /* Lock our deferred writes list */
        KeAcquireSpinLock(&CcDeferredWriteSpinLock, &OldIrql);
        for (ListEntry = CcDeferredWrites.Flink;
             ListEntry != &CcDeferredWrites;
             ListEntry = ListEntry->Flink)
        {
            /* Extract an entry */
            DeferredWrite = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);

            /* Compute the modified bytes, based on what we already wrote */
            WrittenBytes += DeferredWrite->BytesToWrite;
            /* We overflowed, give up */
            if (WrittenBytes < DeferredWrite->BytesToWrite)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Check we can write */
            if (CcCanIWrite(DeferredWrite->FileObject, WrittenBytes, FALSE, TRUE))
            {
                /* We can, so remove it from the list and stop looking for entry */
                RemoveEntryList(&DeferredWrite->DeferredWriteLinks);
                break;
            }

            /* If we don't accept modified pages, stop here */
            if (!DeferredWrite->LimitModifiedPages)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Reset count as nothing was written yet */
            WrittenBytes -= DeferredWrite->BytesToWrite;
            DeferredWrite = NULL;
        }
        KeReleaseSpinLock(&CcDeferredWriteSpinLock, OldIrql);

        /* Nothing to write found, give up */
        if (DeferredWrite == NULL)
        {
            break;
        }

        /* If we have an event, set it and quit */
        if (DeferredWrite->Event)
        {
            /* NOTE(review): the context belongs to a CcCanIWrite waiter's
             * stack; do not touch it after setting the event */
            KeSetEvent(DeferredWrite->Event, IO_NO_INCREMENT, FALSE);
        }
        /* Otherwise, call the write routine and free the context */
        else
        {
            DeferredWrite->PostRoutine(DeferredWrite->Context1, DeferredWrite->Context2);
            ExFreePoolWithTag(DeferredWrite, 'CcDw');
        }
    }
}
437
/*
 * @unimplemented
 */
/* Decides whether BytesToWrite bytes may be written to FileObject right
 * now without exceeding the global (CcDirtyPageThreshold) or per-file
 * (SharedCacheMap->DirtyPageThreshold) dirty-page limits.
 * If the write is not currently possible and Wait is TRUE, queues a
 * stack-based deferred-write context and blocks until CcPostDeferredWrites
 * signals it, then returns TRUE (the caller must then write immediately).
 * Retrying selects queue position: head for retries, tail otherwise. */
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    KEVENT WaitEvent;
    DEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
        FileObject, BytesToWrite, Wait, Retrying);

    /* We cannot write if dirty pages count is above threshold */
    if (CcTotalDirtyPages > CcDirtyPageThreshold)
    {
        /* Can the caller wait till it's possible to write? */
        goto CanIWait;
    }

    /* We cannot write if dirty pages count will bring us above the
     * threshold. XXX: Might not be accurate */
    if (CcTotalDirtyPages + (BytesToWrite / PAGE_SIZE) > CcDirtyPageThreshold)
    {
        /* Can the caller wait till it's possible to write? */
        goto CanIWait;
    }

    /* Is there a limit per file object? */
    Fcb = FileObject->FsContext;
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (!BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES) ||
        SharedCacheMap->DirtyPageThreshold == 0)
    {
        /* Nope, so that's fine, allow write operation */
        return TRUE;
    }

    /* Is dirty page count above local threshold? */
    if (SharedCacheMap->DirtyPages > SharedCacheMap->DirtyPageThreshold)
    {
        /* Can the caller wait till it's possible to write? */
        goto CanIWait;
    }

    /* We cannot write if dirty pages count will bring us above the
     * per-file threshold. XXX: Might not be accurate */
    if (SharedCacheMap->DirtyPages + (BytesToWrite / PAGE_SIZE) > SharedCacheMap->DirtyPageThreshold)
    {
        /* Can the caller wait till it's possible to write? */
        goto CanIWait;
    }

    return TRUE;

CanIWait:
    /* If we reached that point, it means the caller cannot write now.
     * If it cannot wait either, then fail and deny the write. */
    if (!Wait)
    {
        return FALSE;
    }

    /* Otherwise, if there are no deferred writes yet, start the lazy writer */
    if (IsListEmpty(&CcDeferredWrites))
    {
        KIRQL OldIrql;

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        CcScheduleLazyWriteScan(TRUE);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    /* Initialize our wait event */
    KeInitializeEvent(&WaitEvent, NotificationEvent, FALSE);

    /* And prepare a dummy context; it lives on this stack, and
     * CcPostDeferredWrites only signals the event, never frees it */
    Context.NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context.NodeByteSize = sizeof(DEFERRED_WRITE);
    Context.FileObject = FileObject;
    Context.BytesToWrite = BytesToWrite;
    Context.LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
    Context.Event = &WaitEvent;

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Now, we'll loop until our event is set. When it is set, it means that caller
     * can immediately write, and has to.
     * The timeout (CcIdleDelay) lets us retry posting deferred writes
     * periodically while waiting. */
    do
    {
        CcPostDeferredWrites();
    } while (KeWaitForSingleObject(&WaitEvent, Executive, KernelMode, FALSE, &CcIdleDelay) != STATUS_SUCCESS);

    return TRUE;
}
557
558 /*
559 * @implemented
560 */
561 BOOLEAN
562 NTAPI
563 CcCopyRead (
564 IN PFILE_OBJECT FileObject,
565 IN PLARGE_INTEGER FileOffset,
566 IN ULONG Length,
567 IN BOOLEAN Wait,
568 OUT PVOID Buffer,
569 OUT PIO_STATUS_BLOCK IoStatus)
570 {
571 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
572 FileObject, FileOffset->QuadPart, Length, Wait);
573
574 DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
575 "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
576 FileObject, FileOffset->QuadPart, Length, Wait,
577 Buffer, IoStatus);
578
579 return CcCopyData(FileObject,
580 FileOffset->QuadPart,
581 Buffer,
582 Length,
583 CcOperationRead,
584 Wait,
585 IoStatus);
586 }
587
588 /*
589 * @implemented
590 */
591 BOOLEAN
592 NTAPI
593 CcCopyWrite (
594 IN PFILE_OBJECT FileObject,
595 IN PLARGE_INTEGER FileOffset,
596 IN ULONG Length,
597 IN BOOLEAN Wait,
598 IN PVOID Buffer)
599 {
600 IO_STATUS_BLOCK IoStatus;
601
602 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
603 FileObject, FileOffset->QuadPart, Length, Wait, Buffer);
604
605 DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
606 "Length %lu, Wait %u, Buffer 0x%p)\n",
607 FileObject, FileOffset->QuadPart, Length, Wait, Buffer);
608
609 return CcCopyData(FileObject,
610 FileOffset->QuadPart,
611 Buffer,
612 Length,
613 CcOperationWrite,
614 Wait,
615 &IoStatus);
616 }
617
/*
 * @implemented
 */
/* Queues a write operation (PostRoutine + two opaque contexts) to be
 * executed by CcPostDeferredWrites() once the dirty-page thresholds
 * allow BytesToWrite to proceed. If the bookkeeping allocation fails,
 * the routine is executed immediately instead. Retrying entries are
 * queued at the head so they are serviced first. */
VOID
NTAPI
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying)
{
    PDEFERRED_WRITE Context;

    CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
        FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);

    /* Try to allocate a context for queueing the write operation */
    Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(DEFERRED_WRITE), 'CcDw');
    /* If it failed, immediately execute the operation! */
    if (Context == NULL)
    {
        PostRoutine(Context1, Context2);
        return;
    }

    /* Otherwise, initialize the context. Event stays NULL, which tells
     * CcPostDeferredWrites to run PostRoutine and free this context. */
    RtlZeroMemory(Context, sizeof(DEFERRED_WRITE));
    Context->NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context->NodeByteSize = sizeof(DEFERRED_WRITE);
    Context->FileObject = FileObject;
    Context->PostRoutine = PostRoutine;
    Context->Context1 = Context1;
    Context->Context2 = Context2;
    Context->BytesToWrite = BytesToWrite;

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Try to execute the posted writes */
    CcPostDeferredWrites();

    /* FIXME: lock master */
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
}
680
681 /*
682 * @unimplemented
683 */
684 VOID
685 NTAPI
686 CcFastCopyRead (
687 IN PFILE_OBJECT FileObject,
688 IN ULONG FileOffset,
689 IN ULONG Length,
690 IN ULONG PageCount,
691 OUT PVOID Buffer,
692 OUT PIO_STATUS_BLOCK IoStatus)
693 {
694 LARGE_INTEGER LargeFileOffset;
695 BOOLEAN Success;
696
697 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
698 FileObject, FileOffset, Length, PageCount, Buffer);
699
700 DBG_UNREFERENCED_PARAMETER(PageCount);
701
702 LargeFileOffset.QuadPart = FileOffset;
703 Success = CcCopyRead(FileObject,
704 &LargeFileOffset,
705 Length,
706 TRUE,
707 Buffer,
708 IoStatus);
709 ASSERT(Success == TRUE);
710 }
711
712 /*
713 * @unimplemented
714 */
715 VOID
716 NTAPI
717 CcFastCopyWrite (
718 IN PFILE_OBJECT FileObject,
719 IN ULONG FileOffset,
720 IN ULONG Length,
721 IN PVOID Buffer)
722 {
723 LARGE_INTEGER LargeFileOffset;
724 BOOLEAN Success;
725
726 CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
727 FileObject, FileOffset, Length, Buffer);
728
729 LargeFileOffset.QuadPart = FileOffset;
730 Success = CcCopyWrite(FileObject,
731 &LargeFileOffset,
732 Length,
733 TRUE,
734 Buffer);
735 ASSERT(Success == TRUE);
736 }
737
738 /*
739 * @implemented
740 */
741 BOOLEAN
742 NTAPI
743 CcZeroData (
744 IN PFILE_OBJECT FileObject,
745 IN PLARGE_INTEGER StartOffset,
746 IN PLARGE_INTEGER EndOffset,
747 IN BOOLEAN Wait)
748 {
749 NTSTATUS Status;
750 LARGE_INTEGER WriteOffset;
751 LONGLONG Length;
752 ULONG CurrentLength;
753 PMDL Mdl;
754 ULONG i;
755 IO_STATUS_BLOCK Iosb;
756 KEVENT Event;
757
758 CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
759 FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);
760
761 DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
762 "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
763 Wait);
764
765 Length = EndOffset->QuadPart - StartOffset->QuadPart;
766 WriteOffset.QuadPart = StartOffset->QuadPart;
767
768 if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
769 {
770 /* File is not cached */
771
772 Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));
773
774 while (Length > 0)
775 {
776 if (Length + WriteOffset.QuadPart % PAGE_SIZE > MAX_ZERO_LENGTH)
777 {
778 CurrentLength = MAX_ZERO_LENGTH - WriteOffset.QuadPart % PAGE_SIZE;
779 }
780 else
781 {
782 CurrentLength = Length;
783 }
784 MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
785 Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
786 for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
787 {
788 ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
789 }
790 KeInitializeEvent(&Event, NotificationEvent, FALSE);
791 Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
792 if (Status == STATUS_PENDING)
793 {
794 KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
795 Status = Iosb.Status;
796 }
797 if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
798 {
799 MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
800 }
801 if (!NT_SUCCESS(Status))
802 {
803 return FALSE;
804 }
805 WriteOffset.QuadPart += CurrentLength;
806 Length -= CurrentLength;
807 }
808 }
809 else
810 {
811 IO_STATUS_BLOCK IoStatus;
812
813 return CcCopyData(FileObject,
814 WriteOffset.QuadPart,
815 NULL,
816 Length,
817 CcOperationZero,
818 Wait,
819 &IoStatus);
820 }
821
822 return TRUE;
823 }