/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/copy.c
 * PURPOSE:         Implements the cache manager's copy interface
 *
 * PROGRAMMERS:     Some people?
 *                  Pierre Schweitzer (pierre@reactos.org)
 */
/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

static PFN_NUMBER CcZeroPage = 0;

#define MAX_ZERO_LENGTH (256 * 1024)

typedef enum _CC_COPY_OPERATION
{
    CcOperationRead,
    CcOperationWrite,
    CcOperationZero
} CC_COPY_OPERATION;

ULONG CcRosTraceLevel = 0;
ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;
/* FUNCTIONS *****************************************************************/

VOID
NTAPI
MiZeroPhysicalPage (
    IN PFN_NUMBER PageFrameIndex
);

VOID
NTAPI
CcInitCacheZeroPage (
    VOID)
{
    NTSTATUS Status;

    MI_SET_USAGE(MI_USAGE_CACHE);
    //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
    Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
    if (!NT_SUCCESS(Status))
    {
        DbgPrint("Can't allocate CcZeroPage.\n");
        KeBugCheck(CACHE_MANAGER);
    }
    MiZeroPhysicalPage(CcZeroPage);
}
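/* Reads one VACB worth of data from the backing file into the view mapped at
 * Vacb->BaseAddress using a non-cached paging read, then zero-fills whatever
 * part of the view lies beyond the end of the section. */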
NTSTATUS
NTAPI
CcReadVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    ULONG Pages;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }

    Pages = BYTES_TO_PAGES(Size);
    ASSERT(Pages * PAGE_SIZE <= VACB_MAPPING_GRANULARITY);

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Pages * PAGE_SIZE, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    }
    _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        Mdl->MdlFlags |= MDL_IO_PAGE_READ;
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageRead failed, Status %x\n", Status);
        return Status;
    }

    if (Size < VACB_MAPPING_GRANULARITY)
    {
        RtlZeroMemory((char*)Vacb->BaseAddress + Size,
                      VACB_MAPPING_GRANULARITY - Size);
    }

    return STATUS_SUCCESS;
}
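/* Writes the data mapped by the VACB back to the backing file with a
 * synchronous, non-cached paging write. */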
NTSTATUS
NTAPI
CcWriteVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;

    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
    {
        Size = VACB_MAPPING_GRANULARITY;
    }
    //
    // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
    // MmGlobalPageDirectory and the real system PDE directory. What a mess...
    //
    {
        ULONG i = 0;
        do
        {
            MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
        } while (++i < (Size >> PAGE_SHIFT));
    }

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (!Mdl)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);
    }
    _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageWrite failed, Status %x\n", Status);
        return Status;
    }

    return STATUS_SUCCESS;
}
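/* Helper for CcCopyData: depending on Operation, zeroes the mapped cache view,
 * copies the caller's buffer into it, or copies it out to the caller's buffer.
 * Faults on the user buffer are caught and returned as a failure status. */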
static
NTSTATUS
ReadWriteOrZero(
    _Inout_ PVOID BaseAddress,
    _Inout_opt_ PVOID Buffer,
    _In_ ULONG Length,
    _In_ CC_COPY_OPERATION Operation)
{
    NTSTATUS Status = STATUS_SUCCESS;

    if (Operation == CcOperationZero)
    {
        /* Zero */
        RtlZeroMemory(BaseAddress, Length);
    }
    else
    {
        _SEH2_TRY
        {
            if (Operation == CcOperationWrite)
                RtlCopyMemory(BaseAddress, Buffer, Length);
            else
                RtlCopyMemory(Buffer, BaseAddress, Length);
        }
        _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
        {
            Status = _SEH2_GetExceptionCode();
        }
        _SEH2_END;
    }

    return Status;
}
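/* Common worker for CcCopyRead, CcCopyWrite and CcZeroData: walks the requested
 * range one VACB at a time, requesting each view (and reading it in when it is
 * not yet valid) before zeroing or copying through ReadWriteOrZero. I/O errors
 * are raised as exceptions; FALSE is only returned when Wait is FALSE and the
 * data is not yet resident. */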
static
BOOLEAN
CcCopyData (
    _In_ PFILE_OBJECT FileObject,
    _In_ LONGLONG FileOffset,
    _Inout_ PVOID Buffer,
    _In_ LONGLONG Length,
    _In_ CC_COPY_OPERATION Operation,
    _In_ BOOLEAN Wait,
    _Out_ PIO_STATUS_BLOCK IoStatus)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    ULONG BytesCopied;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PLIST_ENTRY ListEntry;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;
    PPRIVATE_CACHE_MAP PrivateCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    PrivateCacheMap = FileObject->PrivateCacheMap;
    CurrentOffset = FileOffset;
    BytesCopied = 0;

    if (!Wait)
    {
        /* Test if the requested data is available */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        /* FIXME: this loop doesn't take into account areas that don't have
         * a VACB in the list yet */
        ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
        {
            Vacb = CONTAINING_RECORD(ListEntry,
                                     ROS_VACB,
                                     CacheMapVacbListEntry);
            ListEntry = ListEntry->Flink;
            if (!Vacb->Valid &&
                DoRangesIntersect(Vacb->FileOffset.QuadPart,
                                  VACB_MAPPING_GRANULARITY,
                                  CurrentOffset, Length))
            {
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
                /* Data not available */
                return FALSE;
            }
            if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
                break;
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
    }

    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,
                                 Buffer,
                                 PartialLength,
                                 Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid &&
            (Operation == CcOperationRead ||
             PartialLength < VACB_MAPPING_GRANULARITY))
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }

    /* If that was a successful sync read operation, let's handle read ahead */
    if (Operation == CcOperationRead && Length == 0 && Wait)
    {
        /* If the file isn't random access, schedule the next read */
        if (!BooleanFlagOn(FileObject->Flags, FO_RANDOM_ACCESS))
        {
            CcScheduleReadAhead(FileObject, (PLARGE_INTEGER)&FileOffset, BytesCopied);
        }

        /* And update the read history in the private cache map */
        PrivateCacheMap->FileOffset1.QuadPart = PrivateCacheMap->FileOffset2.QuadPart;
        PrivateCacheMap->BeyondLastByte1.QuadPart = PrivateCacheMap->BeyondLastByte2.QuadPart;
        PrivateCacheMap->FileOffset2.QuadPart = FileOffset;
        PrivateCacheMap->BeyondLastByte2.QuadPart = FileOffset + BytesCopied;
    }

    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = BytesCopied;
    return TRUE;
}
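/* Walks the deferred write list and completes every entry that can now proceed
 * according to CcCanIWrite, either by signaling the waiter's event or by
 * calling its post routine and freeing the context. */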
VOID
CcPostDeferredWrites(VOID)
{
    ULONG WrittenBytes;

    /* We'll try to write as much as we can */
    WrittenBytes = 0;
    while (TRUE)
    {
        KIRQL OldIrql;
        PLIST_ENTRY ListEntry;
        PDEFERRED_WRITE DeferredWrite;

        DeferredWrite = NULL;

        /* Lock our deferred writes list */
        KeAcquireSpinLock(&CcDeferredWriteSpinLock, &OldIrql);
        for (ListEntry = CcDeferredWrites.Flink;
             ListEntry != &CcDeferredWrites;
             ListEntry = ListEntry->Flink)
        {
            /* Extract an entry */
            DeferredWrite = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);

            /* Compute the modified bytes, based on what we already wrote */
            WrittenBytes += DeferredWrite->BytesToWrite;
            /* We overflowed, give up */
            if (WrittenBytes < DeferredWrite->BytesToWrite)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Check we can write */
            if (CcCanIWrite(DeferredWrite->FileObject, WrittenBytes, FALSE, TRUE))
            {
                /* We can, so remove it from the list and stop looking for an entry */
                RemoveEntryList(&DeferredWrite->DeferredWriteLinks);
                break;
            }

            /* If we don't accept modified pages, stop here */
            if (!DeferredWrite->LimitModifiedPages)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Reset the count as nothing was written yet */
            WrittenBytes -= DeferredWrite->BytesToWrite;
            DeferredWrite = NULL;
        }
        KeReleaseSpinLock(&CcDeferredWriteSpinLock, OldIrql);

        /* Nothing to write found, give up */
        if (DeferredWrite == NULL)
        {
            break;
        }

        /* If we have an event, set it and quit */
        if (DeferredWrite->Event)
        {
            KeSetEvent(DeferredWrite->Event, IO_NO_INCREMENT, FALSE);
        }
        /* Otherwise, call the write routine and free the context */
        else
        {
            DeferredWrite->PostRoutine(DeferredWrite->Context1, DeferredWrite->Context2);
            ExFreePoolWithTag(DeferredWrite, 'CcDw');
        }
    }
}
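/* Worker that performs the read ahead scheduled by CcScheduleReadAhead: it only
 * brings data into the cache, no user buffer is involved. The extra file object
 * reference taken when the read ahead was scheduled is released here. */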
VOID
CcPerformReadAhead(
    IN PFILE_OBJECT FileObject)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;
    ULONG Length;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    BOOLEAN Locked;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    /* PrivateCacheMap might disappear in-between if the handle
     * to the file is closed (private is attached to the handle, not to
     * the file), so we need to hold the master lock while we deal with
     * it. It won't disappear without attempting to acquire that lock.
     */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    /* If the handle was closed since the read ahead was scheduled, just quit */
    if (PrivateCacheMap == NULL)
    {
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        ObDereferenceObject(FileObject);
        return;
    }
    /* Otherwise, extract the read offset and length and release the private map */
    else
    {
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        CurrentOffset = PrivateCacheMap->ReadAheadOffset[1].QuadPart;
        Length = PrivateCacheMap->ReadAheadLength[1];
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    DPRINT("Doing ReadAhead for %p\n", FileObject);
    /* Lock the file, first */
    if (!SharedCacheMap->Callbacks->AcquireForReadAhead(SharedCacheMap->LazyWriteContext, FALSE))
    {
        Locked = FALSE;
        goto Clear;
    }

    /* Remember it's locked */
    Locked = TRUE;

    /* The rest of the algorithm looks like CcCopyData, with the slight
     * difference that we don't copy data back to a user-backed buffer.
     * We just bring the data into Cc.
     */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                DPRINT1("Failed to read data: %lx!\n", Status);
                goto Clear;
            }
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                DPRINT1("Failed to read data: %lx!\n", Status);
                goto Clear;
            }
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }

Clear:
    /* See the previous comment about the private cache map */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    if (PrivateCacheMap != NULL)
    {
        /* Mark read ahead as inactive */
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        InterlockedAnd((volatile long *)&PrivateCacheMap->UlongFlags, 0xFFFEFFFF);
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* If the file was locked, release it */
    if (Locked)
    {
        SharedCacheMap->Callbacks->ReleaseFromReadAhead(SharedCacheMap->LazyWriteContext);
    }

    /* And drop our extra reference (See: CcScheduleReadAhead) */
    ObDereferenceObject(FileObject);
}
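/* Decides whether a write of BytesToWrite bytes may start now without pushing
 * the global or per-file dirty page counts above their thresholds; with Wait
 * set, the call blocks on the deferred write list until the write may proceed.
 *
 * Illustrative sketch only (not part of this file): a file system write path
 * would typically throttle itself with something like the following, where
 * DeferredWriteCallback, Context1 and Context2 are driver-provided:
 *
 *     if (!CcCanIWrite(FileObject, WriteLength, CanWait, FALSE))
 *     {
 *         CcDeferWrite(FileObject, DeferredWriteCallback,
 *                      Context1, Context2, WriteLength, FALSE);
 *         return STATUS_PENDING;
 *     }
 */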
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    KEVENT WaitEvent;
    DEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
        FileObject, BytesToWrite, Wait, Retrying);

    /* Grab the FCB header right away; it is also needed on the deferred path */
    Fcb = FileObject->FsContext;

    /* Write through is always OK */
    if (BooleanFlagOn(FileObject->Flags, FO_WRITE_THROUGH))
    {
        return TRUE;
    }

    /* We cannot write if the dirty page count is already above the threshold */
    if (CcTotalDirtyPages > CcDirtyPageThreshold)
    {
        /* Can the caller wait till it's possible to write? */
        goto CanIWait;
    }

    /* We cannot write if the dirty page count would go above the threshold
     * XXX: Might not be accurate
     */
    if (CcTotalDirtyPages + (BytesToWrite / PAGE_SIZE) > CcDirtyPageThreshold)
    {
        /* Can the caller wait till it's possible to write? */
        goto CanIWait;
    }

    /* Is there a limit per file object? */
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (!BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES) ||
        SharedCacheMap->DirtyPageThreshold == 0)
    {
        /* Nope, so that's fine, allow the write operation */
        return TRUE;
    }

    /* Is the dirty page count above the local threshold? */
    if (SharedCacheMap->DirtyPages > SharedCacheMap->DirtyPageThreshold)
    {
        /* Can the caller wait till it's possible to write? */
        goto CanIWait;
    }

    /* We cannot write if the dirty page count would go above the local threshold
     * XXX: Might not be accurate
     */
    if (SharedCacheMap->DirtyPages + (BytesToWrite / PAGE_SIZE) > SharedCacheMap->DirtyPageThreshold)
    {
        /* Can the caller wait till it's possible to write? */
        goto CanIWait;
    }

    return TRUE;

CanIWait:
    /* If we reached this point, the caller cannot write now.
     * If it cannot wait either, fail and deny the write.
     */
    if (!Wait)
    {
        return FALSE;
    }

    /* Otherwise, if there are no deferred writes yet, start the lazy writer */
    if (IsListEmpty(&CcDeferredWrites))
    {
        KIRQL OldIrql;

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        CcScheduleLazyWriteScan(TRUE);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }

    /* Initialize our wait event */
    KeInitializeEvent(&WaitEvent, NotificationEvent, FALSE);

    /* And prepare a dummy context */
    Context.NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context.NodeByteSize = sizeof(DEFERRED_WRITE);
    Context.FileObject = FileObject;
    Context.BytesToWrite = BytesToWrite;
    Context.LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
    Context.Event = &WaitEvent;

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Now, we'll loop until our event is set. When it is set, it means the
     * caller can write immediately, and is expected to.
     */
    do
    {
        CcPostDeferredWrites();
    } while (KeWaitForSingleObject(&WaitEvent, Executive, KernelMode, FALSE, &CcIdleDelay) != STATUS_SUCCESS);

    return TRUE;
}
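/* Public cached read entry point: copies Length bytes starting at FileOffset
 * from the cache (reading them in if necessary) into Buffer.
 *
 * Illustrative sketch only (not part of this file): a file system read
 * dispatch routine would typically call it as
 *
 *     if (!CcCopyRead(FileObject, &ByteOffset, ReadLength, CanWait,
 *                     SystemBuffer, &Irp->IoStatus))
 *     {
 *         // data not resident and the caller cannot wait: post the request
 *     }
 */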
BOOLEAN
NTAPI
CcCopyRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
        FileObject, FileOffset->QuadPart, Length, Wait);

    DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait,
           Buffer, IoStatus);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationRead,
                      Wait,
                      IoStatus);
}
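/* Public cached write entry point: copies Length bytes from Buffer into the
 * cache at FileOffset; the touched views are marked dirty so the lazy writer
 * flushes them later. */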
BOOLEAN
NTAPI
CcCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    IN PVOID Buffer)
{
    IO_STATUS_BLOCK IoStatus;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
        FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationWrite,
                      Wait,
                      &IoStatus);
}
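/* Queues a write that could not be started right away: the request is put on
 * the deferred write list and PostRoutine(Context1, Context2) is invoked once
 * CcCanIWrite reports that enough dirty pages have been flushed. */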
VOID
NTAPI
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    PDEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;

    CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
        FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);

    /* Try to allocate a context for queueing the write operation */
    Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(DEFERRED_WRITE), 'CcDw');
    /* If it failed, immediately execute the operation! */
    if (Context == NULL)
    {
        PostRoutine(Context1, Context2);
        return;
    }

    Fcb = FileObject->FsContext;

    /* Otherwise, initialize the context */
    RtlZeroMemory(Context, sizeof(DEFERRED_WRITE));
    Context->NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context->NodeByteSize = sizeof(DEFERRED_WRITE);
    Context->FileObject = FileObject;
    Context->PostRoutine = PostRoutine;
    Context->Context1 = Context1;
    Context->Context2 = Context2;
    Context->BytesToWrite = BytesToWrite;
    Context->LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Try to execute the posted writes */
    CcPostDeferredWrites();

    /* Schedule a lazy writer run to handle deferred writes */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}
VOID
NTAPI
CcFastCopyRead (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN ULONG PageCount,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, PageCount, Buffer);

    DBG_UNREFERENCED_PARAMETER(PageCount);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyRead(FileObject,
                         &LargeFileOffset,
                         Length,
                         TRUE,
                         Buffer,
                         IoStatus);
    ASSERT(Success == TRUE);
}
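/* Variant of CcCopyWrite used on the fast I/O path: same behaviour with Wait
 * forced to TRUE and a 32-bit file offset. */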
VOID
NTAPI
CcFastCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN PVOID Buffer)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, Buffer);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyWrite(FileObject,
                          &LargeFileOffset,
                          Length,
                          TRUE,
                          Buffer);
    ASSERT(Success == TRUE);
}
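/* Zeroes the byte range [StartOffset, EndOffset) of the file. For non-cached
 * files this is done with MDL paging writes whose pages all point at the shared
 * CcZeroPage; for cached files it goes through CcCopyData with CcOperationZero. */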
BOOLEAN
NTAPI
CcZeroData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER StartOffset,
    IN PLARGE_INTEGER EndOffset,
    IN BOOLEAN Wait)
{
    NTSTATUS Status;
    LARGE_INTEGER WriteOffset;
    LONGLONG Length;
    ULONG CurrentLength;
    PMDL Mdl;
    ULONG i;
    IO_STATUS_BLOCK Iosb;
    KEVENT Event;

    CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
        FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
           Wait);

    Length = EndOffset->QuadPart - StartOffset->QuadPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
    {
        /* File is not cached */

        Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));

        while (Length > 0)
        {
            if (Length + WriteOffset.QuadPart % PAGE_SIZE > MAX_ZERO_LENGTH)
            {
                CurrentLength = MAX_ZERO_LENGTH - WriteOffset.QuadPart % PAGE_SIZE;
            }
            else
            {
                CurrentLength = Length;
            }
            MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
            Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
            for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
            {
                ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
            }
            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
            if (Status == STATUS_PENDING)
            {
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status))
            {
                return FALSE;
            }
            WriteOffset.QuadPart += CurrentLength;
            Length -= CurrentLength;
        }
    }
    else
    {
        IO_STATUS_BLOCK IoStatus;

        return CcCopyData(FileObject,
                          WriteOffset.QuadPart,