2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/copy.c
 * PURPOSE:         Implements the cache manager's copy interface
7 * PROGRAMMERS: Some people?
8 * Pierre Schweitzer (pierre@reactos.org)
11 /* INCLUDES ******************************************************************/
17 /* GLOBALS *******************************************************************/
19 static PFN_NUMBER CcZeroPage
= 0;
21 #define MAX_ZERO_LENGTH (256 * 1024)
23 typedef enum _CC_COPY_OPERATION
30 typedef enum _CC_CAN_WRITE_RETRY
33 RetryAllowRemote
= 253,
34 RetryForceCheckPerFile
= 254,
35 RetryMasterLocked
= 255,
38 ULONG CcRosTraceLevel
= 0;
39 ULONG CcFastMdlReadWait
;
40 ULONG CcFastMdlReadNotPossible
;
41 ULONG CcFastReadNotPossible
;
43 ULONG CcFastReadNoWait
;
44 ULONG CcFastReadResourceMiss
;
46 /* FUNCTIONS *****************************************************************/
51 IN PFN_NUMBER PageFrameIndex
61 MI_SET_USAGE(MI_USAGE_CACHE
);
62 //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
63 Status
= MmRequestPageMemoryConsumer(MC_SYSTEM
, TRUE
, &CcZeroPage
);
64 if (!NT_SUCCESS(Status
))
66 DbgPrint("Can't allocate CcZeroPage.\n");
67 KeBugCheck(CACHE_MANAGER
);
69 MiZeroPhysicalPage(CcZeroPage
);
74 CcReadVirtualAddress (
80 IO_STATUS_BLOCK IoStatus
;
83 Size
= (ULONG
)(Vacb
->SharedCacheMap
->SectionSize
.QuadPart
- Vacb
->FileOffset
.QuadPart
);
84 if (Size
> VACB_MAPPING_GRANULARITY
)
86 Size
= VACB_MAPPING_GRANULARITY
;
89 Pages
= BYTES_TO_PAGES(Size
);
90 ASSERT(Pages
* PAGE_SIZE
<= VACB_MAPPING_GRANULARITY
);
92 Mdl
= IoAllocateMdl(Vacb
->BaseAddress
, Pages
* PAGE_SIZE
, FALSE
, FALSE
, NULL
);
95 return STATUS_INSUFFICIENT_RESOURCES
;
98 Status
= STATUS_SUCCESS
;
101 MmProbeAndLockPages(Mdl
, KernelMode
, IoWriteAccess
);
103 _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER
)
105 Status
= _SEH2_GetExceptionCode();
106 KeBugCheck(CACHE_MANAGER
);
109 if (NT_SUCCESS(Status
))
111 Mdl
->MdlFlags
|= MDL_IO_PAGE_READ
;
112 KeInitializeEvent(&Event
, NotificationEvent
, FALSE
);
113 Status
= IoPageRead(Vacb
->SharedCacheMap
->FileObject
, Mdl
, &Vacb
->FileOffset
, &Event
, &IoStatus
);
114 if (Status
== STATUS_PENDING
)
116 KeWaitForSingleObject(&Event
, Executive
, KernelMode
, FALSE
, NULL
);
117 Status
= IoStatus
.Status
;
125 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
127 DPRINT1("IoPageRead failed, Status %x\n", Status
);
131 if (Size
< VACB_MAPPING_GRANULARITY
)
133 RtlZeroMemory((char*)Vacb
->BaseAddress
+ Size
,
134 VACB_MAPPING_GRANULARITY
- Size
);
137 return STATUS_SUCCESS
;
142 CcWriteVirtualAddress (
148 IO_STATUS_BLOCK IoStatus
;
151 Size
= (ULONG
)(Vacb
->SharedCacheMap
->SectionSize
.QuadPart
- Vacb
->FileOffset
.QuadPart
);
152 if (Size
> VACB_MAPPING_GRANULARITY
)
154 Size
= VACB_MAPPING_GRANULARITY
;
157 // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
158 // MmGlobalPageDirectory and the real system PDE directory. What a mess...
164 MmGetPfnForProcess(NULL
, (PVOID
)((ULONG_PTR
)Vacb
->BaseAddress
+ (i
<< PAGE_SHIFT
)));
165 } while (++i
< (Size
>> PAGE_SHIFT
));
168 Mdl
= IoAllocateMdl(Vacb
->BaseAddress
, Size
, FALSE
, FALSE
, NULL
);
171 return STATUS_INSUFFICIENT_RESOURCES
;
174 Status
= STATUS_SUCCESS
;
177 MmProbeAndLockPages(Mdl
, KernelMode
, IoReadAccess
);
179 _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER
)
181 Status
= _SEH2_GetExceptionCode();
182 KeBugCheck(CACHE_MANAGER
);
185 if (NT_SUCCESS(Status
))
187 KeInitializeEvent(&Event
, NotificationEvent
, FALSE
);
188 Status
= IoSynchronousPageWrite(Vacb
->SharedCacheMap
->FileObject
, Mdl
, &Vacb
->FileOffset
, &Event
, &IoStatus
);
189 if (Status
== STATUS_PENDING
)
191 KeWaitForSingleObject(&Event
, Executive
, KernelMode
, FALSE
, NULL
);
192 Status
= IoStatus
.Status
;
198 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
200 DPRINT1("IoPageWrite failed, Status %x\n", Status
);
204 return STATUS_SUCCESS
;
209 _Inout_ PVOID BaseAddress
,
210 _Inout_opt_ PVOID Buffer
,
212 _In_ CC_COPY_OPERATION Operation
)
214 NTSTATUS Status
= STATUS_SUCCESS
;
216 if (Operation
== CcOperationZero
)
219 RtlZeroMemory(BaseAddress
, Length
);
225 if (Operation
== CcOperationWrite
)
226 RtlCopyMemory(BaseAddress
, Buffer
, Length
);
228 RtlCopyMemory(Buffer
, BaseAddress
, Length
);
230 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER
)
232 Status
= _SEH2_GetExceptionCode();
241 _In_ PFILE_OBJECT FileObject
,
242 _In_ LONGLONG FileOffset
,
243 _Inout_ PVOID Buffer
,
244 _In_ LONGLONG Length
,
245 _In_ CC_COPY_OPERATION Operation
,
247 _Out_ PIO_STATUS_BLOCK IoStatus
)
250 LONGLONG CurrentOffset
;
253 PROS_SHARED_CACHE_MAP SharedCacheMap
;
254 PLIST_ENTRY ListEntry
;
259 PPRIVATE_CACHE_MAP PrivateCacheMap
;
261 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
262 PrivateCacheMap
= FileObject
->PrivateCacheMap
;
263 CurrentOffset
= FileOffset
;
268 /* test if the requested data is available */
269 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &OldIrql
);
270 /* FIXME: this loop doesn't take into account areas that don't have
271 * a VACB in the list yet */
272 ListEntry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
273 while (ListEntry
!= &SharedCacheMap
->CacheMapVacbListHead
)
275 Vacb
= CONTAINING_RECORD(ListEntry
,
277 CacheMapVacbListEntry
);
278 ListEntry
= ListEntry
->Flink
;
280 DoRangesIntersect(Vacb
->FileOffset
.QuadPart
,
281 VACB_MAPPING_GRANULARITY
,
282 CurrentOffset
, Length
))
284 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, OldIrql
);
285 /* data not available */
288 if (Vacb
->FileOffset
.QuadPart
>= CurrentOffset
+ Length
)
291 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, OldIrql
);
294 PartialLength
= CurrentOffset
% VACB_MAPPING_GRANULARITY
;
295 if (PartialLength
!= 0)
297 PartialLength
= min(Length
, VACB_MAPPING_GRANULARITY
- PartialLength
);
298 Status
= CcRosRequestVacb(SharedCacheMap
,
299 ROUND_DOWN(CurrentOffset
,
300 VACB_MAPPING_GRANULARITY
),
304 if (!NT_SUCCESS(Status
))
305 ExRaiseStatus(Status
);
308 Status
= CcReadVirtualAddress(Vacb
);
309 if (!NT_SUCCESS(Status
))
311 CcRosReleaseVacb(SharedCacheMap
, Vacb
, FALSE
, FALSE
, FALSE
);
312 ExRaiseStatus(Status
);
315 Status
= ReadWriteOrZero((PUCHAR
)BaseAddress
+ CurrentOffset
% VACB_MAPPING_GRANULARITY
,
320 CcRosReleaseVacb(SharedCacheMap
, Vacb
, TRUE
, Operation
!= CcOperationRead
, FALSE
);
322 if (!NT_SUCCESS(Status
))
323 ExRaiseStatus(STATUS_INVALID_USER_BUFFER
);
325 Length
-= PartialLength
;
326 CurrentOffset
+= PartialLength
;
327 BytesCopied
+= PartialLength
;
329 if (Operation
!= CcOperationZero
)
330 Buffer
= (PVOID
)((ULONG_PTR
)Buffer
+ PartialLength
);
335 ASSERT(CurrentOffset
% VACB_MAPPING_GRANULARITY
== 0);
336 PartialLength
= min(VACB_MAPPING_GRANULARITY
, Length
);
337 Status
= CcRosRequestVacb(SharedCacheMap
,
342 if (!NT_SUCCESS(Status
))
343 ExRaiseStatus(Status
);
345 (Operation
== CcOperationRead
||
346 PartialLength
< VACB_MAPPING_GRANULARITY
))
348 Status
= CcReadVirtualAddress(Vacb
);
349 if (!NT_SUCCESS(Status
))
351 CcRosReleaseVacb(SharedCacheMap
, Vacb
, FALSE
, FALSE
, FALSE
);
352 ExRaiseStatus(Status
);
355 Status
= ReadWriteOrZero(BaseAddress
, Buffer
, PartialLength
, Operation
);
357 CcRosReleaseVacb(SharedCacheMap
, Vacb
, TRUE
, Operation
!= CcOperationRead
, FALSE
);
359 if (!NT_SUCCESS(Status
))
360 ExRaiseStatus(STATUS_INVALID_USER_BUFFER
);
362 Length
-= PartialLength
;
363 CurrentOffset
+= PartialLength
;
364 BytesCopied
+= PartialLength
;
366 if (Operation
!= CcOperationZero
)
367 Buffer
= (PVOID
)((ULONG_PTR
)Buffer
+ PartialLength
);
370 /* If that was a successful sync read operation, let's handle read ahead */
371 if (Operation
== CcOperationRead
&& Length
== 0 && Wait
)
373 /* If file isn't random access, schedule next read */
374 if (!BooleanFlagOn(FileObject
->Flags
, FO_RANDOM_ACCESS
))
376 CcScheduleReadAhead(FileObject
, (PLARGE_INTEGER
)&FileOffset
, BytesCopied
);
379 /* And update read history in private cache map */
380 PrivateCacheMap
->FileOffset1
.QuadPart
= PrivateCacheMap
->FileOffset2
.QuadPart
;
381 PrivateCacheMap
->BeyondLastByte1
.QuadPart
= PrivateCacheMap
->BeyondLastByte2
.QuadPart
;
382 PrivateCacheMap
->FileOffset2
.QuadPart
= FileOffset
;
383 PrivateCacheMap
->BeyondLastByte2
.QuadPart
= FileOffset
+ BytesCopied
;
386 IoStatus
->Status
= STATUS_SUCCESS
;
387 IoStatus
->Information
= BytesCopied
;
392 CcPostDeferredWrites(VOID
)
396 /* We'll try to write as much as we can */
401 PLIST_ENTRY ListEntry
;
402 PDEFERRED_WRITE DeferredWrite
;
404 DeferredWrite
= NULL
;
406 /* Lock our deferred writes list */
407 KeAcquireSpinLock(&CcDeferredWriteSpinLock
, &OldIrql
);
408 for (ListEntry
= CcDeferredWrites
.Flink
;
409 ListEntry
!= &CcDeferredWrites
;
410 ListEntry
= ListEntry
->Flink
)
412 /* Extract an entry */
413 DeferredWrite
= CONTAINING_RECORD(ListEntry
, DEFERRED_WRITE
, DeferredWriteLinks
);
415 /* Compute the modified bytes, based on what we already wrote */
416 WrittenBytes
+= DeferredWrite
->BytesToWrite
;
417 /* We overflowed, give up */
418 if (WrittenBytes
< DeferredWrite
->BytesToWrite
)
420 DeferredWrite
= NULL
;
424 /* Check we can write */
425 if (CcCanIWrite(DeferredWrite
->FileObject
, WrittenBytes
, FALSE
, RetryForceCheckPerFile
))
427 /* We can, so remove it from the list and stop looking for entry */
428 RemoveEntryList(&DeferredWrite
->DeferredWriteLinks
);
432 /* If we don't accept modified pages, stop here */
433 if (!DeferredWrite
->LimitModifiedPages
)
435 DeferredWrite
= NULL
;
439 /* Reset count as nothing was written yet */
440 WrittenBytes
-= DeferredWrite
->BytesToWrite
;
441 DeferredWrite
= NULL
;
443 KeReleaseSpinLock(&CcDeferredWriteSpinLock
, OldIrql
);
445 /* Nothing to write found, give up */
446 if (DeferredWrite
== NULL
)
451 /* If we have an event, set it and quit */
452 if (DeferredWrite
->Event
)
454 KeSetEvent(DeferredWrite
->Event
, IO_NO_INCREMENT
, FALSE
);
456 /* Otherwise, call the write routine and free the context */
459 DeferredWrite
->PostRoutine(DeferredWrite
->Context1
, DeferredWrite
->Context2
);
460 ExFreePoolWithTag(DeferredWrite
, 'CcDw');
467 IN PFILE_OBJECT FileObject
)
470 LONGLONG CurrentOffset
;
472 PROS_SHARED_CACHE_MAP SharedCacheMap
;
478 PPRIVATE_CACHE_MAP PrivateCacheMap
;
481 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
484 * PrivateCacheMap might disappear in-between if the handle
485 * to the file is closed (private is attached to the handle not to
486 * the file), so we need to lock the master lock while we deal with
487 * it. It won't disappear without attempting to lock such lock.
489 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
490 PrivateCacheMap
= FileObject
->PrivateCacheMap
;
491 /* If the handle was closed since the read ahead was scheduled, just quit */
492 if (PrivateCacheMap
== NULL
)
494 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
495 ObDereferenceObject(FileObject
);
498 /* Otherwise, extract read offset and length and release private map */
501 KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap
->ReadAheadSpinLock
);
502 CurrentOffset
= PrivateCacheMap
->ReadAheadOffset
[1].QuadPart
;
503 Length
= PrivateCacheMap
->ReadAheadLength
[1];
504 KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap
->ReadAheadSpinLock
);
506 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
509 DPRINT("Doing ReadAhead for %p\n", FileObject
);
510 /* Lock the file, first */
511 if (!SharedCacheMap
->Callbacks
->AcquireForReadAhead(SharedCacheMap
->LazyWriteContext
, FALSE
))
517 /* Remember it's locked */
520 /* Next of the algorithm will lock like CcCopyData with the slight
521 * difference that we don't copy data back to an user-backed buffer
522 * We just bring data into Cc
524 PartialLength
= CurrentOffset
% VACB_MAPPING_GRANULARITY
;
525 if (PartialLength
!= 0)
527 PartialLength
= min(Length
, VACB_MAPPING_GRANULARITY
- PartialLength
);
528 Status
= CcRosRequestVacb(SharedCacheMap
,
529 ROUND_DOWN(CurrentOffset
,
530 VACB_MAPPING_GRANULARITY
),
534 if (!NT_SUCCESS(Status
))
536 DPRINT1("Failed to request VACB: %lx!\n", Status
);
542 Status
= CcReadVirtualAddress(Vacb
);
543 if (!NT_SUCCESS(Status
))
545 CcRosReleaseVacb(SharedCacheMap
, Vacb
, FALSE
, FALSE
, FALSE
);
546 DPRINT1("Failed to read data: %lx!\n", Status
);
551 CcRosReleaseVacb(SharedCacheMap
, Vacb
, TRUE
, FALSE
, FALSE
);
553 Length
-= PartialLength
;
554 CurrentOffset
+= PartialLength
;
559 ASSERT(CurrentOffset
% VACB_MAPPING_GRANULARITY
== 0);
560 PartialLength
= min(VACB_MAPPING_GRANULARITY
, Length
);
561 Status
= CcRosRequestVacb(SharedCacheMap
,
566 if (!NT_SUCCESS(Status
))
568 DPRINT1("Failed to request VACB: %lx!\n", Status
);
574 Status
= CcReadVirtualAddress(Vacb
);
575 if (!NT_SUCCESS(Status
))
577 CcRosReleaseVacb(SharedCacheMap
, Vacb
, FALSE
, FALSE
, FALSE
);
578 DPRINT1("Failed to read data: %lx!\n", Status
);
583 CcRosReleaseVacb(SharedCacheMap
, Vacb
, TRUE
, FALSE
, FALSE
);
585 Length
-= PartialLength
;
586 CurrentOffset
+= PartialLength
;
590 /* See previous comment about private cache map */
591 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
592 PrivateCacheMap
= FileObject
->PrivateCacheMap
;
593 if (PrivateCacheMap
!= NULL
)
595 /* Mark read ahead as unactive */
596 KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap
->ReadAheadSpinLock
);
597 InterlockedAnd((volatile long *)&PrivateCacheMap
->UlongFlags
, 0xFFFEFFFF);
598 KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap
->ReadAheadSpinLock
);
600 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
602 /* If file was locked, release it */
605 SharedCacheMap
->Callbacks
->ReleaseFromReadAhead(SharedCacheMap
->LazyWriteContext
);
608 /* And drop our extra reference (See: CcScheduleReadAhead) */
609 ObDereferenceObject(FileObject
);
620 IN PFILE_OBJECT FileObject
,
621 IN ULONG BytesToWrite
,
628 BOOLEAN PerFileDefer
;
629 DEFERRED_WRITE Context
;
630 PFSRTL_COMMON_FCB_HEADER Fcb
;
631 CC_CAN_WRITE_RETRY TryContext
;
632 PROS_SHARED_CACHE_MAP SharedCacheMap
;
634 CCTRACE(CC_API_DEBUG
, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
635 FileObject
, BytesToWrite
, Wait
, Retrying
);
637 /* Write through is always OK */
638 if (BooleanFlagOn(FileObject
->Flags
, FO_WRITE_THROUGH
))
643 TryContext
= Retrying
;
644 /* Allow remote file if not from posted */
645 if (IoIsFileOriginRemote(FileObject
) && TryContext
< RetryAllowRemote
)
650 /* Don't exceed max tolerated size */
651 Length
= MAX_ZERO_LENGTH
;
652 if (BytesToWrite
< MAX_ZERO_LENGTH
)
654 Length
= BytesToWrite
;
657 /* Convert it to pages count */
658 Pages
= (Length
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
;
660 /* By default, assume limits per file won't be hit */
661 PerFileDefer
= FALSE
;
662 Fcb
= FileObject
->FsContext
;
663 /* Do we have to check for limits per file? */
664 if (TryContext
>= RetryForceCheckPerFile
||
665 BooleanFlagOn(Fcb
->Flags
, FSRTL_FLAG_LIMIT_MODIFIED_PAGES
))
667 /* If master is not locked, lock it now */
668 if (TryContext
!= RetryMasterLocked
)
670 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
673 /* Let's not assume the file is cached... */
674 if (FileObject
->SectionObjectPointer
!= NULL
&&
675 FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
677 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
678 /* Do we have limits per file set? */
679 if (SharedCacheMap
->DirtyPageThreshold
!= 0 &&
680 SharedCacheMap
->DirtyPages
!= 0)
682 /* Yes, check whether they are blocking */
683 if (Pages
+ SharedCacheMap
->DirtyPages
> SharedCacheMap
->DirtyPageThreshold
)
690 /* And don't forget to release master */
691 if (TryContext
!= RetryMasterLocked
)
693 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
697 /* So, now allow write if:
698 * - Not the first try or we have no throttling yet
700 * - We don't exceed threshold!
701 * - We don't exceed what Mm can allow us to use
702 * + If we're above top, that's fine
703 * + If we're above bottom with limited modified pages, that's fine
704 * + Otherwise, throttle!
706 if ((TryContext
!= FirstTry
|| IsListEmpty(&CcDeferredWrites
)) &&
707 CcTotalDirtyPages
+ Pages
< CcDirtyPageThreshold
&&
708 (MmAvailablePages
> MmThrottleTop
||
709 (MmModifiedPageListHead
.Total
< 1000 && MmAvailablePages
> MmThrottleBottom
)) &&
715 /* If we can wait, we'll start the wait loop for waiting till we can
723 /* Otherwise, if there are no deferred writes yet, start the lazy writer */
724 if (IsListEmpty(&CcDeferredWrites
))
728 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
729 CcScheduleLazyWriteScan(TRUE
);
730 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
733 /* Initialize our wait event */
734 KeInitializeEvent(&WaitEvent
, NotificationEvent
, FALSE
);
736 /* And prepare a dummy context */
737 Context
.NodeTypeCode
= NODE_TYPE_DEFERRED_WRITE
;
738 Context
.NodeByteSize
= sizeof(DEFERRED_WRITE
);
739 Context
.FileObject
= FileObject
;
740 Context
.BytesToWrite
= BytesToWrite
;
741 Context
.LimitModifiedPages
= BooleanFlagOn(Fcb
->Flags
, FSRTL_FLAG_LIMIT_MODIFIED_PAGES
);
742 Context
.Event
= &WaitEvent
;
747 /* To the top, if that's a retry */
748 ExInterlockedInsertHeadList(&CcDeferredWrites
,
749 &Context
.DeferredWriteLinks
,
750 &CcDeferredWriteSpinLock
);
754 /* To the bottom, if that's a first time */
755 ExInterlockedInsertTailList(&CcDeferredWrites
,
756 &Context
.DeferredWriteLinks
,
757 &CcDeferredWriteSpinLock
);
760 /* Now, we'll loop until our event is set. When it is set, it means that caller
761 * can immediately write, and has to
765 CcPostDeferredWrites();
766 } while (KeWaitForSingleObject(&WaitEvent
, Executive
, KernelMode
, FALSE
, &CcIdleDelay
) != STATUS_SUCCESS
);
777 IN PFILE_OBJECT FileObject
,
778 IN PLARGE_INTEGER FileOffset
,
782 OUT PIO_STATUS_BLOCK IoStatus
)
784 CCTRACE(CC_API_DEBUG
, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
785 FileObject
, FileOffset
->QuadPart
, Length
, Wait
);
787 DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
788 "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
789 FileObject
, FileOffset
->QuadPart
, Length
, Wait
,
792 return CcCopyData(FileObject
,
793 FileOffset
->QuadPart
,
807 IN PFILE_OBJECT FileObject
,
808 IN PLARGE_INTEGER FileOffset
,
813 IO_STATUS_BLOCK IoStatus
;
815 CCTRACE(CC_API_DEBUG
, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
816 FileObject
, FileOffset
->QuadPart
, Length
, Wait
, Buffer
);
818 DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
819 "Length %lu, Wait %u, Buffer 0x%p)\n",
820 FileObject
, FileOffset
->QuadPart
, Length
, Wait
, Buffer
);
822 return CcCopyData(FileObject
,
823 FileOffset
->QuadPart
,
837 IN PFILE_OBJECT FileObject
,
838 IN PCC_POST_DEFERRED_WRITE PostRoutine
,
841 IN ULONG BytesToWrite
,
845 PDEFERRED_WRITE Context
;
846 PFSRTL_COMMON_FCB_HEADER Fcb
;
848 CCTRACE(CC_API_DEBUG
, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
849 FileObject
, PostRoutine
, Context1
, Context2
, BytesToWrite
, Retrying
);
851 /* Try to allocate a context for queueing the write operation */
852 Context
= ExAllocatePoolWithTag(NonPagedPool
, sizeof(DEFERRED_WRITE
), 'CcDw');
853 /* If it failed, immediately execute the operation! */
856 PostRoutine(Context1
, Context2
);
860 Fcb
= FileObject
->FsContext
;
862 /* Otherwise, initialize the context */
863 RtlZeroMemory(Context
, sizeof(DEFERRED_WRITE
));
864 Context
->NodeTypeCode
= NODE_TYPE_DEFERRED_WRITE
;
865 Context
->NodeByteSize
= sizeof(DEFERRED_WRITE
);
866 Context
->FileObject
= FileObject
;
867 Context
->PostRoutine
= PostRoutine
;
868 Context
->Context1
= Context1
;
869 Context
->Context2
= Context2
;
870 Context
->BytesToWrite
= BytesToWrite
;
871 Context
->LimitModifiedPages
= BooleanFlagOn(Fcb
->Flags
, FSRTL_FLAG_LIMIT_MODIFIED_PAGES
);
876 /* To the top, if that's a retry */
877 ExInterlockedInsertHeadList(&CcDeferredWrites
,
878 &Context
->DeferredWriteLinks
,
879 &CcDeferredWriteSpinLock
);
883 /* To the bottom, if that's a first time */
884 ExInterlockedInsertTailList(&CcDeferredWrites
,
885 &Context
->DeferredWriteLinks
,
886 &CcDeferredWriteSpinLock
);
889 /* Try to execute the posted writes */
890 CcPostDeferredWrites();
892 /* Schedule a lazy writer run to handle deferred writes */
893 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
894 if (!LazyWriter
.ScanActive
)
896 CcScheduleLazyWriteScan(FALSE
);
898 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
907 IN PFILE_OBJECT FileObject
,
912 OUT PIO_STATUS_BLOCK IoStatus
)
914 LARGE_INTEGER LargeFileOffset
;
917 CCTRACE(CC_API_DEBUG
, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
918 FileObject
, FileOffset
, Length
, PageCount
, Buffer
);
920 DBG_UNREFERENCED_PARAMETER(PageCount
);
922 LargeFileOffset
.QuadPart
= FileOffset
;
923 Success
= CcCopyRead(FileObject
,
929 ASSERT(Success
== TRUE
);
938 IN PFILE_OBJECT FileObject
,
943 LARGE_INTEGER LargeFileOffset
;
946 CCTRACE(CC_API_DEBUG
, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
947 FileObject
, FileOffset
, Length
, Buffer
);
949 LargeFileOffset
.QuadPart
= FileOffset
;
950 Success
= CcCopyWrite(FileObject
,
955 ASSERT(Success
== TRUE
);
964 IN PFILE_OBJECT FileObject
,
965 IN PLARGE_INTEGER StartOffset
,
966 IN PLARGE_INTEGER EndOffset
,
970 LARGE_INTEGER WriteOffset
;
975 IO_STATUS_BLOCK Iosb
;
978 CCTRACE(CC_API_DEBUG
, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
979 FileObject
, StartOffset
->QuadPart
, EndOffset
->QuadPart
, Wait
);
981 DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
982 "Wait %u)\n", FileObject
, StartOffset
->QuadPart
, EndOffset
->QuadPart
,
985 Length
= EndOffset
->QuadPart
- StartOffset
->QuadPart
;
986 WriteOffset
.QuadPart
= StartOffset
->QuadPart
;
988 if (FileObject
->SectionObjectPointer
->SharedCacheMap
== NULL
)
990 /* File is not cached */
992 Mdl
= _alloca(MmSizeOfMdl(NULL
, MAX_ZERO_LENGTH
));
996 if (Length
+ WriteOffset
.QuadPart
% PAGE_SIZE
> MAX_ZERO_LENGTH
)
998 CurrentLength
= MAX_ZERO_LENGTH
- WriteOffset
.QuadPart
% PAGE_SIZE
;
1002 CurrentLength
= Length
;
1004 MmInitializeMdl(Mdl
, (PVOID
)(ULONG_PTR
)WriteOffset
.QuadPart
, CurrentLength
);
1005 Mdl
->MdlFlags
|= (MDL_PAGES_LOCKED
| MDL_IO_PAGE_READ
);
1006 for (i
= 0; i
< ((Mdl
->Size
- sizeof(MDL
)) / sizeof(ULONG
)); i
++)
1008 ((PPFN_NUMBER
)(Mdl
+ 1))[i
] = CcZeroPage
;
1010 KeInitializeEvent(&Event
, NotificationEvent
, FALSE
);
1011 Status
= IoSynchronousPageWrite(FileObject
, Mdl
, &WriteOffset
, &Event
, &Iosb
);
1012 if (Status
== STATUS_PENDING
)
1014 KeWaitForSingleObject(&Event
, Executive
, KernelMode
, FALSE
, NULL
);
1015 Status
= Iosb
.Status
;
1017 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
1019 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
1021 if (!NT_SUCCESS(Status
))
1025 WriteOffset
.QuadPart
+= CurrentLength
;
1026 Length
-= CurrentLength
;
1031 IO_STATUS_BLOCK IoStatus
;
1033 return CcCopyData(FileObject
,
1034 WriteOffset
.QuadPart
,