/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/copy.c
 * PURPOSE:         Implements the cache manager's copy interface
 *
 * PROGRAMMERS:     Some people?
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
/* GLOBALS *******************************************************************/

static PFN_NUMBER CcZeroPage = 0;

#define MAX_ZERO_LENGTH    (256 * 1024)
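/* Illustration: MAX_ZERO_LENGTH caps how much CcZeroData zeroes per
 * synchronous page write and how much CcCanIWrite accounts for at once.
 * E.g. a page-aligned 1 MB zeroing of an uncached file is issued as four
 * 256 KB writes, all backed by the single CcZeroPage frame. */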
typedef enum _CC_COPY_OPERATION
{
    CcOperationRead,
    CcOperationWrite,
    CcOperationZero
} CC_COPY_OPERATION;

typedef enum _CC_CAN_WRITE_RETRY
{
    FirstTry = 0,
    RetryAllowRemote = 253,
    RetryForceCheckPerFile = 254,
    RetryMasterLocked = 255,
} CC_CAN_WRITE_RETRY;
ULONG CcRosTraceLevel = 0;
ULONG CcFastMdlReadWait;
ULONG CcFastMdlReadNotPossible;
ULONG CcFastReadNotPossible;
ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;

/* Counters:
 * - Amount of pages flushed to the disk
 * - Number of flush operations
 */
ULONG CcDataPages = 0;
ULONG CcDataFlushes = 0;
/* FUNCTIONS *****************************************************************/

VOID
NTAPI
MiZeroPhysicalPage(
    IN PFN_NUMBER PageFrameIndex);

VOID
NTAPI
CcInitCacheZeroPage(VOID)
{
    NTSTATUS Status;

    MI_SET_USAGE(MI_USAGE_CACHE);
    //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
    Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
    if (!NT_SUCCESS(Status))
    {
        DbgPrint("Can't allocate CcZeroPage.\n");
        KeBugCheck(CACHE_MANAGER);
    }
    MiZeroPhysicalPage(CcZeroPage);
}
NTSTATUS
NTAPI
CcReadVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;
    ULARGE_INTEGER LargeSize;

    /* Read no further than the end of the section, and never more than one view */
    LargeSize.QuadPart = Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart;
    if (LargeSize.QuadPart > VACB_MAPPING_GRANULARITY)
    {
        LargeSize.QuadPart = VACB_MAPPING_GRANULARITY;
    }
    Size = LargeSize.LowPart;

    Size = ROUND_TO_PAGES(Size);
    ASSERT(Size <= VACB_MAPPING_GRANULARITY);
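    /* Example (assuming VACB_MAPPING_GRANULARITY is 256 KB and PAGE_SIZE is
     * 4096): a VACB starting 5000 bytes before the end of the section reads
     * ROUND_TO_PAGES(5000) = 8192 bytes; the remaining 253952 bytes of the
     * view are zeroed at the end of this routine. */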
    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (Mdl == NULL)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        DPRINT1("MmProbeAndLockPages failed with: %lx for %p (%p, %p)\n", Status, Mdl, Vacb, Vacb->BaseAddress);
        KeBugCheck(CACHE_MANAGER);
    }
    _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        Mdl->MdlFlags |= MDL_IO_PAGE_READ;
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoPageRead failed, Status %x\n", Status);
        return Status;
    }

    /* Zero the rest of the view that lies past the data actually read */
    if (Size < VACB_MAPPING_GRANULARITY)
    {
        RtlZeroMemory((char*)Vacb->BaseAddress + Size,
                      VACB_MAPPING_GRANULARITY - Size);
    }

    return STATUS_SUCCESS;
}
NTSTATUS
NTAPI
CcWriteVirtualAddress (
    PROS_VACB Vacb)
{
    ULONG Size;
    PMDL Mdl;
    NTSTATUS Status;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;
    ULARGE_INTEGER LargeSize;

    /* Write no further than the end of the section, and never more than one view */
    LargeSize.QuadPart = Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart;
    if (LargeSize.QuadPart > VACB_MAPPING_GRANULARITY)
    {
        LargeSize.QuadPart = VACB_MAPPING_GRANULARITY;
    }
    Size = LargeSize.LowPart;
    //
    // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
    // MmGlobalPageDirectory and the real system PDE directory. What a mess...
    //
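    //
    // The loop below touches every page of the view via MmGetPfnForProcess,
    // which forces the corresponding page directory entries to be
    // synchronized before the MDL transfer begins.
    //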
    {
        ULONG i = 0;
        do
        {
            MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
        } while (++i < (Size >> PAGE_SHIFT));
    }

    Size = ROUND_TO_PAGES(Size);
    ASSERT(Size <= VACB_MAPPING_GRANULARITY);
    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
    if (Mdl == NULL)
    {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    Status = STATUS_SUCCESS;
    _SEH2_TRY
    {
        MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
    }
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
    {
        Status = _SEH2_GetExceptionCode();
        DPRINT1("MmProbeAndLockPages failed with: %lx for %p (%p, %p)\n", Status, Mdl, Vacb, Vacb->BaseAddress);
        KeBugCheck(CACHE_MANAGER);
    }
    _SEH2_END;

    if (NT_SUCCESS(Status))
    {
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
        {
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;
        }

        MmUnlockPages(Mdl);
    }

    IoFreeMdl(Mdl);

    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
    {
        DPRINT1("IoSynchronousPageWrite failed, Status %x\n", Status);
        return Status;
    }

    return STATUS_SUCCESS;
}
static
NTSTATUS
ReadWriteOrZero(
    _Inout_ PVOID BaseAddress,
    _Inout_opt_ PVOID Buffer,
    _In_ ULONG Length,
    _In_ CC_COPY_OPERATION Operation)
{
    NTSTATUS Status = STATUS_SUCCESS;

    if (Operation == CcOperationZero)
    {
        /* Zero the cache view directly */
        RtlZeroMemory(BaseAddress, Length);
    }
    else
    {
        /* Copy guarded by SEH: Buffer may be a user-backed buffer */
        _SEH2_TRY
        {
            if (Operation == CcOperationWrite)
                RtlCopyMemory(BaseAddress, Buffer, Length);
            else
                RtlCopyMemory(Buffer, BaseAddress, Length);
        }
        _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
        {
            Status = _SEH2_GetExceptionCode();
        }
        _SEH2_END;
    }

    return Status;
}
static
BOOLEAN
CcCopyData (
    _In_ PFILE_OBJECT FileObject,
    _In_ LONGLONG FileOffset,
    _Inout_ PVOID Buffer,
    _In_ LONGLONG Length,
    _In_ CC_COPY_OPERATION Operation,
    _In_ BOOLEAN Wait,
    _Out_ PIO_STATUS_BLOCK IoStatus)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    ULONG BytesCopied;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PLIST_ENTRY ListEntry;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;
    PPRIVATE_CACHE_MAP PrivateCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    PrivateCacheMap = FileObject->PrivateCacheMap;
    CurrentOffset = FileOffset;
    BytesCopied = 0;
    if (!Wait)
    {
        /* Test whether the requested data is available without blocking */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        /* FIXME: this loop doesn't take into account areas that don't have
         * a VACB in the list yet */
        ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
        {
            Vacb = CONTAINING_RECORD(ListEntry,
                                     ROS_VACB,
                                     CacheMapVacbListEntry);
            ListEntry = ListEntry->Flink;
            if (!Vacb->Valid &&
                DoRangesIntersect(Vacb->FileOffset.QuadPart,
                                  VACB_MAPPING_GRANULARITY,
                                  CurrentOffset, Length))
            {
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
                /* Data not available */
                return FALSE;
            }
            if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
                break;
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
    }
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        /* The copy starts in the middle of a view: handle the head first */
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
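        /* Worked example (assuming 256 KB granularity): CurrentOffset 300000
         * gives 300000 % 262144 = 37856, so this head copy transfers
         * min(Length, 262144 - 37856) = min(Length, 224288) bytes and
         * realigns CurrentOffset to a view boundary. */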
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,
                                 Buffer,
                                 PartialLength,
                                 Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }
    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        /* Only page the data in when it is actually needed: a full-view
         * overwrite doesn't require a prior read */
        if (!Valid &&
            (Operation == CcOperationRead ||
             PartialLength < VACB_MAPPING_GRANULARITY))
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
            }
        }
        Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);
    }
    /* If that was a successful synchronous read, let's handle read ahead */
    if (Operation == CcOperationRead && Length == 0 && Wait)
    {
        /* If the file isn't random access and the next read may take us
         * across a VACB boundary, schedule the next read
         */
        if (!BooleanFlagOn(FileObject->Flags, FO_RANDOM_ACCESS) &&
            (CurrentOffset - 1) / VACB_MAPPING_GRANULARITY != (CurrentOffset + BytesCopied - 1) / VACB_MAPPING_GRANULARITY)
        {
            CcScheduleReadAhead(FileObject, (PLARGE_INTEGER)&FileOffset, BytesCopied);
        }
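        /* At this point CurrentOffset already equals FileOffset + BytesCopied,
         * so the test above asks whether a next read of the same size would
         * end in a different view than this one did. E.g. with 256 KB views,
         * a read that ended at 250000 and whose successor would reach 450000
         * crosses from view 0 into view 1 and triggers the scheduling. */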
        /* And update the read history in the private cache map */
        PrivateCacheMap->FileOffset1.QuadPart = PrivateCacheMap->FileOffset2.QuadPart;
        PrivateCacheMap->BeyondLastByte1.QuadPart = PrivateCacheMap->BeyondLastByte2.QuadPart;
        PrivateCacheMap->FileOffset2.QuadPart = FileOffset;
        PrivateCacheMap->BeyondLastByte2.QuadPart = FileOffset + BytesCopied;
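        /* The two (offset, beyond-last-byte) pairs kept here let the read
         * ahead logic recognize a sequential pattern across the last two
         * reads issued on this handle. */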
    }

    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = BytesCopied;

    return TRUE;
}
VOID
CcPostDeferredWrites(VOID)
{
    ULONG WrittenBytes;

    /* We'll try to write as much as we can */
    WrittenBytes = 0;
    while (TRUE)
    {
        KIRQL OldIrql;
        PLIST_ENTRY ListEntry;
        PDEFERRED_WRITE DeferredWrite;

        DeferredWrite = NULL;

        /* Lock our deferred writes list */
        KeAcquireSpinLock(&CcDeferredWriteSpinLock, &OldIrql);
        for (ListEntry = CcDeferredWrites.Flink;
             ListEntry != &CcDeferredWrites;
             ListEntry = ListEntry->Flink)
        {
            /* Extract an entry */
            DeferredWrite = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);

            /* Compute the modified bytes, based on what we already wrote */
            WrittenBytes += DeferredWrite->BytesToWrite;
            /* We overflowed, give up */
            if (WrittenBytes < DeferredWrite->BytesToWrite)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Check whether we can write */
            if (CcCanIWrite(DeferredWrite->FileObject, WrittenBytes, FALSE, RetryForceCheckPerFile))
            {
                /* We can, so remove it from the list and stop looking for an entry */
                RemoveEntryList(&DeferredWrite->DeferredWriteLinks);
                break;
            }

            /* If we don't accept modified pages, stop here */
            if (!DeferredWrite->LimitModifiedPages)
            {
                DeferredWrite = NULL;
                break;
            }

            /* Reset the count, as nothing was written yet */
            WrittenBytes -= DeferredWrite->BytesToWrite;
            DeferredWrite = NULL;
        }
        KeReleaseSpinLock(&CcDeferredWriteSpinLock, OldIrql);

        /* Nothing to write found, give up */
        if (DeferredWrite == NULL)
        {
            break;
        }

        /* If we have an event, set it and quit */
        if (DeferredWrite->Event)
        {
            KeSetEvent(DeferredWrite->Event, IO_NO_INCREMENT, FALSE);
        }
        /* Otherwise, call the write routine and free the context */
        else
        {
            DeferredWrite->PostRoutine(DeferredWrite->Context1, DeferredWrite->Context2);
            ExFreePoolWithTag(DeferredWrite, 'CcDw');
        }
    }
}
VOID
NTAPI
CcPerformReadAhead(
    IN PFILE_OBJECT FileObject)
{
    NTSTATUS Status;
    LONGLONG CurrentOffset;
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PROS_VACB Vacb;
    ULONG PartialLength;
    PVOID BaseAddress;
    BOOLEAN Valid;
    ULONG Length;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    BOOLEAN Locked;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    /* Critical:
     * PrivateCacheMap might disappear in-between if the handle
     * to the file is closed (private is attached to the handle, not to
     * the file), so we need to hold the master lock while we deal with
     * it. It cannot disappear without first acquiring that lock.
     */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    /* If the handle was closed since the read ahead was scheduled, just quit */
    if (PrivateCacheMap == NULL)
    {
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        ObDereferenceObject(FileObject);
        return;
    }
    /* Otherwise, extract the read offset and length, and release the private map */
    else
    {
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        CurrentOffset = PrivateCacheMap->ReadAheadOffset[1].QuadPart;
        Length = PrivateCacheMap->ReadAheadLength[1];
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    DPRINT("Doing ReadAhead for %p\n", FileObject);
    /* Lock the file, first */
    if (!SharedCacheMap->Callbacks->AcquireForReadAhead(SharedCacheMap->LazyWriteContext, FALSE))
    {
        Locked = FALSE;
        goto Clear;
    }

    /* Remember it's locked */
    Locked = TRUE;

    /* Don't read past the end of the file */
    if (CurrentOffset >= SharedCacheMap->FileSize.QuadPart)
    {
        goto Clear;
    }
    if (CurrentOffset + Length > SharedCacheMap->FileSize.QuadPart)
    {
        Length = SharedCacheMap->FileSize.QuadPart - CurrentOffset;
    }
    /* The rest of the algorithm works like CcCopyData, with the slight
     * difference that we don't copy data back to a user-backed buffer:
     * we just bring the data into Cc
     */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
    {
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                DPRINT1("Failed to read data: %lx!\n", Status);
                goto Clear;
            }
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }
    while (Length > 0)
    {
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  CurrentOffset,
                                  &BaseAddress,
                                  &Valid,
                                  &Vacb);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Failed to request VACB: %lx!\n", Status);
            goto Clear;
        }

        if (!Valid)
        {
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
            {
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                DPRINT1("Failed to read data: %lx!\n", Status);
                goto Clear;
            }
        }

        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
    }
Clear:
    /* See the previous comment about the private cache map */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    PrivateCacheMap = FileObject->PrivateCacheMap;
    if (PrivateCacheMap != NULL)
    {
        /* Mark read ahead as inactive */
        KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
        InterlockedAnd((volatile long *)&PrivateCacheMap->UlongFlags, ~PRIVATE_CACHE_MAP_READ_AHEAD_ACTIVE);
        KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap->ReadAheadSpinLock);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /* If the file was locked, release it */
    if (Locked)
    {
        SharedCacheMap->Callbacks->ReleaseFromReadAhead(SharedCacheMap->LazyWriteContext);
    }

    /* And drop our extra reference (See: CcScheduleReadAhead) */
    ObDereferenceObject(FileObject);
}
BOOLEAN
NTAPI
CcCanIWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG BytesToWrite,
    IN BOOLEAN Wait,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    KEVENT WaitEvent;
    ULONG Length, Pages;
    BOOLEAN PerFileDefer;
    DEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;
    CC_CAN_WRITE_RETRY TryContext;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
        FileObject, BytesToWrite, Wait, Retrying);

    /* Write through is always OK */
    if (BooleanFlagOn(FileObject->Flags, FO_WRITE_THROUGH))
    {
        return TRUE;
    }

    TryContext = Retrying;
    /* Allow remote files, unless the retry context forces the check */
    if (IoIsFileOriginRemote(FileObject) && TryContext < RetryAllowRemote)
    {
        return TRUE;
    }

    /* Don't exceed the max tolerated size */
    Length = MAX_ZERO_LENGTH;
    if (BytesToWrite < MAX_ZERO_LENGTH)
    {
        Length = BytesToWrite;
    }

    /* Convert it to a page count */
    Pages = (Length + PAGE_SIZE - 1) >> PAGE_SHIFT;
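    /* E.g. with PAGE_SIZE 4096, 5000 bytes round up to (5000 + 4095) >> 12
     * = 2 pages, and anything at or above MAX_ZERO_LENGTH counts as 64. */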
    /* By default, assume limits per file won't be hit */
    PerFileDefer = FALSE;
    Fcb = FileObject->FsContext;
    /* Do we have to check for limits per file? */
    if (TryContext >= RetryForceCheckPerFile ||
        BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES))
    {
        /* If the master is not locked, lock it now */
        if (TryContext != RetryMasterLocked)
        {
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        }

        /* Let's not assume the file is cached... */
        if (FileObject->SectionObjectPointer != NULL &&
            FileObject->SectionObjectPointer->SharedCacheMap != NULL)
        {
            SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
            /* Do we have limits per file set? */
            if (SharedCacheMap->DirtyPageThreshold != 0 &&
                SharedCacheMap->DirtyPages != 0)
            {
                /* Yes, check whether they are blocking */
                if (Pages + SharedCacheMap->DirtyPages > SharedCacheMap->DirtyPageThreshold)
                {
                    PerFileDefer = TRUE;
                }
            }
        }

        /* And don't forget to release the master lock */
        if (TryContext != RetryMasterLocked)
        {
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
        }
    }
    /* So, now allow the write if:
     * - This is not the first try, or we have no throttling yet
     * AND:
     * - We don't exceed the dirty page threshold
     * - We don't exceed what Mm can allow us to use
     *   + If we're above the top, that's fine
     *   + If we're above the bottom with limited modified pages, that's fine
     *   + Otherwise, throttle!
     */
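    /* Illustrative reading of the test below: with CcDirtyPageThreshold at,
     * say, 1000 pages, a 64-page request passes while CcTotalDirtyPages
     * stays under 936 and Mm still has pages above its throttle marks. */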
    if ((TryContext != FirstTry || IsListEmpty(&CcDeferredWrites)) &&
        CcTotalDirtyPages + Pages < CcDirtyPageThreshold &&
        (MmAvailablePages > MmThrottleTop ||
         (MmModifiedPageListHead.Total < 1000 && MmAvailablePages > MmThrottleBottom)) &&
        !PerFileDefer)
    {
        return TRUE;
    }

    /* We cannot write now. If the caller cannot wait, fail; otherwise enter
     * the wait loop until we can write for real
     */
    if (!Wait)
    {
        return FALSE;
    }

    /* Otherwise, if there are no deferred writes yet, start the lazy writer */
    if (IsListEmpty(&CcDeferredWrites))
    {
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        CcScheduleLazyWriteScan(TRUE);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }
    /* Initialize our wait event */
    KeInitializeEvent(&WaitEvent, NotificationEvent, FALSE);

    /* And prepare a dummy context */
    Context.NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context.NodeByteSize = sizeof(DEFERRED_WRITE);
    Context.FileObject = FileObject;
    Context.BytesToWrite = BytesToWrite;
    Context.LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
    Context.Event = &WaitEvent;
    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context.DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    DPRINT1("Actively deferring write for: %p\n", FileObject);
    /* Now, we'll loop until our event is set. When it is set, the caller
     * can, and must, immediately write
     */
    do
    {
        CcPostDeferredWrites();
    } while (KeWaitForSingleObject(&WaitEvent, Executive, KernelMode, FALSE, &CcIdleDelay) != STATUS_SUCCESS);

    return TRUE;
}
/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
        FileObject, FileOffset->QuadPart, Length, Wait);

    DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait,
           Buffer, IoStatus);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationRead,
                      Wait,
                      IoStatus);
}
/*
 * @implemented
 */
BOOLEAN
NTAPI
CcCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    IN PVOID Buffer)
{
    IO_STATUS_BLOCK IoStatus;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
        FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
                      Buffer,
                      Length,
                      CcOperationWrite,
                      Wait,
                      &IoStatus);
}
/*
 * @implemented
 */
VOID
NTAPI
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying)
{
    KIRQL OldIrql;
    PDEFERRED_WRITE Context;
    PFSRTL_COMMON_FCB_HEADER Fcb;

    CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
        FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);

    /* Try to allocate a context for queueing the write operation */
    Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(DEFERRED_WRITE), 'CcDw');
    /* If it failed, immediately execute the operation! */
    if (Context == NULL)
    {
        PostRoutine(Context1, Context2);
        return;
    }

    Fcb = FileObject->FsContext;

    /* Otherwise, initialize the context */
    RtlZeroMemory(Context, sizeof(DEFERRED_WRITE));
    Context->NodeTypeCode = NODE_TYPE_DEFERRED_WRITE;
    Context->NodeByteSize = sizeof(DEFERRED_WRITE);
    Context->FileObject = FileObject;
    Context->PostRoutine = PostRoutine;
    Context->Context1 = Context1;
    Context->Context2 = Context2;
    Context->BytesToWrite = BytesToWrite;
    Context->LimitModifiedPages = BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);

    /* And queue it */
    if (Retrying)
    {
        /* To the top, if that's a retry */
        ExInterlockedInsertHeadList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }
    else
    {
        /* To the bottom, if that's a first time */
        ExInterlockedInsertTailList(&CcDeferredWrites,
                                    &Context->DeferredWriteLinks,
                                    &CcDeferredWriteSpinLock);
    }

    /* Try to execute the posted writes */
    CcPostDeferredWrites();

    /* Schedule a lazy writer run to handle deferred writes */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}
/*
 * @implemented
 */
VOID
NTAPI
CcFastCopyRead (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN ULONG PageCount,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, PageCount, Buffer);

    DBG_UNREFERENCED_PARAMETER(PageCount);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyRead(FileObject,
                         &LargeFileOffset,
                         Length,
                         TRUE,
                         Buffer,
                         IoStatus);
    ASSERT(Success == TRUE);
}
/*
 * @implemented
 */
VOID
NTAPI
CcFastCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN PVOID Buffer)
{
    LARGE_INTEGER LargeFileOffset;
    BOOLEAN Success;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, Buffer);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyWrite(FileObject,
                          &LargeFileOffset,
                          Length,
                          TRUE,
                          Buffer);
    ASSERT(Success == TRUE);
}
/*
 * @implemented
 */
BOOLEAN
NTAPI
CcZeroData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER StartOffset,
    IN PLARGE_INTEGER EndOffset,
    IN BOOLEAN Wait)
{
    NTSTATUS Status;
    LARGE_INTEGER WriteOffset;
    LONGLONG Length;
    ULONG CurrentLength;
    PMDL Mdl;
    ULONG i;
    IO_STATUS_BLOCK Iosb;
    KEVENT Event;

    CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
        FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,
           Wait);

    Length = EndOffset->QuadPart - StartOffset->QuadPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
    {
        /* File is not cached */

        Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));

        while (Length > 0)
        {
            if (Length + WriteOffset.QuadPart % PAGE_SIZE > MAX_ZERO_LENGTH)
            {
                CurrentLength = MAX_ZERO_LENGTH - WriteOffset.QuadPart % PAGE_SIZE;
            }
            else
            {
                CurrentLength = Length;
            }
            MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
            Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
            for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
            {
                ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;
            }
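            /* Every PFN entry of the MDL points at the same CcZeroPage
             * frame, so a single zeroed physical page backs the whole
             * (up to MAX_ZERO_LENGTH) write issued below. */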
            KeInitializeEvent(&Event, NotificationEvent, FALSE);
            Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
            if (Status == STATUS_PENDING)
            {
                KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
                Status = Iosb.Status;
            }
            if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            {
                MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
            }
            if (!NT_SUCCESS(Status))
            {
                return FALSE;
            }
            WriteOffset.QuadPart += CurrentLength;
            Length -= CurrentLength;
        }
    }
    else
    {
        IO_STATUS_BLOCK IoStatus;

        return CcCopyData(FileObject,
                          WriteOffset.QuadPart,
                          NULL,
                          Length,
                          CcOperationZero,
                          Wait,
                          &IoStatus);
    }

    return TRUE;
}