2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/copy.c
5 * PURPOSE: Implements cache managers copy interface
7 * PROGRAMMERS: Some people?
8 * Pierre Schweitzer (pierre@reactos.org)
11 /* INCLUDES ******************************************************************/
17 /* GLOBALS *******************************************************************/
/* NOTE(review): this span is fragmentary — several original lines (enum
 * headers/bodies, comment delimiters) are missing; comments below describe
 * only what the visible declarations show. */
/* Physical page kept permanently zeroed; used as the backing page when
 * zero-filling uncached file data (filled into an MDL elsewhere in Cc). */
19 static PFN_NUMBER CcZeroPage
= 0;
/* Largest chunk handled per zeroing I/O. */
21 #define MAX_ZERO_LENGTH (256 * 1024)
23 typedef enum _CC_COPY_OPERATION
/* Retry states passed to CcCanIWrite; higher values mean more work was
 * already done by the caller (remote allowed / per-file check forced /
 * master lock already held). Enum header lines are missing here. */
30 typedef enum _CC_CAN_WRITE_RETRY
33 RetryAllowRemote
= 253,
34 RetryForceCheckPerFile
= 254,
35 RetryMasterLocked
= 255,
38 ULONG CcRosTraceLevel
= 0;
/* Fast-I/O statistics counters (uninitialized => zeroed BSS). */
39 ULONG CcFastMdlReadWait
;
40 ULONG CcFastMdlReadNotPossible
;
41 ULONG CcFastReadNotPossible
;
43 ULONG CcFastReadNoWait
;
44 ULONG CcFastReadResourceMiss
;
47 * - Amount of pages flushed to the disk
48 * - Number of flush operations
50 ULONG CcDataPages
= 0;
51 ULONG CcDataFlushes
= 0;
53 /* FUNCTIONS *****************************************************************/
/* Prototype fragment: the PFN parameter of a page-zeroing helper. */
58 IN PFN_NUMBER PageFrameIndex
/*
 * Allocates the global CcZeroPage from Mm (system memory consumer) and
 * zeroes it. A failure is fatal: the cache manager cannot run without
 * this page, so we bugcheck with CACHE_MANAGER.
 * NOTE(review): span is fragmentary — the function signature and braces
 * are missing from this view.
 */
68 MI_SET_USAGE(MI_USAGE_CACHE
);
69 //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
/* Request one page charged to MC_SYSTEM; second argument TRUE —
 * presumably "may block", TODO confirm against Mm. */
70 Status
= MmRequestPageMemoryConsumer(MC_SYSTEM
, TRUE
, &CcZeroPage
);
71 if (!NT_SUCCESS(Status
))
73 DbgPrint("Can't allocate CcZeroPage.\n");
74 KeBugCheck(CACHE_MANAGER
);
/* Ensure the page content really is all zeroes before first use. */
76 MiZeroPhysicalPage(CcZeroPage
);
/*
 * CcReadVirtualAddress
 * Fills a VACB's mapped window with data read from the backing file
 * (IoPageRead through the shared cache map's FileObject), then zero-fills
 * any tail of the window beyond the section size.
 * Returns STATUS_SUCCESS even when the read ended with STATUS_END_OF_FILE
 * (only logs other failures) — see the final unconditional return.
 * NOTE(review): span is fragmentary — parameter list, local declarations
 * (Mdl, Event, Pages, Size, Status) and braces are missing from this view.
 */
81 CcReadVirtualAddress (
87 IO_STATUS_BLOCK IoStatus
;
/* Bytes remaining in the section starting at this VACB's file offset,
 * clamped below to one mapping window. */
90 Size
= (ULONG
)(Vacb
->SharedCacheMap
->SectionSize
.QuadPart
- Vacb
->FileOffset
.QuadPart
);
91 if (Size
> VACB_MAPPING_GRANULARITY
)
93 Size
= VACB_MAPPING_GRANULARITY
;
96 Pages
= BYTES_TO_PAGES(Size
);
97 ASSERT(Pages
* PAGE_SIZE
<= VACB_MAPPING_GRANULARITY
);
/* Describe the VACB mapping with an MDL so the page reader can target it. */
99 Mdl
= IoAllocateMdl(Vacb
->BaseAddress
, Pages
* PAGE_SIZE
, FALSE
, FALSE
, NULL
);
102 return STATUS_INSUFFICIENT_RESOURCES
;
105 Status
= STATUS_SUCCESS
;
/* Lock the pages for write access (the read fills them). A probe failure
 * here is treated as fatal (bugcheck in the SEH handler below). */
108 MmProbeAndLockPages(Mdl
, KernelMode
, IoWriteAccess
);
110 _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER
)
112 Status
= _SEH2_GetExceptionCode();
113 KeBugCheck(CACHE_MANAGER
);
116 if (NT_SUCCESS(Status
))
118 Mdl
->MdlFlags
|= MDL_IO_PAGE_READ
;
119 KeInitializeEvent(&Event
, NotificationEvent
, FALSE
);
/* Issue the paging read and wait for completion if it went async. */
120 Status
= IoPageRead(Vacb
->SharedCacheMap
->FileObject
, Mdl
, &Vacb
->FileOffset
, &Event
, &IoStatus
);
121 if (Status
== STATUS_PENDING
)
123 KeWaitForSingleObject(&Event
, Executive
, KernelMode
, FALSE
, NULL
);
124 Status
= IoStatus
.Status
;
/* EOF is expected at the end of a file; only log other failures. */
132 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
134 DPRINT1("IoPageRead failed, Status %x\n", Status
);
/* Zero the remainder of the window that the file does not cover. */
138 if (Size
< VACB_MAPPING_GRANULARITY
)
140 RtlZeroMemory((char*)Vacb
->BaseAddress
+ Size
,
141 VACB_MAPPING_GRANULARITY
- Size
);
144 return STATUS_SUCCESS
;
/*
 * CcWriteVirtualAddress
 * Writes the valid part of a VACB's mapped window back to the file via
 * IoSynchronousPageWrite. Mirrors CcReadVirtualAddress: size is clamped
 * to the mapping window, and the final return is STATUS_SUCCESS even if
 * the write failed (failures other than EOF are only logged).
 * NOTE(review): span is fragmentary — parameter list, locals (Mdl, Event,
 * Size, Status, i) and braces are missing from this view.
 */
149 CcWriteVirtualAddress (
155 IO_STATUS_BLOCK IoStatus
;
158 Size
= (ULONG
)(Vacb
->SharedCacheMap
->SectionSize
.QuadPart
- Vacb
->FileOffset
.QuadPart
);
159 if (Size
> VACB_MAPPING_GRANULARITY
)
161 Size
= VACB_MAPPING_GRANULARITY
;
164 // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
165 // MmGlobalPageDirectory and the real system PDE directory. What a mess...
/* Touch each page's PFN — presumably to force the PDE synchronization
 * described above; TODO confirm against MmGetPfnForProcess. */
171 MmGetPfnForProcess(NULL
, (PVOID
)((ULONG_PTR
)Vacb
->BaseAddress
+ (i
<< PAGE_SHIFT
)));
172 } while (++i
< (Size
>> PAGE_SHIFT
));
175 Mdl
= IoAllocateMdl(Vacb
->BaseAddress
, Size
, FALSE
, FALSE
, NULL
);
178 return STATUS_INSUFFICIENT_RESOURCES
;
181 Status
= STATUS_SUCCESS
;
/* Lock pages for read access (the write reads from them); probe failure
 * is fatal (bugcheck in the SEH handler). */
184 MmProbeAndLockPages(Mdl
, KernelMode
, IoReadAccess
);
186 _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER
)
188 Status
= _SEH2_GetExceptionCode();
189 KeBugCheck(CACHE_MANAGER
);
192 if (NT_SUCCESS(Status
))
194 KeInitializeEvent(&Event
, NotificationEvent
, FALSE
);
/* Synchronous page write; wait on the event if it reports pending. */
195 Status
= IoSynchronousPageWrite(Vacb
->SharedCacheMap
->FileObject
, Mdl
, &Vacb
->FileOffset
, &Event
, &IoStatus
);
196 if (Status
== STATUS_PENDING
)
198 KeWaitForSingleObject(&Event
, Executive
, KernelMode
, FALSE
, NULL
);
199 Status
= IoStatus
.Status
;
205 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
207 DPRINT1("IoPageWrite failed, Status %x\n", Status
);
211 return STATUS_SUCCESS
;
/*
 * ReadWriteOrZero
 * Moves Length bytes between the cache mapping (BaseAddress) and a caller
 * buffer, or zeroes the mapping, according to Operation:
 *   CcOperationZero  -> zero BaseAddress
 *   CcOperationWrite -> copy Buffer into BaseAddress
 *   otherwise (read) -> copy BaseAddress into Buffer
 * The copies run under SEH; an access violation in the user buffer is
 * captured into Status instead of propagating.
 * NOTE(review): span is fragmentary — the function header, Length
 * parameter, braces and the final return are missing from this view.
 */
216 _Inout_ PVOID BaseAddress
,
217 _Inout_opt_ PVOID Buffer
,
219 _In_ CC_COPY_OPERATION Operation
)
221 NTSTATUS Status
= STATUS_SUCCESS
;
223 if (Operation
== CcOperationZero
)
/* Zeroing never touches the caller buffer. */
226 RtlZeroMemory(BaseAddress
, Length
);
232 if (Operation
== CcOperationWrite
)
233 RtlCopyMemory(BaseAddress
, Buffer
, Length
);
235 RtlCopyMemory(Buffer
, BaseAddress
, Length
);
237 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER
)
/* Faults (presumably from the user-backed buffer) become a status code. */
239 Status
= _SEH2_GetExceptionCode();
/*
 * CcCopyData
 * Core copy engine shared by CcCopyRead/CcCopyWrite/CcZeroData: walks the
 * requested file range one VACB window at a time, requesting each VACB,
 * faulting data in with CcReadVirtualAddress when needed, and moving bytes
 * with ReadWriteOrZero. Errors are raised with ExRaiseStatus (callers wrap
 * this in SEH). On a completed synchronous read it also schedules read
 * ahead and records the read history in the private cache map. Results
 * are reported through IoStatus (STATUS_SUCCESS + BytesCopied).
 * NOTE(review): span is fragmentary — the function name line, several
 * parameters (BaseAddress, Wait), locals (Vacb, PartialLength, BytesCopied,
 * OldIrql, Status), braces and some call arguments are missing from view.
 */
248 _In_ PFILE_OBJECT FileObject
,
249 _In_ LONGLONG FileOffset
,
250 _Inout_ PVOID Buffer
,
251 _In_ LONGLONG Length
,
252 _In_ CC_COPY_OPERATION Operation
,
254 _Out_ PIO_STATUS_BLOCK IoStatus
)
257 LONGLONG CurrentOffset
;
260 PROS_SHARED_CACHE_MAP SharedCacheMap
;
261 PLIST_ENTRY ListEntry
;
266 PPRIVATE_CACHE_MAP PrivateCacheMap
;
268 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
269 PrivateCacheMap
= FileObject
->PrivateCacheMap
;
270 CurrentOffset
= FileOffset
;
275 /* test if the requested data is available */
276 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &OldIrql
);
277 /* FIXME: this loop doesn't take into account areas that don't have
278 * a VACB in the list yet */
279 ListEntry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
280 while (ListEntry
!= &SharedCacheMap
->CacheMapVacbListHead
)
282 Vacb
= CONTAINING_RECORD(ListEntry
,
284 CacheMapVacbListEntry
);
285 ListEntry
= ListEntry
->Flink
;
287 DoRangesIntersect(Vacb
->FileOffset
.QuadPart
,
288 VACB_MAPPING_GRANULARITY
,
289 CurrentOffset
, Length
))
291 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, OldIrql
);
292 /* data not available */
/* VACBs are kept in file-offset order: once past the range, stop. */
295 if (Vacb
->FileOffset
.QuadPart
>= CurrentOffset
+ Length
)
298 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, OldIrql
);
/* First, handle an unaligned head: the part of the request up to the
 * next VACB_MAPPING_GRANULARITY boundary. */
301 PartialLength
= CurrentOffset
% VACB_MAPPING_GRANULARITY
;
302 if (PartialLength
!= 0)
304 PartialLength
= min(Length
, VACB_MAPPING_GRANULARITY
- PartialLength
);
305 Status
= CcRosRequestVacb(SharedCacheMap
,
306 ROUND_DOWN(CurrentOffset
,
307 VACB_MAPPING_GRANULARITY
),
311 if (!NT_SUCCESS(Status
))
312 ExRaiseStatus(Status
);
315 Status
= CcReadVirtualAddress(Vacb
);
316 if (!NT_SUCCESS(Status
))
318 CcRosReleaseVacb(SharedCacheMap
, Vacb
, FALSE
, FALSE
, FALSE
);
319 ExRaiseStatus(Status
);
322 Status
= ReadWriteOrZero((PUCHAR
)BaseAddress
+ CurrentOffset
% VACB_MAPPING_GRANULARITY
,
/* Release the VACB; it is marked dirty for anything but a read. */
327 CcRosReleaseVacb(SharedCacheMap
, Vacb
, TRUE
, Operation
!= CcOperationRead
, FALSE
);
329 if (!NT_SUCCESS(Status
))
330 ExRaiseStatus(STATUS_INVALID_USER_BUFFER
);
332 Length
-= PartialLength
;
333 CurrentOffset
+= PartialLength
;
334 BytesCopied
+= PartialLength
;
/* Zeroing has no source buffer to advance. */
336 if (Operation
!= CcOperationZero
)
337 Buffer
= (PVOID
)((ULONG_PTR
)Buffer
+ PartialLength
);
/* Main loop: now aligned, process one full (or final partial) window
 * per iteration. */
342 ASSERT(CurrentOffset
% VACB_MAPPING_GRANULARITY
== 0);
343 PartialLength
= min(VACB_MAPPING_GRANULARITY
, Length
);
344 Status
= CcRosRequestVacb(SharedCacheMap
,
349 if (!NT_SUCCESS(Status
))
350 ExRaiseStatus(Status
);
/* Fault data in when reading, or when writing only part of the window
 * (a full-window write overwrites everything anyway). */
352 (Operation
== CcOperationRead
||
353 PartialLength
< VACB_MAPPING_GRANULARITY
))
355 Status
= CcReadVirtualAddress(Vacb
);
356 if (!NT_SUCCESS(Status
))
358 CcRosReleaseVacb(SharedCacheMap
, Vacb
, FALSE
, FALSE
, FALSE
);
359 ExRaiseStatus(Status
);
362 Status
= ReadWriteOrZero(BaseAddress
, Buffer
, PartialLength
, Operation
);
364 CcRosReleaseVacb(SharedCacheMap
, Vacb
, TRUE
, Operation
!= CcOperationRead
, FALSE
);
366 if (!NT_SUCCESS(Status
))
367 ExRaiseStatus(STATUS_INVALID_USER_BUFFER
);
369 Length
-= PartialLength
;
370 CurrentOffset
+= PartialLength
;
371 BytesCopied
+= PartialLength
;
373 if (Operation
!= CcOperationZero
)
374 Buffer
= (PVOID
)((ULONG_PTR
)Buffer
+ PartialLength
);
377 /* If that was a successful sync read operation, let's handle read ahead */
378 if (Operation
== CcOperationRead
&& Length
== 0 && Wait
)
380 /* If file isn't random access and next read may get us cross VACB boundary,
383 if (!BooleanFlagOn(FileObject
->Flags
, FO_RANDOM_ACCESS
) &&
384 (CurrentOffset
- 1) / VACB_MAPPING_GRANULARITY
!= (CurrentOffset
+ BytesCopied
- 1) / VACB_MAPPING_GRANULARITY
)
386 CcScheduleReadAhead(FileObject
, (PLARGE_INTEGER
)&FileOffset
, BytesCopied
);
389 /* And update read history in private cache map */
390 PrivateCacheMap
->FileOffset1
.QuadPart
= PrivateCacheMap
->FileOffset2
.QuadPart
;
391 PrivateCacheMap
->BeyondLastByte1
.QuadPart
= PrivateCacheMap
->BeyondLastByte2
.QuadPart
;
392 PrivateCacheMap
->FileOffset2
.QuadPart
= FileOffset
;
393 PrivateCacheMap
->BeyondLastByte2
.QuadPart
= FileOffset
+ BytesCopied
;
/* Report success and the number of bytes actually moved. */
396 IoStatus
->Status
= STATUS_SUCCESS
;
397 IoStatus
->Information
= BytesCopied
;
/*
 * CcPostDeferredWrites
 * Drains the CcDeferredWrites queue: under the queue spinlock it scans for
 * the first entry whose write CcCanIWrite now permits, removes it, and —
 * outside the lock — either signals the entry's wait event (a blocked
 * CcCanIWrite caller) or invokes its PostRoutine and frees the context.
 * NOTE(review): span is fragmentary — the outer retry loop, WrittenBytes/
 * OldIrql declarations and braces are missing from this view.
 */
402 CcPostDeferredWrites(VOID
)
406 /* We'll try to write as much as we can */
411 PLIST_ENTRY ListEntry
;
412 PDEFERRED_WRITE DeferredWrite
;
414 DeferredWrite
= NULL
;
416 /* Lock our deferred writes list */
417 KeAcquireSpinLock(&CcDeferredWriteSpinLock
, &OldIrql
);
418 for (ListEntry
= CcDeferredWrites
.Flink
;
419 ListEntry
!= &CcDeferredWrites
;
420 ListEntry
= ListEntry
->Flink
)
422 /* Extract an entry */
423 DeferredWrite
= CONTAINING_RECORD(ListEntry
, DEFERRED_WRITE
, DeferredWriteLinks
);
425 /* Compute the modified bytes, based on what we already wrote */
426 WrittenBytes
+= DeferredWrite
->BytesToWrite
;
427 /* We overflowed, give up */
428 if (WrittenBytes
< DeferredWrite
->BytesToWrite
)
430 DeferredWrite
= NULL
;
434 /* Check we can write */
435 if (CcCanIWrite(DeferredWrite
->FileObject
, WrittenBytes
, FALSE
, RetryForceCheckPerFile
))
437 /* We can, so remove it from the list and stop looking for entry */
438 RemoveEntryList(&DeferredWrite
->DeferredWriteLinks
);
442 /* If we don't accept modified pages, stop here */
443 if (!DeferredWrite
->LimitModifiedPages
)
445 DeferredWrite
= NULL
;
449 /* Reset count as nothing was written yet */
450 WrittenBytes
-= DeferredWrite
->BytesToWrite
;
451 DeferredWrite
= NULL
;
453 KeReleaseSpinLock(&CcDeferredWriteSpinLock
, OldIrql
);
455 /* Nothing to write found, give up */
456 if (DeferredWrite
== NULL
)
461 /* If we have an event, set it and quit */
462 if (DeferredWrite
->Event
)
464 KeSetEvent(DeferredWrite
->Event
, IO_NO_INCREMENT
, FALSE
);
466 /* Otherwise, call the write routine and free the context */
469 DeferredWrite
->PostRoutine(DeferredWrite
->Context1
, DeferredWrite
->Context2
);
470 ExFreePoolWithTag(DeferredWrite
, 'CcDw');
/*
 * CcPerformReadAhead
 * Worker for a previously scheduled read ahead: pulls the target offset
 * and length from the private cache map (under the master queued spinlock,
 * because the private map lives on the handle and can vanish on close),
 * acquires the FS read-ahead lock, clamps the range to the file size, then
 * brings the data into the cache VACB by VACB exactly like CcCopyData —
 * but without copying anything back to a caller buffer. On exit it clears
 * PRIVATE_CACHE_MAP_READ_AHEAD_ACTIVE and drops the FileObject reference
 * taken by CcScheduleReadAhead.
 * NOTE(review): span is fragmentary — locals (Length, Vacb, PartialLength,
 * Status, OldIrql, the "locked" flag), braces and several early-exit paths
 * are missing from this view.
 */
477 IN PFILE_OBJECT FileObject
)
480 LONGLONG CurrentOffset
;
482 PROS_SHARED_CACHE_MAP SharedCacheMap
;
488 PPRIVATE_CACHE_MAP PrivateCacheMap
;
491 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
494 * PrivateCacheMap might disappear in-between if the handle
495 * to the file is closed (private is attached to the handle not to
496 * the file), so we need to lock the master lock while we deal with
497 * it. It won't disappear without attempting to lock such lock.
499 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
500 PrivateCacheMap
= FileObject
->PrivateCacheMap
;
501 /* If the handle was closed since the read ahead was scheduled, just quit */
502 if (PrivateCacheMap
== NULL
)
504 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
505 ObDereferenceObject(FileObject
);
508 /* Otherwise, extract read offset and length and release private map */
511 KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap
->ReadAheadSpinLock
);
512 CurrentOffset
= PrivateCacheMap
->ReadAheadOffset
[1].QuadPart
;
513 Length
= PrivateCacheMap
->ReadAheadLength
[1];
514 KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap
->ReadAheadSpinLock
);
516 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
519 DPRINT("Doing ReadAhead for %p\n", FileObject
);
520 /* Lock the file, first */
521 if (!SharedCacheMap
->Callbacks
->AcquireForReadAhead(SharedCacheMap
->LazyWriteContext
, FALSE
))
527 /* Remember it's locked */
530 /* Don't read past the end of the file */
531 if (CurrentOffset
>= SharedCacheMap
->FileSize
.QuadPart
)
535 if (CurrentOffset
+ Length
> SharedCacheMap
->FileSize
.QuadPart
)
537 Length
= SharedCacheMap
->FileSize
.QuadPart
- CurrentOffset
;
540 /* Next of the algorithm will lock like CcCopyData with the slight
541 * difference that we don't copy data back to an user-backed buffer
542 * We just bring data into Cc
/* Unaligned head: fill up to the next VACB boundary first. */
544 PartialLength
= CurrentOffset
% VACB_MAPPING_GRANULARITY
;
545 if (PartialLength
!= 0)
547 PartialLength
= min(Length
, VACB_MAPPING_GRANULARITY
- PartialLength
);
548 Status
= CcRosRequestVacb(SharedCacheMap
,
549 ROUND_DOWN(CurrentOffset
,
550 VACB_MAPPING_GRANULARITY
),
554 if (!NT_SUCCESS(Status
))
556 DPRINT1("Failed to request VACB: %lx!\n", Status
);
562 Status
= CcReadVirtualAddress(Vacb
);
563 if (!NT_SUCCESS(Status
))
565 CcRosReleaseVacb(SharedCacheMap
, Vacb
, FALSE
, FALSE
, FALSE
);
566 DPRINT1("Failed to read data: %lx!\n", Status
);
/* Release valid, not dirty: read ahead never dirties pages. */
571 CcRosReleaseVacb(SharedCacheMap
, Vacb
, TRUE
, FALSE
, FALSE
);
573 Length
-= PartialLength
;
574 CurrentOffset
+= PartialLength
;
/* Main loop over aligned windows. */
579 ASSERT(CurrentOffset
% VACB_MAPPING_GRANULARITY
== 0);
580 PartialLength
= min(VACB_MAPPING_GRANULARITY
, Length
);
581 Status
= CcRosRequestVacb(SharedCacheMap
,
586 if (!NT_SUCCESS(Status
))
588 DPRINT1("Failed to request VACB: %lx!\n", Status
);
594 Status
= CcReadVirtualAddress(Vacb
);
595 if (!NT_SUCCESS(Status
))
597 CcRosReleaseVacb(SharedCacheMap
, Vacb
, FALSE
, FALSE
, FALSE
);
598 DPRINT1("Failed to read data: %lx!\n", Status
);
603 CcRosReleaseVacb(SharedCacheMap
, Vacb
, TRUE
, FALSE
, FALSE
);
605 Length
-= PartialLength
;
606 CurrentOffset
+= PartialLength
;
610 /* See previous comment about private cache map */
611 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
612 PrivateCacheMap
= FileObject
->PrivateCacheMap
;
613 if (PrivateCacheMap
!= NULL
)
615 /* Mark read ahead as unactive */
616 KeAcquireSpinLockAtDpcLevel(&PrivateCacheMap
->ReadAheadSpinLock
);
617 InterlockedAnd((volatile long *)&PrivateCacheMap
->UlongFlags
, ~PRIVATE_CACHE_MAP_READ_AHEAD_ACTIVE
);
618 KeReleaseSpinLockFromDpcLevel(&PrivateCacheMap
->ReadAheadSpinLock
);
620 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
622 /* If file was locked, release it */
625 SharedCacheMap
->Callbacks
->ReleaseFromReadAhead(SharedCacheMap
->LazyWriteContext
);
628 /* And drop our extra reference (See: CcScheduleReadAhead) */
629 ObDereferenceObject(FileObject
);
/*
 * CcCanIWrite
 * Write-throttling check: decides whether a write of BytesToWrite to
 * FileObject may proceed now. Write-through files always pass. Otherwise
 * checks per-file dirty-page limits (under the master queued spinlock,
 * unless the caller already holds it — RetryMasterLocked) and the global
 * thresholds (CcDirtyPageThreshold, Mm throttle levels). When the write
 * must be deferred and Wait is allowed, queues a stack-based DEFERRED_WRITE
 * context with an event on CcDeferredWrites and loops on
 * CcPostDeferredWrites until the event is signaled.
 * NOTE(review): span is fragmentary — parameters Wait/Retrying, locals
 * (Length, Pages, OldIrql, WaitEvent), several return statements and
 * braces are missing from this view.
 */
640 IN PFILE_OBJECT FileObject
,
641 IN ULONG BytesToWrite
,
648 BOOLEAN PerFileDefer
;
649 DEFERRED_WRITE Context
;
650 PFSRTL_COMMON_FCB_HEADER Fcb
;
651 CC_CAN_WRITE_RETRY TryContext
;
652 PROS_SHARED_CACHE_MAP SharedCacheMap
;
654 CCTRACE(CC_API_DEBUG
, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
655 FileObject
, BytesToWrite
, Wait
, Retrying
);
657 /* Write through is always OK */
658 if (BooleanFlagOn(FileObject
->Flags
, FO_WRITE_THROUGH
))
663 TryContext
= Retrying
;
664 /* Allow remote file if not from posted */
665 if (IoIsFileOriginRemote(FileObject
) && TryContext
< RetryAllowRemote
)
670 /* Don't exceed max tolerated size */
671 Length
= MAX_ZERO_LENGTH
;
672 if (BytesToWrite
< MAX_ZERO_LENGTH
)
674 Length
= BytesToWrite
;
677 /* Convert it to pages count */
678 Pages
= (Length
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
;
680 /* By default, assume limits per file won't be hit */
681 PerFileDefer
= FALSE
;
682 Fcb
= FileObject
->FsContext
;
683 /* Do we have to check for limits per file? */
684 if (TryContext
>= RetryForceCheckPerFile
||
685 BooleanFlagOn(Fcb
->Flags
, FSRTL_FLAG_LIMIT_MODIFIED_PAGES
))
687 /* If master is not locked, lock it now */
688 if (TryContext
!= RetryMasterLocked
)
690 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
693 /* Let's not assume the file is cached... */
694 if (FileObject
->SectionObjectPointer
!= NULL
&&
695 FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
697 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
698 /* Do we have limits per file set? */
699 if (SharedCacheMap
->DirtyPageThreshold
!= 0 &&
700 SharedCacheMap
->DirtyPages
!= 0)
702 /* Yes, check whether they are blocking */
703 if (Pages
+ SharedCacheMap
->DirtyPages
> SharedCacheMap
->DirtyPageThreshold
)
710 /* And don't forget to release master */
711 if (TryContext
!= RetryMasterLocked
)
713 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
717 /* So, now allow write if:
718 * - Not the first try or we have no throttling yet
720 * - We don't exceed threshold!
721 * - We don't exceed what Mm can allow us to use
722 * + If we're above top, that's fine
723 * + If we're above bottom with limited modified pages, that's fine
724 * + Otherwise, throttle!
726 if ((TryContext
!= FirstTry
|| IsListEmpty(&CcDeferredWrites
)) &&
727 CcTotalDirtyPages
+ Pages
< CcDirtyPageThreshold
&&
728 (MmAvailablePages
> MmThrottleTop
||
729 (MmModifiedPageListHead
.Total
< 1000 && MmAvailablePages
> MmThrottleBottom
)) &&
735 /* If we can wait, we'll start the wait loop for waiting till we can
743 /* Otherwise, if there are no deferred writes yet, start the lazy writer */
744 if (IsListEmpty(&CcDeferredWrites
))
748 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
749 CcScheduleLazyWriteScan(TRUE
);
750 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
753 /* Initialize our wait event */
754 KeInitializeEvent(&WaitEvent
, NotificationEvent
, FALSE
);
756 /* And prepare a dummy context */
757 Context
.NodeTypeCode
= NODE_TYPE_DEFERRED_WRITE
;
758 Context
.NodeByteSize
= sizeof(DEFERRED_WRITE
);
759 Context
.FileObject
= FileObject
;
760 Context
.BytesToWrite
= BytesToWrite
;
761 Context
.LimitModifiedPages
= BooleanFlagOn(Fcb
->Flags
, FSRTL_FLAG_LIMIT_MODIFIED_PAGES
);
762 Context
.Event
= &WaitEvent
;
767 /* To the top, if that's a retry */
768 ExInterlockedInsertHeadList(&CcDeferredWrites
,
769 &Context
.DeferredWriteLinks
,
770 &CcDeferredWriteSpinLock
);
774 /* To the bottom, if that's a first time */
775 ExInterlockedInsertTailList(&CcDeferredWrites
,
776 &Context
.DeferredWriteLinks
,
777 &CcDeferredWriteSpinLock
);
780 /* Now, we'll loop until our event is set. When it is set, it means that caller
781 * can immediately write, and has to
/* Poll: drain deferred writes, then wait up to CcIdleDelay for our
 * event; repeat until the event is actually signaled. */
785 CcPostDeferredWrites();
786 } while (KeWaitForSingleObject(&WaitEvent
, Executive
, KernelMode
, FALSE
, &CcIdleDelay
) != STATUS_SUCCESS
);
/*
 * CcCopyRead
 * Public copy-read API: traces the call, then delegates to CcCopyData
 * with CcOperationRead.
 * NOTE(review): span is fragmentary — the function name/return lines,
 * Buffer/Wait/IoStatus forwarding and the closing of the CcCopyData call
 * are missing from this view.
 */
797 IN PFILE_OBJECT FileObject
,
798 IN PLARGE_INTEGER FileOffset
,
802 OUT PIO_STATUS_BLOCK IoStatus
)
804 CCTRACE(CC_API_DEBUG
, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
805 FileObject
, FileOffset
->QuadPart
, Length
, Wait
);
807 DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
808 "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
809 FileObject
, FileOffset
->QuadPart
, Length
, Wait
,
812 return CcCopyData(FileObject
,
813 FileOffset
->QuadPart
,
/*
 * CcCopyWrite
 * Public copy-write API: traces the call, then delegates to CcCopyData
 * with a local IO_STATUS_BLOCK (the caller gets only the BOOLEAN result).
 * NOTE(review): span is fragmentary — the function name/return lines and
 * the remaining CcCopyData arguments are missing from this view.
 */
827 IN PFILE_OBJECT FileObject
,
828 IN PLARGE_INTEGER FileOffset
,
833 IO_STATUS_BLOCK IoStatus
;
835 CCTRACE(CC_API_DEBUG
, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
836 FileObject
, FileOffset
->QuadPart
, Length
, Wait
, Buffer
);
838 DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
839 "Length %lu, Wait %u, Buffer 0x%p)\n",
840 FileObject
, FileOffset
->QuadPart
, Length
, Wait
, Buffer
);
842 return CcCopyData(FileObject
,
843 FileOffset
->QuadPart
,
/*
 * CcDeferWrite
 * Queues a write to be executed later: allocates a DEFERRED_WRITE context
 * ('CcDw' tag, nonpaged pool) describing PostRoutine/Context1/Context2 and
 * inserts it on CcDeferredWrites (head on retry, tail otherwise). If the
 * allocation fails, the operation is executed immediately instead of being
 * deferred. Afterwards it tries to drain the queue and makes sure a lazy
 * writer scan is scheduled.
 * NOTE(review): span is fragmentary — Context1/Context2/Retrying
 * parameters, the allocation-failure test, OldIrql declaration and braces
 * are missing from this view.
 */
857 IN PFILE_OBJECT FileObject
,
858 IN PCC_POST_DEFERRED_WRITE PostRoutine
,
861 IN ULONG BytesToWrite
,
865 PDEFERRED_WRITE Context
;
866 PFSRTL_COMMON_FCB_HEADER Fcb
;
868 CCTRACE(CC_API_DEBUG
, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
869 FileObject
, PostRoutine
, Context1
, Context2
, BytesToWrite
, Retrying
);
871 /* Try to allocate a context for queueing the write operation */
872 Context
= ExAllocatePoolWithTag(NonPagedPool
, sizeof(DEFERRED_WRITE
), 'CcDw');
873 /* If it failed, immediately execute the operation! */
876 PostRoutine(Context1
, Context2
);
880 Fcb
= FileObject
->FsContext
;
882 /* Otherwise, initialize the context */
883 RtlZeroMemory(Context
, sizeof(DEFERRED_WRITE
));
884 Context
->NodeTypeCode
= NODE_TYPE_DEFERRED_WRITE
;
885 Context
->NodeByteSize
= sizeof(DEFERRED_WRITE
);
886 Context
->FileObject
= FileObject
;
887 Context
->PostRoutine
= PostRoutine
;
888 Context
->Context1
= Context1
;
889 Context
->Context2
= Context2
;
890 Context
->BytesToWrite
= BytesToWrite
;
891 Context
->LimitModifiedPages
= BooleanFlagOn(Fcb
->Flags
, FSRTL_FLAG_LIMIT_MODIFIED_PAGES
);
896 /* To the top, if that's a retry */
897 ExInterlockedInsertHeadList(&CcDeferredWrites
,
898 &Context
->DeferredWriteLinks
,
899 &CcDeferredWriteSpinLock
);
903 /* To the bottom, if that's a first time */
904 ExInterlockedInsertTailList(&CcDeferredWrites
,
905 &Context
->DeferredWriteLinks
,
906 &CcDeferredWriteSpinLock
);
909 /* Try to execute the posted writes */
910 CcPostDeferredWrites();
912 /* Schedule a lazy writer run to handle deferred writes */
913 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
914 if (!LazyWriter
.ScanActive
)
916 CcScheduleLazyWriteScan(FALSE
);
918 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
/*
 * CcFastCopyRead
 * Fast-path read wrapper: widens the ULONG FileOffset to a LARGE_INTEGER
 * and forwards to CcCopyRead; PageCount is unused. The CcCopyRead result
 * is asserted TRUE (fast path is only taken when the data is resident).
 * NOTE(review): span is fragmentary — remaining parameters, locals
 * (Success) and the rest of the CcCopyRead argument list are missing
 * from this view.
 */
927 IN PFILE_OBJECT FileObject
,
932 OUT PIO_STATUS_BLOCK IoStatus
)
934 LARGE_INTEGER LargeFileOffset
;
937 CCTRACE(CC_API_DEBUG
, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
938 FileObject
, FileOffset
, Length
, PageCount
, Buffer
);
940 DBG_UNREFERENCED_PARAMETER(PageCount
);
942 LargeFileOffset
.QuadPart
= FileOffset
;
943 Success
= CcCopyRead(FileObject
,
949 ASSERT(Success
== TRUE
);
/*
 * CcFastCopyWrite
 * Fast-path write wrapper: widens the ULONG FileOffset to a LARGE_INTEGER
 * and forwards to CcCopyWrite, asserting the result is TRUE.
 * NOTE(review): span is fragmentary — remaining parameters, locals
 * (Success) and the rest of the CcCopyWrite argument list are missing
 * from this view.
 */
958 IN PFILE_OBJECT FileObject
,
963 LARGE_INTEGER LargeFileOffset
;
966 CCTRACE(CC_API_DEBUG
, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
967 FileObject
, FileOffset
, Length
, Buffer
);
969 LargeFileOffset
.QuadPart
= FileOffset
;
970 Success
= CcCopyWrite(FileObject
,
975 ASSERT(Success
== TRUE
);
984 IN PFILE_OBJECT FileObject
,
985 IN PLARGE_INTEGER StartOffset
,
986 IN PLARGE_INTEGER EndOffset
,
990 LARGE_INTEGER WriteOffset
;
995 IO_STATUS_BLOCK Iosb
;
998 CCTRACE(CC_API_DEBUG
, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
999 FileObject
, StartOffset
->QuadPart
, EndOffset
->QuadPart
, Wait
);
1001 DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
1002 "Wait %u)\n", FileObject
, StartOffset
->QuadPart
, EndOffset
->QuadPart
,
1005 Length
= EndOffset
->QuadPart
- StartOffset
->QuadPart
;
1006 WriteOffset
.QuadPart
= StartOffset
->QuadPart
;
1008 if (FileObject
->SectionObjectPointer
->SharedCacheMap
== NULL
)
1010 /* File is not cached */
1012 Mdl
= _alloca(MmSizeOfMdl(NULL
, MAX_ZERO_LENGTH
));
1016 if (Length
+ WriteOffset
.QuadPart
% PAGE_SIZE
> MAX_ZERO_LENGTH
)
1018 CurrentLength
= MAX_ZERO_LENGTH
- WriteOffset
.QuadPart
% PAGE_SIZE
;
1022 CurrentLength
= Length
;
1024 MmInitializeMdl(Mdl
, (PVOID
)(ULONG_PTR
)WriteOffset
.QuadPart
, CurrentLength
);
1025 Mdl
->MdlFlags
|= (MDL_PAGES_LOCKED
| MDL_IO_PAGE_READ
);
1026 for (i
= 0; i
< ((Mdl
->Size
- sizeof(MDL
)) / sizeof(ULONG
)); i
++)
1028 ((PPFN_NUMBER
)(Mdl
+ 1))[i
] = CcZeroPage
;
1030 KeInitializeEvent(&Event
, NotificationEvent
, FALSE
);
1031 Status
= IoSynchronousPageWrite(FileObject
, Mdl
, &WriteOffset
, &Event
, &Iosb
);
1032 if (Status
== STATUS_PENDING
)
1034 KeWaitForSingleObject(&Event
, Executive
, KernelMode
, FALSE
, NULL
);
1035 Status
= Iosb
.Status
;
1037 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
1039 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
1041 if (!NT_SUCCESS(Status
))
1045 WriteOffset
.QuadPart
+= CurrentLength
;
1046 Length
-= CurrentLength
;
1051 IO_STATUS_BLOCK IoStatus
;
1053 return CcCopyData(FileObject
,
1054 WriteOffset
.QuadPart
,