2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/copy.c
5 * PURPOSE: Implements cache managers copy interface
7 * PROGRAMMERS: Some people?
8 * Pierre Schweitzer (pierre@reactos.org)
11 /* INCLUDES ******************************************************************/
17 /* GLOBALS *******************************************************************/
19 static PFN_NUMBER CcZeroPage
= 0;
21 #define MAX_ZERO_LENGTH (256 * 1024)
23 typedef enum _CC_COPY_OPERATION
30 ULONG CcRosTraceLevel
= 0;
31 ULONG CcFastMdlReadWait
;
32 ULONG CcFastMdlReadNotPossible
;
33 ULONG CcFastReadNotPossible
;
35 ULONG CcFastReadNoWait
;
36 ULONG CcFastReadResourceMiss
;
38 extern KEVENT iLazyWriterNotify
;
40 /* FUNCTIONS *****************************************************************/
IN PFN_NUMBER PageFrameIndex
    /* Allocate one physical page from the system memory consumer and
     * zero it. This page (CcZeroPage) is kept for the lifetime of the
     * system and reused by CcZeroData for non-cached zeroing writes.
     * Allocation failure is fatal (bugcheck). */
    MI_SET_USAGE(MI_USAGE_CACHE);
    //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
    Status = MmRequestPageMemoryConsumer(MC_SYSTEM, TRUE, &CcZeroPage);
    if (!NT_SUCCESS(Status))
        DbgPrint("Can't allocate CcZeroPage.\n");
        KeBugCheck(CACHE_MANAGER);
    MiZeroPhysicalPage(CcZeroPage);
/*
 * CcReadVirtualAddress
 * Fills a VACB's mapping with data read from the backing file: builds
 * an MDL over the VACB's base address, issues a paging read
 * (IoPageRead), and zero-fills any tail of the mapping that lies
 * beyond the section size.
 * NOTE(review): read failures other than STATUS_END_OF_FILE are only
 * logged via DPRINT1 — the function still returns STATUS_SUCCESS at
 * the end; confirm callers rely on this best-effort behavior.
 */
CcReadVirtualAddress (
    IO_STATUS_BLOCK IoStatus;

    /* Bytes remaining in the section from this VACB's file offset,
     * clipped to one mapping granularity. */
    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
        Size = VACB_MAPPING_GRANULARITY;

    Pages = BYTES_TO_PAGES(Size);
    ASSERT(Pages * PAGE_SIZE <= VACB_MAPPING_GRANULARITY);

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Pages * PAGE_SIZE, FALSE, FALSE, NULL);
        return STATUS_INSUFFICIENT_RESOURCES;

    Status = STATUS_SUCCESS;
    /* Lock the pages for write access — the paging read fills them.
     * A probe/lock exception here is treated as unrecoverable. */
    MmProbeAndLockPages(Mdl, KernelMode, IoWriteAccess);
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);

    if (NT_SUCCESS(Status))
        Mdl->MdlFlags |= MDL_IO_PAGE_READ;
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoPageRead(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
            /* Wait for the paging I/O to complete and take its final status. */
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;

    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
        DPRINT1("IoPageRead failed, Status %x\n", Status);

    /* Zero the remainder of the mapping that has no backing data. */
    if (Size < VACB_MAPPING_GRANULARITY)
        RtlZeroMemory((char*)Vacb->BaseAddress + Size,
                      VACB_MAPPING_GRANULARITY - Size);

    return STATUS_SUCCESS;
/*
 * CcWriteVirtualAddress
 * Writes a VACB's mapped data back to the backing file via a
 * synchronous paging write (IoSynchronousPageWrite) through an MDL
 * over the VACB's mapping.
 * NOTE(review): write failures other than STATUS_END_OF_FILE are only
 * logged via DPRINT1 — the function still returns STATUS_SUCCESS at
 * the end; confirm callers rely on this best-effort behavior.
 */
CcWriteVirtualAddress (
    IO_STATUS_BLOCK IoStatus;

    /* Bytes of the section covered by this VACB, clipped to one
     * mapping granularity. */
    Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
    if (Size > VACB_MAPPING_GRANULARITY)
        Size = VACB_MAPPING_GRANULARITY;

    // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
    // MmGlobalPageDirectory and the real system PDE directory. What a mess...
        /* Touch each page of the mapping — presumably to force the PDE
         * synchronization described above; verify against Mm code. */
        MmGetPfnForProcess(NULL, (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i << PAGE_SHIFT)));
    } while (++i < (Size >> PAGE_SHIFT));

    Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
        return STATUS_INSUFFICIENT_RESOURCES;

    Status = STATUS_SUCCESS;
    /* Lock the pages for read access — the paging write reads them out.
     * A probe/lock exception here is treated as unrecoverable. */
    MmProbeAndLockPages(Mdl, KernelMode, IoReadAccess);
    _SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
        Status = _SEH2_GetExceptionCode();
        KeBugCheck(CACHE_MANAGER);

    if (NT_SUCCESS(Status))
        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoSynchronousPageWrite(Vacb->SharedCacheMap->FileObject, Mdl, &Vacb->FileOffset, &Event, &IoStatus);
        if (Status == STATUS_PENDING)
            /* Wait for the paging I/O to complete and take its final status. */
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = IoStatus.Status;

    if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
        DPRINT1("IoPageWrite failed, Status %x\n", Status);

    return STATUS_SUCCESS;
_Inout_ PVOID BaseAddress,
_Inout_opt_ PVOID Buffer,
_In_ CC_COPY_OPERATION Operation)
    /* Perform the actual byte transfer for one chunk of CcCopyData:
     * zero the cache mapping (CcOperationZero), copy caller buffer ->
     * cache (CcOperationWrite), or copy cache -> caller buffer
     * (read case). Faults touching the caller's buffer are caught by
     * SEH and surfaced through the returned status. */
    NTSTATUS Status = STATUS_SUCCESS;

    if (Operation == CcOperationZero)
        /* Zeroing touches only the cache mapping; Buffer may be NULL
         * (hence _Inout_opt_). */
        RtlZeroMemory(BaseAddress, Length);
    if (Operation == CcOperationWrite)
        RtlCopyMemory(BaseAddress, Buffer, Length);
        RtlCopyMemory(Buffer, BaseAddress, Length);
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
        Status = _SEH2_GetExceptionCode();
_In_ PFILE_OBJECT FileObject,
_In_ LONGLONG FileOffset,
_Inout_ PVOID Buffer,
_In_ LONGLONG Length,
_In_ CC_COPY_OPERATION Operation,
_Out_ PIO_STATUS_BLOCK IoStatus)
    /* Common worker behind CcCopyRead / CcCopyWrite / CcZeroData:
     * walks the requested file range one VACB at a time, faulting
     * backing data in via CcReadVirtualAddress where needed, and moves
     * bytes with ReadWriteOrZero. Errors are reported by raising a
     * status (ExRaiseStatus); on success IoStatus receives
     * STATUS_SUCCESS and the number of bytes transferred. */
    LONGLONG CurrentOffset;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PLIST_ENTRY ListEntry;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    CurrentOffset = FileOffset;

    /* test if the requested data is available */
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
    /* FIXME: this loop doesn't take into account areas that don't have
     * a VACB in the list yet */
    ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (ListEntry != &SharedCacheMap->CacheMapVacbListHead)
        Vacb = CONTAINING_RECORD(ListEntry,
                                 CacheMapVacbListEntry);
        ListEntry = ListEntry->Flink;
        DoRangesIntersect(Vacb->FileOffset.QuadPart,
                          VACB_MAPPING_GRANULARITY,
                          CurrentOffset, Length))
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
            /* data not available */
        /* Once a VACB starts past the end of the requested range the
         * scan can stop — presumably the list is offset-sorted; verify
         * against the VACB list maintenance code. */
        if (Vacb->FileOffset.QuadPart >= CurrentOffset + Length)
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

    /* Leading partial chunk: the transfer does not start on a VACB
     * boundary, so handle bytes up to the next boundary first. */
    PartialLength = CurrentOffset % VACB_MAPPING_GRANULARITY;
    if (PartialLength != 0)
        PartialLength = min(Length, VACB_MAPPING_GRANULARITY - PartialLength);
        Status = CcRosRequestVacb(SharedCacheMap,
                                  ROUND_DOWN(CurrentOffset,
                                             VACB_MAPPING_GRANULARITY),
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
            /* Bring in the backing data before operating on it; on
             * failure, release the VACB un-dirtied and raise. */
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
        Status = ReadWriteOrZero((PUCHAR)BaseAddress + CurrentOffset % VACB_MAPPING_GRANULARITY,

        /* Release the VACB; mark it dirty for anything but a read. */
        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        /* Zeroing consumes no caller buffer; otherwise advance it. */
        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);

    /* Remaining data: one full (or final partial) VACB per iteration. */
        ASSERT(CurrentOffset % VACB_MAPPING_GRANULARITY == 0);
        PartialLength = min(VACB_MAPPING_GRANULARITY, Length);
        Status = CcRosRequestVacb(SharedCacheMap,
        if (!NT_SUCCESS(Status))
            ExRaiseStatus(Status);
        /* Backing data is needed when the caller reads it, or when a
         * partial chunk would otherwise clobber untouched bytes of the
         * mapping. */
        (Operation == CcOperationRead ||
         PartialLength < VACB_MAPPING_GRANULARITY))
            Status = CcReadVirtualAddress(Vacb);
            if (!NT_SUCCESS(Status))
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                ExRaiseStatus(Status);
        Status = ReadWriteOrZero(BaseAddress, Buffer, PartialLength, Operation);

        /* Release the VACB; mark it dirty for anything but a read. */
        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Operation != CcOperationRead, FALSE);

        if (!NT_SUCCESS(Status))
            ExRaiseStatus(STATUS_INVALID_USER_BUFFER);

        Length -= PartialLength;
        CurrentOffset += PartialLength;
        BytesCopied += PartialLength;

        if (Operation != CcOperationZero)
            Buffer = (PVOID)((ULONG_PTR)Buffer + PartialLength);

    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = BytesCopied;
IN PFILE_OBJECT FileObject,
IN ULONG BytesToWrite,
    /* Decide whether a write of BytesToWrite may proceed now, based on
     * the global dirty-page threshold (CcDirtyPageThreshold) and, when
     * the FSD opted in via FSRTL_FLAG_LIMIT_MODIFIED_PAGES, the
     * per-file DirtyPageThreshold of the shared cache map. */
    PLIST_ENTRY ListEntry;
    PFSRTL_COMMON_FCB_HEADER Fcb;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p BytesToWrite=%lu Wait=%d Retrying=%d\n",
        FileObject, BytesToWrite, Wait, Retrying);

    /* We cannot write if dirty pages count is above threshold */
    if (CcTotalDirtyPages > CcDirtyPageThreshold)

    /* We cannot write if the dirty pages count would bring us above the
     * global threshold.
     * XXX: Might not be accurate
     */
    if (CcTotalDirtyPages + (BytesToWrite / PAGE_SIZE) > CcDirtyPageThreshold)

    /* Is there a limit per file object? */
    Fcb = FileObject->FsContext;
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (!BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES) ||
        SharedCacheMap->DirtyPageThreshold == 0)
        /* Nope, so that's fine, allow write operation */

    /* There's a limit, start counting dirty pages */
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
    for (ListEntry = SharedCacheMap->CacheMapVacbListHead.Flink;
         ListEntry != &SharedCacheMap->CacheMapVacbListHead;
         ListEntry = ListEntry->Flink)
        Vacb = CONTAINING_RECORD(ListEntry,
                                 CacheMapVacbListEntry);
            /* Account a full mapping's worth of pages per counted VACB
             * — presumably only dirty VACBs reach this line (the guard
             * is not shown here); verify. */
            DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

    /* Is dirty page count above local threshold? */
    if (DirtyPages > SharedCacheMap->DirtyPageThreshold)

    /* We cannot write if the dirty pages count would bring us above the
     * per-file threshold.
     * XXX: Might not be accurate
     */
    if (DirtyPages + (BytesToWrite / PAGE_SIZE) > SharedCacheMap->DirtyPageThreshold)
IN PFILE_OBJECT FileObject,
IN PLARGE_INTEGER FileOffset,
OUT PIO_STATUS_BLOCK IoStatus)
    /* Public copy-read entry point: trace the request, then delegate
     * the transfer to the common CcCopyData worker in read mode. */
    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d\n",
        FileObject, FileOffset->QuadPart, Length, Wait);

    DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait,

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
IN PFILE_OBJECT FileObject,
IN PLARGE_INTEGER FileOffset,
    /* Public copy-write entry point: trace the request, then delegate
     * the transfer to the common CcCopyData worker in write mode. */
    IO_STATUS_BLOCK IoStatus;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%I64d Length=%lu Wait=%d Buffer=%p\n",
        FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
           "Length %lu, Wait %u, Buffer 0x%p)\n",
           FileObject, FileOffset->QuadPart, Length, Wait, Buffer);

    return CcCopyData(FileObject,
                      FileOffset->QuadPart,
IN PFILE_OBJECT FileObject,
IN PCC_POST_DEFERRED_WRITE PostRoutine,
IN ULONG BytesToWrite,
    /* Queue a write that could not proceed now so it can be retried by
     * the deferred-write machinery; if no context can be allocated the
     * post routine is invoked immediately instead of being queued. */
    PROS_DEFERRED_WRITE_CONTEXT Context;

    CCTRACE(CC_API_DEBUG, "FileObject=%p PostRoutine=%p Context1=%p Context2=%p BytesToWrite=%lu Retrying=%d\n",
        FileObject, PostRoutine, Context1, Context2, BytesToWrite, Retrying);

    /* Try to allocate a context for queueing the write operation */
    Context = ExAllocatePoolWithTag(NonPagedPool, sizeof(ROS_DEFERRED_WRITE_CONTEXT), 'CcDw');
    /* If it failed, immediately execute the operation! */
        PostRoutine(Context1, Context2);

    /* Otherwise, initialize the context */
    Context->FileObject = FileObject;
    Context->PostRoutine = PostRoutine;
    Context->Context1 = Context1;
    Context->Context2 = Context2;
    Context->BytesToWrite = BytesToWrite;
    Context->Retrying = Retrying;

    /* To the top, if that's a retry */
    ExInterlockedInsertHeadList(&CcDeferredWrites,
                                &Context->CcDeferredWritesEntry,
                                &CcDeferredWriteSpinLock);

    /* To the bottom, if that's a first time */
    ExInterlockedInsertTailList(&CcDeferredWrites,
                                &Context->CcDeferredWritesEntry,
                                &CcDeferredWriteSpinLock);
IN PFILE_OBJECT FileObject,
OUT PIO_STATUS_BLOCK IoStatus)
    /* Fast-path read: widen the 32-bit file offset to a LARGE_INTEGER
     * and forward to CcCopyRead; the copy is asserted to succeed. */
    LARGE_INTEGER LargeFileOffset;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu PageCount=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, PageCount, Buffer);

    /* PageCount is accepted for interface compatibility but unused. */
    DBG_UNREFERENCED_PARAMETER(PageCount);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyRead(FileObject,
    ASSERT(Success == TRUE);
IN PFILE_OBJECT FileObject,
    /* Fast-path write: widen the 32-bit file offset to a LARGE_INTEGER
     * and forward to CcCopyWrite; the copy is asserted to succeed. */
    LARGE_INTEGER LargeFileOffset;

    CCTRACE(CC_API_DEBUG, "FileObject=%p FileOffset=%lu Length=%lu Buffer=%p\n",
        FileObject, FileOffset, Length, Buffer);

    LargeFileOffset.QuadPart = FileOffset;
    Success = CcCopyWrite(FileObject,
    ASSERT(Success == TRUE);
/*
 * CcWaitForCurrentLazyWriterActivity
 * Blocks until the lazy writer signals iLazyWriterNotify, then
 * reports success.
 */
CcWaitForCurrentLazyWriterActivity (
    /* Lazy writer is done when its event is set */
    Status = KeWaitForSingleObject(&iLazyWriterNotify,
    if (!NT_SUCCESS(Status))
    return STATUS_SUCCESS;
IN PFILE_OBJECT FileObject,
IN PLARGE_INTEGER StartOffset,
IN PLARGE_INTEGER EndOffset,
    /* Zero the byte range [StartOffset, EndOffset) of the file. For a
     * non-cached file (no SharedCacheMap) this is done with direct
     * synchronous paging writes whose MDL pages all alias the single
     * shared CcZeroPage; for a cached file it delegates to CcCopyData
     * in zero mode. */
    LARGE_INTEGER WriteOffset;
    IO_STATUS_BLOCK Iosb;

    CCTRACE(CC_API_DEBUG, "FileObject=%p StartOffset=%I64u EndOffset=%I64u Wait=%d\n",
        FileObject, StartOffset->QuadPart, EndOffset->QuadPart, Wait);

    DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
           "Wait %u)\n", FileObject, StartOffset->QuadPart, EndOffset->QuadPart,

    Length = EndOffset->QuadPart - StartOffset->QuadPart;
    WriteOffset.QuadPart = StartOffset->QuadPart;

    if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
        /* File is not cached */

        /* NOTE(review): the MDL for the largest possible chunk lives on
         * the kernel stack via _alloca; MAX_ZERO_LENGTH bounds it, but
         * it is still a sizeable stack allocation — verify the stack
         * budget of callers. */
        Mdl = _alloca(MmSizeOfMdl(NULL, MAX_ZERO_LENGTH));

        /* Chunk the range so one MDL covers at most MAX_ZERO_LENGTH
         * bytes, accounting for the sub-page start offset. */
        if (Length + WriteOffset.QuadPart % PAGE_SIZE > MAX_ZERO_LENGTH)
            CurrentLength = MAX_ZERO_LENGTH - WriteOffset.QuadPart % PAGE_SIZE;
            CurrentLength = Length;

        MmInitializeMdl(Mdl, (PVOID)(ULONG_PTR)WriteOffset.QuadPart, CurrentLength);
        Mdl->MdlFlags |= (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
        /* Point every PFN slot of the MDL at the shared zero page. */
        for (i = 0; i < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); i++)
            ((PPFN_NUMBER)(Mdl + 1))[i] = CcZeroPage;

        KeInitializeEvent(&Event, NotificationEvent, FALSE);
        Status = IoSynchronousPageWrite(FileObject, Mdl, &WriteOffset, &Event, &Iosb);
        if (Status == STATUS_PENDING)
            /* Wait for the paging I/O to complete and take its final
             * status. */
            KeWaitForSingleObject(&Event, Executive, KernelMode, FALSE, NULL);
            Status = Iosb.Status;

        /* Undo any system-space mapping the I/O path created. */
        if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
            MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);

        if (!NT_SUCCESS(Status))

        /* Advance to the next chunk. */
        WriteOffset.QuadPart += CurrentLength;
        Length -= CurrentLength;

        IO_STATUS_BLOCK IoStatus;

        /* Cached file: let the common worker zero through the cache. */
        return CcCopyData(FileObject,
                          WriteOffset.QuadPart,