2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/copy.c
5 * PURPOSE: Implements cache managers copy interface
10 /* INCLUDES ******************************************************************/
16 /* GLOBALS *******************************************************************/
/* Page frame number of a single zero-filled physical page, shared by all
 * zeroing operations (see CcZeroData below).  Initialized once by
 * CcInitCacheZeroPage. */
18 static PFN_NUMBER CcZeroPage
= 0;
/* Upper bound (bytes) on a single zeroing pass in CcZeroData. */
20 #define MAX_ZERO_LENGTH (256 * 1024)
/* Upper bound (bytes) on a single read/write chain pass (see ReadVacbChain). */
21 #define MAX_RW_LENGTH (256 * 1024)
/* Compile-time guarantee: a single RW pass never exceeds one VACB mapping,
 * so per-VACB copy lengths computed with min() below stay valid. */
22 C_ASSERT(MAX_RW_LENGTH
<= VACB_MAPPING_GRANULARITY
);
/* Fast-I/O statistics counters exported by the cache manager.
 * NOTE(review): only declared here; no code in this excerpt updates them. */
24 ULONG CcFastMdlReadWait
;
25 ULONG CcFastMdlReadNotPossible
;
26 ULONG CcFastReadNotPossible
;
28 ULONG CcFastReadNoWait
;
29 ULONG CcFastReadResourceMiss
;
31 /* FUNCTIONS *****************************************************************/
/* NOTE(review): the line below appears to be the tail of a prototype
 * (presumably MiZeroPhysicalPage) whose head is missing from this excerpt. */
36 IN PFN_NUMBER PageFrameIndex
/* CcInitCacheZeroPage (head of function not visible in this excerpt):
 * allocates one physical page for the cache manager, zeroes it, and stores
 * its PFN in the file-scope CcZeroPage.  Bugchecks on allocation failure,
 * so it never returns unsuccessfully. */
46 MI_SET_USAGE(MI_USAGE_CACHE
);
47 //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
/* Request one page from the system memory consumer; TRUE presumably means
 * the call may wait -- TODO confirm against MmRequestPageMemoryConsumer. */
48 Status
= MmRequestPageMemoryConsumer(MC_SYSTEM
, TRUE
, &CcZeroPage
);
49 if (!NT_SUCCESS(Status
))
/* Allocation failure at init time is fatal for the cache manager. */
51 DbgPrint("Can't allocate CcZeroPage.\n");
52 KeBugCheck(CACHE_MANAGER
);
/* Zero the freshly allocated page so CcZeroData can reuse it safely. */
54 MiZeroPhysicalPage(CcZeroPage
);
/* ReadVacbChain (function head and local declarations not visible in this
 * excerpt): walks a chain of VACBs covering [ReadOffset, ReadOffset+Length)
 * of SharedCacheMap.  Valid VACBs are copied straight into the caller's
 * buffer; runs of invalid VACBs are read from disk in one IoPageRead via a
 * stack-allocated MDL, then copied.  Returns STATUS_SUCCESS or the first
 * I/O failure.  NOTE(review): many original lines are missing here; the
 * comments below describe only what the visible fragments establish. */
60 PROS_SHARED_CACHE_MAP SharedCacheMap
,
/* MDL sized for the largest possible pass lives on the stack (_alloca). */
74 Mdl
= _alloca(MmSizeOfMdl(NULL
, MAX_RW_LENGTH
));
/* Obtain the VACB chain covering the requested range. */
76 Status
= CcRosGetVacbChain(SharedCacheMap
, ReadOffset
, Length
, &head
);
77 if (!NT_SUCCESS(Status
))
/* Main loop over the chain. */
82 while (current
!= NULL
)
85 * If the current VACB is valid then copy it into the user buffer.
/* MAX_RW_LENGTH <= VACB_MAPPING_GRANULARITY (see C_ASSERT above), so one
 * VACB always suffices for the remaining length when it is valid. */
89 TempLength
= min(VACB_MAPPING_GRANULARITY
, Length
);
90 RtlCopyMemory(Buffer
, current
->BaseAddress
, TempLength
);
92 Buffer
= (PVOID
)((ULONG_PTR
)Buffer
+ TempLength
);
94 Length
= Length
- TempLength
;
/* Advance the chain and release the VACB just consumed (Valid=TRUE). */
96 current
= current
->NextInChain
;
97 CcRosReleaseVacb(SharedCacheMap
, previous
, TRUE
, FALSE
, FALSE
);
100 * Otherwise read in as much as we can.
107 PPFN_NUMBER MdlPages
;
110 * Count the maximum number of bytes we could read starting
111 * from the current VACB.
115 while ((current2
!= NULL
) && !current2
->Valid
&& (current_size
< MAX_RW_LENGTH
))
117 current2
= current2
->NextInChain
;
118 current_size
+= VACB_MAPPING_GRANULARITY
;
122 * Create an MDL which contains all their pages.
124 MmInitializeMdl(Mdl
, NULL
, current_size
);
125 Mdl
->MdlFlags
|= (MDL_PAGES_LOCKED
| MDL_IO_PAGE_READ
);
/* The PFN array follows the MDL header in memory. */
128 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
/* Second pass over the same run of invalid VACBs: record each page's PFN.
 * NOTE(review): current_size is reused as the loop bound here; its reset
 * between the two passes is not visible in this excerpt -- confirm. */
129 while ((current2
!= NULL
) && !current2
->Valid
&& (current_size
< MAX_RW_LENGTH
))
131 PVOID address
= current2
->BaseAddress
;
132 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++, address
= RVA(address
, PAGE_SIZE
))
134 *MdlPages
++ = MmGetPfnForProcess(NULL
, address
);
136 current2
= current2
->NextInChain
;
137 current_size
+= VACB_MAPPING_GRANULARITY
;
141 * Read in the information.
/* Synchronous paging read: wait on the event if the IRP went pending,
 * then take the final status from the I/O status block. */
143 KeInitializeEvent(&Event
, NotificationEvent
, FALSE
);
144 Status
= IoPageRead(SharedCacheMap
->FileObject
,
/* NOTE(review): "¤t" below looks like a mojibake of "&current"
 * (an "&curren" HTML-entity mangling) -- the argument is presumably
 * &current->FileOffset.  Left byte-identical here; fix at source. */
146 ¤t
->FileOffset
,
149 if (Status
== STATUS_PENDING
)
151 KeWaitForSingleObject(&Event
, Executive
, KernelMode
, FALSE
, NULL
);
152 Status
= Iosb
.Status
;
/* IoPageRead may have mapped the MDL into system space; unmap it. */
154 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
156 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
/* On hard failure (EOF is tolerated): release the remaining chain with
 * Valid=FALSE and bail out (error-return path partially elided here). */
158 if (!NT_SUCCESS(Status
) && Status
!= STATUS_END_OF_FILE
)
160 while (current
!= NULL
)
163 current
= current
->NextInChain
;
164 CcRosReleaseVacb(SharedCacheMap
, previous
, FALSE
, FALSE
, FALSE
);
/* Success: copy the freshly-read run into the caller's buffer, releasing
 * each VACB as valid (TRUE) as it is consumed. */
169 while (current
!= NULL
&& !current
->Valid
&& current_size
< MAX_RW_LENGTH
)
172 current
= current
->NextInChain
;
173 TempLength
= min(VACB_MAPPING_GRANULARITY
, Length
);
174 RtlCopyMemory(Buffer
, previous
->BaseAddress
, TempLength
);
176 Buffer
= (PVOID
)((ULONG_PTR
)Buffer
+ TempLength
);
178 Length
= Length
- TempLength
;
179 CcRosReleaseVacb(SharedCacheMap
, previous
, TRUE
, FALSE
, FALSE
);
180 current_size
+= VACB_MAPPING_GRANULARITY
;
184 return STATUS_SUCCESS
;
/* CcReadVirtualAddress: fill a single VACB's mapping with data read from its
 * backing file.  Reads up to VACB_MAPPING_GRANULARITY bytes (clamped to the
 * section size) via a synchronous IoPageRead, then zero-fills any tail of
 * the mapping beyond the file data.  Returns STATUS_SUCCESS, or
 * STATUS_INSUFFICIENT_RESOURCES if the MDL cannot be allocated.
 * NOTE(review): return type, parameter list (presumably PROS_VACB Vacb) and
 * several declarations are missing from this excerpt. */
189 CcReadVirtualAddress (
195 IO_STATUS_BLOCK IoStatus
;
/* Bytes of real file data available at this VACB's offset, clamped below
 * to at most one mapping granule. */
198 Size
= (ULONG
)(Vacb
->SharedCacheMap
->SectionSize
.QuadPart
- Vacb
->FileOffset
.QuadPart
);
199 if (Size
> VACB_MAPPING_GRANULARITY
)
201 Size
= VACB_MAPPING_GRANULARITY
;
/* Describe the VACB's (nonpaged) mapping with an MDL for the paging read. */
204 Mdl
= IoAllocateMdl(Vacb
->BaseAddress
, Size
, FALSE
, FALSE
, NULL
);
/* (NULL check of Mdl elided in this excerpt.) */
207 return STATUS_INSUFFICIENT_RESOURCES
;
210 MmBuildMdlForNonPagedPool(Mdl
);
211 Mdl
->MdlFlags
|= MDL_IO_PAGE_READ
;
/* Issue the paging read and wait synchronously for completion. */
212 KeInitializeEvent(&Event
, NotificationEvent
, FALSE
);
213 Status
= IoPageRead(Vacb
->SharedCacheMap
->FileObject
, Mdl
, &Vacb
->FileOffset
, &Event
, &IoStatus
);
214 if (Status
== STATUS_PENDING
)
216 KeWaitForSingleObject(&Event
, Executive
, KernelMode
, FALSE
, NULL
);
217 Status
= IoStatus
.Status
;
/* Reading past EOF is not an error for cache fill purposes. */
222 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
224 DPRINT1("IoPageRead failed, Status %x\n", Status
);
/* Zero the remainder of the mapping so stale data never leaks to readers. */
228 if (Size
< VACB_MAPPING_GRANULARITY
)
230 RtlZeroMemory((char*)Vacb
->BaseAddress
+ Size
,
231 VACB_MAPPING_GRANULARITY
- Size
);
234 return STATUS_SUCCESS
;
/* CcWriteVirtualAddress: flush a single VACB's mapping back to its backing
 * file.  Mirrors CcReadVirtualAddress: clamp the length to the section size
 * and one mapping granule, build an MDL over the nonpaged mapping, and issue
 * a synchronous IoSynchronousPageWrite.  Returns STATUS_SUCCESS, or
 * STATUS_INSUFFICIENT_RESOURCES if the MDL cannot be allocated.
 * NOTE(review): return type and parameter list are missing from this
 * excerpt. */
239 CcWriteVirtualAddress (
245 IO_STATUS_BLOCK IoStatus
;
249 Size
= (ULONG
)(Vacb
->SharedCacheMap
->SectionSize
.QuadPart
- Vacb
->FileOffset
.QuadPart
);
250 if (Size
> VACB_MAPPING_GRANULARITY
)
252 Size
= VACB_MAPPING_GRANULARITY
;
255 // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
256 // MmGlobalPageDirectory and the real system PDE directory. What a mess...
/* Touch every page of the mapping (via MmGetPfnForProcess) before building
 * the MDL -- presumably to force the PDE synchronization described above;
 * confirm against ReactOS Mm internals. */
262 MmGetPfnForProcess(NULL
, (PVOID
)((ULONG_PTR
)Vacb
->BaseAddress
+ (i
<< PAGE_SHIFT
)));
263 } while (++i
< (Size
>> PAGE_SHIFT
));
266 Mdl
= IoAllocateMdl(Vacb
->BaseAddress
, Size
, FALSE
, FALSE
, NULL
);
/* (NULL check of Mdl elided in this excerpt.) */
269 return STATUS_INSUFFICIENT_RESOURCES
;
271 MmBuildMdlForNonPagedPool(Mdl
);
/* NOTE(review): MDL_IO_PAGE_READ on a WRITE path looks suspicious --
 * confirm whether this should be a write-related flag or is intentional. */
272 Mdl
->MdlFlags
|= MDL_IO_PAGE_READ
;
272 KeInitializeEvent is next: issue the write and wait for completion. */
273 KeInitializeEvent(&Event
, NotificationEvent
, FALSE
);
274 Status
= IoSynchronousPageWrite(Vacb
->SharedCacheMap
->FileObject
, Mdl
, &Vacb
->FileOffset
, &Event
, &IoStatus
);
275 if (Status
== STATUS_PENDING
)
277 KeWaitForSingleObject(&Event
, Executive
, KernelMode
, FALSE
, NULL
);
278 Status
= IoStatus
.Status
;
/* EOF is tolerated, as in the read path. */
281 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
283 DPRINT1("IoPageWrite failed, Status %x\n", Status
);
288 return STATUS_SUCCESS
;
/* NOTE(review): fragment of a parameter list -- presumably CcCanIWrite,
 * given FileObject + BytesToWrite; head and body are missing from this
 * excerpt. */
298 IN PFILE_OBJECT FileObject
,
299 IN ULONG BytesToWrite
,
/* CcCopyRead (function head not visible in this excerpt): copy Length bytes
 * at FileOffset from the file's cache into Buffer, faulting data in from
 * disk as needed.  In the no-wait case it first scans the VACB list under
 * the cache-map spin lock and fails with STATUS_UNSUCCESSFUL if any needed
 * VACB is invalid.  It then copies a possibly-unaligned head chunk via
 * CcRosRequestVacb/CcReadVirtualAddress, and the aligned remainder via
 * ReadVacbChain.  On success IoStatus receives STATUS_SUCCESS and the
 * number of bytes copied.  NOTE(review): many original lines are missing;
 * comments describe only what the visible fragments establish. */
314 IN PFILE_OBJECT FileObject
,
315 IN PLARGE_INTEGER FileOffset
,
319 OUT PIO_STATUS_BLOCK IoStatus
)
323 NTSTATUS Status
= STATUS_SUCCESS
;
327 ULONG ReadLength
= 0;
328 PROS_SHARED_CACHE_MAP SharedCacheMap
;
330 PLIST_ENTRY current_entry
;
333 DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
334 "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
335 FileObject
, FileOffset
->QuadPart
, Length
, Wait
,
338 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
/* NOTE(review): 64-bit offset truncated to ULONG here -- large-file reads
 * would wrap; presumably a known limitation of this implementation. */
339 ReadOffset
= (ULONG
)FileOffset
->QuadPart
;
341 DPRINT("SectionSize %I64d, FileSize %I64d\n",
342 SharedCacheMap
->SectionSize
.QuadPart
,
343 SharedCacheMap
->FileSize
.QuadPart
);
346 * Check for the nowait case that all the cache VACBs that would
347 * cover this read are in memory.
351 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldirql
);
352 /* FIXME: this loop doesn't take into account areas that don't have
353 * a VACB in the list yet */
354 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
355 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
357 current
= CONTAINING_RECORD(current_entry
,
359 CacheMapVacbListEntry
);
/* An invalid VACB overlapping the requested range means the no-wait read
 * cannot be satisfied from cache: release the lock and fail. */
360 if (!current
->Valid
&&
361 DoRangesIntersect(current
->FileOffset
.QuadPart
,
362 VACB_MAPPING_GRANULARITY
,
365 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
366 IoStatus
->Status
= STATUS_UNSUCCESSFUL
;
367 IoStatus
->Information
= 0;
/* List is presumably offset-ordered: once past the range, stop scanning. */
370 if (current
->FileOffset
.QuadPart
>= ReadOffset
+ Length
)
372 current_entry
= current_entry
->Flink
;
374 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
/* Head chunk: handle a ReadOffset that is not VACB-aligned. */
377 TempLength
= ReadOffset
% VACB_MAPPING_GRANULARITY
;
380 TempLength
= min(Length
, VACB_MAPPING_GRANULARITY
- TempLength
);
381 Status
= CcRosRequestVacb(SharedCacheMap
,
382 ROUND_DOWN(ReadOffset
,
383 VACB_MAPPING_GRANULARITY
),
384 &BaseAddress
, &Valid
, &Vacb
);
385 if (!NT_SUCCESS(Status
))
387 IoStatus
->Information
= 0;
388 IoStatus
->Status
= Status
;
389 DPRINT("CcRosRequestVacb failed, Status %x\n", Status
);
/* VACB not valid: fault its contents in from disk first. */
394 Status
= CcReadVirtualAddress(Vacb
);
395 if (!NT_SUCCESS(Status
))
397 IoStatus
->Information
= 0;
398 IoStatus
->Status
= Status
;
399 CcRosReleaseVacb(SharedCacheMap
, Vacb
, FALSE
, FALSE
, FALSE
);
/* Copy the head chunk from within the VACB mapping, then release it as
 * valid and advance all cursors. */
403 RtlCopyMemory(Buffer
,
404 (char*)BaseAddress
+ ReadOffset
% VACB_MAPPING_GRANULARITY
,
406 CcRosReleaseVacb(SharedCacheMap
, Vacb
, TRUE
, FALSE
, FALSE
);
407 ReadLength
+= TempLength
;
408 Length
-= TempLength
;
409 ReadOffset
+= TempLength
;
410 Buffer
= (PVOID
)((char*)Buffer
+ TempLength
);
/* Aligned remainder: one VACB-granule chunk per iteration via the chain
 * reader (loop head elided in this excerpt). */
415 TempLength
= min(VACB_MAPPING_GRANULARITY
, Length
);
416 Status
= ReadVacbChain(SharedCacheMap
, ReadOffset
, TempLength
, Buffer
);
417 if (!NT_SUCCESS(Status
))
419 IoStatus
->Information
= 0;
420 IoStatus
->Status
= Status
;
421 DPRINT("ReadVacbChain failed, Status %x\n", Status
);
425 ReadLength
+= TempLength
;
426 Length
-= TempLength
;
427 ReadOffset
+= TempLength
;
429 Buffer
= (PVOID
)((ULONG_PTR
)Buffer
+ TempLength
);
/* Success: report total bytes copied. */
432 IoStatus
->Status
= STATUS_SUCCESS
;
433 IoStatus
->Information
= ReadLength
;
434 DPRINT("CcCopyRead O.K.\n");
/* CcCopyWrite (function head not visible in this excerpt): copy Length bytes
 * from Buffer into the file's cache at FileOffset, marking touched VACBs
 * dirty.  Structure mirrors CcCopyRead: a no-wait availability scan under
 * the cache-map spin lock, an unaligned head chunk, then aligned full-granule
 * chunks.  Partially-overwritten VACBs are first filled from disk via
 * CcReadVirtualAddress so untouched bytes are preserved.  NOTE(review):
 * many original lines are missing; comments describe only what the visible
 * fragments establish. */
444 IN PFILE_OBJECT FileObject
,
445 IN PLARGE_INTEGER FileOffset
,
453 PROS_SHARED_CACHE_MAP SharedCacheMap
;
454 PLIST_ENTRY current_entry
;
460 DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
461 "Length %lu, Wait %u, Buffer 0x%p)\n",
462 FileObject
, FileOffset
->QuadPart
, Length
, Wait
, Buffer
);
464 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
/* NOTE(review): 64-bit offset truncated to ULONG, as in CcCopyRead. */
465 WriteOffset
= (ULONG
)FileOffset
->QuadPart
;
469 /* testing, if the requested datas are available */
470 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldirql
);
471 /* FIXME: this loop doesn't take into account areas that don't have
472 * a VACB in the list yet */
473 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
474 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
476 Vacb
= CONTAINING_RECORD(current_entry
,
478 CacheMapVacbListEntry
);
/* (The !Vacb->Valid half of this condition is elided in this excerpt.) */
480 DoRangesIntersect(Vacb
->FileOffset
.QuadPart
,
481 VACB_MAPPING_GRANULARITY
,
482 WriteOffset
, Length
))
484 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
485 /* datas not available */
488 if (Vacb
->FileOffset
.QuadPart
>= WriteOffset
+ Length
)
490 current_entry
= current_entry
->Flink
;
492 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
/* Head chunk: handle a WriteOffset that is not VACB-aligned. */
495 TempLength
= WriteOffset
% VACB_MAPPING_GRANULARITY
;
499 ROffset
= ROUND_DOWN(WriteOffset
, VACB_MAPPING_GRANULARITY
);
500 TempLength
= min(Length
, VACB_MAPPING_GRANULARITY
- TempLength
);
501 Status
= CcRosRequestVacb(SharedCacheMap
, ROffset
,
502 &BaseAddress
, &Valid
, &Vacb
);
503 if (!NT_SUCCESS(Status
))
/* Partial overwrite of an invalid VACB: read existing contents first so
 * the bytes around the written range are preserved. */
509 if (!NT_SUCCESS(CcReadVirtualAddress(Vacb
)))
/* Copy the head chunk into the mapping, release the VACB valid AND dirty
 * (TRUE, TRUE), and advance the cursors. */
514 RtlCopyMemory((char*)BaseAddress
+ WriteOffset
% VACB_MAPPING_GRANULARITY
,
517 CcRosReleaseVacb(SharedCacheMap
, Vacb
, TRUE
, TRUE
, FALSE
);
519 Length
-= TempLength
;
520 WriteOffset
+= TempLength
;
522 Buffer
= (PVOID
)((ULONG_PTR
)Buffer
+ TempLength
);
/* Aligned remainder: one granule per iteration (loop head elided). */
527 TempLength
= min(VACB_MAPPING_GRANULARITY
, Length
);
528 Status
= CcRosRequestVacb(SharedCacheMap
,
533 if (!NT_SUCCESS(Status
))
/* A tail chunk shorter than a full granule on an invalid VACB also needs
 * a read-modify-write; release without dirtying on read failure. */
537 if (!Valid
&& TempLength
< VACB_MAPPING_GRANULARITY
)
539 if (!NT_SUCCESS(CcReadVirtualAddress(Vacb
)))
541 CcRosReleaseVacb(SharedCacheMap
, Vacb
, FALSE
, FALSE
, FALSE
);
545 RtlCopyMemory(BaseAddress
, Buffer
, TempLength
);
546 CcRosReleaseVacb(SharedCacheMap
, Vacb
, TRUE
, TRUE
, FALSE
);
547 Length
-= TempLength
;
548 WriteOffset
+= TempLength
;
550 Buffer
= (PVOID
)((ULONG_PTR
)Buffer
+ TempLength
);
/* NOTE(review): parameter-list fragment, presumably CcDeferWrite given the
 * PostRoutine/BytesToWrite parameters; head and body missing from excerpt. */
561 IN PFILE_OBJECT FileObject
,
562 IN PCC_POST_DEFERRED_WRITE PostRoutine
,
565 IN ULONG BytesToWrite
,
/* NOTE(review): fragment of a second routine's parameter list, presumably
 * CcFastCopyRead (FileObject + IoStatus); body missing from excerpt. */
577 IN PFILE_OBJECT FileObject
,
582 OUT PIO_STATUS_BLOCK IoStatus
)
/* NOTE(review): fragment of a third routine's parameter list. */
592 IN PFILE_OBJECT FileObject
,
/* CcWaitForCurrentLazyWriterActivity: unimplemented stub -- always returns
 * STATUS_NOT_IMPLEMENTED (signature lines partially elided). */
605 CcWaitForCurrentLazyWriterActivity (
609 return STATUS_NOT_IMPLEMENTED
;
/* CcZeroData (function head not visible in this excerpt; excerpt also ends
 * before the function does): zero the byte range [StartOffset, EndOffset)
 * of a file.  Uncached files are zeroed by writing the shared CcZeroPage
 * repeatedly via IoSynchronousPageWrite with a stack MDL; cached files are
 * zeroed through the VACB chain with RtlZeroMemory, marking VACBs dirty.
 * NOTE(review): many original lines are missing; comments describe only
 * what the visible fragments establish. */
618 IN PFILE_OBJECT FileObject
,
619 IN PLARGE_INTEGER StartOffset
,
620 IN PLARGE_INTEGER EndOffset
,
624 LARGE_INTEGER WriteOffset
;
629 IO_STATUS_BLOCK Iosb
;
632 DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
633 "Wait %u)\n", FileObject
, StartOffset
->QuadPart
, EndOffset
->QuadPart
,
/* NOTE(review): length computed from LowPart only -- ranges spanning 4 GiB
 * boundaries would be wrong; presumably a known limitation. */
636 Length
= EndOffset
->u
.LowPart
- StartOffset
->u
.LowPart
;
637 WriteOffset
.QuadPart
= StartOffset
->QuadPart
;
639 if (FileObject
->SectionObjectPointer
->SharedCacheMap
== NULL
)
641 /* File is not cached */
/* Uncached path: one stack MDL sized for the largest zeroing pass,
 * refilled and rewritten per iteration (loop head elided). */
643 Mdl
= _alloca(MmSizeOfMdl(NULL
, MAX_ZERO_LENGTH
));
647 if (Length
+ WriteOffset
.u
.LowPart
% PAGE_SIZE
> MAX_ZERO_LENGTH
)
649 CurrentLength
= MAX_ZERO_LENGTH
- WriteOffset
.u
.LowPart
% PAGE_SIZE
;
653 CurrentLength
= Length
;
655 MmInitializeMdl(Mdl
, (PVOID
)(ULONG_PTR
)WriteOffset
.QuadPart
, CurrentLength
);
656 Mdl
->MdlFlags
|= (MDL_PAGES_LOCKED
| MDL_IO_PAGE_READ
);
/* Every PFN slot of the MDL points at the single shared zero page, so the
 * write emits zeros without allocating a zero buffer. */
657 for (i
= 0; i
< ((Mdl
->Size
- sizeof(MDL
)) / sizeof(ULONG
)); i
++)
659 ((PPFN_NUMBER
)(Mdl
+ 1))[i
] = CcZeroPage
;
661 KeInitializeEvent(&Event
, NotificationEvent
, FALSE
);
662 Status
= IoSynchronousPageWrite(FileObject
, Mdl
, &WriteOffset
, &Event
, &Iosb
);
663 if (Status
== STATUS_PENDING
)
665 KeWaitForSingleObject(&Event
, Executive
, KernelMode
, FALSE
, NULL
);
666 Status
= Iosb
.Status
;
668 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
670 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
672 if (!NT_SUCCESS(Status
))
/* Advance to the next pass over the remaining range. */
676 WriteOffset
.QuadPart
+= CurrentLength
;
677 Length
-= CurrentLength
;
/* Cached path: zero through the cache so readers see the zeros. */
684 PROS_SHARED_CACHE_MAP SharedCacheMap
;
685 PLIST_ENTRY current_entry
;
686 PROS_VACB Vacb
, current
, previous
;
689 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
692 /* testing, if the requested datas are available */
693 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldirql
);
694 /* FIXME: this loop doesn't take into account areas that don't have
695 * a VACB in the list yet */
696 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
697 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
699 Vacb
= CONTAINING_RECORD(current_entry
,
701 CacheMapVacbListEntry
);
/* (The !Vacb->Valid half of this condition is elided in this excerpt.) */
703 DoRangesIntersect(Vacb
->FileOffset
.QuadPart
,
704 VACB_MAPPING_GRANULARITY
,
705 WriteOffset
.u
.LowPart
, Length
))
707 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
708 /* datas not available */
711 if (Vacb
->FileOffset
.QuadPart
>= WriteOffset
.u
.LowPart
+ Length
)
713 current_entry
= current_entry
->Flink
;
715 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
/* Per-pass setup: clamp the pass to MAX_ZERO_LENGTH starting at the
 * in-granule offset, then fetch the covering VACB chain. */
721 Offset
= WriteOffset
.u
.LowPart
% VACB_MAPPING_GRANULARITY
;
722 if (Length
+ Offset
> MAX_ZERO_LENGTH
)
724 CurrentLength
= MAX_ZERO_LENGTH
- Offset
;
728 CurrentLength
= Length
;
730 Status
= CcRosGetVacbChain(SharedCacheMap
, WriteOffset
.u
.LowPart
- Offset
,
731 Offset
+ CurrentLength
, &Vacb
);
732 if (!NT_SUCCESS(Status
))
/* Walk the chain, zeroing the relevant slice of each VACB. */
738 while (current
!= NULL
)
740 Offset
= WriteOffset
.u
.LowPart
% VACB_MAPPING_GRANULARITY
;
/* Partial-granule zeroing of an invalid VACB needs its current contents
 * first (condition's first half elided in this excerpt). */
742 (Offset
+ CurrentLength
< VACB_MAPPING_GRANULARITY
))
747 Status
= CcReadVirtualAddress(current
);
748 if (!NT_SUCCESS(Status
))
750 DPRINT1("CcReadVirtualAddress failed, status %x\n",
754 TempLength
= min(CurrentLength
, VACB_MAPPING_GRANULARITY
- Offset
);
758 TempLength
= VACB_MAPPING_GRANULARITY
;
760 RtlZeroMemory((PUCHAR
)current
->BaseAddress
+ Offset
,
763 WriteOffset
.QuadPart
+= TempLength
;
764 CurrentLength
-= TempLength
;
765 Length
-= TempLength
;
767 current
= current
->NextInChain
;
/* Release the whole chain, each VACB valid and dirty (TRUE, TRUE).
 * (Excerpt ends inside this loop.) */
771 while (current
!= NULL
)
774 current
= current
->NextInChain
;
775 CcRosReleaseVacb(SharedCacheMap
, previous
, TRUE
, TRUE
, FALSE
);