/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/copy.c
 * PURPOSE:         Implements cache managers copy interface
 */

/* INCLUDES ******************************************************************/

/* GLOBALS *******************************************************************/
/* NOTE(review): this extract has original line numbers fused onto the start
   of lines and appears to be missing some lines (the numbers jump); the
   code below is preserved byte-for-byte with only comments added. */
/* PFN of the single shared zero-filled page; CcZeroData points every MDL
   page entry of a zeroing write at this one page. */
18 static PFN_NUMBER CcZeroPage
= 0;
/* Upper bounds for a single zeroing / read-write chunk; a chunk must fit
   inside one cache-segment mapping (asserted below). */
20 #define MAX_ZERO_LENGTH (256 * 1024)
21 #define MAX_RW_LENGTH (256 * 1024)
22 C_ASSERT(MAX_RW_LENGTH
<= VACB_MAPPING_GRANULARITY
);
/* Fast-I/O statistics counters.  NOTE(review): a counter between lines 26
   and 28 of the original (numbering gap) is missing from this extract. */
24 ULONG CcFastMdlReadWait
;
25 ULONG CcFastMdlReadNotPossible
;
26 ULONG CcFastReadNotPossible
;
28 ULONG CcFastReadNoWait
;
29 ULONG CcFastReadResourceMiss
;
/* FUNCTIONS *****************************************************************/
/* Fragment of cache-zero-page initialization: requests one physical page
   from the system memory consumer, bugchecks on failure, and zero-fills it
   so later zeroing writes can reference a single all-zero page.
   NOTE(review): the function header, braces and some lines are missing
   from this extract; code below is preserved byte-for-byte. */
36 IN PFN_NUMBER PageFrameIndex
46 MI_SET_USAGE(MI_USAGE_CACHE
);
47 //MI_SET_PROCESS2(PsGetCurrentProcess()->ImageFileName);
/* Charge the page to the system consumer; the TRUE argument presumably
   permits blocking — confirm against MmRequestPageMemoryConsumer. */
48 Status
= MmRequestPageMemoryConsumer(MC_SYSTEM
, TRUE
, &CcZeroPage
);
49 if (!NT_SUCCESS(Status
))
/* A cache manager without its zero page cannot operate: bugcheck. */
51 DbgPrint("Can't allocate CcZeroPage.\n");
52 KeBugCheck(CACHE_MANAGER
);
54 MiZeroPhysicalPage(CcZeroPage
);
/* Fragment of ReadCacheSegmentChain(): walks a chain of cache segments
   covering the requested file range, copying already-valid segments into
   the caller's buffer and batching consecutive invalid segments into a
   single MDL-based IoPageRead.
   NOTE(review): the return type, parameter list, several declarations and
   statements (e.g. the remaining IoPageRead arguments) are missing from
   this extract; the code below is preserved byte-for-byte. */
59 ReadCacheSegmentChain (
66 PCACHE_SEGMENT current
;
67 PCACHE_SEGMENT previous
;
69 LARGE_INTEGER SegOffset
;
/* Stack-allocate an MDL large enough for the biggest possible run. */
75 Mdl
= _alloca(MmSizeOfMdl(NULL
, MAX_RW_LENGTH
));
77 Status
= CcRosGetCacheSegmentChain(Bcb
, ReadOffset
, Length
, &head
);
78 if (!NT_SUCCESS(Status
))
/* Consume the chain segment by segment until it is exhausted. */
83 while (current
!= NULL
)
86 * If the current segment is valid then copy it into the
91 TempLength
= min(VACB_MAPPING_GRANULARITY
, Length
);
92 RtlCopyMemory(Buffer
, current
->BaseAddress
, TempLength
);
94 Buffer
= (PVOID
)((ULONG_PTR
)Buffer
+ TempLength
);
96 Length
= Length
- TempLength
;
98 current
= current
->NextInChain
;
/* Release the segment just copied (valid, not dirty). */
99 CcRosReleaseCacheSegment(Bcb
, previous
, TRUE
, FALSE
, FALSE
);
102 * Otherwise read in as much as we can.
106 PCACHE_SEGMENT current2
;
109 PPFN_NUMBER MdlPages
;
112 * Count the maximum number of bytes we could read starting
113 * from the current segment.
117 while ((current2
!= NULL
) && !current2
->Valid
&& (current_size
< MAX_RW_LENGTH
))
119 current2
= current2
->NextInChain
;
120 current_size
+= VACB_MAPPING_GRANULARITY
;
124 * Create an MDL which contains all their pages.
126 MmInitializeMdl(Mdl
, NULL
, current_size
);
127 Mdl
->MdlFlags
|= (MDL_PAGES_LOCKED
| MDL_IO_PAGE_READ
);
/* Fill the MDL's PFN array from every invalid segment's pages. */
130 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
131 while ((current2
!= NULL
) && !current2
->Valid
&& (current_size
< MAX_RW_LENGTH
))
133 PVOID address
= current2
->BaseAddress
;
134 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++, address
= RVA(address
, PAGE_SIZE
))
136 *MdlPages
++ = MmGetPfnForProcess(NULL
, address
);
138 current2
= current2
->NextInChain
;
139 current_size
+= VACB_MAPPING_GRANULARITY
;
143 * Read in the information.
145 SegOffset
.QuadPart
= current
->FileOffset
;
146 KeInitializeEvent(&Event
, NotificationEvent
, FALSE
);
147 Status
= IoPageRead(Bcb
->FileObject
,
/* Paging I/O may complete asynchronously: wait on the event. */
152 if (Status
== STATUS_PENDING
)
154 KeWaitForSingleObject(&Event
, Executive
, KernelMode
, FALSE
, NULL
);
155 Status
= Iosb
.Status
;
157 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
159 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
/* On hard failure (end-of-file is tolerated), release the rest of
   the chain without marking anything valid. */
161 if (!NT_SUCCESS(Status
) && Status
!= STATUS_END_OF_FILE
)
163 while (current
!= NULL
)
166 current
= current
->NextInChain
;
167 CcRosReleaseCacheSegment(Bcb
, previous
, FALSE
, FALSE
, FALSE
);
/* Copy the freshly-read segments out to the caller's buffer and
   release each one valid. */
172 while (current
!= NULL
&& !current
->Valid
&& current_size
< MAX_RW_LENGTH
)
175 current
= current
->NextInChain
;
176 TempLength
= min(VACB_MAPPING_GRANULARITY
, Length
);
177 RtlCopyMemory(Buffer
, previous
->BaseAddress
, TempLength
);
179 Buffer
= (PVOID
)((ULONG_PTR
)Buffer
+ TempLength
);
181 Length
= Length
- TempLength
;
182 CcRosReleaseCacheSegment(Bcb
, previous
, TRUE
, FALSE
, FALSE
);
183 current_size
+= VACB_MAPPING_GRANULARITY
;
187 return STATUS_SUCCESS
;
/* Fragment of ReadCacheSegment(CacheSeg): reads the segment's backing
   file data into its nonpaged mapping via a synchronous IoPageRead and
   zero-fills any tail of the mapping beyond the file's allocation size.
   NOTE(review): the return type, function-name line, declarations and
   braces are missing from this extract; code preserved byte-for-byte. */
193 PCACHE_SEGMENT CacheSeg
)
198 LARGE_INTEGER SegOffset
;
199 IO_STATUS_BLOCK IoStatus
;
202 SegOffset
.QuadPart
= CacheSeg
->FileOffset
;
/* Clamp the read size to the file's allocation size past this segment's
   offset, then to one mapping granularity. */
203 Size
= (ULONG
)(CacheSeg
->Bcb
->AllocationSize
.QuadPart
- CacheSeg
->FileOffset
);
204 if (Size
> VACB_MAPPING_GRANULARITY
)
206 Size
= VACB_MAPPING_GRANULARITY
;
209 Mdl
= IoAllocateMdl(CacheSeg
->BaseAddress
, Size
, FALSE
, FALSE
, NULL
);
212 return STATUS_INSUFFICIENT_RESOURCES
;
215 MmBuildMdlForNonPagedPool(Mdl
);
216 Mdl
->MdlFlags
|= MDL_IO_PAGE_READ
;
217 KeInitializeEvent(&Event
, NotificationEvent
, FALSE
);
218 Status
= IoPageRead(CacheSeg
->Bcb
->FileObject
, Mdl
, &SegOffset
, &Event
, &IoStatus
);
/* Paging I/O may return STATUS_PENDING: wait for completion, then take
   the real status from the I/O status block. */
219 if (Status
== STATUS_PENDING
)
221 KeWaitForSingleObject(&Event
, Executive
, KernelMode
, FALSE
, NULL
);
222 Status
= IoStatus
.Status
;
227 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
229 DPRINT1("IoPageRead failed, Status %x\n", Status
);
/* Zero the portion of the mapping not covered by file data. */
233 if (Size
< VACB_MAPPING_GRANULARITY
)
235 RtlZeroMemory((char*)CacheSeg
->BaseAddress
+ Size
,
236 VACB_MAPPING_GRANULARITY
- Size
);
239 return STATUS_SUCCESS
;
/* Fragment of WriteCacheSegment(CacheSeg): clears the segment's Dirty
   flag, flushes it to disk with IoSynchronousPageWrite, and re-marks it
   dirty if the write fails (end-of-file excepted).
   NOTE(review): the function header, declarations and some lines are
   missing from this extract; code preserved byte-for-byte. */
245 PCACHE_SEGMENT CacheSeg
)
250 IO_STATUS_BLOCK IoStatus
;
251 LARGE_INTEGER SegOffset
;
/* Optimistically clear Dirty before the write; restored below on
   failure. */
254 CacheSeg
->Dirty
= FALSE
;
255 SegOffset
.QuadPart
= CacheSeg
->FileOffset
;
/* Clamp the write size to the file's allocation size past this segment,
   then to one mapping granularity. */
256 Size
= (ULONG
)(CacheSeg
->Bcb
->AllocationSize
.QuadPart
- CacheSeg
->FileOffset
);
257 if (Size
> VACB_MAPPING_GRANULARITY
)
259 Size
= VACB_MAPPING_GRANULARITY
;
262 // Nonpaged pool PDEs in ReactOS must actually be synchronized between the
263 // MmGlobalPageDirectory and the real system PDE directory. What a mess...
/* Touch each page's PFN to force the PDE synchronization described in
   the comment above. */
269 MmGetPfnForProcess(NULL
, (PVOID
)((ULONG_PTR
)CacheSeg
->BaseAddress
+ (i
<< PAGE_SHIFT
)));
270 } while (++i
< (Size
>> PAGE_SHIFT
));
273 Mdl
= IoAllocateMdl(CacheSeg
->BaseAddress
, Size
, FALSE
, FALSE
, NULL
);
276 return STATUS_INSUFFICIENT_RESOURCES
;
278 MmBuildMdlForNonPagedPool(Mdl
);
/* NOTE(review): MDL_IO_PAGE_READ on the write path looks suspicious but
   is preserved as-is — confirm against the upstream source. */
279 Mdl
->MdlFlags
|= MDL_IO_PAGE_READ
;
280 KeInitializeEvent(&Event
, NotificationEvent
, FALSE
);
281 Status
= IoSynchronousPageWrite(CacheSeg
->Bcb
->FileObject
, Mdl
, &SegOffset
, &Event
, &IoStatus
);
/* May still return STATUS_PENDING: wait, then read the final status. */
282 if (Status
== STATUS_PENDING
)
284 KeWaitForSingleObject(&Event
, Executive
, KernelMode
, FALSE
, NULL
);
285 Status
= IoStatus
.Status
;
288 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
290 DPRINT1("IoPageWrite failed, Status %x\n", Status
);
/* Write failed: the data is still dirty. */
291 CacheSeg
->Dirty
= TRUE
;
295 return STATUS_SUCCESS
;
/* Parameter-list fragment of a write-throttling query (original lines
   305-306); the surrounding signature and body are missing from this
   extract. */
305 IN PFILE_OBJECT FileObject
,
306 IN ULONG BytesToWrite
,
/* Fragment of CcCopyRead(): copies Length bytes at FileOffset from the
   file's shared cache map into Buffer.  The no-wait path first scans the
   BCB's segment list under the spinlock and fails if any overlapping
   segment is not yet valid; otherwise a leading partial segment is
   handled via CcRosRequestCacheSegment/ReadCacheSegment and the aligned
   remainder via ReadCacheSegmentChain.
   NOTE(review): the function name/return type and several lines are
   missing from this extract; code preserved byte-for-byte. */
321 IN PFILE_OBJECT FileObject
,
322 IN PLARGE_INTEGER FileOffset
,
326 OUT PIO_STATUS_BLOCK IoStatus
)
330 NTSTATUS Status
= STATUS_SUCCESS
;
332 PCACHE_SEGMENT CacheSeg
;
334 ULONG ReadLength
= 0;
337 PLIST_ENTRY current_entry
;
338 PCACHE_SEGMENT current
;
340 DPRINT("CcCopyRead(FileObject 0x%p, FileOffset %I64x, "
341 "Length %lu, Wait %u, Buffer 0x%p, IoStatus 0x%p)\n",
342 FileObject
, FileOffset
->QuadPart
, Length
, Wait
,
345 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
346 ReadOffset
= (ULONG
)FileOffset
->QuadPart
;
348 DPRINT("AllocationSize %I64d, FileSize %I64d\n",
349 Bcb
->AllocationSize
.QuadPart
,
350 Bcb
->FileSize
.QuadPart
);
353 * Check for the nowait case that all the cache segments that would
354 * cover this read are in memory.
358 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldirql
);
359 /* FIXME: this loop doesn't take into account areas that don't have
360 * a segment in the list yet */
361 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
362 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
364 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
365 BcbSegmentListEntry
);
/* Any invalid segment overlapping the request means the no-wait
   read cannot be satisfied immediately. */
366 if (!current
->Valid
&&
367 DoSegmentsIntersect(current
->FileOffset
, VACB_MAPPING_GRANULARITY
,
370 KeReleaseSpinLock(&Bcb
->BcbLock
, oldirql
);
371 IoStatus
->Status
= STATUS_UNSUCCESSFUL
;
372 IoStatus
->Information
= 0;
375 if (current
->FileOffset
>= ReadOffset
+ Length
)
377 current_entry
= current_entry
->Flink
;
379 KeReleaseSpinLock(&Bcb
->BcbLock
, oldirql
);
/* Handle a leading, segment-unaligned portion of the request first. */
382 TempLength
= ReadOffset
% VACB_MAPPING_GRANULARITY
;
385 TempLength
= min(Length
, VACB_MAPPING_GRANULARITY
- TempLength
);
386 Status
= CcRosRequestCacheSegment(Bcb
,
387 ROUND_DOWN(ReadOffset
,
388 VACB_MAPPING_GRANULARITY
),
389 &BaseAddress
, &Valid
, &CacheSeg
);
390 if (!NT_SUCCESS(Status
))
392 IoStatus
->Information
= 0;
393 IoStatus
->Status
= Status
;
394 DPRINT("CcRosRequestCacheSegment faild, Status %x\n", Status
);
/* Segment not valid: fault the data in before copying. */
399 Status
= ReadCacheSegment(CacheSeg
);
400 if (!NT_SUCCESS(Status
))
402 IoStatus
->Information
= 0;
403 IoStatus
->Status
= Status
;
404 CcRosReleaseCacheSegment(Bcb
, CacheSeg
, FALSE
, FALSE
, FALSE
);
408 RtlCopyMemory(Buffer
,
409 (char*)BaseAddress
+ ReadOffset
% VACB_MAPPING_GRANULARITY
,
411 CcRosReleaseCacheSegment(Bcb
, CacheSeg
, TRUE
, FALSE
, FALSE
);
412 ReadLength
+= TempLength
;
413 Length
-= TempLength
;
414 ReadOffset
+= TempLength
;
415 Buffer
= (PVOID
)((char*)Buffer
+ TempLength
);
/* Copy the remaining, segment-aligned bulk of the request in chunks. */
420 TempLength
= min(VACB_MAPPING_GRANULARITY
, Length
);
421 Status
= ReadCacheSegmentChain(Bcb
, ReadOffset
, TempLength
, Buffer
);
422 if (!NT_SUCCESS(Status
))
424 IoStatus
->Information
= 0;
425 IoStatus
->Status
= Status
;
426 DPRINT("ReadCacheSegmentChain failed, Status %x\n", Status
);
430 ReadLength
+= TempLength
;
431 Length
-= TempLength
;
432 ReadOffset
+= TempLength
;
434 Buffer
= (PVOID
)((ULONG_PTR
)Buffer
+ TempLength
);
/* Report total bytes copied to the caller. */
437 IoStatus
->Status
= STATUS_SUCCESS
;
438 IoStatus
->Information
= ReadLength
;
439 DPRINT("CcCopyRead O.K.\n");
/* Fragment of CcCopyWrite(): copies Length bytes from Buffer into the
   file's shared cache map at FileOffset.  The no-wait path scans the
   segment list for invalid overlapping segments; partially-overwritten
   segments are read in first (read-modify-write) and every touched
   segment is released valid and dirty.
   NOTE(review): the function name/return type and several lines are
   missing from this extract; code preserved byte-for-byte. */
449 IN PFILE_OBJECT FileObject
,
450 IN PLARGE_INTEGER FileOffset
,
459 PLIST_ENTRY current_entry
;
460 PCACHE_SEGMENT CacheSeg
;
465 DPRINT("CcCopyWrite(FileObject 0x%p, FileOffset %I64x, "
466 "Length %lu, Wait %u, Buffer 0x%p)\n",
467 FileObject
, FileOffset
->QuadPart
, Length
, Wait
, Buffer
);
469 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
470 WriteOffset
= (ULONG
)FileOffset
->QuadPart
;
/* No-wait path: verify every overlapped segment is already valid. */
474 /* testing, if the requested datas are available */
475 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldirql
);
476 /* FIXME: this loop doesn't take into account areas that don't have
477 * a segment in the list yet */
478 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
479 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
481 CacheSeg
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
482 BcbSegmentListEntry
);
483 if (!CacheSeg
->Valid
&&
484 DoSegmentsIntersect(CacheSeg
->FileOffset
, VACB_MAPPING_GRANULARITY
,
485 WriteOffset
, Length
))
487 KeReleaseSpinLock(&Bcb
->BcbLock
, oldirql
);
488 /* datas not available */
491 if (CacheSeg
->FileOffset
>= WriteOffset
+ Length
)
493 current_entry
= current_entry
->Flink
;
495 KeReleaseSpinLock(&Bcb
->BcbLock
, oldirql
);
/* Handle a leading, segment-unaligned portion: read-modify-write. */
498 TempLength
= WriteOffset
% VACB_MAPPING_GRANULARITY
;
502 ROffset
= ROUND_DOWN(WriteOffset
, VACB_MAPPING_GRANULARITY
);
503 TempLength
= min(Length
, VACB_MAPPING_GRANULARITY
- TempLength
);
504 Status
= CcRosRequestCacheSegment(Bcb
, ROffset
,
505 &BaseAddress
, &Valid
, &CacheSeg
);
506 if (!NT_SUCCESS(Status
))
/* Invalid segment being partially overwritten: fault it in first. */
512 if (!NT_SUCCESS(ReadCacheSegment(CacheSeg
)))
517 RtlCopyMemory((char*)BaseAddress
+ WriteOffset
% VACB_MAPPING_GRANULARITY
,
/* Release valid (TRUE) and dirty (TRUE). */
520 CcRosReleaseCacheSegment(Bcb
, CacheSeg
, TRUE
, TRUE
, FALSE
);
522 Length
-= TempLength
;
523 WriteOffset
+= TempLength
;
525 Buffer
= (PVOID
)((ULONG_PTR
)Buffer
+ TempLength
);
/* Write the remaining, segment-aligned bulk of the request. */
530 TempLength
= min(VACB_MAPPING_GRANULARITY
, Length
);
531 Status
= CcRosRequestCacheSegment(Bcb
,
536 if (!NT_SUCCESS(Status
))
/* A partial tail overwrite of an invalid segment still needs the
   old contents read in first. */
540 if (!Valid
&& TempLength
< VACB_MAPPING_GRANULARITY
)
542 if (!NT_SUCCESS(ReadCacheSegment(CacheSeg
)))
544 CcRosReleaseCacheSegment(Bcb
, CacheSeg
, FALSE
, FALSE
, FALSE
);
548 RtlCopyMemory(BaseAddress
, Buffer
, TempLength
);
549 CcRosReleaseCacheSegment(Bcb
, CacheSeg
, TRUE
, TRUE
, FALSE
);
550 Length
-= TempLength
;
551 WriteOffset
+= TempLength
;
553 Buffer
= (PVOID
)((ULONG_PTR
)Buffer
+ TempLength
);
/* Parameter-list fragments of three stubs (original lines 564-595);
   their names, remaining parameters and bodies are missing from this
   extract.  Code preserved byte-for-byte. */
564 IN PFILE_OBJECT FileObject
,
565 IN PCC_POST_DEFERRED_WRITE PostRoutine
,
568 IN ULONG BytesToWrite
,
580 IN PFILE_OBJECT FileObject
,
585 OUT PIO_STATUS_BLOCK IoStatus
)
595 IN PFILE_OBJECT FileObject
,
/* Stub: lazy-writer synchronization is not implemented in this revision.
   NOTE(review): return type, parameter list and braces are missing from
   this extract. */
608 CcWaitForCurrentLazyWriterActivity (
612 return STATUS_NOT_IMPLEMENTED
;
/* Fragment of CcZeroData(): zero-fills the file range
   [StartOffset, EndOffset).  For a non-cached file it issues
   IoSynchronousPageWrite calls whose MDL page array points every entry
   at the shared CcZeroPage; for a cached file it zeroes the mapped
   cache segments (reading partially-zeroed ones in first) and releases
   them valid and dirty.
   NOTE(review): the function header is missing, lines are dropped
   throughout, and the function is cut off at the end of this extract;
   code below is preserved byte-for-byte. */
621 IN PFILE_OBJECT FileObject
,
622 IN PLARGE_INTEGER StartOffset
,
623 IN PLARGE_INTEGER EndOffset
,
627 LARGE_INTEGER WriteOffset
;
632 IO_STATUS_BLOCK Iosb
;
635 DPRINT("CcZeroData(FileObject 0x%p, StartOffset %I64x, EndOffset %I64x, "
636 "Wait %u)\n", FileObject
, StartOffset
->QuadPart
, EndOffset
->QuadPart
,
639 Length
= EndOffset
->u
.LowPart
- StartOffset
->u
.LowPart
;
640 WriteOffset
.QuadPart
= StartOffset
->QuadPart
;
642 if (FileObject
->SectionObjectPointer
->SharedCacheMap
== NULL
)
644 /* File is not cached */
/* Stack-allocate an MDL sized for the largest zeroing chunk. */
646 Mdl
= _alloca(MmSizeOfMdl(NULL
, MAX_ZERO_LENGTH
))
650 if (Length
+ WriteOffset
.u
.LowPart
% PAGE_SIZE
> MAX_ZERO_LENGTH
)
652 CurrentLength
= MAX_ZERO_LENGTH
- WriteOffset
.u
.LowPart
% PAGE_SIZE
;
656 CurrentLength
= Length
;
658 MmInitializeMdl(Mdl
, (PVOID
)(ULONG_PTR
)WriteOffset
.QuadPart
, CurrentLength
);
659 Mdl
->MdlFlags
|= (MDL_PAGES_LOCKED
| MDL_IO_PAGE_READ
);
/* Point every MDL page entry at the shared all-zero page. */
660 for (i
= 0; i
< ((Mdl
->Size
- sizeof(MDL
)) / sizeof(ULONG
)); i
++)
662 ((PPFN_NUMBER
)(Mdl
+ 1))[i
] = CcZeroPage
;
664 KeInitializeEvent(&Event
, NotificationEvent
, FALSE
);
665 Status
= IoSynchronousPageWrite(FileObject
, Mdl
, &WriteOffset
, &Event
, &Iosb
);
666 if (Status
== STATUS_PENDING
)
668 KeWaitForSingleObject(&Event
, Executive
, KernelMode
, FALSE
, NULL
);
669 Status
= Iosb
.Status
;
671 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
673 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
675 if (!NT_SUCCESS(Status
))
679 WriteOffset
.QuadPart
+= CurrentLength
;
680 Length
-= CurrentLength
;
/* Cached path: zero through the shared cache map. */
688 PLIST_ENTRY current_entry
;
689 PCACHE_SEGMENT CacheSeg
, current
, previous
;
692 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
695 /* testing, if the requested datas are available */
696 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldirql
);
697 /* FIXME: this loop doesn't take into account areas that don't have
698 * a segment in the list yet */
699 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
700 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
702 CacheSeg
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
703 BcbSegmentListEntry
);
704 if (!CacheSeg
->Valid
&&
705 DoSegmentsIntersect(CacheSeg
->FileOffset
, VACB_MAPPING_GRANULARITY
,
706 WriteOffset
.u
.LowPart
, Length
))
708 KeReleaseSpinLock(&Bcb
->BcbLock
, oldirql
);
709 /* datas not available */
712 if (CacheSeg
->FileOffset
>= WriteOffset
.u
.LowPart
+ Length
)
714 current_entry
= current_entry
->Flink
;
716 KeReleaseSpinLock(&Bcb
->BcbLock
, oldirql
);
/* Zero one chain of segments per iteration, at most
   MAX_ZERO_LENGTH bytes at a time. */
722 Offset
= WriteOffset
.u
.LowPart
% VACB_MAPPING_GRANULARITY
;
723 if (Length
+ Offset
> MAX_ZERO_LENGTH
)
725 CurrentLength
= MAX_ZERO_LENGTH
- Offset
;
729 CurrentLength
= Length
;
731 Status
= CcRosGetCacheSegmentChain (Bcb
, WriteOffset
.u
.LowPart
- Offset
,
732 Offset
+ CurrentLength
, &CacheSeg
);
733 if (!NT_SUCCESS(Status
))
739 while (current
!= NULL
)
741 Offset
= WriteOffset
.u
.LowPart
% VACB_MAPPING_GRANULARITY
;
/* A segment only partially zeroed must keep its other data:
   read it in before zeroing. */
743 (Offset
+ CurrentLength
< VACB_MAPPING_GRANULARITY
))
747 /* read the segment */
748 Status
= ReadCacheSegment(current
);
749 if (!NT_SUCCESS(Status
))
751 DPRINT1("ReadCacheSegment failed, status %x\n",
755 TempLength
= min(CurrentLength
, VACB_MAPPING_GRANULARITY
- Offset
);
759 TempLength
= VACB_MAPPING_GRANULARITY
;
761 RtlZeroMemory((PUCHAR
)current
->BaseAddress
+ Offset
,
764 WriteOffset
.QuadPart
+= TempLength
;
765 CurrentLength
-= TempLength
;
766 Length
-= TempLength
;
768 current
= current
->NextInChain
;
/* Release the zeroed segments, marking them valid and dirty. */
772 while (current
!= NULL
)
775 current
= current
->NextInChain
;
776 CcRosReleaseCacheSegment(Bcb
, previous
, TRUE
, TRUE
, FALSE
);