/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 */
/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows
 *
 * (1) If caching for the FCB hasn't been initiated then so do by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page
 */
/* INCLUDES ******************************************************************/

/* Place CcInitView in the INIT (discardable) section when the compiler
 * supports alloc_text pragmas. */
#if defined (ALLOC_PRAGMA)
#pragma alloc_text(INIT, CcInitView)
#endif
42 /* GLOBALS *******************************************************************/
44 LIST_ENTRY DirtyVacbListHead
;
45 static LIST_ENTRY VacbLruListHead
;
47 KGUARDED_MUTEX ViewLock
;
49 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
50 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList
;
51 static NPAGED_LOOKASIDE_LIST VacbLookasideList
;
54 * - Amount of pages flushed by lazy writer
55 * - Number of times lazy writer ran
57 ULONG CcLazyWritePages
= 0;
58 ULONG CcLazyWriteIos
= 0;
60 /* Internal vars (MS):
61 * - Threshold above which lazy writer will start action
62 * - Amount of dirty pages
64 ULONG CcDirtyPageThreshold
= 0;
65 ULONG CcTotalDirtyPages
= 0;
67 /* Internal vars (ROS):
68 * - Event to notify lazy writer to shutdown
69 * - Event to inform watchers lazy writer is done for this loop
71 KEVENT iLazyWriterShutdown
;
72 KEVENT iLazyWriterNotify
;
/* In debug builds the VACB reference-count helpers log every transition
 * (with caller file/line) when tracing is enabled on the owning cache map;
 * in release builds they collapse to plain increments/decrements.
 * NOTE(review): the #if DBG / #else / #endif guards are implied by the
 * duplicate macro definitions — confirm against the repository. */
#if DBG
static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
{
    ++vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
    --vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
101 CcRosInternalFreeVacb(PROS_VACB Vacb
);
104 /* FUNCTIONS *****************************************************************/
109 PROS_SHARED_CACHE_MAP SharedCacheMap
,
114 PLIST_ENTRY current_entry
;
120 SharedCacheMap
->Trace
= Trace
;
124 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
126 KeAcquireGuardedMutex(&ViewLock
);
127 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldirql
);
129 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
130 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
132 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
133 current_entry
= current_entry
->Flink
;
135 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
136 current
, current
->ReferenceCount
, current
->Dirty
, current
->PageOut
);
138 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
139 KeReleaseGuardedMutex(&ViewLock
);
143 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
147 UNREFERENCED_PARAMETER(SharedCacheMap
);
148 UNREFERENCED_PARAMETER(Trace
);
160 Status
= CcWriteVirtualAddress(Vacb
);
161 if (NT_SUCCESS(Status
))
163 KeAcquireGuardedMutex(&ViewLock
);
164 KeAcquireSpinLock(&Vacb
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
167 RemoveEntryList(&Vacb
->DirtyVacbListEntry
);
168 CcTotalDirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
169 CcRosVacbDecRefCount(Vacb
);
171 KeReleaseSpinLock(&Vacb
->SharedCacheMap
->CacheMapLock
, oldIrql
);
172 KeReleaseGuardedMutex(&ViewLock
);
180 CcRosFlushDirtyPages (
184 BOOLEAN CalledFromLazy
)
186 PLIST_ENTRY current_entry
;
190 LARGE_INTEGER ZeroTimeout
;
192 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
195 ZeroTimeout
.QuadPart
= 0;
197 KeEnterCriticalRegion();
198 KeAcquireGuardedMutex(&ViewLock
);
200 current_entry
= DirtyVacbListHead
.Flink
;
201 if (current_entry
== &DirtyVacbListHead
)
203 DPRINT("No Dirty pages\n");
206 while ((current_entry
!= &DirtyVacbListHead
) && (Target
> 0))
208 current
= CONTAINING_RECORD(current_entry
,
211 current_entry
= current_entry
->Flink
;
213 CcRosVacbIncRefCount(current
);
215 /* When performing lazy write, don't handle temporary files */
216 if (CalledFromLazy
&&
217 BooleanFlagOn(current
->SharedCacheMap
->FileObject
->Flags
, FO_TEMPORARY_FILE
))
219 CcRosVacbDecRefCount(current
);
223 Locked
= current
->SharedCacheMap
->Callbacks
->AcquireForLazyWrite(
224 current
->SharedCacheMap
->LazyWriteContext
, Wait
);
227 CcRosVacbDecRefCount(current
);
231 Status
= CcRosAcquireVacbLock(current
,
232 Wait
? NULL
: &ZeroTimeout
);
233 if (Status
!= STATUS_SUCCESS
)
235 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
236 current
->SharedCacheMap
->LazyWriteContext
);
237 CcRosVacbDecRefCount(current
);
241 ASSERT(current
->Dirty
);
243 /* One reference is added above */
244 if (current
->ReferenceCount
> 2)
246 CcRosReleaseVacbLock(current
);
247 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
248 current
->SharedCacheMap
->LazyWriteContext
);
249 CcRosVacbDecRefCount(current
);
253 KeReleaseGuardedMutex(&ViewLock
);
255 Status
= CcRosFlushVacb(current
);
257 CcRosReleaseVacbLock(current
);
258 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
259 current
->SharedCacheMap
->LazyWriteContext
);
261 KeAcquireGuardedMutex(&ViewLock
);
262 CcRosVacbDecRefCount(current
);
264 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
) &&
265 (Status
!= STATUS_MEDIA_WRITE_PROTECTED
))
267 DPRINT1("CC: Failed to flush VACB.\n");
273 /* How many pages did we free? */
274 PagesFreed
= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
275 (*Count
) += PagesFreed
;
277 /* Make sure we don't overflow target! */
278 if (Target
< PagesFreed
)
280 /* If we would have, jump to zero directly */
285 Target
-= PagesFreed
;
289 current_entry
= DirtyVacbListHead
.Flink
;
292 KeReleaseGuardedMutex(&ViewLock
);
293 KeLeaveCriticalRegion();
295 DPRINT("CcRosFlushDirtyPages() finished\n");
296 return STATUS_SUCCESS
;
299 /* FIXME: Someday this could somewhat implement write-behind/read-ahead */
302 CciLazyWriter(PVOID Unused
)
304 LARGE_INTEGER OneSecond
;
306 OneSecond
.QuadPart
= (LONGLONG
)-1*1000*1000*10;
311 ULONG Target
, Count
= 0;
313 /* One per second or until we have to stop */
314 Status
= KeWaitForSingleObject(&iLazyWriterShutdown
,
320 /* If we succeeed, we've to stop running! */
321 if (Status
== STATUS_SUCCESS
)
326 /* We're not sleeping anymore */
327 KeClearEvent(&iLazyWriterNotify
);
329 /* Our target is one-eighth of the dirty pages */
330 Target
= CcTotalDirtyPages
/ 8;
334 DPRINT("Lazy writer starting (%d)\n", Target
);
335 CcRosFlushDirtyPages(Target
, &Count
, FALSE
, TRUE
);
337 /* And update stats */
338 CcLazyWritePages
+= Count
;
340 DPRINT("Lazy writer done (%d)\n", Count
);
343 /* Inform people waiting on us that we're done */
344 KeSetEvent(&iLazyWriterNotify
, IO_DISK_INCREMENT
, FALSE
);
354 * FUNCTION: Try to free some memory from the file cache.
356 * Target - The number of pages to be freed.
357 * Priority - The priority of free (currently unused).
358 * NrFreed - Points to a variable where the number of pages
359 * actually freed is returned.
362 PLIST_ENTRY current_entry
;
369 BOOLEAN FlushedPages
= FALSE
;
371 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
373 InitializeListHead(&FreeList
);
378 KeAcquireGuardedMutex(&ViewLock
);
380 current_entry
= VacbLruListHead
.Flink
;
381 while (current_entry
!= &VacbLruListHead
)
383 current
= CONTAINING_RECORD(current_entry
,
386 current_entry
= current_entry
->Flink
;
388 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
390 /* Reference the VACB */
391 CcRosVacbIncRefCount(current
);
393 /* Check if it's mapped and not dirty */
394 if (current
->MappedCount
> 0 && !current
->Dirty
)
396 /* We have to break these locks because Cc sucks */
397 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
398 KeReleaseGuardedMutex(&ViewLock
);
400 /* Page out the VACB */
401 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++)
403 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
405 MmPageOutPhysicalAddress(Page
);
408 /* Reacquire the locks */
409 KeAcquireGuardedMutex(&ViewLock
);
410 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
413 /* Dereference the VACB */
414 CcRosVacbDecRefCount(current
);
416 /* Check if we can free this entry now */
417 if (current
->ReferenceCount
== 0)
419 ASSERT(!current
->Dirty
);
420 ASSERT(!current
->MappedCount
);
422 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
423 RemoveEntryList(¤t
->VacbLruListEntry
);
424 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
426 /* Calculate how many pages we freed for Mm */
427 PagesFreed
= min(VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
, Target
);
428 Target
-= PagesFreed
;
429 (*NrFreed
) += PagesFreed
;
432 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
435 KeReleaseGuardedMutex(&ViewLock
);
437 /* Try flushing pages if we haven't met our target */
438 if ((Target
> 0) && !FlushedPages
)
440 /* Flush dirty pages to disk */
441 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
, FALSE
);
444 /* We can only swap as many pages as we flushed */
445 if (PagesFreed
< Target
) Target
= PagesFreed
;
447 /* Check if we flushed anything */
450 /* Try again after flushing dirty pages */
451 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
456 while (!IsListEmpty(&FreeList
))
458 current_entry
= RemoveHeadList(&FreeList
);
459 current
= CONTAINING_RECORD(current_entry
,
461 CacheMapVacbListEntry
);
462 CcRosInternalFreeVacb(current
);
465 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
467 return STATUS_SUCCESS
;
473 PROS_SHARED_CACHE_MAP SharedCacheMap
,
482 ASSERT(SharedCacheMap
);
484 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
485 SharedCacheMap
, Vacb
, Valid
);
487 KeAcquireGuardedMutex(&ViewLock
);
488 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
492 WasDirty
= Vacb
->Dirty
;
493 Vacb
->Dirty
= Vacb
->Dirty
|| Dirty
;
495 if (!WasDirty
&& Vacb
->Dirty
)
497 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
498 CcTotalDirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
505 CcRosVacbDecRefCount(Vacb
);
506 if (Mapped
&& (Vacb
->MappedCount
== 1))
508 CcRosVacbIncRefCount(Vacb
);
510 if (!WasDirty
&& Vacb
->Dirty
)
512 CcRosVacbIncRefCount(Vacb
);
515 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
516 KeReleaseGuardedMutex(&ViewLock
);
517 CcRosReleaseVacbLock(Vacb
);
519 return STATUS_SUCCESS
;
522 /* Returns with VACB Lock Held! */
526 PROS_SHARED_CACHE_MAP SharedCacheMap
,
529 PLIST_ENTRY current_entry
;
533 ASSERT(SharedCacheMap
);
535 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
536 SharedCacheMap
, FileOffset
);
538 KeAcquireGuardedMutex(&ViewLock
);
539 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
541 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
542 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
544 current
= CONTAINING_RECORD(current_entry
,
546 CacheMapVacbListEntry
);
547 if (IsPointInRange(current
->FileOffset
.QuadPart
,
548 VACB_MAPPING_GRANULARITY
,
551 CcRosVacbIncRefCount(current
);
552 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
553 KeReleaseGuardedMutex(&ViewLock
);
554 CcRosAcquireVacbLock(current
, NULL
);
557 if (current
->FileOffset
.QuadPart
> FileOffset
)
559 current_entry
= current_entry
->Flink
;
562 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
563 KeReleaseGuardedMutex(&ViewLock
);
574 PROS_SHARED_CACHE_MAP SharedCacheMap
;
576 SharedCacheMap
= Vacb
->SharedCacheMap
;
578 KeAcquireGuardedMutex(&ViewLock
);
579 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
583 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
584 CcTotalDirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
588 CcRosVacbDecRefCount(Vacb
);
591 /* Move to the tail of the LRU list */
592 RemoveEntryList(&Vacb
->VacbLruListEntry
);
593 InsertTailList(&VacbLruListHead
, &Vacb
->VacbLruListEntry
);
597 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
598 KeReleaseGuardedMutex(&ViewLock
);
604 PROS_SHARED_CACHE_MAP SharedCacheMap
,
609 ASSERT(SharedCacheMap
);
611 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
612 SharedCacheMap
, FileOffset
);
614 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
617 KeBugCheck(CACHE_MANAGER
);
620 CcRosMarkDirtyVacb(Vacb
);
623 CcRosReleaseVacbLock(Vacb
);
625 return STATUS_SUCCESS
;
631 PROS_SHARED_CACHE_MAP SharedCacheMap
,
639 ASSERT(SharedCacheMap
);
641 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
642 SharedCacheMap
, FileOffset
, NowDirty
);
644 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
647 return STATUS_UNSUCCESSFUL
;
650 KeAcquireGuardedMutex(&ViewLock
);
651 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
653 WasDirty
= Vacb
->Dirty
;
654 Vacb
->Dirty
= Vacb
->Dirty
|| NowDirty
;
658 if (!WasDirty
&& NowDirty
)
660 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
661 CcTotalDirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
664 CcRosVacbDecRefCount(Vacb
);
665 if (!WasDirty
&& NowDirty
)
667 CcRosVacbIncRefCount(Vacb
);
669 if (Vacb
->MappedCount
== 0)
671 CcRosVacbDecRefCount(Vacb
);
674 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
675 KeReleaseGuardedMutex(&ViewLock
);
676 CcRosReleaseVacbLock(Vacb
);
678 return STATUS_SUCCESS
;
688 ULONG_PTR NumberOfPages
;
690 /* Create a memory area. */
691 MmLockAddressSpace(MmGetKernelAddressSpace());
692 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
693 0, // nothing checks for VACB mareas, so set to 0
695 VACB_MAPPING_GRANULARITY
,
697 (PMEMORY_AREA
*)&Vacb
->MemoryArea
,
700 MmUnlockAddressSpace(MmGetKernelAddressSpace());
701 if (!NT_SUCCESS(Status
))
703 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status
, Vacb
);
707 ASSERT(((ULONG_PTR
)Vacb
->BaseAddress
% PAGE_SIZE
) == 0);
708 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
> (ULONG_PTR
)MmSystemRangeStart
);
710 /* Create a virtual mapping for this memory area */
711 NumberOfPages
= BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY
);
712 for (i
= 0; i
< NumberOfPages
; i
++)
714 PFN_NUMBER PageFrameNumber
;
716 MI_SET_USAGE(MI_USAGE_CACHE
);
717 Status
= MmRequestPageMemoryConsumer(MC_CACHE
, TRUE
, &PageFrameNumber
);
718 if (PageFrameNumber
== 0)
720 DPRINT1("Unable to allocate page\n");
721 KeBugCheck(MEMORY_MANAGEMENT
);
724 Status
= MmCreateVirtualMapping(NULL
,
725 (PVOID
)((ULONG_PTR
)Vacb
->BaseAddress
+ (i
* PAGE_SIZE
)),
729 if (!NT_SUCCESS(Status
))
731 DPRINT1("Unable to create virtual mapping\n");
732 KeBugCheck(MEMORY_MANAGEMENT
);
736 return STATUS_SUCCESS
;
742 PROS_SHARED_CACHE_MAP SharedCacheMap
,
748 PLIST_ENTRY current_entry
;
752 ASSERT(SharedCacheMap
);
754 DPRINT("CcRosCreateVacb()\n");
756 if (FileOffset
>= SharedCacheMap
->SectionSize
.QuadPart
)
759 return STATUS_INVALID_PARAMETER
;
762 current
= ExAllocateFromNPagedLookasideList(&VacbLookasideList
);
763 current
->BaseAddress
= NULL
;
764 current
->Valid
= FALSE
;
765 current
->Dirty
= FALSE
;
766 current
->PageOut
= FALSE
;
767 current
->FileOffset
.QuadPart
= ROUND_DOWN(FileOffset
, VACB_MAPPING_GRANULARITY
);
768 current
->SharedCacheMap
= SharedCacheMap
;
770 if (SharedCacheMap
->Trace
)
772 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap
, current
);
775 current
->MappedCount
= 0;
776 current
->DirtyVacbListEntry
.Flink
= NULL
;
777 current
->DirtyVacbListEntry
.Blink
= NULL
;
778 current
->ReferenceCount
= 1;
779 current
->PinCount
= 0;
780 KeInitializeMutex(¤t
->Mutex
, 0);
781 CcRosAcquireVacbLock(current
, NULL
);
782 KeAcquireGuardedMutex(&ViewLock
);
785 /* There is window between the call to CcRosLookupVacb
786 * and CcRosCreateVacb. We must check if a VACB for the
787 * file offset exist. If there is a VACB, we release
788 * our newly created VACB and return the existing one.
790 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
791 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
793 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
795 current
= CONTAINING_RECORD(current_entry
,
797 CacheMapVacbListEntry
);
798 if (IsPointInRange(current
->FileOffset
.QuadPart
,
799 VACB_MAPPING_GRANULARITY
,
802 CcRosVacbIncRefCount(current
);
803 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
805 if (SharedCacheMap
->Trace
)
807 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
813 CcRosReleaseVacbLock(*Vacb
);
814 KeReleaseGuardedMutex(&ViewLock
);
815 ExFreeToNPagedLookasideList(&VacbLookasideList
, *Vacb
);
817 CcRosAcquireVacbLock(current
, NULL
);
818 return STATUS_SUCCESS
;
820 if (current
->FileOffset
.QuadPart
< FileOffset
)
822 ASSERT(previous
== NULL
||
823 previous
->FileOffset
.QuadPart
< current
->FileOffset
.QuadPart
);
826 if (current
->FileOffset
.QuadPart
> FileOffset
)
828 current_entry
= current_entry
->Flink
;
830 /* There was no existing VACB. */
834 InsertHeadList(&previous
->CacheMapVacbListEntry
, ¤t
->CacheMapVacbListEntry
);
838 InsertHeadList(&SharedCacheMap
->CacheMapVacbListHead
, ¤t
->CacheMapVacbListEntry
);
840 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
841 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
842 KeReleaseGuardedMutex(&ViewLock
);
844 MI_SET_USAGE(MI_USAGE_CACHE
);
846 if ((SharedCacheMap
->FileObject
) && (SharedCacheMap
->FileObject
->FileName
.Buffer
))
850 pos
= wcsrchr(SharedCacheMap
->FileObject
->FileName
.Buffer
, '\\');
853 len
= wcslen(pos
) * sizeof(WCHAR
);
854 snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
858 snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%wZ", &SharedCacheMap
->FileObject
->FileName
);
863 Status
= CcRosMapVacb(current
);
864 if (!NT_SUCCESS(Status
))
866 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
867 RemoveEntryList(¤t
->VacbLruListEntry
);
868 CcRosReleaseVacbLock(current
);
869 ExFreeToNPagedLookasideList(&VacbLookasideList
, current
);
878 PROS_SHARED_CACHE_MAP SharedCacheMap
,
880 PLONGLONG BaseOffset
,
888 ASSERT(SharedCacheMap
);
890 DPRINT("CcRosGetVacb()\n");
893 * Look for a VACB already mapping the same data.
895 current
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
899 * Otherwise create a new VACB.
901 Status
= CcRosCreateVacb(SharedCacheMap
, FileOffset
, ¤t
);
902 if (!NT_SUCCESS(Status
))
908 KeAcquireGuardedMutex(&ViewLock
);
910 /* Move to the tail of the LRU list */
911 RemoveEntryList(¤t
->VacbLruListEntry
);
912 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
914 KeReleaseGuardedMutex(&ViewLock
);
917 * Return information about the VACB to the caller.
919 *UptoDate
= current
->Valid
;
920 *BaseAddress
= current
->BaseAddress
;
921 DPRINT("*BaseAddress %p\n", *BaseAddress
);
923 *BaseOffset
= current
->FileOffset
.QuadPart
;
924 return STATUS_SUCCESS
;
930 PROS_SHARED_CACHE_MAP SharedCacheMap
,
936 * FUNCTION: Request a page mapping for a shared cache map
941 ASSERT(SharedCacheMap
);
943 if (FileOffset
% VACB_MAPPING_GRANULARITY
!= 0)
945 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
946 FileOffset
, VACB_MAPPING_GRANULARITY
);
947 KeBugCheck(CACHE_MANAGER
);
950 return CcRosGetVacb(SharedCacheMap
,
962 MEMORY_AREA
* MemoryArea
,
968 ASSERT(SwapEntry
== 0);
971 ASSERT(MmGetReferenceCountPage(Page
) == 1);
972 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
977 CcRosInternalFreeVacb (
980 * FUNCTION: Releases a VACB associated with a shared cache map
983 DPRINT("Freeing VACB 0x%p\n", Vacb
);
985 if (Vacb
->SharedCacheMap
->Trace
)
987 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb
->SharedCacheMap
, Vacb
);
991 MmLockAddressSpace(MmGetKernelAddressSpace());
992 MmFreeMemoryArea(MmGetKernelAddressSpace(),
996 MmUnlockAddressSpace(MmGetKernelAddressSpace());
998 ExFreeToNPagedLookasideList(&VacbLookasideList
, Vacb
);
999 return STATUS_SUCCESS
;
1008 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
1009 IN PLARGE_INTEGER FileOffset OPTIONAL
,
1011 OUT PIO_STATUS_BLOCK IoStatus
)
1013 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1014 LARGE_INTEGER Offset
;
1015 LONGLONG RemainingLength
;
1020 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1021 SectionObjectPointers
, FileOffset
, Length
);
1023 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1024 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
1026 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1028 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1029 ASSERT(SharedCacheMap
);
1032 Offset
= *FileOffset
;
1033 RemainingLength
= Length
;
1037 Offset
.QuadPart
= 0;
1038 RemainingLength
= SharedCacheMap
->FileSize
.QuadPart
;
1043 IoStatus
->Status
= STATUS_SUCCESS
;
1044 IoStatus
->Information
= 0;
1047 while (RemainingLength
> 0)
1049 current
= CcRosLookupVacb(SharedCacheMap
, Offset
.QuadPart
);
1050 if (current
!= NULL
)
1054 Status
= CcRosFlushVacb(current
);
1055 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
1057 IoStatus
->Status
= Status
;
1061 CcRosReleaseVacbLock(current
);
1063 KeAcquireGuardedMutex(&ViewLock
);
1064 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1065 CcRosVacbDecRefCount(current
);
1066 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1067 KeReleaseGuardedMutex(&ViewLock
);
1070 Offset
.QuadPart
+= VACB_MAPPING_GRANULARITY
;
1071 RemainingLength
-= min(RemainingLength
, VACB_MAPPING_GRANULARITY
);
1078 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1085 CcRosDeleteFileCache (
1086 PFILE_OBJECT FileObject
,
1087 PROS_SHARED_CACHE_MAP SharedCacheMap
)
1089 * FUNCTION: Releases the shared cache map associated with a file object
1092 PLIST_ENTRY current_entry
;
1094 LIST_ENTRY FreeList
;
1097 ASSERT(SharedCacheMap
);
1099 SharedCacheMap
->OpenCount
++;
1100 KeReleaseGuardedMutex(&ViewLock
);
1102 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1104 KeAcquireGuardedMutex(&ViewLock
);
1105 SharedCacheMap
->OpenCount
--;
1106 if (SharedCacheMap
->OpenCount
== 0)
1108 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1113 InitializeListHead(&FreeList
);
1114 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1115 while (!IsListEmpty(&SharedCacheMap
->CacheMapVacbListHead
))
1117 current_entry
= RemoveTailList(&SharedCacheMap
->CacheMapVacbListHead
);
1118 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1119 RemoveEntryList(¤t
->VacbLruListEntry
);
1122 RemoveEntryList(¤t
->DirtyVacbListEntry
);
1123 CcTotalDirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
1124 DPRINT1("Freeing dirty VACB\n");
1126 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
1129 SharedCacheMap
->Trace
= FALSE
;
1131 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1133 KeReleaseGuardedMutex(&ViewLock
);
1134 ObDereferenceObject(SharedCacheMap
->FileObject
);
1136 while (!IsListEmpty(&FreeList
))
1138 current_entry
= RemoveTailList(&FreeList
);
1139 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1140 CcRosInternalFreeVacb(current
);
1142 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1143 KeAcquireGuardedMutex(&ViewLock
);
1145 return STATUS_SUCCESS
;
1150 CcRosReferenceCache (
1151 PFILE_OBJECT FileObject
)
1153 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1154 KeAcquireGuardedMutex(&ViewLock
);
1155 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1156 ASSERT(SharedCacheMap
);
1157 ASSERT(SharedCacheMap
->OpenCount
!= 0);
1158 SharedCacheMap
->OpenCount
++;
1159 KeReleaseGuardedMutex(&ViewLock
);
1164 CcRosRemoveIfClosed (
1165 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1167 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1168 DPRINT("CcRosRemoveIfClosed()\n");
1169 KeAcquireGuardedMutex(&ViewLock
);
1170 SharedCacheMap
= SectionObjectPointer
->SharedCacheMap
;
1171 if (SharedCacheMap
&& SharedCacheMap
->OpenCount
== 0)
1173 CcRosDeleteFileCache(SharedCacheMap
->FileObject
, SharedCacheMap
);
1175 KeReleaseGuardedMutex(&ViewLock
);
1181 CcRosDereferenceCache (
1182 PFILE_OBJECT FileObject
)
1184 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1185 KeAcquireGuardedMutex(&ViewLock
);
1186 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1187 ASSERT(SharedCacheMap
);
1188 if (SharedCacheMap
->OpenCount
> 0)
1190 SharedCacheMap
->OpenCount
--;
1191 if (SharedCacheMap
->OpenCount
== 0)
1193 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1194 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1197 KeReleaseGuardedMutex(&ViewLock
);
1202 CcRosReleaseFileCache (
1203 PFILE_OBJECT FileObject
)
1205 * FUNCTION: Called by the file system when a handle to a file object
1209 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1211 KeAcquireGuardedMutex(&ViewLock
);
1213 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1215 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1216 if (FileObject
->PrivateCacheMap
!= NULL
)
1218 FileObject
->PrivateCacheMap
= NULL
;
1219 if (SharedCacheMap
->OpenCount
> 0)
1221 SharedCacheMap
->OpenCount
--;
1222 if (SharedCacheMap
->OpenCount
== 0)
1224 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1225 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1230 KeReleaseGuardedMutex(&ViewLock
);
1231 return STATUS_SUCCESS
;
1236 CcTryToInitializeFileCache (
1237 PFILE_OBJECT FileObject
)
1239 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1242 KeAcquireGuardedMutex(&ViewLock
);
1244 ASSERT(FileObject
->SectionObjectPointer
);
1245 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1246 if (SharedCacheMap
== NULL
)
1248 Status
= STATUS_UNSUCCESSFUL
;
1252 if (FileObject
->PrivateCacheMap
== NULL
)
1254 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1255 SharedCacheMap
->OpenCount
++;
1257 Status
= STATUS_SUCCESS
;
1259 KeReleaseGuardedMutex(&ViewLock
);
1267 CcRosInitializeFileCache (
1268 PFILE_OBJECT FileObject
,
1269 PCC_FILE_SIZES FileSizes
,
1271 PCACHE_MANAGER_CALLBACKS CallBacks
,
1272 PVOID LazyWriterContext
)
1274 * FUNCTION: Initializes a shared cache map for a file object
1277 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1279 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1280 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1281 FileObject
, SharedCacheMap
);
1283 KeAcquireGuardedMutex(&ViewLock
);
1284 if (SharedCacheMap
== NULL
)
1286 SharedCacheMap
= ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList
);
1287 if (SharedCacheMap
== NULL
)
1289 KeReleaseGuardedMutex(&ViewLock
);
1290 return STATUS_INSUFFICIENT_RESOURCES
;
1292 RtlZeroMemory(SharedCacheMap
, sizeof(*SharedCacheMap
));
1293 ObReferenceObjectByPointer(FileObject
,
1297 SharedCacheMap
->FileObject
= FileObject
;
1298 SharedCacheMap
->Callbacks
= CallBacks
;
1299 SharedCacheMap
->LazyWriteContext
= LazyWriterContext
;
1300 SharedCacheMap
->SectionSize
= FileSizes
->AllocationSize
;
1301 SharedCacheMap
->FileSize
= FileSizes
->FileSize
;
1302 SharedCacheMap
->PinAccess
= PinAccess
;
1303 KeInitializeSpinLock(&SharedCacheMap
->CacheMapLock
);
1304 InitializeListHead(&SharedCacheMap
->CacheMapVacbListHead
);
1305 FileObject
->SectionObjectPointer
->SharedCacheMap
= SharedCacheMap
;
1307 if (FileObject
->PrivateCacheMap
== NULL
)
1309 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1310 SharedCacheMap
->OpenCount
++;
1312 KeReleaseGuardedMutex(&ViewLock
);
1314 return STATUS_SUCCESS
;
1322 CcGetFileObjectFromSectionPtrs (
1323 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1325 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1327 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p\n", SectionObjectPointers
);
1329 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1331 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1332 ASSERT(SharedCacheMap
);
1333 return SharedCacheMap
->FileObject
;
1340 CcShutdownLazyWriter (
1343 /* Simply set the event, lazy writer will stop when it's done */
1344 KeSetEvent(&iLazyWriterShutdown
, IO_DISK_INCREMENT
, FALSE
);
1355 OBJECT_ATTRIBUTES ObjectAttributes
;
1357 DPRINT("CcInitView()\n");
1359 InitializeListHead(&DirtyVacbListHead
);
1360 InitializeListHead(&VacbLruListHead
);
1361 KeInitializeGuardedMutex(&ViewLock
);
1362 ExInitializeNPagedLookasideList(&iBcbLookasideList
,
1366 sizeof(INTERNAL_BCB
),
1369 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList
,
1373 sizeof(ROS_SHARED_CACHE_MAP
),
1374 TAG_SHARED_CACHE_MAP
,
1376 ExInitializeNPagedLookasideList(&VacbLookasideList
,
1384 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1386 /* Initialize lazy writer events */
1387 KeInitializeEvent(&iLazyWriterShutdown
, SynchronizationEvent
, FALSE
);
1388 KeInitializeEvent(&iLazyWriterNotify
, NotificationEvent
, FALSE
);
1390 /* Define lazy writer threshold, depending on system type */
1391 switch (MmQuerySystemSize())
1394 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 8;
1397 case MmMediumSystem
:
1398 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 4;
1402 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 8 + MmNumberOfPhysicalPages
/ 4;
1406 /* Start the lazy writer thread */
1407 InitializeObjectAttributes(&ObjectAttributes
,
1412 Status
= PsCreateSystemThread(&LazyWriter
,
1419 if (!NT_SUCCESS(Status
))
1424 /* Handle is not needed */
1425 ObCloseHandle(LazyWriter
, KernelMode
);
1427 CcInitCacheZeroPage();