/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows
 *
 * (1) If caching for the FCB hasn't been initiated then so do by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page
 */

/* INCLUDES ******************************************************************/
33 /* INCLUDES ******************************************************************/
/* Place CcInitView in the INIT (discardable after boot) section when the
 * compiler supports alloc_text. The #endif terminating this conditional
 * was missing, which breaks preprocessing of the rest of the file. */
#if defined (ALLOC_PRAGMA)
#pragma alloc_text(INIT, CcInitView)
#endif
43 /* GLOBALS *******************************************************************/
45 LIST_ENTRY DirtyVacbListHead
;
46 static LIST_ENTRY VacbLruListHead
;
48 KGUARDED_MUTEX ViewLock
;
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList
;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList
;
55 * - Amount of pages flushed by lazy writer
56 * - Number of times lazy writer ran
58 ULONG CcLazyWritePages
= 0;
59 ULONG CcLazyWriteIos
= 0;
61 /* Internal vars (MS):
62 * - Threshold above which lazy writer will start action
63 * - Amount of dirty pages
64 * - List for deferred writes
65 * - Spinlock when dealing with the deferred list
66 * - List for "clean" shared cache maps
67 * - One second delay for lazy writer
69 ULONG CcDirtyPageThreshold
= 0;
70 ULONG CcTotalDirtyPages
= 0;
71 LIST_ENTRY CcDeferredWrites
;
72 KSPIN_LOCK CcDeferredWriteSpinLock
;
73 LIST_ENTRY CcCleanSharedCacheMapList
;
75 LARGE_INTEGER CcIdleDelay
= {.QuadPart
= (LONGLONG
)-1*1000*1000*10};
77 LARGE_INTEGER CcIdleDelay
= {(LONGLONG
)-1*1000*1000*10};
80 /* Internal vars (ROS):
81 * - Event to notify lazy writer to shutdown
82 * - Event to inform watchers lazy writer is done for this loop
83 * - Lock for the CcCleanSharedCacheMapList list
85 KEVENT iLazyWriterShutdown
;
86 KEVENT iLazyWriterNotify
;
87 KSPIN_LOCK iSharedCacheMapLock
;
90 static void CcRosVacbIncRefCount_(PROS_VACB vacb
, const char* file
, int line
)
92 ++vacb
->ReferenceCount
;
93 if (vacb
->SharedCacheMap
->Trace
)
95 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
96 file
, line
, vacb
, vacb
->ReferenceCount
, vacb
->Dirty
, vacb
->PageOut
);
99 static void CcRosVacbDecRefCount_(PROS_VACB vacb
, const char* file
, int line
)
101 --vacb
->ReferenceCount
;
102 ASSERT(!(vacb
->ReferenceCount
== 0 && vacb
->Dirty
));
103 if (vacb
->SharedCacheMap
->Trace
)
105 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
106 file
, line
, vacb
, vacb
->ReferenceCount
, vacb
->Dirty
, vacb
->PageOut
);
/* Both the traced (DBG) and the bare refcount macros were defined
 * back-to-back — a macro redefinition. Restore the DBG guard so debug
 * builds get the tracing wrappers and release builds the plain ops. */
#if DBG
#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
117 CcRosInternalFreeVacb(PROS_VACB Vacb
);
120 /* FUNCTIONS *****************************************************************/
125 PROS_SHARED_CACHE_MAP SharedCacheMap
,
130 PLIST_ENTRY current_entry
;
136 SharedCacheMap
->Trace
= Trace
;
140 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
142 KeAcquireGuardedMutex(&ViewLock
);
143 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldirql
);
145 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
146 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
148 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
149 current_entry
= current_entry
->Flink
;
151 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
152 current
, current
->ReferenceCount
, current
->Dirty
, current
->PageOut
);
154 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
155 KeReleaseGuardedMutex(&ViewLock
);
159 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
163 UNREFERENCED_PARAMETER(SharedCacheMap
);
164 UNREFERENCED_PARAMETER(Trace
);
176 Status
= CcWriteVirtualAddress(Vacb
);
177 if (NT_SUCCESS(Status
))
179 KeAcquireGuardedMutex(&ViewLock
);
180 KeAcquireSpinLock(&Vacb
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
183 RemoveEntryList(&Vacb
->DirtyVacbListEntry
);
184 CcTotalDirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
185 Vacb
->SharedCacheMap
->DirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
186 CcRosVacbDecRefCount(Vacb
);
188 KeReleaseSpinLock(&Vacb
->SharedCacheMap
->CacheMapLock
, oldIrql
);
189 KeReleaseGuardedMutex(&ViewLock
);
197 CcRosFlushDirtyPages (
201 BOOLEAN CalledFromLazy
)
203 PLIST_ENTRY current_entry
;
207 LARGE_INTEGER ZeroTimeout
;
209 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
212 ZeroTimeout
.QuadPart
= 0;
214 KeEnterCriticalRegion();
215 KeAcquireGuardedMutex(&ViewLock
);
217 current_entry
= DirtyVacbListHead
.Flink
;
218 if (current_entry
== &DirtyVacbListHead
)
220 DPRINT("No Dirty pages\n");
223 while ((current_entry
!= &DirtyVacbListHead
) && (Target
> 0))
225 current
= CONTAINING_RECORD(current_entry
,
228 current_entry
= current_entry
->Flink
;
230 CcRosVacbIncRefCount(current
);
232 /* When performing lazy write, don't handle temporary files */
233 if (CalledFromLazy
&&
234 BooleanFlagOn(current
->SharedCacheMap
->FileObject
->Flags
, FO_TEMPORARY_FILE
))
236 CcRosVacbDecRefCount(current
);
240 Locked
= current
->SharedCacheMap
->Callbacks
->AcquireForLazyWrite(
241 current
->SharedCacheMap
->LazyWriteContext
, Wait
);
244 CcRosVacbDecRefCount(current
);
248 Status
= CcRosAcquireVacbLock(current
,
249 Wait
? NULL
: &ZeroTimeout
);
250 if (Status
!= STATUS_SUCCESS
)
252 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
253 current
->SharedCacheMap
->LazyWriteContext
);
254 CcRosVacbDecRefCount(current
);
258 ASSERT(current
->Dirty
);
260 /* One reference is added above */
261 if ((current
->ReferenceCount
> 2 && current
->PinCount
== 0) ||
262 (current
->ReferenceCount
> 3 && current
->PinCount
> 1))
264 CcRosReleaseVacbLock(current
);
265 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
266 current
->SharedCacheMap
->LazyWriteContext
);
267 CcRosVacbDecRefCount(current
);
271 KeReleaseGuardedMutex(&ViewLock
);
273 Status
= CcRosFlushVacb(current
);
275 CcRosReleaseVacbLock(current
);
276 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
277 current
->SharedCacheMap
->LazyWriteContext
);
279 KeAcquireGuardedMutex(&ViewLock
);
280 CcRosVacbDecRefCount(current
);
282 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
) &&
283 (Status
!= STATUS_MEDIA_WRITE_PROTECTED
))
285 DPRINT1("CC: Failed to flush VACB.\n");
291 /* How many pages did we free? */
292 PagesFreed
= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
293 (*Count
) += PagesFreed
;
295 /* Make sure we don't overflow target! */
296 if (Target
< PagesFreed
)
298 /* If we would have, jump to zero directly */
303 Target
-= PagesFreed
;
307 current_entry
= DirtyVacbListHead
.Flink
;
310 KeReleaseGuardedMutex(&ViewLock
);
311 KeLeaveCriticalRegion();
313 DPRINT("CcRosFlushDirtyPages() finished\n");
314 return STATUS_SUCCESS
;
317 /* FIXME: Someday this could somewhat implement write-behind/read-ahead */
320 CciLazyWriter(PVOID Unused
)
325 PLIST_ENTRY ListEntry
;
326 ULONG Target
, Count
= 0;
328 /* One per second or until we have to stop */
329 Status
= KeWaitForSingleObject(&iLazyWriterShutdown
,
335 /* If we succeeed, we've to stop running! */
336 if (Status
== STATUS_SUCCESS
)
341 /* We're not sleeping anymore */
342 KeClearEvent(&iLazyWriterNotify
);
344 /* Our target is one-eighth of the dirty pages */
345 Target
= CcTotalDirtyPages
/ 8;
349 DPRINT("Lazy writer starting (%d)\n", Target
);
350 CcRosFlushDirtyPages(Target
, &Count
, FALSE
, TRUE
);
352 /* And update stats */
353 CcLazyWritePages
+= Count
;
355 DPRINT("Lazy writer done (%d)\n", Count
);
358 /* Inform people waiting on us that we're done */
359 KeSetEvent(&iLazyWriterNotify
, IO_DISK_INCREMENT
, FALSE
);
361 /* Likely not optimal, but let's handle one deferred write now! */
362 ListEntry
= ExInterlockedRemoveHeadList(&CcDeferredWrites
, &CcDeferredWriteSpinLock
);
363 if (ListEntry
!= NULL
)
365 PROS_DEFERRED_WRITE_CONTEXT Context
;
367 /* Extract the context */
368 Context
= CONTAINING_RECORD(ListEntry
, ROS_DEFERRED_WRITE_CONTEXT
, CcDeferredWritesEntry
);
370 /* Can we write now? */
371 if (CcCanIWrite(Context
->FileObject
, Context
->BytesToWrite
, FALSE
, Context
->Retrying
))
373 /* Yes! Do it, and destroy the associated context */
374 Context
->PostRoutine(Context
->Context1
, Context
->Context2
);
375 ExFreePoolWithTag(Context
, 'CcDw');
379 /* Otherwise, requeue it, but in tail, so that it doesn't block others
380 * This is clearly to improve, but given the poor algorithm used now
381 * It's better than nothing!
383 ExInterlockedInsertTailList(&CcDeferredWrites
,
384 &Context
->CcDeferredWritesEntry
,
385 &CcDeferredWriteSpinLock
);
397 * FUNCTION: Try to free some memory from the file cache.
399 * Target - The number of pages to be freed.
400 * Priority - The priority of free (currently unused).
401 * NrFreed - Points to a variable where the number of pages
402 * actually freed is returned.
405 PLIST_ENTRY current_entry
;
412 BOOLEAN FlushedPages
= FALSE
;
414 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
416 InitializeListHead(&FreeList
);
421 KeAcquireGuardedMutex(&ViewLock
);
423 current_entry
= VacbLruListHead
.Flink
;
424 while (current_entry
!= &VacbLruListHead
)
426 current
= CONTAINING_RECORD(current_entry
,
429 current_entry
= current_entry
->Flink
;
431 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
433 /* Reference the VACB */
434 CcRosVacbIncRefCount(current
);
436 /* Check if it's mapped and not dirty */
437 if (current
->MappedCount
> 0 && !current
->Dirty
)
439 /* We have to break these locks because Cc sucks */
440 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
441 KeReleaseGuardedMutex(&ViewLock
);
443 /* Page out the VACB */
444 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++)
446 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
448 MmPageOutPhysicalAddress(Page
);
451 /* Reacquire the locks */
452 KeAcquireGuardedMutex(&ViewLock
);
453 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
456 /* Dereference the VACB */
457 CcRosVacbDecRefCount(current
);
459 /* Check if we can free this entry now */
460 if (current
->ReferenceCount
== 0)
462 ASSERT(!current
->Dirty
);
463 ASSERT(!current
->MappedCount
);
465 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
466 RemoveEntryList(¤t
->VacbLruListEntry
);
467 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
469 /* Calculate how many pages we freed for Mm */
470 PagesFreed
= min(VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
, Target
);
471 Target
-= PagesFreed
;
472 (*NrFreed
) += PagesFreed
;
475 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
478 KeReleaseGuardedMutex(&ViewLock
);
480 /* Try flushing pages if we haven't met our target */
481 if ((Target
> 0) && !FlushedPages
)
483 /* Flush dirty pages to disk */
484 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
, FALSE
);
487 /* We can only swap as many pages as we flushed */
488 if (PagesFreed
< Target
) Target
= PagesFreed
;
490 /* Check if we flushed anything */
493 /* Try again after flushing dirty pages */
494 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
499 while (!IsListEmpty(&FreeList
))
501 current_entry
= RemoveHeadList(&FreeList
);
502 current
= CONTAINING_RECORD(current_entry
,
504 CacheMapVacbListEntry
);
505 CcRosInternalFreeVacb(current
);
508 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
510 return STATUS_SUCCESS
;
516 PROS_SHARED_CACHE_MAP SharedCacheMap
,
522 ASSERT(SharedCacheMap
);
524 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
525 SharedCacheMap
, Vacb
, Valid
);
529 if (Dirty
&& !Vacb
->Dirty
)
531 CcRosMarkDirtyVacb(Vacb
);
538 CcRosVacbDecRefCount(Vacb
);
539 if (Mapped
&& (Vacb
->MappedCount
== 1))
541 CcRosVacbIncRefCount(Vacb
);
544 CcRosReleaseVacbLock(Vacb
);
546 return STATUS_SUCCESS
;
549 /* Returns with VACB Lock Held! */
553 PROS_SHARED_CACHE_MAP SharedCacheMap
,
556 PLIST_ENTRY current_entry
;
560 ASSERT(SharedCacheMap
);
562 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
563 SharedCacheMap
, FileOffset
);
565 KeAcquireGuardedMutex(&ViewLock
);
566 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
568 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
569 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
571 current
= CONTAINING_RECORD(current_entry
,
573 CacheMapVacbListEntry
);
574 if (IsPointInRange(current
->FileOffset
.QuadPart
,
575 VACB_MAPPING_GRANULARITY
,
578 CcRosVacbIncRefCount(current
);
579 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
580 KeReleaseGuardedMutex(&ViewLock
);
581 CcRosAcquireVacbLock(current
, NULL
);
584 if (current
->FileOffset
.QuadPart
> FileOffset
)
586 current_entry
= current_entry
->Flink
;
589 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
590 KeReleaseGuardedMutex(&ViewLock
);
601 PROS_SHARED_CACHE_MAP SharedCacheMap
;
603 SharedCacheMap
= Vacb
->SharedCacheMap
;
605 KeAcquireGuardedMutex(&ViewLock
);
606 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
608 ASSERT(!Vacb
->Dirty
);
610 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
611 CcTotalDirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
612 Vacb
->SharedCacheMap
->DirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
613 CcRosVacbIncRefCount(Vacb
);
615 /* Move to the tail of the LRU list */
616 RemoveEntryList(&Vacb
->VacbLruListEntry
);
617 InsertTailList(&VacbLruListHead
, &Vacb
->VacbLruListEntry
);
621 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
622 KeReleaseGuardedMutex(&ViewLock
);
628 PROS_SHARED_CACHE_MAP SharedCacheMap
,
633 ASSERT(SharedCacheMap
);
635 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
636 SharedCacheMap
, FileOffset
);
638 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
641 KeBugCheck(CACHE_MANAGER
);
646 CcRosMarkDirtyVacb(Vacb
);
649 CcRosReleaseVacbLock(Vacb
);
651 return STATUS_SUCCESS
;
657 PROS_SHARED_CACHE_MAP SharedCacheMap
,
663 ASSERT(SharedCacheMap
);
665 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
666 SharedCacheMap
, FileOffset
, NowDirty
);
668 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
671 return STATUS_UNSUCCESSFUL
;
674 if (NowDirty
&& !Vacb
->Dirty
)
676 CcRosMarkDirtyVacb(Vacb
);
681 CcRosVacbDecRefCount(Vacb
);
682 if (Vacb
->MappedCount
== 0)
684 CcRosVacbDecRefCount(Vacb
);
687 CcRosReleaseVacbLock(Vacb
);
689 return STATUS_SUCCESS
;
699 ULONG_PTR NumberOfPages
;
701 /* Create a memory area. */
702 MmLockAddressSpace(MmGetKernelAddressSpace());
703 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
704 0, // nothing checks for VACB mareas, so set to 0
706 VACB_MAPPING_GRANULARITY
,
708 (PMEMORY_AREA
*)&Vacb
->MemoryArea
,
711 MmUnlockAddressSpace(MmGetKernelAddressSpace());
712 if (!NT_SUCCESS(Status
))
714 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status
, Vacb
);
718 ASSERT(((ULONG_PTR
)Vacb
->BaseAddress
% PAGE_SIZE
) == 0);
719 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
> (ULONG_PTR
)MmSystemRangeStart
);
721 /* Create a virtual mapping for this memory area */
722 NumberOfPages
= BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY
);
723 for (i
= 0; i
< NumberOfPages
; i
++)
725 PFN_NUMBER PageFrameNumber
;
727 MI_SET_USAGE(MI_USAGE_CACHE
);
728 Status
= MmRequestPageMemoryConsumer(MC_CACHE
, TRUE
, &PageFrameNumber
);
729 if (PageFrameNumber
== 0)
731 DPRINT1("Unable to allocate page\n");
732 KeBugCheck(MEMORY_MANAGEMENT
);
735 Status
= MmCreateVirtualMapping(NULL
,
736 (PVOID
)((ULONG_PTR
)Vacb
->BaseAddress
+ (i
* PAGE_SIZE
)),
740 if (!NT_SUCCESS(Status
))
742 DPRINT1("Unable to create virtual mapping\n");
743 KeBugCheck(MEMORY_MANAGEMENT
);
747 return STATUS_SUCCESS
;
753 PROS_SHARED_CACHE_MAP SharedCacheMap
,
759 PLIST_ENTRY current_entry
;
763 ASSERT(SharedCacheMap
);
765 DPRINT("CcRosCreateVacb()\n");
767 if (FileOffset
>= SharedCacheMap
->SectionSize
.QuadPart
)
770 return STATUS_INVALID_PARAMETER
;
773 current
= ExAllocateFromNPagedLookasideList(&VacbLookasideList
);
774 current
->BaseAddress
= NULL
;
775 current
->Valid
= FALSE
;
776 current
->Dirty
= FALSE
;
777 current
->PageOut
= FALSE
;
778 current
->FileOffset
.QuadPart
= ROUND_DOWN(FileOffset
, VACB_MAPPING_GRANULARITY
);
779 current
->SharedCacheMap
= SharedCacheMap
;
781 if (SharedCacheMap
->Trace
)
783 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap
, current
);
786 current
->MappedCount
= 0;
787 current
->DirtyVacbListEntry
.Flink
= NULL
;
788 current
->DirtyVacbListEntry
.Blink
= NULL
;
789 current
->ReferenceCount
= 1;
790 current
->PinCount
= 0;
791 KeInitializeMutex(¤t
->Mutex
, 0);
792 CcRosAcquireVacbLock(current
, NULL
);
793 KeAcquireGuardedMutex(&ViewLock
);
796 /* There is window between the call to CcRosLookupVacb
797 * and CcRosCreateVacb. We must check if a VACB for the
798 * file offset exist. If there is a VACB, we release
799 * our newly created VACB and return the existing one.
801 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
802 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
804 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
806 current
= CONTAINING_RECORD(current_entry
,
808 CacheMapVacbListEntry
);
809 if (IsPointInRange(current
->FileOffset
.QuadPart
,
810 VACB_MAPPING_GRANULARITY
,
813 CcRosVacbIncRefCount(current
);
814 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
816 if (SharedCacheMap
->Trace
)
818 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
824 CcRosReleaseVacbLock(*Vacb
);
825 KeReleaseGuardedMutex(&ViewLock
);
826 ExFreeToNPagedLookasideList(&VacbLookasideList
, *Vacb
);
828 CcRosAcquireVacbLock(current
, NULL
);
829 return STATUS_SUCCESS
;
831 if (current
->FileOffset
.QuadPart
< FileOffset
)
833 ASSERT(previous
== NULL
||
834 previous
->FileOffset
.QuadPart
< current
->FileOffset
.QuadPart
);
837 if (current
->FileOffset
.QuadPart
> FileOffset
)
839 current_entry
= current_entry
->Flink
;
841 /* There was no existing VACB. */
845 InsertHeadList(&previous
->CacheMapVacbListEntry
, ¤t
->CacheMapVacbListEntry
);
849 InsertHeadList(&SharedCacheMap
->CacheMapVacbListHead
, ¤t
->CacheMapVacbListEntry
);
851 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
852 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
853 KeReleaseGuardedMutex(&ViewLock
);
855 MI_SET_USAGE(MI_USAGE_CACHE
);
857 if ((SharedCacheMap
->FileObject
) && (SharedCacheMap
->FileObject
->FileName
.Buffer
))
861 pos
= wcsrchr(SharedCacheMap
->FileObject
->FileName
.Buffer
, '\\');
864 len
= wcslen(pos
) * sizeof(WCHAR
);
865 snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
869 snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%wZ", &SharedCacheMap
->FileObject
->FileName
);
874 Status
= CcRosMapVacb(current
);
875 if (!NT_SUCCESS(Status
))
877 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
878 RemoveEntryList(¤t
->VacbLruListEntry
);
879 CcRosReleaseVacbLock(current
);
880 ExFreeToNPagedLookasideList(&VacbLookasideList
, current
);
889 PROS_SHARED_CACHE_MAP SharedCacheMap
,
891 PLONGLONG BaseOffset
,
899 ASSERT(SharedCacheMap
);
901 DPRINT("CcRosGetVacb()\n");
904 * Look for a VACB already mapping the same data.
906 current
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
910 * Otherwise create a new VACB.
912 Status
= CcRosCreateVacb(SharedCacheMap
, FileOffset
, ¤t
);
913 if (!NT_SUCCESS(Status
))
919 KeAcquireGuardedMutex(&ViewLock
);
921 /* Move to the tail of the LRU list */
922 RemoveEntryList(¤t
->VacbLruListEntry
);
923 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
925 KeReleaseGuardedMutex(&ViewLock
);
928 * Return information about the VACB to the caller.
930 *UptoDate
= current
->Valid
;
931 *BaseAddress
= current
->BaseAddress
;
932 DPRINT("*BaseAddress %p\n", *BaseAddress
);
934 *BaseOffset
= current
->FileOffset
.QuadPart
;
935 return STATUS_SUCCESS
;
941 PROS_SHARED_CACHE_MAP SharedCacheMap
,
947 * FUNCTION: Request a page mapping for a shared cache map
952 ASSERT(SharedCacheMap
);
954 if (FileOffset
% VACB_MAPPING_GRANULARITY
!= 0)
956 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
957 FileOffset
, VACB_MAPPING_GRANULARITY
);
958 KeBugCheck(CACHE_MANAGER
);
961 return CcRosGetVacb(SharedCacheMap
,
973 MEMORY_AREA
* MemoryArea
,
979 ASSERT(SwapEntry
== 0);
982 ASSERT(MmGetReferenceCountPage(Page
) == 1);
983 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
988 CcRosInternalFreeVacb (
991 * FUNCTION: Releases a VACB associated with a shared cache map
994 DPRINT("Freeing VACB 0x%p\n", Vacb
);
996 if (Vacb
->SharedCacheMap
->Trace
)
998 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb
->SharedCacheMap
, Vacb
);
1002 MmLockAddressSpace(MmGetKernelAddressSpace());
1003 MmFreeMemoryArea(MmGetKernelAddressSpace(),
1007 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1009 ExFreeToNPagedLookasideList(&VacbLookasideList
, Vacb
);
1010 return STATUS_SUCCESS
;
1019 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
1020 IN PLARGE_INTEGER FileOffset OPTIONAL
,
1022 OUT PIO_STATUS_BLOCK IoStatus
)
1024 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1025 LARGE_INTEGER Offset
;
1026 LONGLONG RemainingLength
;
1031 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1032 SectionObjectPointers
, FileOffset
, Length
);
1034 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1035 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
1037 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1039 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1040 ASSERT(SharedCacheMap
);
1043 Offset
= *FileOffset
;
1044 RemainingLength
= Length
;
1048 Offset
.QuadPart
= 0;
1049 RemainingLength
= SharedCacheMap
->FileSize
.QuadPart
;
1054 IoStatus
->Status
= STATUS_SUCCESS
;
1055 IoStatus
->Information
= 0;
1058 while (RemainingLength
> 0)
1060 current
= CcRosLookupVacb(SharedCacheMap
, Offset
.QuadPart
);
1061 if (current
!= NULL
)
1065 Status
= CcRosFlushVacb(current
);
1066 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
1068 IoStatus
->Status
= Status
;
1072 CcRosReleaseVacbLock(current
);
1074 KeAcquireGuardedMutex(&ViewLock
);
1075 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1076 CcRosVacbDecRefCount(current
);
1077 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1078 KeReleaseGuardedMutex(&ViewLock
);
1081 Offset
.QuadPart
+= VACB_MAPPING_GRANULARITY
;
1082 RemainingLength
-= min(RemainingLength
, VACB_MAPPING_GRANULARITY
);
1089 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1096 CcRosDeleteFileCache (
1097 PFILE_OBJECT FileObject
,
1098 PROS_SHARED_CACHE_MAP SharedCacheMap
)
1100 * FUNCTION: Releases the shared cache map associated with a file object
1103 PLIST_ENTRY current_entry
;
1105 LIST_ENTRY FreeList
;
1108 ASSERT(SharedCacheMap
);
1110 SharedCacheMap
->OpenCount
++;
1111 KeReleaseGuardedMutex(&ViewLock
);
1113 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1115 KeAcquireGuardedMutex(&ViewLock
);
1116 SharedCacheMap
->OpenCount
--;
1117 if (SharedCacheMap
->OpenCount
== 0)
1121 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1126 InitializeListHead(&FreeList
);
1127 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1128 while (!IsListEmpty(&SharedCacheMap
->CacheMapVacbListHead
))
1130 current_entry
= RemoveTailList(&SharedCacheMap
->CacheMapVacbListHead
);
1131 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1133 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1134 CcRosAcquireVacbLock(current
, NULL
);
1135 RemoveEntryList(¤t
->VacbLruListEntry
);
1138 RemoveEntryList(¤t
->DirtyVacbListEntry
);
1139 CcTotalDirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
1140 current
->SharedCacheMap
->DirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
1141 DPRINT1("Freeing dirty VACB\n");
1143 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
1144 CcRosReleaseVacbLock(current
);
1146 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1149 SharedCacheMap
->Trace
= FALSE
;
1151 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1153 KeReleaseGuardedMutex(&ViewLock
);
1154 ObDereferenceObject(SharedCacheMap
->FileObject
);
1156 while (!IsListEmpty(&FreeList
))
1158 current_entry
= RemoveTailList(&FreeList
);
1159 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1160 CcRosInternalFreeVacb(current
);
1163 KeAcquireSpinLock(&iSharedCacheMapLock
, &OldIrql
);
1164 RemoveEntryList(&SharedCacheMap
->SharedCacheMapLinks
);
1165 KeReleaseSpinLock(&iSharedCacheMapLock
, OldIrql
);
1167 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1168 KeAcquireGuardedMutex(&ViewLock
);
1170 return STATUS_SUCCESS
;
1175 CcRosReferenceCache (
1176 PFILE_OBJECT FileObject
)
1178 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1179 KeAcquireGuardedMutex(&ViewLock
);
1180 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1181 ASSERT(SharedCacheMap
);
1182 ASSERT(SharedCacheMap
->OpenCount
!= 0);
1183 SharedCacheMap
->OpenCount
++;
1184 KeReleaseGuardedMutex(&ViewLock
);
1189 CcRosRemoveIfClosed (
1190 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1192 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1193 DPRINT("CcRosRemoveIfClosed()\n");
1194 KeAcquireGuardedMutex(&ViewLock
);
1195 SharedCacheMap
= SectionObjectPointer
->SharedCacheMap
;
1196 if (SharedCacheMap
&& SharedCacheMap
->OpenCount
== 0)
1198 CcRosDeleteFileCache(SharedCacheMap
->FileObject
, SharedCacheMap
);
1200 KeReleaseGuardedMutex(&ViewLock
);
1206 CcRosDereferenceCache (
1207 PFILE_OBJECT FileObject
)
1209 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1210 KeAcquireGuardedMutex(&ViewLock
);
1211 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1212 ASSERT(SharedCacheMap
);
1213 if (SharedCacheMap
->OpenCount
> 0)
1215 SharedCacheMap
->OpenCount
--;
1216 if (SharedCacheMap
->OpenCount
== 0)
1218 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1219 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1222 KeReleaseGuardedMutex(&ViewLock
);
1227 CcRosReleaseFileCache (
1228 PFILE_OBJECT FileObject
)
1230 * FUNCTION: Called by the file system when a handle to a file object
1234 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1236 KeAcquireGuardedMutex(&ViewLock
);
1238 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1240 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1241 if (FileObject
->PrivateCacheMap
!= NULL
)
1243 FileObject
->PrivateCacheMap
= NULL
;
1244 if (SharedCacheMap
->OpenCount
> 0)
1246 SharedCacheMap
->OpenCount
--;
1247 if (SharedCacheMap
->OpenCount
== 0)
1249 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1250 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1255 KeReleaseGuardedMutex(&ViewLock
);
1256 return STATUS_SUCCESS
;
1261 CcTryToInitializeFileCache (
1262 PFILE_OBJECT FileObject
)
1264 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1267 KeAcquireGuardedMutex(&ViewLock
);
1269 ASSERT(FileObject
->SectionObjectPointer
);
1270 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1271 if (SharedCacheMap
== NULL
)
1273 Status
= STATUS_UNSUCCESSFUL
;
1277 if (FileObject
->PrivateCacheMap
== NULL
)
1279 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1280 SharedCacheMap
->OpenCount
++;
1282 Status
= STATUS_SUCCESS
;
1284 KeReleaseGuardedMutex(&ViewLock
);
1292 CcRosInitializeFileCache (
1293 PFILE_OBJECT FileObject
,
1294 PCC_FILE_SIZES FileSizes
,
1296 PCACHE_MANAGER_CALLBACKS CallBacks
,
1297 PVOID LazyWriterContext
)
1299 * FUNCTION: Initializes a shared cache map for a file object
1302 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1304 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1305 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1306 FileObject
, SharedCacheMap
);
1308 KeAcquireGuardedMutex(&ViewLock
);
1309 if (SharedCacheMap
== NULL
)
1313 SharedCacheMap
= ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList
);
1314 if (SharedCacheMap
== NULL
)
1316 KeReleaseGuardedMutex(&ViewLock
);
1317 return STATUS_INSUFFICIENT_RESOURCES
;
1319 RtlZeroMemory(SharedCacheMap
, sizeof(*SharedCacheMap
));
1320 ObReferenceObjectByPointer(FileObject
,
1324 SharedCacheMap
->FileObject
= FileObject
;
1325 SharedCacheMap
->Callbacks
= CallBacks
;
1326 SharedCacheMap
->LazyWriteContext
= LazyWriterContext
;
1327 SharedCacheMap
->SectionSize
= FileSizes
->AllocationSize
;
1328 SharedCacheMap
->FileSize
= FileSizes
->FileSize
;
1329 SharedCacheMap
->PinAccess
= PinAccess
;
1330 SharedCacheMap
->DirtyPageThreshold
= 0;
1331 SharedCacheMap
->DirtyPages
= 0;
1332 KeInitializeSpinLock(&SharedCacheMap
->CacheMapLock
);
1333 InitializeListHead(&SharedCacheMap
->CacheMapVacbListHead
);
1334 FileObject
->SectionObjectPointer
->SharedCacheMap
= SharedCacheMap
;
1336 KeAcquireSpinLock(&iSharedCacheMapLock
, &OldIrql
);
1337 InsertTailList(&CcCleanSharedCacheMapList
, &SharedCacheMap
->SharedCacheMapLinks
);
1338 KeReleaseSpinLock(&iSharedCacheMapLock
, OldIrql
);
1340 if (FileObject
->PrivateCacheMap
== NULL
)
1342 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1343 SharedCacheMap
->OpenCount
++;
1345 KeReleaseGuardedMutex(&ViewLock
);
1347 return STATUS_SUCCESS
;
1355 CcGetFileObjectFromSectionPtrs (
1356 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1358 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1360 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p\n", SectionObjectPointers
);
1362 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1364 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1365 ASSERT(SharedCacheMap
);
1366 return SharedCacheMap
->FileObject
;
1373 CcShutdownLazyWriter (
1376 /* Simply set the event, lazy writer will stop when it's done */
1377 KeSetEvent(&iLazyWriterShutdown
, IO_DISK_INCREMENT
, FALSE
);
1389 OBJECT_ATTRIBUTES ObjectAttributes
;
1391 DPRINT("CcInitView()\n");
1393 InitializeListHead(&DirtyVacbListHead
);
1394 InitializeListHead(&VacbLruListHead
);
1395 InitializeListHead(&CcDeferredWrites
);
1396 InitializeListHead(&CcCleanSharedCacheMapList
);
1397 KeInitializeSpinLock(&CcDeferredWriteSpinLock
);
1398 KeInitializeSpinLock(&iSharedCacheMapLock
);
1399 KeInitializeGuardedMutex(&ViewLock
);
1400 ExInitializeNPagedLookasideList(&iBcbLookasideList
,
1404 sizeof(INTERNAL_BCB
),
1407 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList
,
1411 sizeof(ROS_SHARED_CACHE_MAP
),
1412 TAG_SHARED_CACHE_MAP
,
1414 ExInitializeNPagedLookasideList(&VacbLookasideList
,
1422 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1424 /* Initialize lazy writer events */
1425 KeInitializeEvent(&iLazyWriterShutdown
, SynchronizationEvent
, FALSE
);
1426 KeInitializeEvent(&iLazyWriterNotify
, NotificationEvent
, FALSE
);
1428 /* Define lazy writer threshold, depending on system type */
1429 switch (MmQuerySystemSize())
1432 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 8;
1435 case MmMediumSystem
:
1436 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 4;
1440 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 8 + MmNumberOfPhysicalPages
/ 4;
1444 /* Start the lazy writer thread */
1445 InitializeObjectAttributes(&ObjectAttributes
,
1450 Status
= PsCreateSystemThread(&LazyWriter
,
1457 if (!NT_SUCCESS(Status
))
1463 Status
= NtSetInformationThread(LazyWriter
,
1467 ASSERT(NT_SUCCESS(Status
));
1469 /* Handle is not needed */
1470 ObCloseHandle(LazyWriter
, KernelMode
);
1472 CcInitCacheZeroPage();
1477 #if DBG && defined(KDBG)
1479 ExpKdbgExtFileCache(ULONG Argc
, PCHAR Argv
[])
1481 PLIST_ENTRY ListEntry
;
1482 UNICODE_STRING NoName
= RTL_CONSTANT_STRING(L
"No name for File");
1484 KdbpPrint(" Usage Summary (in kb)\n");
1485 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1486 /* No need to lock the spin lock here, we're in DBG */
1487 for (ListEntry
= CcCleanSharedCacheMapList
.Flink
;
1488 ListEntry
!= &CcCleanSharedCacheMapList
;
1489 ListEntry
= ListEntry
->Flink
)
1492 ULONG Valid
= 0, Dirty
= 0;
1493 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1494 PUNICODE_STRING FileName
;
1496 SharedCacheMap
= CONTAINING_RECORD(ListEntry
, ROS_SHARED_CACHE_MAP
, SharedCacheMapLinks
);
1499 Dirty
= (SharedCacheMap
->DirtyPages
* PAGE_SIZE
) / 1024;
1501 /* First, count for all the associated VACB */
1502 for (Vacbs
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
1503 Vacbs
!= &SharedCacheMap
->CacheMapVacbListHead
;
1504 Vacbs
= Vacbs
->Flink
)
1508 Vacb
= CONTAINING_RECORD(Vacbs
, ROS_VACB
, CacheMapVacbListEntry
);
1511 Valid
+= VACB_MAPPING_GRANULARITY
/ 1024;
1516 if (SharedCacheMap
->FileObject
!= NULL
&&
1517 SharedCacheMap
->FileObject
->FileName
.Length
!= 0)
1519 FileName
= &SharedCacheMap
->FileObject
->FileName
;
1527 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap
, Valid
, Dirty
, FileName
);