2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
11 /* NOTES **********************************************************************
13 * This is not the NT implementation of a file cache nor anything much like
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
19 * (1) If caching for the FCB hasn't been initiated then do so by calling
20 * CcInitializeFileCache.
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
27 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
29 * (4) Copy the data into or out of the page as necessary.
31 * (5) Release the cache page
33 /* INCLUDES ******************************************************************/
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
43 /* GLOBALS *******************************************************************/
45 LIST_ENTRY DirtyVacbListHead
;
46 static LIST_ENTRY VacbLruListHead
;
48 KGUARDED_MUTEX ViewLock
;
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList
;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList
;
55 * - Amount of pages flushed by lazy writer
56 * - Number of times lazy writer ran
58 ULONG CcLazyWritePages
= 0;
59 ULONG CcLazyWriteIos
= 0;
61 /* Internal vars (MS):
62 * - Threshold above which lazy writer will start action
63 * - Amount of dirty pages
64 * - List for deferred writes
65 * - Spinlock when dealing with the deferred list
66 * - List for "clean" shared cache maps
67 * - One second delay for lazy writer
69 ULONG CcDirtyPageThreshold
= 0;
70 ULONG CcTotalDirtyPages
= 0;
71 LIST_ENTRY CcDeferredWrites
;
72 KSPIN_LOCK CcDeferredWriteSpinLock
;
73 LIST_ENTRY CcCleanSharedCacheMapList
;
74 LARGE_INTEGER CcIdleDelay
= RTL_CONSTANT_LARGE_INTEGER((LONGLONG
)-1*1000*1000*10);
76 /* Internal vars (ROS):
77 * - Event to notify lazy writer to shutdown
78 * - Event to inform watchers lazy writer is done for this loop
79 * - Lock for the CcCleanSharedCacheMapList list
81 KEVENT iLazyWriterShutdown
;
82 KEVENT iLazyWriterNotify
;
83 KSPIN_LOCK iSharedCacheMapLock
;
86 static void CcRosVacbIncRefCount_(PROS_VACB vacb
, const char* file
, int line
)
88 ++vacb
->ReferenceCount
;
89 if (vacb
->SharedCacheMap
->Trace
)
91 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
92 file
, line
, vacb
, vacb
->ReferenceCount
, vacb
->Dirty
, vacb
->PageOut
);
95 static void CcRosVacbDecRefCount_(PROS_VACB vacb
, const char* file
, int line
)
97 ASSERT(vacb
->ReferenceCount
!= 0);
98 --vacb
->ReferenceCount
;
99 ASSERT(!(vacb
->ReferenceCount
== 0 && vacb
->Dirty
));
100 if (vacb
->SharedCacheMap
->Trace
)
102 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
103 file
, line
, vacb
, vacb
->ReferenceCount
, vacb
->Dirty
, vacb
->PageOut
);
106 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
107 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
109 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
110 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
114 CcRosInternalFreeVacb(PROS_VACB Vacb
);
117 /* FUNCTIONS *****************************************************************/
122 PROS_SHARED_CACHE_MAP SharedCacheMap
,
127 PLIST_ENTRY current_entry
;
133 SharedCacheMap
->Trace
= Trace
;
137 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
139 KeAcquireGuardedMutex(&ViewLock
);
140 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldirql
);
142 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
143 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
145 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
146 current_entry
= current_entry
->Flink
;
148 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
149 current
, current
->ReferenceCount
, current
->Dirty
, current
->PageOut
);
151 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
152 KeReleaseGuardedMutex(&ViewLock
);
156 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
160 UNREFERENCED_PARAMETER(SharedCacheMap
);
161 UNREFERENCED_PARAMETER(Trace
);
172 Status
= CcWriteVirtualAddress(Vacb
);
173 if (NT_SUCCESS(Status
))
175 CcRosUnmarkDirtyVacb(Vacb
, TRUE
);
183 CcRosFlushDirtyPages (
187 BOOLEAN CalledFromLazy
)
189 PLIST_ENTRY current_entry
;
193 LARGE_INTEGER ZeroTimeout
;
195 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
198 ZeroTimeout
.QuadPart
= 0;
200 KeEnterCriticalRegion();
201 KeAcquireGuardedMutex(&ViewLock
);
203 current_entry
= DirtyVacbListHead
.Flink
;
204 if (current_entry
== &DirtyVacbListHead
)
206 DPRINT("No Dirty pages\n");
209 while ((current_entry
!= &DirtyVacbListHead
) && (Target
> 0))
211 current
= CONTAINING_RECORD(current_entry
,
214 current_entry
= current_entry
->Flink
;
216 CcRosVacbIncRefCount(current
);
218 /* When performing lazy write, don't handle temporary files */
219 if (CalledFromLazy
&&
220 BooleanFlagOn(current
->SharedCacheMap
->FileObject
->Flags
, FO_TEMPORARY_FILE
))
222 CcRosVacbDecRefCount(current
);
226 Locked
= current
->SharedCacheMap
->Callbacks
->AcquireForLazyWrite(
227 current
->SharedCacheMap
->LazyWriteContext
, Wait
);
230 CcRosVacbDecRefCount(current
);
234 Status
= CcRosAcquireVacbLock(current
,
235 Wait
? NULL
: &ZeroTimeout
);
236 if (Status
!= STATUS_SUCCESS
)
238 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
239 current
->SharedCacheMap
->LazyWriteContext
);
240 CcRosVacbDecRefCount(current
);
244 ASSERT(current
->Dirty
);
246 /* One reference is added above */
247 if ((current
->ReferenceCount
> 2 && current
->PinCount
== 0) ||
248 (current
->ReferenceCount
> 3 && current
->PinCount
> 1))
250 CcRosReleaseVacbLock(current
);
251 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
252 current
->SharedCacheMap
->LazyWriteContext
);
253 CcRosVacbDecRefCount(current
);
257 KeReleaseGuardedMutex(&ViewLock
);
259 Status
= CcRosFlushVacb(current
);
261 CcRosReleaseVacbLock(current
);
262 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
263 current
->SharedCacheMap
->LazyWriteContext
);
265 KeAcquireGuardedMutex(&ViewLock
);
266 CcRosVacbDecRefCount(current
);
268 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
) &&
269 (Status
!= STATUS_MEDIA_WRITE_PROTECTED
))
271 DPRINT1("CC: Failed to flush VACB.\n");
277 /* How many pages did we free? */
278 PagesFreed
= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
279 (*Count
) += PagesFreed
;
281 /* Make sure we don't overflow target! */
282 if (Target
< PagesFreed
)
284 /* If we would have, jump to zero directly */
289 Target
-= PagesFreed
;
293 current_entry
= DirtyVacbListHead
.Flink
;
296 KeReleaseGuardedMutex(&ViewLock
);
297 KeLeaveCriticalRegion();
299 DPRINT("CcRosFlushDirtyPages() finished\n");
300 return STATUS_SUCCESS
;
303 /* FIXME: Someday this could somewhat implement write-behind/read-ahead */
306 CciLazyWriter(PVOID Unused
)
311 PLIST_ENTRY ListEntry
;
312 ULONG Target
, Count
= 0;
314 /* One per second or until we have to stop */
315 Status
= KeWaitForSingleObject(&iLazyWriterShutdown
,
321 /* If we succeed, we have to stop running! */
322 if (Status
== STATUS_SUCCESS
)
327 /* We're not sleeping anymore */
328 KeClearEvent(&iLazyWriterNotify
);
330 /* Our target is one-eighth of the dirty pages */
331 Target
= CcTotalDirtyPages
/ 8;
335 DPRINT("Lazy writer starting (%d)\n", Target
);
336 CcRosFlushDirtyPages(Target
, &Count
, FALSE
, TRUE
);
338 /* And update stats */
339 CcLazyWritePages
+= Count
;
341 DPRINT("Lazy writer done (%d)\n", Count
);
344 /* Inform people waiting on us that we're done */
345 KeSetEvent(&iLazyWriterNotify
, IO_DISK_INCREMENT
, FALSE
);
347 /* Likely not optimal, but let's handle one deferred write now! */
348 ListEntry
= ExInterlockedRemoveHeadList(&CcDeferredWrites
, &CcDeferredWriteSpinLock
);
349 if (ListEntry
!= NULL
)
351 PDEFERRED_WRITE Context
;
353 /* Extract the context */
354 Context
= CONTAINING_RECORD(ListEntry
, DEFERRED_WRITE
, DeferredWriteLinks
);
355 ASSERT(Context
->NodeTypeCode
== NODE_TYPE_DEFERRED_WRITE
);
357 /* Can we write now? */
358 if (CcCanIWrite(Context
->FileObject
, Context
->BytesToWrite
, FALSE
, TRUE
))
360 /* Yes! Do it, and destroy the associated context */
361 Context
->PostRoutine(Context
->Context1
, Context
->Context2
);
362 ExFreePoolWithTag(Context
, 'CcDw');
366 /* Otherwise, requeue it at the tail, so that it doesn't block others.
367 * This could clearly be improved, but given the poor algorithm used now,
368 * it's better than nothing!
370 ExInterlockedInsertTailList(&CcDeferredWrites
,
371 &Context
->DeferredWriteLinks
,
372 &CcDeferredWriteSpinLock
);
384 * FUNCTION: Try to free some memory from the file cache.
386 * Target - The number of pages to be freed.
387 * Priority - The priority of free (currently unused).
388 * NrFreed - Points to a variable where the number of pages
389 * actually freed is returned.
392 PLIST_ENTRY current_entry
;
399 BOOLEAN FlushedPages
= FALSE
;
401 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
403 InitializeListHead(&FreeList
);
408 KeAcquireGuardedMutex(&ViewLock
);
410 current_entry
= VacbLruListHead
.Flink
;
411 while (current_entry
!= &VacbLruListHead
)
413 current
= CONTAINING_RECORD(current_entry
,
416 current_entry
= current_entry
->Flink
;
418 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
420 /* Reference the VACB */
421 CcRosVacbIncRefCount(current
);
423 /* Check if it's mapped and not dirty */
424 if (current
->MappedCount
> 0 && !current
->Dirty
)
426 /* We have to break these locks because Cc sucks */
427 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
428 KeReleaseGuardedMutex(&ViewLock
);
430 /* Page out the VACB */
431 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++)
433 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
435 MmPageOutPhysicalAddress(Page
);
438 /* Reacquire the locks */
439 KeAcquireGuardedMutex(&ViewLock
);
440 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
443 /* Dereference the VACB */
444 CcRosVacbDecRefCount(current
);
446 /* Check if we can free this entry now */
447 if (current
->ReferenceCount
== 0)
449 ASSERT(!current
->Dirty
);
450 ASSERT(!current
->MappedCount
);
452 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
453 RemoveEntryList(¤t
->VacbLruListEntry
);
454 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
456 /* Calculate how many pages we freed for Mm */
457 PagesFreed
= min(VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
, Target
);
458 Target
-= PagesFreed
;
459 (*NrFreed
) += PagesFreed
;
462 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
465 KeReleaseGuardedMutex(&ViewLock
);
467 /* Try flushing pages if we haven't met our target */
468 if ((Target
> 0) && !FlushedPages
)
470 /* Flush dirty pages to disk */
471 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
, FALSE
);
474 /* We can only swap as many pages as we flushed */
475 if (PagesFreed
< Target
) Target
= PagesFreed
;
477 /* Check if we flushed anything */
480 /* Try again after flushing dirty pages */
481 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
486 while (!IsListEmpty(&FreeList
))
488 current_entry
= RemoveHeadList(&FreeList
);
489 current
= CONTAINING_RECORD(current_entry
,
491 CacheMapVacbListEntry
);
492 CcRosInternalFreeVacb(current
);
495 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
497 return STATUS_SUCCESS
;
503 PROS_SHARED_CACHE_MAP SharedCacheMap
,
509 ASSERT(SharedCacheMap
);
511 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
512 SharedCacheMap
, Vacb
, Valid
);
516 if (Dirty
&& !Vacb
->Dirty
)
518 CcRosMarkDirtyVacb(Vacb
);
525 CcRosVacbDecRefCount(Vacb
);
526 if (Mapped
&& (Vacb
->MappedCount
== 1))
528 CcRosVacbIncRefCount(Vacb
);
531 CcRosReleaseVacbLock(Vacb
);
533 return STATUS_SUCCESS
;
536 /* Returns with VACB Lock Held! */
540 PROS_SHARED_CACHE_MAP SharedCacheMap
,
543 PLIST_ENTRY current_entry
;
547 ASSERT(SharedCacheMap
);
549 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
550 SharedCacheMap
, FileOffset
);
552 KeAcquireGuardedMutex(&ViewLock
);
553 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
555 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
556 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
558 current
= CONTAINING_RECORD(current_entry
,
560 CacheMapVacbListEntry
);
561 if (IsPointInRange(current
->FileOffset
.QuadPart
,
562 VACB_MAPPING_GRANULARITY
,
565 CcRosVacbIncRefCount(current
);
566 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
567 KeReleaseGuardedMutex(&ViewLock
);
568 CcRosAcquireVacbLock(current
, NULL
);
571 if (current
->FileOffset
.QuadPart
> FileOffset
)
573 current_entry
= current_entry
->Flink
;
576 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
577 KeReleaseGuardedMutex(&ViewLock
);
588 PROS_SHARED_CACHE_MAP SharedCacheMap
;
590 SharedCacheMap
= Vacb
->SharedCacheMap
;
592 KeAcquireGuardedMutex(&ViewLock
);
593 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
595 ASSERT(!Vacb
->Dirty
);
597 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
598 CcTotalDirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
599 Vacb
->SharedCacheMap
->DirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
600 CcRosVacbIncRefCount(Vacb
);
602 /* Move to the tail of the LRU list */
603 RemoveEntryList(&Vacb
->VacbLruListEntry
);
604 InsertTailList(&VacbLruListHead
, &Vacb
->VacbLruListEntry
);
608 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
609 KeReleaseGuardedMutex(&ViewLock
);
614 CcRosUnmarkDirtyVacb (
619 PROS_SHARED_CACHE_MAP SharedCacheMap
;
621 SharedCacheMap
= Vacb
->SharedCacheMap
;
625 KeAcquireGuardedMutex(&ViewLock
);
626 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
633 RemoveEntryList(&Vacb
->DirtyVacbListEntry
);
634 CcTotalDirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
635 Vacb
->SharedCacheMap
->DirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
636 CcRosVacbDecRefCount(Vacb
);
640 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
641 KeReleaseGuardedMutex(&ViewLock
);
648 PROS_SHARED_CACHE_MAP SharedCacheMap
,
653 ASSERT(SharedCacheMap
);
655 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
656 SharedCacheMap
, FileOffset
);
658 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
661 KeBugCheck(CACHE_MANAGER
);
666 CcRosMarkDirtyVacb(Vacb
);
669 CcRosReleaseVacbLock(Vacb
);
671 return STATUS_SUCCESS
;
677 PROS_SHARED_CACHE_MAP SharedCacheMap
,
683 ASSERT(SharedCacheMap
);
685 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
686 SharedCacheMap
, FileOffset
, NowDirty
);
688 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
691 return STATUS_UNSUCCESSFUL
;
694 if (NowDirty
&& !Vacb
->Dirty
)
696 CcRosMarkDirtyVacb(Vacb
);
701 CcRosVacbDecRefCount(Vacb
);
702 if (Vacb
->MappedCount
== 0)
704 CcRosVacbDecRefCount(Vacb
);
707 CcRosReleaseVacbLock(Vacb
);
709 return STATUS_SUCCESS
;
719 ULONG_PTR NumberOfPages
;
721 /* Create a memory area. */
722 MmLockAddressSpace(MmGetKernelAddressSpace());
723 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
724 0, // nothing checks for VACB mareas, so set to 0
726 VACB_MAPPING_GRANULARITY
,
728 (PMEMORY_AREA
*)&Vacb
->MemoryArea
,
731 MmUnlockAddressSpace(MmGetKernelAddressSpace());
732 if (!NT_SUCCESS(Status
))
734 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status
, Vacb
);
738 ASSERT(((ULONG_PTR
)Vacb
->BaseAddress
% PAGE_SIZE
) == 0);
739 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
> (ULONG_PTR
)MmSystemRangeStart
);
741 /* Create a virtual mapping for this memory area */
742 NumberOfPages
= BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY
);
743 for (i
= 0; i
< NumberOfPages
; i
++)
745 PFN_NUMBER PageFrameNumber
;
747 MI_SET_USAGE(MI_USAGE_CACHE
);
748 Status
= MmRequestPageMemoryConsumer(MC_CACHE
, TRUE
, &PageFrameNumber
);
749 if (PageFrameNumber
== 0)
751 DPRINT1("Unable to allocate page\n");
752 KeBugCheck(MEMORY_MANAGEMENT
);
755 Status
= MmCreateVirtualMapping(NULL
,
756 (PVOID
)((ULONG_PTR
)Vacb
->BaseAddress
+ (i
* PAGE_SIZE
)),
760 if (!NT_SUCCESS(Status
))
762 DPRINT1("Unable to create virtual mapping\n");
763 KeBugCheck(MEMORY_MANAGEMENT
);
767 return STATUS_SUCCESS
;
773 PROS_SHARED_CACHE_MAP SharedCacheMap
,
779 PLIST_ENTRY current_entry
;
783 ASSERT(SharedCacheMap
);
785 DPRINT("CcRosCreateVacb()\n");
787 if (FileOffset
>= SharedCacheMap
->SectionSize
.QuadPart
)
790 return STATUS_INVALID_PARAMETER
;
793 current
= ExAllocateFromNPagedLookasideList(&VacbLookasideList
);
794 current
->BaseAddress
= NULL
;
795 current
->Valid
= FALSE
;
796 current
->Dirty
= FALSE
;
797 current
->PageOut
= FALSE
;
798 current
->FileOffset
.QuadPart
= ROUND_DOWN(FileOffset
, VACB_MAPPING_GRANULARITY
);
799 current
->SharedCacheMap
= SharedCacheMap
;
801 if (SharedCacheMap
->Trace
)
803 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap
, current
);
806 current
->MappedCount
= 0;
807 current
->DirtyVacbListEntry
.Flink
= NULL
;
808 current
->DirtyVacbListEntry
.Blink
= NULL
;
809 current
->ReferenceCount
= 1;
810 current
->PinCount
= 0;
811 KeInitializeMutex(¤t
->Mutex
, 0);
812 CcRosAcquireVacbLock(current
, NULL
);
813 KeAcquireGuardedMutex(&ViewLock
);
816 /* There is window between the call to CcRosLookupVacb
817 * and CcRosCreateVacb. We must check if a VACB for the
818 * file offset exist. If there is a VACB, we release
819 * our newly created VACB and return the existing one.
821 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
822 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
824 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
826 current
= CONTAINING_RECORD(current_entry
,
828 CacheMapVacbListEntry
);
829 if (IsPointInRange(current
->FileOffset
.QuadPart
,
830 VACB_MAPPING_GRANULARITY
,
833 CcRosVacbIncRefCount(current
);
834 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
836 if (SharedCacheMap
->Trace
)
838 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
844 CcRosReleaseVacbLock(*Vacb
);
845 KeReleaseGuardedMutex(&ViewLock
);
846 ExFreeToNPagedLookasideList(&VacbLookasideList
, *Vacb
);
848 CcRosAcquireVacbLock(current
, NULL
);
849 return STATUS_SUCCESS
;
851 if (current
->FileOffset
.QuadPart
< FileOffset
)
853 ASSERT(previous
== NULL
||
854 previous
->FileOffset
.QuadPart
< current
->FileOffset
.QuadPart
);
857 if (current
->FileOffset
.QuadPart
> FileOffset
)
859 current_entry
= current_entry
->Flink
;
861 /* There was no existing VACB. */
865 InsertHeadList(&previous
->CacheMapVacbListEntry
, ¤t
->CacheMapVacbListEntry
);
869 InsertHeadList(&SharedCacheMap
->CacheMapVacbListHead
, ¤t
->CacheMapVacbListEntry
);
871 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
872 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
873 KeReleaseGuardedMutex(&ViewLock
);
875 MI_SET_USAGE(MI_USAGE_CACHE
);
877 if ((SharedCacheMap
->FileObject
) && (SharedCacheMap
->FileObject
->FileName
.Buffer
))
881 pos
= wcsrchr(SharedCacheMap
->FileObject
->FileName
.Buffer
, '\\');
884 len
= wcslen(pos
) * sizeof(WCHAR
);
885 snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
889 snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%wZ", &SharedCacheMap
->FileObject
->FileName
);
894 Status
= CcRosMapVacb(current
);
895 if (!NT_SUCCESS(Status
))
897 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
898 RemoveEntryList(¤t
->VacbLruListEntry
);
899 CcRosReleaseVacbLock(current
);
900 ExFreeToNPagedLookasideList(&VacbLookasideList
, current
);
909 PROS_SHARED_CACHE_MAP SharedCacheMap
,
911 PLONGLONG BaseOffset
,
919 ASSERT(SharedCacheMap
);
921 DPRINT("CcRosGetVacb()\n");
924 * Look for a VACB already mapping the same data.
926 current
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
930 * Otherwise create a new VACB.
932 Status
= CcRosCreateVacb(SharedCacheMap
, FileOffset
, ¤t
);
933 if (!NT_SUCCESS(Status
))
939 KeAcquireGuardedMutex(&ViewLock
);
941 /* Move to the tail of the LRU list */
942 RemoveEntryList(¤t
->VacbLruListEntry
);
943 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
945 KeReleaseGuardedMutex(&ViewLock
);
948 * Return information about the VACB to the caller.
950 *UptoDate
= current
->Valid
;
951 *BaseAddress
= current
->BaseAddress
;
952 DPRINT("*BaseAddress %p\n", *BaseAddress
);
954 *BaseOffset
= current
->FileOffset
.QuadPart
;
955 return STATUS_SUCCESS
;
961 PROS_SHARED_CACHE_MAP SharedCacheMap
,
967 * FUNCTION: Request a page mapping for a shared cache map
972 ASSERT(SharedCacheMap
);
974 if (FileOffset
% VACB_MAPPING_GRANULARITY
!= 0)
976 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
977 FileOffset
, VACB_MAPPING_GRANULARITY
);
978 KeBugCheck(CACHE_MANAGER
);
981 return CcRosGetVacb(SharedCacheMap
,
993 MEMORY_AREA
* MemoryArea
,
999 ASSERT(SwapEntry
== 0);
1002 ASSERT(MmGetReferenceCountPage(Page
) == 1);
1003 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
1008 CcRosInternalFreeVacb (
1011 * FUNCTION: Releases a VACB associated with a shared cache map
1014 DPRINT("Freeing VACB 0x%p\n", Vacb
);
1016 if (Vacb
->SharedCacheMap
->Trace
)
1018 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb
->SharedCacheMap
, Vacb
);
1022 MmLockAddressSpace(MmGetKernelAddressSpace());
1023 MmFreeMemoryArea(MmGetKernelAddressSpace(),
1027 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1029 ExFreeToNPagedLookasideList(&VacbLookasideList
, Vacb
);
1030 return STATUS_SUCCESS
;
1039 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
1040 IN PLARGE_INTEGER FileOffset OPTIONAL
,
1042 OUT PIO_STATUS_BLOCK IoStatus
)
1044 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1045 LARGE_INTEGER Offset
;
1046 LONGLONG RemainingLength
;
1051 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1052 SectionObjectPointers
, FileOffset
, Length
);
1054 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1055 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
1057 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1059 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1060 ASSERT(SharedCacheMap
);
1063 Offset
= *FileOffset
;
1064 RemainingLength
= Length
;
1068 Offset
.QuadPart
= 0;
1069 RemainingLength
= SharedCacheMap
->FileSize
.QuadPart
;
1074 IoStatus
->Status
= STATUS_SUCCESS
;
1075 IoStatus
->Information
= 0;
1078 while (RemainingLength
> 0)
1080 current
= CcRosLookupVacb(SharedCacheMap
, Offset
.QuadPart
);
1081 if (current
!= NULL
)
1085 Status
= CcRosFlushVacb(current
);
1086 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
1088 IoStatus
->Status
= Status
;
1092 CcRosReleaseVacbLock(current
);
1094 KeAcquireGuardedMutex(&ViewLock
);
1095 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1096 CcRosVacbDecRefCount(current
);
1097 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1098 KeReleaseGuardedMutex(&ViewLock
);
1101 Offset
.QuadPart
+= VACB_MAPPING_GRANULARITY
;
1102 RemainingLength
-= min(RemainingLength
, VACB_MAPPING_GRANULARITY
);
1109 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1116 CcRosDeleteFileCache (
1117 PFILE_OBJECT FileObject
,
1118 PROS_SHARED_CACHE_MAP SharedCacheMap
)
1120 * FUNCTION: Releases the shared cache map associated with a file object
1123 PLIST_ENTRY current_entry
;
1125 LIST_ENTRY FreeList
;
1128 ASSERT(SharedCacheMap
);
1130 SharedCacheMap
->OpenCount
++;
1131 KeReleaseGuardedMutex(&ViewLock
);
1133 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1135 KeAcquireGuardedMutex(&ViewLock
);
1136 SharedCacheMap
->OpenCount
--;
1137 if (SharedCacheMap
->OpenCount
== 0)
1141 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1146 InitializeListHead(&FreeList
);
1147 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1148 while (!IsListEmpty(&SharedCacheMap
->CacheMapVacbListHead
))
1150 current_entry
= RemoveTailList(&SharedCacheMap
->CacheMapVacbListHead
);
1151 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1153 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1154 CcRosAcquireVacbLock(current
, NULL
);
1155 RemoveEntryList(¤t
->VacbLruListEntry
);
1158 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1159 CcRosUnmarkDirtyVacb(current
, FALSE
);
1160 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1161 DPRINT1("Freeing dirty VACB\n");
1163 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
1164 CcRosReleaseVacbLock(current
);
1166 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1169 SharedCacheMap
->Trace
= FALSE
;
1171 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1173 KeReleaseGuardedMutex(&ViewLock
);
1174 ObDereferenceObject(SharedCacheMap
->FileObject
);
1176 while (!IsListEmpty(&FreeList
))
1178 current_entry
= RemoveTailList(&FreeList
);
1179 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1180 CcRosInternalFreeVacb(current
);
1183 KeAcquireSpinLock(&iSharedCacheMapLock
, &OldIrql
);
1184 RemoveEntryList(&SharedCacheMap
->SharedCacheMapLinks
);
1185 KeReleaseSpinLock(&iSharedCacheMapLock
, OldIrql
);
1187 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1188 KeAcquireGuardedMutex(&ViewLock
);
1190 return STATUS_SUCCESS
;
1195 CcRosReferenceCache (
1196 PFILE_OBJECT FileObject
)
1198 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1199 KeAcquireGuardedMutex(&ViewLock
);
1200 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1201 ASSERT(SharedCacheMap
);
1202 ASSERT(SharedCacheMap
->OpenCount
!= 0);
1203 SharedCacheMap
->OpenCount
++;
1204 KeReleaseGuardedMutex(&ViewLock
);
1209 CcRosRemoveIfClosed (
1210 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1212 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1213 DPRINT("CcRosRemoveIfClosed()\n");
1214 KeAcquireGuardedMutex(&ViewLock
);
1215 SharedCacheMap
= SectionObjectPointer
->SharedCacheMap
;
1216 if (SharedCacheMap
&& SharedCacheMap
->OpenCount
== 0)
1218 CcRosDeleteFileCache(SharedCacheMap
->FileObject
, SharedCacheMap
);
1220 KeReleaseGuardedMutex(&ViewLock
);
1226 CcRosDereferenceCache (
1227 PFILE_OBJECT FileObject
)
1229 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1230 KeAcquireGuardedMutex(&ViewLock
);
1231 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1232 ASSERT(SharedCacheMap
);
1233 if (SharedCacheMap
->OpenCount
> 0)
1235 SharedCacheMap
->OpenCount
--;
1236 if (SharedCacheMap
->OpenCount
== 0)
1238 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1239 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1242 KeReleaseGuardedMutex(&ViewLock
);
1247 CcRosReleaseFileCache (
1248 PFILE_OBJECT FileObject
)
1250 * FUNCTION: Called by the file system when a handle to a file object
1254 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1256 KeAcquireGuardedMutex(&ViewLock
);
1258 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1260 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1261 if (FileObject
->PrivateCacheMap
!= NULL
)
1263 FileObject
->PrivateCacheMap
= NULL
;
1264 if (SharedCacheMap
->OpenCount
> 0)
1266 SharedCacheMap
->OpenCount
--;
1267 if (SharedCacheMap
->OpenCount
== 0)
1269 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1270 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1275 KeReleaseGuardedMutex(&ViewLock
);
1276 return STATUS_SUCCESS
;
1281 CcTryToInitializeFileCache (
1282 PFILE_OBJECT FileObject
)
1284 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1287 KeAcquireGuardedMutex(&ViewLock
);
1289 ASSERT(FileObject
->SectionObjectPointer
);
1290 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1291 if (SharedCacheMap
== NULL
)
1293 Status
= STATUS_UNSUCCESSFUL
;
1297 if (FileObject
->PrivateCacheMap
== NULL
)
1299 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1300 SharedCacheMap
->OpenCount
++;
1302 Status
= STATUS_SUCCESS
;
1304 KeReleaseGuardedMutex(&ViewLock
);
1312 CcRosInitializeFileCache (
1313 PFILE_OBJECT FileObject
,
1314 PCC_FILE_SIZES FileSizes
,
1316 PCACHE_MANAGER_CALLBACKS CallBacks
,
1317 PVOID LazyWriterContext
)
1319 * FUNCTION: Initializes a shared cache map for a file object
1322 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1324 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1325 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1326 FileObject
, SharedCacheMap
);
1328 KeAcquireGuardedMutex(&ViewLock
);
1329 if (SharedCacheMap
== NULL
)
1333 SharedCacheMap
= ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList
);
1334 if (SharedCacheMap
== NULL
)
1336 KeReleaseGuardedMutex(&ViewLock
);
1337 return STATUS_INSUFFICIENT_RESOURCES
;
1339 RtlZeroMemory(SharedCacheMap
, sizeof(*SharedCacheMap
));
1340 ObReferenceObjectByPointer(FileObject
,
1344 SharedCacheMap
->FileObject
= FileObject
;
1345 SharedCacheMap
->Callbacks
= CallBacks
;
1346 SharedCacheMap
->LazyWriteContext
= LazyWriterContext
;
1347 SharedCacheMap
->SectionSize
= FileSizes
->AllocationSize
;
1348 SharedCacheMap
->FileSize
= FileSizes
->FileSize
;
1349 SharedCacheMap
->PinAccess
= PinAccess
;
1350 SharedCacheMap
->DirtyPageThreshold
= 0;
1351 SharedCacheMap
->DirtyPages
= 0;
1352 KeInitializeSpinLock(&SharedCacheMap
->CacheMapLock
);
1353 InitializeListHead(&SharedCacheMap
->CacheMapVacbListHead
);
1354 FileObject
->SectionObjectPointer
->SharedCacheMap
= SharedCacheMap
;
1356 KeAcquireSpinLock(&iSharedCacheMapLock
, &OldIrql
);
1357 InsertTailList(&CcCleanSharedCacheMapList
, &SharedCacheMap
->SharedCacheMapLinks
);
1358 KeReleaseSpinLock(&iSharedCacheMapLock
, OldIrql
);
1360 if (FileObject
->PrivateCacheMap
== NULL
)
1362 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1363 SharedCacheMap
->OpenCount
++;
1365 KeReleaseGuardedMutex(&ViewLock
);
1367 return STATUS_SUCCESS
;
1375 CcGetFileObjectFromSectionPtrs (
1376 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1378 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1380 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p\n", SectionObjectPointers
);
1382 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1384 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1385 ASSERT(SharedCacheMap
);
1386 return SharedCacheMap
->FileObject
;
1393 CcShutdownLazyWriter (
1396 /* Simply set the event, lazy writer will stop when it's done */
1397 KeSetEvent(&iLazyWriterShutdown
, IO_DISK_INCREMENT
, FALSE
);
1409 OBJECT_ATTRIBUTES ObjectAttributes
;
1411 DPRINT("CcInitView()\n");
1413 InitializeListHead(&DirtyVacbListHead
);
1414 InitializeListHead(&VacbLruListHead
);
1415 InitializeListHead(&CcDeferredWrites
);
1416 InitializeListHead(&CcCleanSharedCacheMapList
);
1417 KeInitializeSpinLock(&CcDeferredWriteSpinLock
);
1418 KeInitializeSpinLock(&iSharedCacheMapLock
);
1419 KeInitializeGuardedMutex(&ViewLock
);
1420 ExInitializeNPagedLookasideList(&iBcbLookasideList
,
1424 sizeof(INTERNAL_BCB
),
1427 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList
,
1431 sizeof(ROS_SHARED_CACHE_MAP
),
1432 TAG_SHARED_CACHE_MAP
,
1434 ExInitializeNPagedLookasideList(&VacbLookasideList
,
1442 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1444 /* Initialize lazy writer events */
1445 KeInitializeEvent(&iLazyWriterShutdown
, SynchronizationEvent
, FALSE
);
1446 KeInitializeEvent(&iLazyWriterNotify
, NotificationEvent
, FALSE
);
1448 /* Define lazy writer threshold, depending on system type */
1449 switch (MmQuerySystemSize())
1452 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 8;
1455 case MmMediumSystem
:
1456 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 4;
1460 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 8 + MmNumberOfPhysicalPages
/ 4;
1464 /* Start the lazy writer thread */
1465 InitializeObjectAttributes(&ObjectAttributes
,
1470 Status
= PsCreateSystemThread(&LazyWriter
,
1477 if (!NT_SUCCESS(Status
))
1483 Status
= NtSetInformationThread(LazyWriter
,
1487 ASSERT(NT_SUCCESS(Status
));
1489 /* Handle is not needed */
1490 ObCloseHandle(LazyWriter
, KernelMode
);
1492 CcInitCacheZeroPage();
1497 #if DBG && defined(KDBG)
1499 ExpKdbgExtFileCache(ULONG Argc
, PCHAR Argv
[])
1501 PLIST_ENTRY ListEntry
;
1502 UNICODE_STRING NoName
= RTL_CONSTANT_STRING(L
"No name for File");
1504 KdbpPrint(" Usage Summary (in kb)\n");
1505 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1506 /* No need to lock the spin lock here, we're in DBG */
1507 for (ListEntry
= CcCleanSharedCacheMapList
.Flink
;
1508 ListEntry
!= &CcCleanSharedCacheMapList
;
1509 ListEntry
= ListEntry
->Flink
)
1512 ULONG Valid
= 0, Dirty
= 0;
1513 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1514 PUNICODE_STRING FileName
;
1516 SharedCacheMap
= CONTAINING_RECORD(ListEntry
, ROS_SHARED_CACHE_MAP
, SharedCacheMapLinks
);
1519 Dirty
= (SharedCacheMap
->DirtyPages
* PAGE_SIZE
) / 1024;
1521 /* First, count for all the associated VACB */
1522 for (Vacbs
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
1523 Vacbs
!= &SharedCacheMap
->CacheMapVacbListHead
;
1524 Vacbs
= Vacbs
->Flink
)
1528 Vacb
= CONTAINING_RECORD(Vacbs
, ROS_VACB
, CacheMapVacbListEntry
);
1531 Valid
+= VACB_MAPPING_GRANULARITY
/ 1024;
1536 if (SharedCacheMap
->FileObject
!= NULL
&&
1537 SharedCacheMap
->FileObject
->FileName
.Length
!= 0)
1539 FileName
= &SharedCacheMap
->FileObject
->FileName
;
1547 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap
, Valid
, Dirty
, FileName
);