/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows:
 *
 * (1) If caching for the FCB hasn't been initiated, then do so by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written, obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If the page is being read, or is not being completely written, and it
 * is not up to date, then read its data from the underlying medium. If the
 * read fails, call CcReleaseCachePage with VALID set to FALSE and return an
 * error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page (an illustrative sketch of the full sequence
 * follows these notes).
 */

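/* Illustrative sketch only, not part of the cache manager itself: roughly how
 * a filesystem read path might walk through steps (1)-(5) using the ROS
 * helpers defined in this file. The surrounding driver pieces (Fcb, Buffer,
 * BytesToCopy, ReadFromDisk, FileSizes, Callbacks and the error handling) are
 * hypothetical and only serve to make the sequence concrete.
 *
 *   if (FileObject->PrivateCacheMap == NULL)                              // (1)
 *   {
 *       CcRosInitializeFileCache(FileObject, &FileSizes, FALSE,
 *                                &Callbacks, Fcb);
 *   }
 *
 *   SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
 *   CcRosRequestVacb(SharedCacheMap,                                      // (2)
 *                    ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY),
 *                    &BaseAddress, &UpToDate, &Vacb);
 *
 *   if (!UpToDate)                                                        // (3)
 *   {
 *       Status = ReadFromDisk(DeviceObject, FileOffset, BaseAddress);
 *       if (!NT_SUCCESS(Status))
 *       {
 *           CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
 *           return Status;
 *       }
 *   }
 *
 *   RtlCopyMemory(Buffer,                                                 // (4)
 *                 (PUCHAR)BaseAddress + (FileOffset % VACB_MAPPING_GRANULARITY),
 *                 BytesToCopy);
 *
 *   CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);           // (5)
 */
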
/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#if defined (ALLOC_PRAGMA)
#pragma alloc_text(INIT, CcInitView)
#endif

/* GLOBALS *******************************************************************/

LIST_ENTRY DirtyVacbListHead;
static LIST_ENTRY VacbLruListHead;

KGUARDED_MUTEX ViewLock;

NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Counters:
 * - Amount of pages flushed by lazy writer
 * - Number of times lazy writer ran
 */
ULONG CcLazyWritePages = 0;
ULONG CcLazyWriteIos = 0;

/* Internal vars (MS):
 * - Threshold above which lazy writer will start action
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock when dealing with the deferred list
 * - List for "clean" shared cache maps
 * - One second delay for lazy writer
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;
#if defined(__GNUC__)
LARGE_INTEGER CcIdleDelay = {.QuadPart = (LONGLONG)-1*1000*1000*10};
#else
LARGE_INTEGER CcIdleDelay = {(LONGLONG)-1*1000*1000*10};
#endif

/* Internal vars (ROS):
 * - Event to notify lazy writer to shutdown
 * - Event to inform watchers lazy writer is done for this loop
 * - Lock for the CcCleanSharedCacheMapList list
 */
KEVENT iLazyWriterShutdown;
KEVENT iLazyWriterNotify;
KSPIN_LOCK iSharedCacheMapLock;

#if DBG
static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
{
    ++vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
    --vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif

NTSTATUS
CcRosInternalFreeVacb(PROS_VACB Vacb);

/* FUNCTIONS *****************************************************************/

VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace)
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut);
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}

NTSTATUS
NTAPI
CcRosFlushVacb (
    PROS_VACB Vacb)
{
    NTSTATUS Status;
    KIRQL oldIrql;

    Status = CcWriteVirtualAddress(Vacb);
    if (NT_SUCCESS(Status))
    {
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);

        Vacb->Dirty = FALSE;
        RemoveEntryList(&Vacb->DirtyVacbListEntry);
        CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        CcRosVacbDecRefCount(Vacb);

        KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }

    return Status;
}

NTSTATUS
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;
    ZeroTimeout.QuadPart = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        CcRosVacbIncRefCount(current);

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        Status = CcRosAcquireVacbLock(current,
                                      Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above */
        if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
            (current->ReferenceCount > 3 && current->PinCount > 1))
        {
            CcRosReleaseVacbLock(current);
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        CcRosReleaseVacbLock(current);
        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            (*Count) += PagesFreed;

            /* Make sure we don't overflow target! */
            if (Target < PagesFreed)
            {
                /* If we would have, jump to zero directly */
                Target = 0;
            }
            else
            {
                Target -= PagesFreed;
            }
        }

        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}

/* FIXME: Someday this could somewhat implement write-behind/read-ahead */
VOID
NTAPI
CciLazyWriter(PVOID Unused)
{
    while (TRUE)
    {
        NTSTATUS Status;
        PLIST_ENTRY ListEntry;
        ULONG Target, Count = 0;

        /* One per second or until we have to stop */
        Status = KeWaitForSingleObject(&iLazyWriterShutdown,
                                       Executive,
                                       KernelMode,
                                       FALSE,
                                       &CcIdleDelay);

        /* If we succeed, we have to stop running! */
        if (Status == STATUS_SUCCESS)
        {
            break;
        }

        /* We're not sleeping anymore */
        KeClearEvent(&iLazyWriterNotify);

        /* Our target is one-eighth of the dirty pages */
        Target = CcTotalDirtyPages / 8;
        if (Target != 0)
        {
            /* There is stuff to flush, do it! */
            DPRINT("Lazy writer starting (%d)\n", Target);
            CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);

            /* And update stats */
            CcLazyWritePages += Count;
            ++CcLazyWriteIos;
            DPRINT("Lazy writer done (%d)\n", Count);
        }

        /* Inform people waiting on us that we're done */
        KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);

        /* Likely not optimal, but let's handle one deferred write now! */
        ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
        if (ListEntry != NULL)
        {
            PROS_DEFERRED_WRITE_CONTEXT Context;

            /* Extract the context */
            Context = CONTAINING_RECORD(ListEntry, ROS_DEFERRED_WRITE_CONTEXT, CcDeferredWritesEntry);

            /* Can we write now? */
            if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, Context->Retrying))
            {
                /* Yes! Do it, and destroy the associated context */
                Context->PostRoutine(Context->Context1, Context->Context2);
                ExFreePoolWithTag(Context, 'CcDw');
            }
            else
            {
                /* Otherwise, requeue it at the tail so that it doesn't block others.
                 * This clearly needs improvement, but given the poor algorithm used
                 * for now, it's better than nothing!
                 */
                ExInterlockedInsertTailList(&CcDeferredWrites,
                                            &Context->CcDeferredWritesEntry,
                                            &CcDeferredWriteSpinLock);
            }
        }
    }
}

NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        CcRosInternalFreeVacb(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    BOOLEAN WasDirty;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    Vacb->Valid = Valid;

    WasDirty = FALSE;
    if (Dirty)
    {
        if (!Vacb->Dirty)
        {
            /* It was not dirty yet, mark it so */
            CcRosMarkDirtyVacb(Vacb);
        }
        else
        {
            WasDirty = TRUE;
        }
    }

    if (Mapped)
    {
        Vacb->MappedCount++;
    }
    CcRosVacbDecRefCount(Vacb);
    if (Mapped && (Vacb->MappedCount == 1))
    {
        CcRosVacbIncRefCount(Vacb);
    }
    if (!WasDirty && Vacb->Dirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }

    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}

/* Returns with VACB Lock Held! */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            CcRosAcquireVacbLock(current, NULL);
            return current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}

VOID
NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    if (!Vacb->Dirty)
    {
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }
    else
    {
        CcRosVacbDecRefCount(Vacb);
    }

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
}

NTSTATUS
NTAPI
CcRosMarkDirtyFile (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PROS_VACB Vacb;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }

    CcRosMarkDirtyVacb(Vacb);

    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    BOOLEAN NowDirty)
{
    PROS_VACB Vacb;
    BOOLEAN WasDirty;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    WasDirty = FALSE;
    if (NowDirty)
    {
        if (!Vacb->Dirty)
        {
            CcRosMarkDirtyVacb(Vacb);
        }
        else
        {
            WasDirty = TRUE;
        }
    }

    Vacb->MappedCount--;

    CcRosVacbDecRefCount(Vacb);
    if (!WasDirty && NowDirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }
    if (Vacb->MappedCount == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}

static
NTSTATUS
CcRosMapVacb(
    PROS_VACB Vacb)
{
    ULONG i;
    NTSTATUS Status;
    ULONG_PTR NumberOfPages;

    /* Create a memory area. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
                                0, // nothing checks for VACB mareas, so set to 0
                                &Vacb->BaseAddress,
                                VACB_MAPPING_GRANULARITY,
                                PAGE_READWRITE,
                                (PMEMORY_AREA*)&Vacb->MemoryArea,
                                0,
                                PAGE_SIZE);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());
    if (!NT_SUCCESS(Status))
    {
        DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
        return Status;
    }

    ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
    ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);

    /* Create a virtual mapping for this memory area */
    NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
    for (i = 0; i < NumberOfPages; i++)
    {
        PFN_NUMBER PageFrameNumber;

        MI_SET_USAGE(MI_USAGE_CACHE);
        Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
        if (PageFrameNumber == 0)
        {
            DPRINT1("Unable to allocate page\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        Status = MmCreateVirtualMapping(NULL,
                                        (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
                                        PAGE_READWRITE,
                                        &PageFrameNumber,
                                        1);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Unable to create virtual mapping\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }

    return STATUS_SUCCESS;
}

static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
    {
        *Vacb = NULL;
        return STATUS_INVALID_PARAMETER;
    }

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    current->BaseAddress = NULL;
    current->Valid = FALSE;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
    }
#endif
    current->MappedCount = 0;
    current->DirtyVacbListEntry.Flink = NULL;
    current->DirtyVacbListEntry.Blink = NULL;
    current->ReferenceCount = 1;
    current->PinCount = 0;
    KeInitializeMutex(&current->Mutex, 0);
    CcRosAcquireVacbLock(current, NULL);
    KeAcquireGuardedMutex(&ViewLock);

    *Vacb = current;
    /* There is a window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check if a VACB for the
     * file offset exists. If there is a VACB, we release
     * our newly created VACB and return the existing one.
     */
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            CcRosReleaseVacbLock(*Vacb);
            KeReleaseGuardedMutex(&ViewLock);
            ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
            *Vacb = current;
            CcRosAcquireVacbLock(current, NULL);
            return STATUS_SUCCESS;
        }
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
    KeReleaseGuardedMutex(&ViewLock);

    MI_SET_USAGE(MI_USAGE_CACHE);
#if MI_TRACE_PFNS
    if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
    {
        PWCHAR pos;
        ULONG len = 0;
        pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
        if (pos)
        {
            len = wcslen(pos) * sizeof(WCHAR);
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
        }
        else
        {
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
        }
    }
#endif

    Status = CcRosMapVacb(current);
    if (!NT_SUCCESS(Status))
    {
        RemoveEntryList(&current->CacheMapVacbListEntry);
        RemoveEntryList(&current->VacbLruListEntry);
        CcRosReleaseVacbLock(current);
        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
    }

    return Status;
}

NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PLONGLONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the VACB to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *Vacb = current;
    *BaseOffset = current->FileOffset.QuadPart;
    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
CcRosRequestVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
/*
 * FUNCTION: Request a page mapping for a shared cache map
 */
{
    LONGLONG BaseOffset;

    ASSERT(SharedCacheMap);

    if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
    {
        DPRINT1("Bad fileoffset %I64x should be multiple of %x",
                FileOffset, VACB_MAPPING_GRANULARITY);
        KeBugCheck(CACHE_MANAGER);
    }

    return CcRosGetVacb(SharedCacheMap,
                        FileOffset,
                        &BaseOffset,
                        BaseAddress,
                        UptoDate,
                        Vacb);
}

static
VOID
CcFreeCachePage (
    PVOID Context,
    MEMORY_AREA* MemoryArea,
    PVOID Address,
    PFN_NUMBER Page,
    SWAPENTRY SwapEntry,
    BOOLEAN Dirty)
{
    ASSERT(SwapEntry == 0);
    if (Page != 0)
    {
        ASSERT(MmGetReferenceCountPage(Page) == 1);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }
}

NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}

/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;
    KIRQL oldIrql;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
        SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                CcRosReleaseVacbLock(current);

                KeAcquireGuardedMutex(&ViewLock);
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosVacbDecRefCount(current);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                KeReleaseGuardedMutex(&ViewLock);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}

NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        KIRQL OldIrql;

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosAcquireVacbLock(current, NULL);
            RemoveEntryList(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                RemoveEntryList(&current->DirtyVacbListEntry);
                CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
                current->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
                DPRINT1("Freeing dirty VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
            CcRosReleaseVacbLock(current);

            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosInternalFreeVacb(current);
        }

        KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}

VOID
NTAPI
CcRosReferenceCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    ASSERT(SharedCacheMap->OpenCount != 0);
    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);
}

VOID
NTAPI
CcRosRemoveIfClosed (
    PSECTION_OBJECT_POINTERS SectionObjectPointer)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    DPRINT("CcRosRemoveIfClosed()\n");
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
    {
        CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
    }
    KeReleaseGuardedMutex(&ViewLock);
}

VOID
NTAPI
CcRosDereferenceCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    if (SharedCacheMap->OpenCount > 0)
    {
        SharedCacheMap->OpenCount--;
        if (SharedCacheMap->OpenCount == 0)
        {
            MmFreeSectionSegments(SharedCacheMap->FileObject);
            CcRosDeleteFileCache(FileObject, SharedCacheMap);
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
}

NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
        if (FileObject->PrivateCacheMap != NULL)
        {
            FileObject->PrivateCacheMap = NULL;
            if (SharedCacheMap->OpenCount > 0)
            {
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
CcTryToInitializeFileCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    NTSTATUS Status;

    KeAcquireGuardedMutex(&ViewLock);

    ASSERT(FileObject->SectionObjectPointer);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap == NULL)
    {
        Status = STATUS_UNSUCCESSFUL;
    }
    else
    {
        if (FileObject->PrivateCacheMap == NULL)
        {
            FileObject->PrivateCacheMap = SharedCacheMap;
            SharedCacheMap->OpenCount++;
        }
        Status = STATUS_SUCCESS;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return Status;
}

NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        KIRQL OldIrql;

        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
    }
    if (FileObject->PrivateCacheMap == NULL)
    {
        FileObject->PrivateCacheMap = SharedCacheMap;
        SharedCacheMap->OpenCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}

/*
 * @implemented
 */
PFILE_OBJECT
NTAPI
CcGetFileObjectFromSectionPtrs (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        return SharedCacheMap->FileObject;
    }
    return NULL;
}

VOID
NTAPI
CcShutdownLazyWriter (
    VOID)
{
    /* Simply set the event, lazy writer will stop when it's done */
    KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
}

VOID
INIT_FUNCTION
NTAPI
CcInitView (
    VOID)
{
    HANDLE LazyWriter;
    NTSTATUS Status;
    KPRIORITY Priority;
    OBJECT_ATTRIBUTES ObjectAttributes;

    DPRINT("CcInitView()\n");

    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    InitializeListHead(&CcDeferredWrites);
    InitializeListHead(&CcCleanSharedCacheMapList);
    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
    KeInitializeSpinLock(&iSharedCacheMapLock);
    KeInitializeGuardedMutex(&ViewLock);
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);

    /* Initialize lazy writer events */
    KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
    KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);

    /* Define lazy writer threshold, depending on system type */
    switch (MmQuerySystemSize())
    {
        case MmSmallSystem:
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
            break;

        case MmMediumSystem:
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
            break;

        case MmLargeSystem:
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
            break;
    }

    /* Start the lazy writer thread */
    InitializeObjectAttributes(&ObjectAttributes,
                               NULL,
                               OBJ_KERNEL_HANDLE,
                               NULL,
                               NULL);
    Status = PsCreateSystemThread(&LazyWriter,
                                  THREAD_ALL_ACCESS,
                                  &ObjectAttributes,
                                  NULL,
                                  NULL,
                                  CciLazyWriter,
                                  NULL);
    if (!NT_SUCCESS(Status))
    {
        KeBugCheck(CACHE_INITIALIZATION_FAILED);
    }

    Priority = 27;
    Status = NtSetInformationThread(LazyWriter,
                                    ThreadPriority,
                                    &Priority,
                                    sizeof(Priority));
    ASSERT(NT_SUCCESS(Status));

    /* Handle is not needed */
    ObCloseHandle(LazyWriter, KernelMode);

    CcInitCacheZeroPage();
}

#if DBG && defined(KDBG)
BOOLEAN
ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
{
    PLIST_ENTRY ListEntry;
    UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");

    KdbpPrint("  Usage Summary (in kb)\n");
    KdbpPrint("Shared\t\tValid\tDirty\tName\n");
    /* No need to lock the spin lock here, we're in DBG */
    for (ListEntry = CcCleanSharedCacheMapList.Flink;
         ListEntry != &CcCleanSharedCacheMapList;
         ListEntry = ListEntry->Flink)
    {
        PLIST_ENTRY Vacbs;
        ULONG Valid = 0, Dirty = 0;
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PUNICODE_STRING FileName;

        SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);

        /* Dirty size */
        Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;

        /* First, count for all the associated VACB */
        for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
             Vacbs != &SharedCacheMap->CacheMapVacbListHead;
             Vacbs = Vacbs->Flink)
        {
            PROS_VACB Vacb;

            Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
            if (Vacb->Valid)
            {
                Valid += VACB_MAPPING_GRANULARITY / 1024;
            }
        }

        /* Setup name */
        if (SharedCacheMap->FileObject != NULL &&
            SharedCacheMap->FileObject->FileName.Length != 0)
        {
            FileName = &SharedCacheMap->FileObject->FileName;
        }
        else
        {
            FileName = &NoName;
        }

        /* And print */
        KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
    }

    return TRUE;
}
#endif

/* EOF */