2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
11 /* NOTES **********************************************************************
13 * This is not the NT implementation of a file cache nor anything much like
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
19 * (1) If caching for the FCB hasn't been initiated then so do by calling
20 * CcInitializeFileCache.
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
27 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
29 * (4) Copy the data into or out of the page as necessary.
31 * (5) Release the cache page
33 /* INCLUDES ******************************************************************/
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
43 /* GLOBALS *******************************************************************/
/* List of VACBs that currently hold dirty (unflushed) data */
45 LIST_ENTRY DirtyVacbListHead
;
/* LRU-ordered list of all VACBs; trimmed from the head, refreshed at the tail */
46 static LIST_ENTRY VacbLruListHead
;
/* Global lock serializing access to the VACB lists and shared cache maps */
48 KGUARDED_MUTEX ViewLock
;
/* Lookaside lists for fast non-paged allocation of cache structures */
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList
;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList
;
55 * - Amount of pages flushed by lazy writer
56 * - Number of times lazy writer ran
/* Statistics updated on each lazy writer pass */
58 ULONG CcLazyWritePages
= 0;
59 ULONG CcLazyWriteIos
= 0;
61 /* Internal vars (MS):
62 * - Threshold above which lazy writer will start action
63 * - Amount of dirty pages
64 * - List for deferred writes
65 * - Spinlock when dealing with the deferred list
66 * - List for "clean" shared cache maps
67 * - One second delay for lazy writer
69 ULONG CcDirtyPageThreshold
= 0;
70 ULONG CcTotalDirtyPages
= 0;
71 LIST_ENTRY CcDeferredWrites
;
72 KSPIN_LOCK CcDeferredWriteSpinLock
;
73 LIST_ENTRY CcCleanSharedCacheMapList
;
/* Negative value = relative timeout; 1 second expressed in 100ns units */
74 LARGE_INTEGER CcIdleDelay
= RTL_CONSTANT_LARGE_INTEGER((LONGLONG
)-1*1000*1000*10);
76 /* Internal vars (ROS):
77 * - Event to notify lazy writer to shutdown
78 * - Event to inform watchers lazy writer is done for this loop
79 * - Lock for the CcCleanSharedCacheMapList list
81 KEVENT iLazyWriterShutdown
;
82 KEVENT iLazyWriterNotify
;
83 KSPIN_LOCK iSharedCacheMapLock
;
86 static void CcRosVacbIncRefCount_(PROS_VACB vacb
, const char* file
, int line
)
88 ++vacb
->ReferenceCount
;
89 if (vacb
->SharedCacheMap
->Trace
)
91 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
92 file
, line
, vacb
, vacb
->ReferenceCount
, vacb
->Dirty
, vacb
->PageOut
);
95 static void CcRosVacbDecRefCount_(PROS_VACB vacb
, const char* file
, int line
)
97 --vacb
->ReferenceCount
;
98 ASSERT(!(vacb
->ReferenceCount
== 0 && vacb
->Dirty
));
99 if (vacb
->SharedCacheMap
->Trace
)
101 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
102 file
, line
, vacb
, vacb
->ReferenceCount
, vacb
->Dirty
, vacb
->PageOut
);
/* Reference-count helpers: in DBG builds route through the tracing helpers
 * above (recording __FILE__/__LINE__); in free builds use a plain inc/dec.
 * Without this conditional the second pair of definitions would redefine
 * the macros (visible defect in the text as found). */
#if DBG
#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
/* Forward declaration: releases a VACB's memory area and returns the
 * structure to the VACB lookaside list (defined later in this file). */
113 CcRosInternalFreeVacb(PROS_VACB Vacb
);
116 /* FUNCTIONS *****************************************************************/
121 PROS_SHARED_CACHE_MAP SharedCacheMap
,
126 PLIST_ENTRY current_entry
;
132 SharedCacheMap
->Trace
= Trace
;
136 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
138 KeAcquireGuardedMutex(&ViewLock
);
139 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldirql
);
141 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
142 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
144 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
145 current_entry
= current_entry
->Flink
;
147 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
148 current
, current
->ReferenceCount
, current
->Dirty
, current
->PageOut
);
150 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
151 KeReleaseGuardedMutex(&ViewLock
);
155 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
159 UNREFERENCED_PARAMETER(SharedCacheMap
);
160 UNREFERENCED_PARAMETER(Trace
);
172 Status
= CcWriteVirtualAddress(Vacb
);
173 if (NT_SUCCESS(Status
))
175 KeAcquireGuardedMutex(&ViewLock
);
176 KeAcquireSpinLock(&Vacb
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
179 RemoveEntryList(&Vacb
->DirtyVacbListEntry
);
180 CcTotalDirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
181 Vacb
->SharedCacheMap
->DirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
182 CcRosVacbDecRefCount(Vacb
);
184 KeReleaseSpinLock(&Vacb
->SharedCacheMap
->CacheMapLock
, oldIrql
);
185 KeReleaseGuardedMutex(&ViewLock
);
193 CcRosFlushDirtyPages (
197 BOOLEAN CalledFromLazy
)
199 PLIST_ENTRY current_entry
;
203 LARGE_INTEGER ZeroTimeout
;
205 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
208 ZeroTimeout
.QuadPart
= 0;
210 KeEnterCriticalRegion();
211 KeAcquireGuardedMutex(&ViewLock
);
213 current_entry
= DirtyVacbListHead
.Flink
;
214 if (current_entry
== &DirtyVacbListHead
)
216 DPRINT("No Dirty pages\n");
219 while ((current_entry
!= &DirtyVacbListHead
) && (Target
> 0))
221 current
= CONTAINING_RECORD(current_entry
,
224 current_entry
= current_entry
->Flink
;
226 CcRosVacbIncRefCount(current
);
228 /* When performing lazy write, don't handle temporary files */
229 if (CalledFromLazy
&&
230 BooleanFlagOn(current
->SharedCacheMap
->FileObject
->Flags
, FO_TEMPORARY_FILE
))
232 CcRosVacbDecRefCount(current
);
236 Locked
= current
->SharedCacheMap
->Callbacks
->AcquireForLazyWrite(
237 current
->SharedCacheMap
->LazyWriteContext
, Wait
);
240 CcRosVacbDecRefCount(current
);
244 Status
= CcRosAcquireVacbLock(current
,
245 Wait
? NULL
: &ZeroTimeout
);
246 if (Status
!= STATUS_SUCCESS
)
248 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
249 current
->SharedCacheMap
->LazyWriteContext
);
250 CcRosVacbDecRefCount(current
);
254 ASSERT(current
->Dirty
);
256 /* One reference is added above */
257 if ((current
->ReferenceCount
> 2 && current
->PinCount
== 0) ||
258 (current
->ReferenceCount
> 3 && current
->PinCount
> 1))
260 CcRosReleaseVacbLock(current
);
261 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
262 current
->SharedCacheMap
->LazyWriteContext
);
263 CcRosVacbDecRefCount(current
);
267 KeReleaseGuardedMutex(&ViewLock
);
269 Status
= CcRosFlushVacb(current
);
271 CcRosReleaseVacbLock(current
);
272 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
273 current
->SharedCacheMap
->LazyWriteContext
);
275 KeAcquireGuardedMutex(&ViewLock
);
276 CcRosVacbDecRefCount(current
);
278 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
) &&
279 (Status
!= STATUS_MEDIA_WRITE_PROTECTED
))
281 DPRINT1("CC: Failed to flush VACB.\n");
287 /* How many pages did we free? */
288 PagesFreed
= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
289 (*Count
) += PagesFreed
;
291 /* Make sure we don't overflow target! */
292 if (Target
< PagesFreed
)
294 /* If we would have, jump to zero directly */
299 Target
-= PagesFreed
;
303 current_entry
= DirtyVacbListHead
.Flink
;
306 KeReleaseGuardedMutex(&ViewLock
);
307 KeLeaveCriticalRegion();
309 DPRINT("CcRosFlushDirtyPages() finished\n");
310 return STATUS_SUCCESS
;
313 /* FIXME: Someday this could somewhat implement write-behind/read-ahead */
316 CciLazyWriter(PVOID Unused
)
321 PLIST_ENTRY ListEntry
;
322 ULONG Target
, Count
= 0;
324 /* One per second or until we have to stop */
325 Status
= KeWaitForSingleObject(&iLazyWriterShutdown
,
331 /* If we succeed, we have to stop running! */
332 if (Status
== STATUS_SUCCESS
)
337 /* We're not sleeping anymore */
338 KeClearEvent(&iLazyWriterNotify
);
340 /* Our target is one-eighth of the dirty pages */
341 Target
= CcTotalDirtyPages
/ 8;
345 DPRINT("Lazy writer starting (%d)\n", Target
);
346 CcRosFlushDirtyPages(Target
, &Count
, FALSE
, TRUE
);
348 /* And update stats */
349 CcLazyWritePages
+= Count
;
351 DPRINT("Lazy writer done (%d)\n", Count
);
354 /* Inform people waiting on us that we're done */
355 KeSetEvent(&iLazyWriterNotify
, IO_DISK_INCREMENT
, FALSE
);
357 /* Likely not optimal, but let's handle one deferred write now! */
358 ListEntry
= ExInterlockedRemoveHeadList(&CcDeferredWrites
, &CcDeferredWriteSpinLock
);
359 if (ListEntry
!= NULL
)
361 PDEFERRED_WRITE Context
;
363 /* Extract the context */
364 Context
= CONTAINING_RECORD(ListEntry
, DEFERRED_WRITE
, DeferredWriteLinks
);
365 ASSERT(Context
->NodeTypeCode
== NODE_TYPE_DEFERRED_WRITE
);
367 /* Can we write now? */
368 if (CcCanIWrite(Context
->FileObject
, Context
->BytesToWrite
, FALSE
, TRUE
))
370 /* Yes! Do it, and destroy the associated context */
371 Context
->PostRoutine(Context
->Context1
, Context
->Context2
);
372 ExFreePoolWithTag(Context
, 'CcDw');
376 /* Otherwise, requeue it at the tail, so that it doesn't block others.
377 * This clearly needs improvement, but given the poor algorithm used now,
378 * it's better than nothing!
380 ExInterlockedInsertTailList(&CcDeferredWrites
,
381 &Context
->DeferredWriteLinks
,
382 &CcDeferredWriteSpinLock
);
394 * FUNCTION: Try to free some memory from the file cache.
396 * Target - The number of pages to be freed.
397 * Priority - The priority of free (currently unused).
398 * NrFreed - Points to a variable where the number of pages
399 * actually freed is returned.
402 PLIST_ENTRY current_entry
;
409 BOOLEAN FlushedPages
= FALSE
;
411 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
413 InitializeListHead(&FreeList
);
418 KeAcquireGuardedMutex(&ViewLock
);
420 current_entry
= VacbLruListHead
.Flink
;
421 while (current_entry
!= &VacbLruListHead
)
423 current
= CONTAINING_RECORD(current_entry
,
426 current_entry
= current_entry
->Flink
;
428 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
430 /* Reference the VACB */
431 CcRosVacbIncRefCount(current
);
433 /* Check if it's mapped and not dirty */
434 if (current
->MappedCount
> 0 && !current
->Dirty
)
436 /* We have to break these locks because Cc sucks */
437 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
438 KeReleaseGuardedMutex(&ViewLock
);
440 /* Page out the VACB */
441 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++)
443 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
445 MmPageOutPhysicalAddress(Page
);
448 /* Reacquire the locks */
449 KeAcquireGuardedMutex(&ViewLock
);
450 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
453 /* Dereference the VACB */
454 CcRosVacbDecRefCount(current
);
456 /* Check if we can free this entry now */
457 if (current
->ReferenceCount
== 0)
459 ASSERT(!current
->Dirty
);
460 ASSERT(!current
->MappedCount
);
462 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
463 RemoveEntryList(¤t
->VacbLruListEntry
);
464 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
466 /* Calculate how many pages we freed for Mm */
467 PagesFreed
= min(VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
, Target
);
468 Target
-= PagesFreed
;
469 (*NrFreed
) += PagesFreed
;
472 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
475 KeReleaseGuardedMutex(&ViewLock
);
477 /* Try flushing pages if we haven't met our target */
478 if ((Target
> 0) && !FlushedPages
)
480 /* Flush dirty pages to disk */
481 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
, FALSE
);
484 /* We can only swap as many pages as we flushed */
485 if (PagesFreed
< Target
) Target
= PagesFreed
;
487 /* Check if we flushed anything */
490 /* Try again after flushing dirty pages */
491 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
496 while (!IsListEmpty(&FreeList
))
498 current_entry
= RemoveHeadList(&FreeList
);
499 current
= CONTAINING_RECORD(current_entry
,
501 CacheMapVacbListEntry
);
502 CcRosInternalFreeVacb(current
);
505 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
507 return STATUS_SUCCESS
;
513 PROS_SHARED_CACHE_MAP SharedCacheMap
,
519 ASSERT(SharedCacheMap
);
521 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
522 SharedCacheMap
, Vacb
, Valid
);
526 if (Dirty
&& !Vacb
->Dirty
)
528 CcRosMarkDirtyVacb(Vacb
);
535 CcRosVacbDecRefCount(Vacb
);
536 if (Mapped
&& (Vacb
->MappedCount
== 1))
538 CcRosVacbIncRefCount(Vacb
);
541 CcRosReleaseVacbLock(Vacb
);
543 return STATUS_SUCCESS
;
546 /* Returns with VACB Lock Held! */
550 PROS_SHARED_CACHE_MAP SharedCacheMap
,
553 PLIST_ENTRY current_entry
;
557 ASSERT(SharedCacheMap
);
559 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
560 SharedCacheMap
, FileOffset
);
562 KeAcquireGuardedMutex(&ViewLock
);
563 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
565 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
566 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
568 current
= CONTAINING_RECORD(current_entry
,
570 CacheMapVacbListEntry
);
571 if (IsPointInRange(current
->FileOffset
.QuadPart
,
572 VACB_MAPPING_GRANULARITY
,
575 CcRosVacbIncRefCount(current
);
576 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
577 KeReleaseGuardedMutex(&ViewLock
);
578 CcRosAcquireVacbLock(current
, NULL
);
581 if (current
->FileOffset
.QuadPart
> FileOffset
)
583 current_entry
= current_entry
->Flink
;
586 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
587 KeReleaseGuardedMutex(&ViewLock
);
598 PROS_SHARED_CACHE_MAP SharedCacheMap
;
600 SharedCacheMap
= Vacb
->SharedCacheMap
;
602 KeAcquireGuardedMutex(&ViewLock
);
603 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
605 ASSERT(!Vacb
->Dirty
);
607 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
608 CcTotalDirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
609 Vacb
->SharedCacheMap
->DirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
610 CcRosVacbIncRefCount(Vacb
);
612 /* Move to the tail of the LRU list */
613 RemoveEntryList(&Vacb
->VacbLruListEntry
);
614 InsertTailList(&VacbLruListHead
, &Vacb
->VacbLruListEntry
);
618 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
619 KeReleaseGuardedMutex(&ViewLock
);
625 PROS_SHARED_CACHE_MAP SharedCacheMap
,
630 ASSERT(SharedCacheMap
);
632 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
633 SharedCacheMap
, FileOffset
);
635 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
638 KeBugCheck(CACHE_MANAGER
);
643 CcRosMarkDirtyVacb(Vacb
);
646 CcRosReleaseVacbLock(Vacb
);
648 return STATUS_SUCCESS
;
654 PROS_SHARED_CACHE_MAP SharedCacheMap
,
660 ASSERT(SharedCacheMap
);
662 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
663 SharedCacheMap
, FileOffset
, NowDirty
);
665 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
668 return STATUS_UNSUCCESSFUL
;
671 if (NowDirty
&& !Vacb
->Dirty
)
673 CcRosMarkDirtyVacb(Vacb
);
678 CcRosVacbDecRefCount(Vacb
);
679 if (Vacb
->MappedCount
== 0)
681 CcRosVacbDecRefCount(Vacb
);
684 CcRosReleaseVacbLock(Vacb
);
686 return STATUS_SUCCESS
;
696 ULONG_PTR NumberOfPages
;
698 /* Create a memory area. */
699 MmLockAddressSpace(MmGetKernelAddressSpace());
700 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
701 0, // nothing checks for VACB mareas, so set to 0
703 VACB_MAPPING_GRANULARITY
,
705 (PMEMORY_AREA
*)&Vacb
->MemoryArea
,
708 MmUnlockAddressSpace(MmGetKernelAddressSpace());
709 if (!NT_SUCCESS(Status
))
711 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status
, Vacb
);
715 ASSERT(((ULONG_PTR
)Vacb
->BaseAddress
% PAGE_SIZE
) == 0);
716 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
> (ULONG_PTR
)MmSystemRangeStart
);
718 /* Create a virtual mapping for this memory area */
719 NumberOfPages
= BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY
);
720 for (i
= 0; i
< NumberOfPages
; i
++)
722 PFN_NUMBER PageFrameNumber
;
724 MI_SET_USAGE(MI_USAGE_CACHE
);
725 Status
= MmRequestPageMemoryConsumer(MC_CACHE
, TRUE
, &PageFrameNumber
);
726 if (PageFrameNumber
== 0)
728 DPRINT1("Unable to allocate page\n");
729 KeBugCheck(MEMORY_MANAGEMENT
);
732 Status
= MmCreateVirtualMapping(NULL
,
733 (PVOID
)((ULONG_PTR
)Vacb
->BaseAddress
+ (i
* PAGE_SIZE
)),
737 if (!NT_SUCCESS(Status
))
739 DPRINT1("Unable to create virtual mapping\n");
740 KeBugCheck(MEMORY_MANAGEMENT
);
744 return STATUS_SUCCESS
;
750 PROS_SHARED_CACHE_MAP SharedCacheMap
,
756 PLIST_ENTRY current_entry
;
760 ASSERT(SharedCacheMap
);
762 DPRINT("CcRosCreateVacb()\n");
764 if (FileOffset
>= SharedCacheMap
->SectionSize
.QuadPart
)
767 return STATUS_INVALID_PARAMETER
;
770 current
= ExAllocateFromNPagedLookasideList(&VacbLookasideList
);
771 current
->BaseAddress
= NULL
;
772 current
->Valid
= FALSE
;
773 current
->Dirty
= FALSE
;
774 current
->PageOut
= FALSE
;
775 current
->FileOffset
.QuadPart
= ROUND_DOWN(FileOffset
, VACB_MAPPING_GRANULARITY
);
776 current
->SharedCacheMap
= SharedCacheMap
;
778 if (SharedCacheMap
->Trace
)
780 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap
, current
);
783 current
->MappedCount
= 0;
784 current
->DirtyVacbListEntry
.Flink
= NULL
;
785 current
->DirtyVacbListEntry
.Blink
= NULL
;
786 current
->ReferenceCount
= 1;
787 current
->PinCount
= 0;
788 KeInitializeMutex(¤t
->Mutex
, 0);
789 CcRosAcquireVacbLock(current
, NULL
);
790 KeAcquireGuardedMutex(&ViewLock
);
793 /* There is window between the call to CcRosLookupVacb
794 * and CcRosCreateVacb. We must check if a VACB for the
795 * file offset exist. If there is a VACB, we release
796 * our newly created VACB and return the existing one.
798 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
799 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
801 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
803 current
= CONTAINING_RECORD(current_entry
,
805 CacheMapVacbListEntry
);
806 if (IsPointInRange(current
->FileOffset
.QuadPart
,
807 VACB_MAPPING_GRANULARITY
,
810 CcRosVacbIncRefCount(current
);
811 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
813 if (SharedCacheMap
->Trace
)
815 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
821 CcRosReleaseVacbLock(*Vacb
);
822 KeReleaseGuardedMutex(&ViewLock
);
823 ExFreeToNPagedLookasideList(&VacbLookasideList
, *Vacb
);
825 CcRosAcquireVacbLock(current
, NULL
);
826 return STATUS_SUCCESS
;
828 if (current
->FileOffset
.QuadPart
< FileOffset
)
830 ASSERT(previous
== NULL
||
831 previous
->FileOffset
.QuadPart
< current
->FileOffset
.QuadPart
);
834 if (current
->FileOffset
.QuadPart
> FileOffset
)
836 current_entry
= current_entry
->Flink
;
838 /* There was no existing VACB. */
842 InsertHeadList(&previous
->CacheMapVacbListEntry
, ¤t
->CacheMapVacbListEntry
);
846 InsertHeadList(&SharedCacheMap
->CacheMapVacbListHead
, ¤t
->CacheMapVacbListEntry
);
848 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
849 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
850 KeReleaseGuardedMutex(&ViewLock
);
852 MI_SET_USAGE(MI_USAGE_CACHE
);
854 if ((SharedCacheMap
->FileObject
) && (SharedCacheMap
->FileObject
->FileName
.Buffer
))
858 pos
= wcsrchr(SharedCacheMap
->FileObject
->FileName
.Buffer
, '\\');
861 len
= wcslen(pos
) * sizeof(WCHAR
);
862 snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
866 snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%wZ", &SharedCacheMap
->FileObject
->FileName
);
871 Status
= CcRosMapVacb(current
);
872 if (!NT_SUCCESS(Status
))
874 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
875 RemoveEntryList(¤t
->VacbLruListEntry
);
876 CcRosReleaseVacbLock(current
);
877 ExFreeToNPagedLookasideList(&VacbLookasideList
, current
);
886 PROS_SHARED_CACHE_MAP SharedCacheMap
,
888 PLONGLONG BaseOffset
,
896 ASSERT(SharedCacheMap
);
898 DPRINT("CcRosGetVacb()\n");
901 * Look for a VACB already mapping the same data.
903 current
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
907 * Otherwise create a new VACB.
909 Status
= CcRosCreateVacb(SharedCacheMap
, FileOffset
, ¤t
);
910 if (!NT_SUCCESS(Status
))
916 KeAcquireGuardedMutex(&ViewLock
);
918 /* Move to the tail of the LRU list */
919 RemoveEntryList(¤t
->VacbLruListEntry
);
920 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
922 KeReleaseGuardedMutex(&ViewLock
);
925 * Return information about the VACB to the caller.
927 *UptoDate
= current
->Valid
;
928 *BaseAddress
= current
->BaseAddress
;
929 DPRINT("*BaseAddress %p\n", *BaseAddress
);
931 *BaseOffset
= current
->FileOffset
.QuadPart
;
932 return STATUS_SUCCESS
;
938 PROS_SHARED_CACHE_MAP SharedCacheMap
,
944 * FUNCTION: Request a page mapping for a shared cache map
949 ASSERT(SharedCacheMap
);
951 if (FileOffset
% VACB_MAPPING_GRANULARITY
!= 0)
953 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
954 FileOffset
, VACB_MAPPING_GRANULARITY
);
955 KeBugCheck(CACHE_MANAGER
);
958 return CcRosGetVacb(SharedCacheMap
,
970 MEMORY_AREA
* MemoryArea
,
976 ASSERT(SwapEntry
== 0);
979 ASSERT(MmGetReferenceCountPage(Page
) == 1);
980 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
985 CcRosInternalFreeVacb (
988 * FUNCTION: Releases a VACB associated with a shared cache map
991 DPRINT("Freeing VACB 0x%p\n", Vacb
);
993 if (Vacb
->SharedCacheMap
->Trace
)
995 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb
->SharedCacheMap
, Vacb
);
999 MmLockAddressSpace(MmGetKernelAddressSpace());
1000 MmFreeMemoryArea(MmGetKernelAddressSpace(),
1004 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1006 ExFreeToNPagedLookasideList(&VacbLookasideList
, Vacb
);
1007 return STATUS_SUCCESS
;
1016 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
1017 IN PLARGE_INTEGER FileOffset OPTIONAL
,
1019 OUT PIO_STATUS_BLOCK IoStatus
)
1021 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1022 LARGE_INTEGER Offset
;
1023 LONGLONG RemainingLength
;
1028 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1029 SectionObjectPointers
, FileOffset
, Length
);
1031 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1032 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
1034 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1036 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1037 ASSERT(SharedCacheMap
);
1040 Offset
= *FileOffset
;
1041 RemainingLength
= Length
;
1045 Offset
.QuadPart
= 0;
1046 RemainingLength
= SharedCacheMap
->FileSize
.QuadPart
;
1051 IoStatus
->Status
= STATUS_SUCCESS
;
1052 IoStatus
->Information
= 0;
1055 while (RemainingLength
> 0)
1057 current
= CcRosLookupVacb(SharedCacheMap
, Offset
.QuadPart
);
1058 if (current
!= NULL
)
1062 Status
= CcRosFlushVacb(current
);
1063 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
1065 IoStatus
->Status
= Status
;
1069 CcRosReleaseVacbLock(current
);
1071 KeAcquireGuardedMutex(&ViewLock
);
1072 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1073 CcRosVacbDecRefCount(current
);
1074 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1075 KeReleaseGuardedMutex(&ViewLock
);
1078 Offset
.QuadPart
+= VACB_MAPPING_GRANULARITY
;
1079 RemainingLength
-= min(RemainingLength
, VACB_MAPPING_GRANULARITY
);
1086 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1093 CcRosDeleteFileCache (
1094 PFILE_OBJECT FileObject
,
1095 PROS_SHARED_CACHE_MAP SharedCacheMap
)
1097 * FUNCTION: Releases the shared cache map associated with a file object
1100 PLIST_ENTRY current_entry
;
1102 LIST_ENTRY FreeList
;
1105 ASSERT(SharedCacheMap
);
1107 SharedCacheMap
->OpenCount
++;
1108 KeReleaseGuardedMutex(&ViewLock
);
1110 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1112 KeAcquireGuardedMutex(&ViewLock
);
1113 SharedCacheMap
->OpenCount
--;
1114 if (SharedCacheMap
->OpenCount
== 0)
1118 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1123 InitializeListHead(&FreeList
);
1124 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1125 while (!IsListEmpty(&SharedCacheMap
->CacheMapVacbListHead
))
1127 current_entry
= RemoveTailList(&SharedCacheMap
->CacheMapVacbListHead
);
1128 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1130 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1131 CcRosAcquireVacbLock(current
, NULL
);
1132 RemoveEntryList(¤t
->VacbLruListEntry
);
1135 RemoveEntryList(¤t
->DirtyVacbListEntry
);
1136 CcTotalDirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
1137 current
->SharedCacheMap
->DirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
1138 DPRINT1("Freeing dirty VACB\n");
1140 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
1141 CcRosReleaseVacbLock(current
);
1143 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1146 SharedCacheMap
->Trace
= FALSE
;
1148 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1150 KeReleaseGuardedMutex(&ViewLock
);
1151 ObDereferenceObject(SharedCacheMap
->FileObject
);
1153 while (!IsListEmpty(&FreeList
))
1155 current_entry
= RemoveTailList(&FreeList
);
1156 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1157 CcRosInternalFreeVacb(current
);
1160 KeAcquireSpinLock(&iSharedCacheMapLock
, &OldIrql
);
1161 RemoveEntryList(&SharedCacheMap
->SharedCacheMapLinks
);
1162 KeReleaseSpinLock(&iSharedCacheMapLock
, OldIrql
);
1164 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1165 KeAcquireGuardedMutex(&ViewLock
);
1167 return STATUS_SUCCESS
;
1172 CcRosReferenceCache (
1173 PFILE_OBJECT FileObject
)
1175 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1176 KeAcquireGuardedMutex(&ViewLock
);
1177 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1178 ASSERT(SharedCacheMap
);
1179 ASSERT(SharedCacheMap
->OpenCount
!= 0);
1180 SharedCacheMap
->OpenCount
++;
1181 KeReleaseGuardedMutex(&ViewLock
);
1186 CcRosRemoveIfClosed (
1187 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1189 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1190 DPRINT("CcRosRemoveIfClosed()\n");
1191 KeAcquireGuardedMutex(&ViewLock
);
1192 SharedCacheMap
= SectionObjectPointer
->SharedCacheMap
;
1193 if (SharedCacheMap
&& SharedCacheMap
->OpenCount
== 0)
1195 CcRosDeleteFileCache(SharedCacheMap
->FileObject
, SharedCacheMap
);
1197 KeReleaseGuardedMutex(&ViewLock
);
1203 CcRosDereferenceCache (
1204 PFILE_OBJECT FileObject
)
1206 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1207 KeAcquireGuardedMutex(&ViewLock
);
1208 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1209 ASSERT(SharedCacheMap
);
1210 if (SharedCacheMap
->OpenCount
> 0)
1212 SharedCacheMap
->OpenCount
--;
1213 if (SharedCacheMap
->OpenCount
== 0)
1215 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1216 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1219 KeReleaseGuardedMutex(&ViewLock
);
1224 CcRosReleaseFileCache (
1225 PFILE_OBJECT FileObject
)
1227 * FUNCTION: Called by the file system when a handle to a file object
1231 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1233 KeAcquireGuardedMutex(&ViewLock
);
1235 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1237 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1238 if (FileObject
->PrivateCacheMap
!= NULL
)
1240 FileObject
->PrivateCacheMap
= NULL
;
1241 if (SharedCacheMap
->OpenCount
> 0)
1243 SharedCacheMap
->OpenCount
--;
1244 if (SharedCacheMap
->OpenCount
== 0)
1246 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1247 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1252 KeReleaseGuardedMutex(&ViewLock
);
1253 return STATUS_SUCCESS
;
1258 CcTryToInitializeFileCache (
1259 PFILE_OBJECT FileObject
)
1261 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1264 KeAcquireGuardedMutex(&ViewLock
);
1266 ASSERT(FileObject
->SectionObjectPointer
);
1267 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1268 if (SharedCacheMap
== NULL
)
1270 Status
= STATUS_UNSUCCESSFUL
;
1274 if (FileObject
->PrivateCacheMap
== NULL
)
1276 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1277 SharedCacheMap
->OpenCount
++;
1279 Status
= STATUS_SUCCESS
;
1281 KeReleaseGuardedMutex(&ViewLock
);
1289 CcRosInitializeFileCache (
1290 PFILE_OBJECT FileObject
,
1291 PCC_FILE_SIZES FileSizes
,
1293 PCACHE_MANAGER_CALLBACKS CallBacks
,
1294 PVOID LazyWriterContext
)
1296 * FUNCTION: Initializes a shared cache map for a file object
1299 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1301 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1302 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1303 FileObject
, SharedCacheMap
);
1305 KeAcquireGuardedMutex(&ViewLock
);
1306 if (SharedCacheMap
== NULL
)
1310 SharedCacheMap
= ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList
);
1311 if (SharedCacheMap
== NULL
)
1313 KeReleaseGuardedMutex(&ViewLock
);
1314 return STATUS_INSUFFICIENT_RESOURCES
;
1316 RtlZeroMemory(SharedCacheMap
, sizeof(*SharedCacheMap
));
1317 ObReferenceObjectByPointer(FileObject
,
1321 SharedCacheMap
->FileObject
= FileObject
;
1322 SharedCacheMap
->Callbacks
= CallBacks
;
1323 SharedCacheMap
->LazyWriteContext
= LazyWriterContext
;
1324 SharedCacheMap
->SectionSize
= FileSizes
->AllocationSize
;
1325 SharedCacheMap
->FileSize
= FileSizes
->FileSize
;
1326 SharedCacheMap
->PinAccess
= PinAccess
;
1327 SharedCacheMap
->DirtyPageThreshold
= 0;
1328 SharedCacheMap
->DirtyPages
= 0;
1329 KeInitializeSpinLock(&SharedCacheMap
->CacheMapLock
);
1330 InitializeListHead(&SharedCacheMap
->CacheMapVacbListHead
);
1331 FileObject
->SectionObjectPointer
->SharedCacheMap
= SharedCacheMap
;
1333 KeAcquireSpinLock(&iSharedCacheMapLock
, &OldIrql
);
1334 InsertTailList(&CcCleanSharedCacheMapList
, &SharedCacheMap
->SharedCacheMapLinks
);
1335 KeReleaseSpinLock(&iSharedCacheMapLock
, OldIrql
);
1337 if (FileObject
->PrivateCacheMap
== NULL
)
1339 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1340 SharedCacheMap
->OpenCount
++;
1342 KeReleaseGuardedMutex(&ViewLock
);
1344 return STATUS_SUCCESS
;
/* Returns the FILE_OBJECT backing the shared cache map referenced by the
 * given section object pointers.
 * NOTE(review): the behavior when no shared cache map exists falls outside
 * the lines visible here — confirm the fallthrough path in the full file. */
1352 CcGetFileObjectFromSectionPtrs (
1353 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1355 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1357 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p\n", SectionObjectPointers
);
1359 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1361 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1362 ASSERT(SharedCacheMap
);
1363 return SharedCacheMap
->FileObject
;
/* Signals the lazy writer thread to terminate: sets iLazyWriterShutdown,
 * which the lazy writer's wait loop treats as its stop condition. */
1370 CcShutdownLazyWriter (
1373 /* Simply set the event, lazy writer will stop when it's done */
1374 KeSetEvent(&iLazyWriterShutdown
, IO_DISK_INCREMENT
, FALSE
);
1386 OBJECT_ATTRIBUTES ObjectAttributes
;
1388 DPRINT("CcInitView()\n");
1390 InitializeListHead(&DirtyVacbListHead
);
1391 InitializeListHead(&VacbLruListHead
);
1392 InitializeListHead(&CcDeferredWrites
);
1393 InitializeListHead(&CcCleanSharedCacheMapList
);
1394 KeInitializeSpinLock(&CcDeferredWriteSpinLock
);
1395 KeInitializeSpinLock(&iSharedCacheMapLock
);
1396 KeInitializeGuardedMutex(&ViewLock
);
1397 ExInitializeNPagedLookasideList(&iBcbLookasideList
,
1401 sizeof(INTERNAL_BCB
),
1404 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList
,
1408 sizeof(ROS_SHARED_CACHE_MAP
),
1409 TAG_SHARED_CACHE_MAP
,
1411 ExInitializeNPagedLookasideList(&VacbLookasideList
,
1419 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1421 /* Initialize lazy writer events */
1422 KeInitializeEvent(&iLazyWriterShutdown
, SynchronizationEvent
, FALSE
);
1423 KeInitializeEvent(&iLazyWriterNotify
, NotificationEvent
, FALSE
);
1425 /* Define lazy writer threshold, depending on system type */
1426 switch (MmQuerySystemSize())
1429 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 8;
1432 case MmMediumSystem
:
1433 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 4;
1437 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 8 + MmNumberOfPhysicalPages
/ 4;
1441 /* Start the lazy writer thread */
1442 InitializeObjectAttributes(&ObjectAttributes
,
1447 Status
= PsCreateSystemThread(&LazyWriter
,
1454 if (!NT_SUCCESS(Status
))
1460 Status
= NtSetInformationThread(LazyWriter
,
1464 ASSERT(NT_SUCCESS(Status
));
1466 /* Handle is not needed */
1467 ObCloseHandle(LazyWriter
, KernelMode
);
1469 CcInitCacheZeroPage();
1474 #if DBG && defined(KDBG)
1476 ExpKdbgExtFileCache(ULONG Argc
, PCHAR Argv
[])
1478 PLIST_ENTRY ListEntry
;
1479 UNICODE_STRING NoName
= RTL_CONSTANT_STRING(L
"No name for File");
1481 KdbpPrint(" Usage Summary (in kb)\n");
1482 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1483 /* No need to lock the spin lock here, we're in DBG */
1484 for (ListEntry
= CcCleanSharedCacheMapList
.Flink
;
1485 ListEntry
!= &CcCleanSharedCacheMapList
;
1486 ListEntry
= ListEntry
->Flink
)
1489 ULONG Valid
= 0, Dirty
= 0;
1490 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1491 PUNICODE_STRING FileName
;
1493 SharedCacheMap
= CONTAINING_RECORD(ListEntry
, ROS_SHARED_CACHE_MAP
, SharedCacheMapLinks
);
1496 Dirty
= (SharedCacheMap
->DirtyPages
* PAGE_SIZE
) / 1024;
1498 /* First, count for all the associated VACB */
1499 for (Vacbs
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
1500 Vacbs
!= &SharedCacheMap
->CacheMapVacbListHead
;
1501 Vacbs
= Vacbs
->Flink
)
1505 Vacb
= CONTAINING_RECORD(Vacbs
, ROS_VACB
, CacheMapVacbListEntry
);
1508 Valid
+= VACB_MAPPING_GRANULARITY
/ 1024;
1513 if (SharedCacheMap
->FileObject
!= NULL
&&
1514 SharedCacheMap
->FileObject
->FileName
.Length
!= 0)
1516 FileName
= &SharedCacheMap
->FileObject
->FileName
;
1524 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap
, Valid
, Dirty
, FileName
);