/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows
 *
 * (1) If caching for the FCB hasn't been initiated then so do by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page
 */
33 /* INCLUDES ******************************************************************/
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
43 /* GLOBALS *******************************************************************/
45 LIST_ENTRY DirtyVacbListHead
;
46 static LIST_ENTRY VacbLruListHead
;
48 KGUARDED_MUTEX ViewLock
;
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList
;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList
;
54 /* Internal vars (MS):
55 * - Threshold above which lazy writer will start action
56 * - Amount of dirty pages
57 * - List for deferred writes
58 * - Spinlock when dealing with the deferred list
59 * - List for "clean" shared cache maps
61 ULONG CcDirtyPageThreshold
= 0;
62 ULONG CcTotalDirtyPages
= 0;
63 LIST_ENTRY CcDeferredWrites
;
64 KSPIN_LOCK CcDeferredWriteSpinLock
;
65 LIST_ENTRY CcCleanSharedCacheMapList
;
68 VOID
CcRosVacbIncRefCount_(PROS_VACB vacb
, PCSTR file
, INT line
)
70 ++vacb
->ReferenceCount
;
71 if (vacb
->SharedCacheMap
->Trace
)
73 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
74 file
, line
, vacb
, vacb
->ReferenceCount
, vacb
->Dirty
, vacb
->PageOut
);
77 VOID
CcRosVacbDecRefCount_(PROS_VACB vacb
, PCSTR file
, INT line
)
79 ASSERT(vacb
->ReferenceCount
!= 0);
80 --vacb
->ReferenceCount
;
81 ASSERT(!(vacb
->ReferenceCount
== 0 && vacb
->Dirty
));
82 if (vacb
->SharedCacheMap
->Trace
)
84 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
85 file
, line
, vacb
, vacb
->ReferenceCount
, vacb
->Dirty
, vacb
->PageOut
);
91 CcRosInternalFreeVacb(PROS_VACB Vacb
);
94 /* FUNCTIONS *****************************************************************/
99 PROS_SHARED_CACHE_MAP SharedCacheMap
,
104 PLIST_ENTRY current_entry
;
110 SharedCacheMap
->Trace
= Trace
;
114 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
116 KeAcquireGuardedMutex(&ViewLock
);
117 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldirql
);
119 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
120 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
122 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
123 current_entry
= current_entry
->Flink
;
125 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
126 current
, current
->ReferenceCount
, current
->Dirty
, current
->PageOut
);
128 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
129 KeReleaseGuardedMutex(&ViewLock
);
133 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
137 UNREFERENCED_PARAMETER(SharedCacheMap
);
138 UNREFERENCED_PARAMETER(Trace
);
149 Status
= CcWriteVirtualAddress(Vacb
);
150 if (NT_SUCCESS(Status
))
152 CcRosUnmarkDirtyVacb(Vacb
, TRUE
);
160 CcRosFlushDirtyPages (
164 BOOLEAN CalledFromLazy
)
166 PLIST_ENTRY current_entry
;
170 LARGE_INTEGER ZeroTimeout
;
172 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
175 ZeroTimeout
.QuadPart
= 0;
177 KeEnterCriticalRegion();
178 KeAcquireGuardedMutex(&ViewLock
);
180 current_entry
= DirtyVacbListHead
.Flink
;
181 if (current_entry
== &DirtyVacbListHead
)
183 DPRINT("No Dirty pages\n");
186 while ((current_entry
!= &DirtyVacbListHead
) && (Target
> 0))
188 current
= CONTAINING_RECORD(current_entry
,
191 current_entry
= current_entry
->Flink
;
193 CcRosVacbIncRefCount(current
);
195 /* When performing lazy write, don't handle temporary files */
196 if (CalledFromLazy
&&
197 BooleanFlagOn(current
->SharedCacheMap
->FileObject
->Flags
, FO_TEMPORARY_FILE
))
199 CcRosVacbDecRefCount(current
);
203 Locked
= current
->SharedCacheMap
->Callbacks
->AcquireForLazyWrite(
204 current
->SharedCacheMap
->LazyWriteContext
, Wait
);
207 CcRosVacbDecRefCount(current
);
211 Status
= CcRosAcquireVacbLock(current
,
212 Wait
? NULL
: &ZeroTimeout
);
213 if (Status
!= STATUS_SUCCESS
)
215 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
216 current
->SharedCacheMap
->LazyWriteContext
);
217 CcRosVacbDecRefCount(current
);
221 ASSERT(current
->Dirty
);
223 /* One reference is added above */
224 if (current
->ReferenceCount
> 2)
226 CcRosReleaseVacbLock(current
);
227 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
228 current
->SharedCacheMap
->LazyWriteContext
);
229 CcRosVacbDecRefCount(current
);
233 KeReleaseGuardedMutex(&ViewLock
);
235 Status
= CcRosFlushVacb(current
);
237 CcRosReleaseVacbLock(current
);
238 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
239 current
->SharedCacheMap
->LazyWriteContext
);
241 KeAcquireGuardedMutex(&ViewLock
);
242 CcRosVacbDecRefCount(current
);
244 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
) &&
245 (Status
!= STATUS_MEDIA_WRITE_PROTECTED
))
247 DPRINT1("CC: Failed to flush VACB.\n");
253 /* How many pages did we free? */
254 PagesFreed
= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
255 (*Count
) += PagesFreed
;
257 /* Make sure we don't overflow target! */
258 if (Target
< PagesFreed
)
260 /* If we would have, jump to zero directly */
265 Target
-= PagesFreed
;
269 current_entry
= DirtyVacbListHead
.Flink
;
272 KeReleaseGuardedMutex(&ViewLock
);
273 KeLeaveCriticalRegion();
275 DPRINT("CcRosFlushDirtyPages() finished\n");
276 return STATUS_SUCCESS
;
285 * FUNCTION: Try to free some memory from the file cache.
287 * Target - The number of pages to be freed.
288 * Priority - The priority of free (currently unused).
289 * NrFreed - Points to a variable where the number of pages
290 * actually freed is returned.
293 PLIST_ENTRY current_entry
;
300 BOOLEAN FlushedPages
= FALSE
;
302 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
304 InitializeListHead(&FreeList
);
309 KeAcquireGuardedMutex(&ViewLock
);
311 current_entry
= VacbLruListHead
.Flink
;
312 while (current_entry
!= &VacbLruListHead
)
314 current
= CONTAINING_RECORD(current_entry
,
317 current_entry
= current_entry
->Flink
;
319 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
321 /* Reference the VACB */
322 CcRosVacbIncRefCount(current
);
324 /* Check if it's mapped and not dirty */
325 if (current
->MappedCount
> 0 && !current
->Dirty
)
327 /* We have to break these locks because Cc sucks */
328 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
329 KeReleaseGuardedMutex(&ViewLock
);
331 /* Page out the VACB */
332 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++)
334 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
336 MmPageOutPhysicalAddress(Page
);
339 /* Reacquire the locks */
340 KeAcquireGuardedMutex(&ViewLock
);
341 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
344 /* Dereference the VACB */
345 CcRosVacbDecRefCount(current
);
347 /* Check if we can free this entry now */
348 if (current
->ReferenceCount
< 2)
350 ASSERT(!current
->Dirty
);
351 ASSERT(!current
->MappedCount
);
352 ASSERT(current
->ReferenceCount
== 1);
354 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
355 RemoveEntryList(¤t
->VacbLruListEntry
);
356 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
358 /* Calculate how many pages we freed for Mm */
359 PagesFreed
= min(VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
, Target
);
360 Target
-= PagesFreed
;
361 (*NrFreed
) += PagesFreed
;
364 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
367 KeReleaseGuardedMutex(&ViewLock
);
369 /* Try flushing pages if we haven't met our target */
370 if ((Target
> 0) && !FlushedPages
)
372 /* Flush dirty pages to disk */
373 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
, FALSE
);
376 /* We can only swap as many pages as we flushed */
377 if (PagesFreed
< Target
) Target
= PagesFreed
;
379 /* Check if we flushed anything */
382 /* Try again after flushing dirty pages */
383 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
388 while (!IsListEmpty(&FreeList
))
390 current_entry
= RemoveHeadList(&FreeList
);
391 current
= CONTAINING_RECORD(current_entry
,
393 CacheMapVacbListEntry
);
394 CcRosVacbDecRefCount(current
);
395 CcRosInternalFreeVacb(current
);
398 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
400 return STATUS_SUCCESS
;
406 PROS_SHARED_CACHE_MAP SharedCacheMap
,
412 ASSERT(SharedCacheMap
);
414 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
415 SharedCacheMap
, Vacb
, Valid
);
419 if (Dirty
&& !Vacb
->Dirty
)
421 CcRosMarkDirtyVacb(Vacb
);
428 CcRosVacbDecRefCount(Vacb
);
429 if (Mapped
&& (Vacb
->MappedCount
== 1))
431 CcRosVacbIncRefCount(Vacb
);
434 ASSERT(Vacb
->ReferenceCount
> 0);
436 CcRosReleaseVacbLock(Vacb
);
438 return STATUS_SUCCESS
;
441 /* Returns with VACB Lock Held! */
445 PROS_SHARED_CACHE_MAP SharedCacheMap
,
448 PLIST_ENTRY current_entry
;
452 ASSERT(SharedCacheMap
);
454 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
455 SharedCacheMap
, FileOffset
);
457 KeAcquireGuardedMutex(&ViewLock
);
458 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
460 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
461 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
463 current
= CONTAINING_RECORD(current_entry
,
465 CacheMapVacbListEntry
);
466 if (IsPointInRange(current
->FileOffset
.QuadPart
,
467 VACB_MAPPING_GRANULARITY
,
470 CcRosVacbIncRefCount(current
);
471 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
472 KeReleaseGuardedMutex(&ViewLock
);
473 CcRosAcquireVacbLock(current
, NULL
);
476 if (current
->FileOffset
.QuadPart
> FileOffset
)
478 current_entry
= current_entry
->Flink
;
481 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
482 KeReleaseGuardedMutex(&ViewLock
);
493 PROS_SHARED_CACHE_MAP SharedCacheMap
;
495 SharedCacheMap
= Vacb
->SharedCacheMap
;
497 KeAcquireGuardedMutex(&ViewLock
);
498 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
500 ASSERT(!Vacb
->Dirty
);
502 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
503 CcTotalDirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
504 Vacb
->SharedCacheMap
->DirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
505 CcRosVacbIncRefCount(Vacb
);
507 /* Move to the tail of the LRU list */
508 RemoveEntryList(&Vacb
->VacbLruListEntry
);
509 InsertTailList(&VacbLruListHead
, &Vacb
->VacbLruListEntry
);
513 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
514 KeReleaseGuardedMutex(&ViewLock
);
516 /* Schedule a lazy writer run to now that we have dirty VACB */
517 oldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
518 if (!LazyWriter
.ScanActive
)
520 CcScheduleLazyWriteScan(FALSE
);
522 KeReleaseQueuedSpinLock(LockQueueMasterLock
, oldIrql
);
527 CcRosUnmarkDirtyVacb (
532 PROS_SHARED_CACHE_MAP SharedCacheMap
;
534 SharedCacheMap
= Vacb
->SharedCacheMap
;
538 KeAcquireGuardedMutex(&ViewLock
);
539 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
546 RemoveEntryList(&Vacb
->DirtyVacbListEntry
);
547 CcTotalDirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
548 Vacb
->SharedCacheMap
->DirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
549 CcRosVacbDecRefCount(Vacb
);
553 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
554 KeReleaseGuardedMutex(&ViewLock
);
561 PROS_SHARED_CACHE_MAP SharedCacheMap
,
566 ASSERT(SharedCacheMap
);
568 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
569 SharedCacheMap
, FileOffset
);
571 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
574 KeBugCheck(CACHE_MANAGER
);
577 CcRosReleaseVacb(SharedCacheMap
, Vacb
, Vacb
->Valid
, TRUE
, FALSE
);
579 return STATUS_SUCCESS
;
583 * Note: this is not the contrary function of
584 * CcRosMapVacbInKernelSpace()
589 PROS_SHARED_CACHE_MAP SharedCacheMap
,
595 ASSERT(SharedCacheMap
);
597 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
598 SharedCacheMap
, FileOffset
, NowDirty
);
600 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
603 return STATUS_UNSUCCESSFUL
;
606 ASSERT(Vacb
->MappedCount
!= 0);
609 if (Vacb
->MappedCount
== 0)
611 CcRosVacbDecRefCount(Vacb
);
614 CcRosReleaseVacb(SharedCacheMap
, Vacb
, Vacb
->Valid
, NowDirty
, FALSE
);
616 return STATUS_SUCCESS
;
621 CcRosMapVacbInKernelSpace(
626 ULONG_PTR NumberOfPages
;
628 /* Create a memory area. */
629 MmLockAddressSpace(MmGetKernelAddressSpace());
630 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
631 0, // nothing checks for VACB mareas, so set to 0
633 VACB_MAPPING_GRANULARITY
,
635 (PMEMORY_AREA
*)&Vacb
->MemoryArea
,
638 MmUnlockAddressSpace(MmGetKernelAddressSpace());
639 if (!NT_SUCCESS(Status
))
641 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status
, Vacb
);
645 ASSERT(((ULONG_PTR
)Vacb
->BaseAddress
% PAGE_SIZE
) == 0);
646 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
> (ULONG_PTR
)MmSystemRangeStart
);
648 /* Create a virtual mapping for this memory area */
649 NumberOfPages
= BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY
);
650 for (i
= 0; i
< NumberOfPages
; i
++)
652 PFN_NUMBER PageFrameNumber
;
654 MI_SET_USAGE(MI_USAGE_CACHE
);
655 Status
= MmRequestPageMemoryConsumer(MC_CACHE
, TRUE
, &PageFrameNumber
);
656 if (PageFrameNumber
== 0)
658 DPRINT1("Unable to allocate page\n");
659 KeBugCheck(MEMORY_MANAGEMENT
);
662 Status
= MmCreateVirtualMapping(NULL
,
663 (PVOID
)((ULONG_PTR
)Vacb
->BaseAddress
+ (i
* PAGE_SIZE
)),
667 if (!NT_SUCCESS(Status
))
669 DPRINT1("Unable to create virtual mapping\n");
670 KeBugCheck(MEMORY_MANAGEMENT
);
674 return STATUS_SUCCESS
;
680 PROS_SHARED_CACHE_MAP SharedCacheMap
,
686 PLIST_ENTRY current_entry
;
690 ASSERT(SharedCacheMap
);
692 DPRINT("CcRosCreateVacb()\n");
694 if (FileOffset
>= SharedCacheMap
->SectionSize
.QuadPart
)
697 return STATUS_INVALID_PARAMETER
;
700 current
= ExAllocateFromNPagedLookasideList(&VacbLookasideList
);
701 current
->BaseAddress
= NULL
;
702 current
->Valid
= FALSE
;
703 current
->Dirty
= FALSE
;
704 current
->PageOut
= FALSE
;
705 current
->FileOffset
.QuadPart
= ROUND_DOWN(FileOffset
, VACB_MAPPING_GRANULARITY
);
706 current
->SharedCacheMap
= SharedCacheMap
;
708 if (SharedCacheMap
->Trace
)
710 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap
, current
);
713 current
->MappedCount
= 0;
714 current
->DirtyVacbListEntry
.Flink
= NULL
;
715 current
->DirtyVacbListEntry
.Blink
= NULL
;
716 current
->ReferenceCount
= 0;
717 current
->PinCount
= 0;
718 KeInitializeMutex(¤t
->Mutex
, 0);
719 CcRosAcquireVacbLock(current
, NULL
);
720 KeAcquireGuardedMutex(&ViewLock
);
723 /* There is window between the call to CcRosLookupVacb
724 * and CcRosCreateVacb. We must check if a VACB for the
725 * file offset exist. If there is a VACB, we release
726 * our newly created VACB and return the existing one.
728 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
729 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
731 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
733 current
= CONTAINING_RECORD(current_entry
,
735 CacheMapVacbListEntry
);
736 if (IsPointInRange(current
->FileOffset
.QuadPart
,
737 VACB_MAPPING_GRANULARITY
,
740 CcRosVacbIncRefCount(current
);
741 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
743 if (SharedCacheMap
->Trace
)
745 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
751 CcRosReleaseVacbLock(*Vacb
);
752 KeReleaseGuardedMutex(&ViewLock
);
753 ExFreeToNPagedLookasideList(&VacbLookasideList
, *Vacb
);
755 CcRosAcquireVacbLock(current
, NULL
);
756 return STATUS_SUCCESS
;
758 if (current
->FileOffset
.QuadPart
< FileOffset
)
760 ASSERT(previous
== NULL
||
761 previous
->FileOffset
.QuadPart
< current
->FileOffset
.QuadPart
);
764 if (current
->FileOffset
.QuadPart
> FileOffset
)
766 current_entry
= current_entry
->Flink
;
768 /* There was no existing VACB. */
772 InsertHeadList(&previous
->CacheMapVacbListEntry
, ¤t
->CacheMapVacbListEntry
);
776 InsertHeadList(&SharedCacheMap
->CacheMapVacbListHead
, ¤t
->CacheMapVacbListEntry
);
778 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
779 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
780 CcRosVacbIncRefCount(current
);
781 KeReleaseGuardedMutex(&ViewLock
);
783 MI_SET_USAGE(MI_USAGE_CACHE
);
785 if ((SharedCacheMap
->FileObject
) && (SharedCacheMap
->FileObject
->FileName
.Buffer
))
789 pos
= wcsrchr(SharedCacheMap
->FileObject
->FileName
.Buffer
, '\\');
792 len
= wcslen(pos
) * sizeof(WCHAR
);
793 snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
797 snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%wZ", &SharedCacheMap
->FileObject
->FileName
);
802 Status
= CcRosMapVacbInKernelSpace(current
);
803 if (!NT_SUCCESS(Status
))
805 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
806 RemoveEntryList(¤t
->VacbLruListEntry
);
807 CcRosReleaseVacbLock(current
);
808 ExFreeToNPagedLookasideList(&VacbLookasideList
, current
);
811 /* Reference it to allow release */
812 CcRosVacbIncRefCount(current
);
820 PROS_SHARED_CACHE_MAP SharedCacheMap
,
822 PLONGLONG BaseOffset
,
830 ASSERT(SharedCacheMap
);
832 DPRINT("CcRosGetVacb()\n");
835 * Look for a VACB already mapping the same data.
837 current
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
841 * Otherwise create a new VACB.
843 Status
= CcRosCreateVacb(SharedCacheMap
, FileOffset
, ¤t
);
844 if (!NT_SUCCESS(Status
))
850 KeAcquireGuardedMutex(&ViewLock
);
852 /* Move to the tail of the LRU list */
853 RemoveEntryList(¤t
->VacbLruListEntry
);
854 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
856 KeReleaseGuardedMutex(&ViewLock
);
859 * Return information about the VACB to the caller.
861 *UptoDate
= current
->Valid
;
862 *BaseAddress
= current
->BaseAddress
;
863 DPRINT("*BaseAddress %p\n", *BaseAddress
);
865 *BaseOffset
= current
->FileOffset
.QuadPart
;
867 ASSERT(current
->ReferenceCount
> 1);
869 return STATUS_SUCCESS
;
875 PROS_SHARED_CACHE_MAP SharedCacheMap
,
881 * FUNCTION: Request a page mapping for a shared cache map
886 ASSERT(SharedCacheMap
);
888 if (FileOffset
% VACB_MAPPING_GRANULARITY
!= 0)
890 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
891 FileOffset
, VACB_MAPPING_GRANULARITY
);
892 KeBugCheck(CACHE_MANAGER
);
895 return CcRosGetVacb(SharedCacheMap
,
907 MEMORY_AREA
* MemoryArea
,
913 ASSERT(SwapEntry
== 0);
916 ASSERT(MmGetReferenceCountPage(Page
) == 1);
917 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
922 CcRosInternalFreeVacb (
925 * FUNCTION: Releases a VACB associated with a shared cache map
928 DPRINT("Freeing VACB 0x%p\n", Vacb
);
930 if (Vacb
->SharedCacheMap
->Trace
)
932 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb
->SharedCacheMap
, Vacb
);
936 MmLockAddressSpace(MmGetKernelAddressSpace());
937 MmFreeMemoryArea(MmGetKernelAddressSpace(),
941 MmUnlockAddressSpace(MmGetKernelAddressSpace());
943 if (Vacb
->PinCount
!= 0 || Vacb
->ReferenceCount
!= 0)
945 DPRINT1("Invalid free: %ld, %ld\n", Vacb
->ReferenceCount
, Vacb
->PinCount
);
946 if (Vacb
->SharedCacheMap
->FileObject
&& Vacb
->SharedCacheMap
->FileObject
->FileName
.Length
)
948 DPRINT1("For file: %wZ\n", &Vacb
->SharedCacheMap
->FileObject
->FileName
);
952 ASSERT(Vacb
->PinCount
== 0);
953 ASSERT(Vacb
->ReferenceCount
== 0);
954 ExFreeToNPagedLookasideList(&VacbLookasideList
, Vacb
);
955 return STATUS_SUCCESS
;
964 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
965 IN PLARGE_INTEGER FileOffset OPTIONAL
,
967 OUT PIO_STATUS_BLOCK IoStatus
)
969 PROS_SHARED_CACHE_MAP SharedCacheMap
;
970 LARGE_INTEGER Offset
;
971 LONGLONG RemainingLength
;
975 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
976 SectionObjectPointers
, FileOffset
, Length
);
978 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
979 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
981 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
983 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
984 ASSERT(SharedCacheMap
);
987 Offset
= *FileOffset
;
988 RemainingLength
= Length
;
993 RemainingLength
= SharedCacheMap
->FileSize
.QuadPart
;
998 IoStatus
->Status
= STATUS_SUCCESS
;
999 IoStatus
->Information
= 0;
1002 while (RemainingLength
> 0)
1004 current
= CcRosLookupVacb(SharedCacheMap
, Offset
.QuadPart
);
1005 if (current
!= NULL
)
1009 Status
= CcRosFlushVacb(current
);
1010 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
1012 IoStatus
->Status
= Status
;
1016 CcRosReleaseVacb(SharedCacheMap
, current
, current
->Valid
, current
->Dirty
, FALSE
);
1019 Offset
.QuadPart
+= VACB_MAPPING_GRANULARITY
;
1020 RemainingLength
-= min(RemainingLength
, VACB_MAPPING_GRANULARITY
);
1027 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1034 CcRosDeleteFileCache (
1035 PFILE_OBJECT FileObject
,
1036 PROS_SHARED_CACHE_MAP SharedCacheMap
)
1038 * FUNCTION: Releases the shared cache map associated with a file object
1041 PLIST_ENTRY current_entry
;
1043 LIST_ENTRY FreeList
;
1046 ASSERT(SharedCacheMap
);
1048 SharedCacheMap
->OpenCount
++;
1049 KeReleaseGuardedMutex(&ViewLock
);
1051 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1053 KeAcquireGuardedMutex(&ViewLock
);
1054 SharedCacheMap
->OpenCount
--;
1055 if (SharedCacheMap
->OpenCount
== 0)
1059 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1064 InitializeListHead(&FreeList
);
1065 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1066 while (!IsListEmpty(&SharedCacheMap
->CacheMapVacbListHead
))
1068 current_entry
= RemoveTailList(&SharedCacheMap
->CacheMapVacbListHead
);
1069 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1071 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1072 CcRosAcquireVacbLock(current
, NULL
);
1073 RemoveEntryList(¤t
->VacbLruListEntry
);
1076 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1077 CcRosUnmarkDirtyVacb(current
, FALSE
);
1078 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1079 DPRINT1("Freeing dirty VACB\n");
1081 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
1082 CcRosReleaseVacbLock(current
);
1084 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1087 SharedCacheMap
->Trace
= FALSE
;
1089 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1091 KeReleaseGuardedMutex(&ViewLock
);
1092 ObDereferenceObject(SharedCacheMap
->FileObject
);
1094 while (!IsListEmpty(&FreeList
))
1096 current_entry
= RemoveTailList(&FreeList
);
1097 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1098 CcRosVacbDecRefCount(current
);
1099 CcRosInternalFreeVacb(current
);
1102 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1103 RemoveEntryList(&SharedCacheMap
->SharedCacheMapLinks
);
1104 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1106 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1107 KeAcquireGuardedMutex(&ViewLock
);
1109 return STATUS_SUCCESS
;
1114 CcRosReferenceCache (
1115 PFILE_OBJECT FileObject
)
1117 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1118 KeAcquireGuardedMutex(&ViewLock
);
1119 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1120 ASSERT(SharedCacheMap
);
1121 ASSERT(SharedCacheMap
->OpenCount
!= 0);
1122 SharedCacheMap
->OpenCount
++;
1123 KeReleaseGuardedMutex(&ViewLock
);
1128 CcRosRemoveIfClosed (
1129 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1131 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1132 DPRINT("CcRosRemoveIfClosed()\n");
1133 KeAcquireGuardedMutex(&ViewLock
);
1134 SharedCacheMap
= SectionObjectPointer
->SharedCacheMap
;
1135 if (SharedCacheMap
&& SharedCacheMap
->OpenCount
== 0)
1137 CcRosDeleteFileCache(SharedCacheMap
->FileObject
, SharedCacheMap
);
1139 KeReleaseGuardedMutex(&ViewLock
);
1145 CcRosDereferenceCache (
1146 PFILE_OBJECT FileObject
)
1148 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1149 KeAcquireGuardedMutex(&ViewLock
);
1150 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1151 ASSERT(SharedCacheMap
);
1152 if (SharedCacheMap
->OpenCount
> 0)
1154 SharedCacheMap
->OpenCount
--;
1155 if (SharedCacheMap
->OpenCount
== 0)
1157 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1158 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1161 KeReleaseGuardedMutex(&ViewLock
);
1166 CcRosReleaseFileCache (
1167 PFILE_OBJECT FileObject
)
1169 * FUNCTION: Called by the file system when a handle to a file object
1174 PPRIVATE_CACHE_MAP PrivateMap
;
1175 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1177 KeAcquireGuardedMutex(&ViewLock
);
1179 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1181 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1183 /* Closing the handle, so kill the private cache map
1184 * Before you event try to remove it from FO, always
1185 * lock the master lock, to be sure not to race
1186 * with a potential read ahead ongoing!
1188 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1189 PrivateMap
= FileObject
->PrivateCacheMap
;
1190 FileObject
->PrivateCacheMap
= NULL
;
1191 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1193 if (PrivateMap
!= NULL
)
1195 /* Remove it from the file */
1196 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &OldIrql
);
1197 RemoveEntryList(&PrivateMap
->PrivateLinks
);
1198 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, OldIrql
);
1201 if (PrivateMap
!= &SharedCacheMap
->PrivateCacheMap
)
1203 ExFreePoolWithTag(PrivateMap
, TAG_PRIVATE_CACHE_MAP
);
1207 PrivateMap
->NodeTypeCode
= 0;
1210 if (SharedCacheMap
->OpenCount
> 0)
1212 SharedCacheMap
->OpenCount
--;
1213 if (SharedCacheMap
->OpenCount
== 0)
1215 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1216 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1221 KeReleaseGuardedMutex(&ViewLock
);
1222 return STATUS_SUCCESS
;
1227 CcRosInitializeFileCache (
1228 PFILE_OBJECT FileObject
,
1229 PCC_FILE_SIZES FileSizes
,
1231 PCACHE_MANAGER_CALLBACKS CallBacks
,
1232 PVOID LazyWriterContext
)
1234 * FUNCTION: Initializes a shared cache map for a file object
1239 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1241 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1242 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1243 FileObject
, SharedCacheMap
);
1246 KeAcquireGuardedMutex(&ViewLock
);
1247 if (SharedCacheMap
== NULL
)
1250 SharedCacheMap
= ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList
);
1251 if (SharedCacheMap
== NULL
)
1253 KeReleaseGuardedMutex(&ViewLock
);
1254 return STATUS_INSUFFICIENT_RESOURCES
;
1256 RtlZeroMemory(SharedCacheMap
, sizeof(*SharedCacheMap
));
1257 ObReferenceObjectByPointer(FileObject
,
1261 SharedCacheMap
->NodeTypeCode
= NODE_TYPE_SHARED_MAP
;
1262 SharedCacheMap
->NodeByteSize
= sizeof(*SharedCacheMap
);
1263 SharedCacheMap
->FileObject
= FileObject
;
1264 SharedCacheMap
->Callbacks
= CallBacks
;
1265 SharedCacheMap
->LazyWriteContext
= LazyWriterContext
;
1266 SharedCacheMap
->SectionSize
= FileSizes
->AllocationSize
;
1267 SharedCacheMap
->FileSize
= FileSizes
->FileSize
;
1268 SharedCacheMap
->PinAccess
= PinAccess
;
1269 SharedCacheMap
->DirtyPageThreshold
= 0;
1270 SharedCacheMap
->DirtyPages
= 0;
1271 InitializeListHead(&SharedCacheMap
->PrivateList
);
1272 KeInitializeSpinLock(&SharedCacheMap
->CacheMapLock
);
1273 InitializeListHead(&SharedCacheMap
->CacheMapVacbListHead
);
1274 FileObject
->SectionObjectPointer
->SharedCacheMap
= SharedCacheMap
;
1276 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1277 InsertTailList(&CcCleanSharedCacheMapList
, &SharedCacheMap
->SharedCacheMapLinks
);
1278 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1280 if (FileObject
->PrivateCacheMap
== NULL
)
1282 PPRIVATE_CACHE_MAP PrivateMap
;
1284 /* Allocate the private cache map for this handle */
1285 if (SharedCacheMap
->PrivateCacheMap
.NodeTypeCode
!= 0)
1287 PrivateMap
= ExAllocatePoolWithTag(NonPagedPool
, sizeof(PRIVATE_CACHE_MAP
), TAG_PRIVATE_CACHE_MAP
);
1291 PrivateMap
= &SharedCacheMap
->PrivateCacheMap
;
1294 if (PrivateMap
== NULL
)
1296 /* If we also allocated the shared cache map for this file, kill it */
1299 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1300 RemoveEntryList(&SharedCacheMap
->SharedCacheMapLinks
);
1301 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1303 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1304 ObDereferenceObject(FileObject
);
1305 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1308 KeReleaseGuardedMutex(&ViewLock
);
1309 return STATUS_INSUFFICIENT_RESOURCES
;
1313 RtlZeroMemory(PrivateMap
, sizeof(PRIVATE_CACHE_MAP
));
1314 PrivateMap
->NodeTypeCode
= NODE_TYPE_PRIVATE_MAP
;
1315 PrivateMap
->ReadAheadMask
= PAGE_SIZE
- 1;
1316 PrivateMap
->FileObject
= FileObject
;
1317 KeInitializeSpinLock(&PrivateMap
->ReadAheadSpinLock
);
1319 /* Link it to the file */
1320 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &OldIrql
);
1321 InsertTailList(&SharedCacheMap
->PrivateList
, &PrivateMap
->PrivateLinks
);
1322 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, OldIrql
);
1324 FileObject
->PrivateCacheMap
= PrivateMap
;
1325 SharedCacheMap
->OpenCount
++;
1327 KeReleaseGuardedMutex(&ViewLock
);
1329 return STATUS_SUCCESS
;
1337 CcGetFileObjectFromSectionPtrs (
1338 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1340 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1342 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p\n", SectionObjectPointers
);
1344 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1346 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1347 ASSERT(SharedCacheMap
);
1348 return SharedCacheMap
->FileObject
;
1359 DPRINT("CcInitView()\n");
1361 InitializeListHead(&DirtyVacbListHead
);
1362 InitializeListHead(&VacbLruListHead
);
1363 InitializeListHead(&CcDeferredWrites
);
1364 InitializeListHead(&CcCleanSharedCacheMapList
);
1365 KeInitializeSpinLock(&CcDeferredWriteSpinLock
);
1366 KeInitializeGuardedMutex(&ViewLock
);
1367 ExInitializeNPagedLookasideList(&iBcbLookasideList
,
1371 sizeof(INTERNAL_BCB
),
1374 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList
,
1378 sizeof(ROS_SHARED_CACHE_MAP
),
1379 TAG_SHARED_CACHE_MAP
,
1381 ExInitializeNPagedLookasideList(&VacbLookasideList
,
1389 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1391 CcInitCacheZeroPage();
1394 #if DBG && defined(KDBG)
1396 ExpKdbgExtFileCache(ULONG Argc
, PCHAR Argv
[])
1398 PLIST_ENTRY ListEntry
;
1399 UNICODE_STRING NoName
= RTL_CONSTANT_STRING(L
"No name for File");
1401 KdbpPrint(" Usage Summary (in kb)\n");
1402 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1403 /* No need to lock the spin lock here, we're in DBG */
1404 for (ListEntry
= CcCleanSharedCacheMapList
.Flink
;
1405 ListEntry
!= &CcCleanSharedCacheMapList
;
1406 ListEntry
= ListEntry
->Flink
)
1409 ULONG Valid
= 0, Dirty
= 0;
1410 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1411 PUNICODE_STRING FileName
;
1413 SharedCacheMap
= CONTAINING_RECORD(ListEntry
, ROS_SHARED_CACHE_MAP
, SharedCacheMapLinks
);
1416 Dirty
= (SharedCacheMap
->DirtyPages
* PAGE_SIZE
) / 1024;
1418 /* First, count for all the associated VACB */
1419 for (Vacbs
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
1420 Vacbs
!= &SharedCacheMap
->CacheMapVacbListHead
;
1421 Vacbs
= Vacbs
->Flink
)
1425 Vacb
= CONTAINING_RECORD(Vacbs
, ROS_VACB
, CacheMapVacbListEntry
);
1428 Valid
+= VACB_MAPPING_GRANULARITY
/ 1024;
1433 if (SharedCacheMap
->FileObject
!= NULL
&&
1434 SharedCacheMap
->FileObject
->FileName
.Length
!= 0)
1436 FileName
= &SharedCacheMap
->FileObject
->FileName
;
1444 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap
, Valid
, Dirty
, FileName
);
1451 ExpKdbgExtDefWrites(ULONG Argc
, PCHAR Argv
[])
1453 KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages
,
1454 (CcTotalDirtyPages
* PAGE_SIZE
) / 1024);
1455 KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold
,
1456 (CcDirtyPageThreshold
* PAGE_SIZE
) / 1024);
1457 KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages
,
1458 (MmAvailablePages
* PAGE_SIZE
) / 1024);
1459 KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop
,
1460 (MmThrottleTop
* PAGE_SIZE
) / 1024);
1461 KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom
,
1462 (MmThrottleBottom
* PAGE_SIZE
) / 1024);
1463 KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead
.Total
,
1464 (MmModifiedPageListHead
.Total
* PAGE_SIZE
) / 1024);
1466 if (CcTotalDirtyPages
>= CcDirtyPageThreshold
)
1468 KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
1470 else if (CcTotalDirtyPages
+ 64 >= CcDirtyPageThreshold
)
1472 KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
1476 KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");