 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/cc/view.c
 * PURPOSE:     Cache manager
 * PROGRAMMERS: David Welch (welch@mcmail.com)
 *              Pierre Schweitzer (pierre@reactos.org)
/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows:
 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page.
 */
33 /* INCLUDES ******************************************************************/
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
43 /* GLOBALS *******************************************************************/
45 LIST_ENTRY DirtyVacbListHead
;
46 static LIST_ENTRY VacbLruListHead
;
48 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
49 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList
;
50 static NPAGED_LOOKASIDE_LIST VacbLookasideList
;
52 /* Internal vars (MS):
53 * - Threshold above which lazy writer will start action
54 * - Amount of dirty pages
55 * - List for deferred writes
56 * - Spinlock when dealing with the deferred list
57 * - List for "clean" shared cache maps
59 ULONG CcDirtyPageThreshold
= 0;
60 ULONG CcTotalDirtyPages
= 0;
61 LIST_ENTRY CcDeferredWrites
;
62 KSPIN_LOCK CcDeferredWriteSpinLock
;
63 LIST_ENTRY CcCleanSharedCacheMapList
;
66 ULONG
CcRosVacbIncRefCount_(PROS_VACB vacb
, PCSTR file
, INT line
)
70 Refs
= InterlockedIncrement((PLONG
)&vacb
->ReferenceCount
);
71 if (vacb
->SharedCacheMap
->Trace
)
73 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
74 file
, line
, vacb
, Refs
, vacb
->Dirty
, vacb
->PageOut
);
79 ULONG
CcRosVacbDecRefCount_(PROS_VACB vacb
, PCSTR file
, INT line
)
83 Refs
= InterlockedDecrement((PLONG
)&vacb
->ReferenceCount
);
84 ASSERT(!(Refs
== 0 && vacb
->Dirty
));
85 if (vacb
->SharedCacheMap
->Trace
)
87 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
88 file
, line
, vacb
, Refs
, vacb
->Dirty
, vacb
->PageOut
);
93 CcRosInternalFreeVacb(vacb
);
98 ULONG
CcRosVacbGetRefCount_(PROS_VACB vacb
, PCSTR file
, INT line
)
102 Refs
= InterlockedCompareExchange((PLONG
)&vacb
->ReferenceCount
, 0, 0);
103 if (vacb
->SharedCacheMap
->Trace
)
105 DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
106 file
, line
, vacb
, Refs
, vacb
->Dirty
, vacb
->PageOut
);
114 /* FUNCTIONS *****************************************************************/
119 PROS_SHARED_CACHE_MAP SharedCacheMap
,
124 PLIST_ENTRY current_entry
;
130 SharedCacheMap
->Trace
= Trace
;
134 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
136 oldirql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
137 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap
->CacheMapLock
);
139 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
140 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
142 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
143 current_entry
= current_entry
->Flink
;
145 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
146 current
, current
->ReferenceCount
, current
->Dirty
, current
->PageOut
);
149 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap
->CacheMapLock
);
150 KeReleaseQueuedSpinLock(LockQueueMasterLock
, oldirql
);
154 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
158 UNREFERENCED_PARAMETER(SharedCacheMap
);
159 UNREFERENCED_PARAMETER(Trace
);
170 CcRosUnmarkDirtyVacb(Vacb
, TRUE
);
172 Status
= CcWriteVirtualAddress(Vacb
);
173 if (!NT_SUCCESS(Status
))
175 CcRosMarkDirtyVacb(Vacb
);
183 CcRosFlushDirtyPages (
187 BOOLEAN CalledFromLazy
)
189 PLIST_ENTRY current_entry
;
195 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
199 KeEnterCriticalRegion();
200 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
202 current_entry
= DirtyVacbListHead
.Flink
;
203 if (current_entry
== &DirtyVacbListHead
)
205 DPRINT("No Dirty pages\n");
208 while ((current_entry
!= &DirtyVacbListHead
) && (Target
> 0))
210 current
= CONTAINING_RECORD(current_entry
,
213 current_entry
= current_entry
->Flink
;
215 CcRosVacbIncRefCount(current
);
217 /* When performing lazy write, don't handle temporary files */
218 if (CalledFromLazy
&&
219 BooleanFlagOn(current
->SharedCacheMap
->FileObject
->Flags
, FO_TEMPORARY_FILE
))
221 CcRosVacbDecRefCount(current
);
225 /* Don't attempt to lazy write the files that asked not to */
226 if (CalledFromLazy
&&
227 BooleanFlagOn(current
->SharedCacheMap
->Flags
, WRITEBEHIND_DISABLED
))
229 CcRosVacbDecRefCount(current
);
233 ASSERT(current
->Dirty
);
235 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
237 Locked
= current
->SharedCacheMap
->Callbacks
->AcquireForLazyWrite(
238 current
->SharedCacheMap
->LazyWriteContext
, Wait
);
241 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
242 CcRosVacbDecRefCount(current
);
246 Status
= CcRosFlushVacb(current
);
248 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
249 current
->SharedCacheMap
->LazyWriteContext
);
251 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
252 CcRosVacbDecRefCount(current
);
254 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
) &&
255 (Status
!= STATUS_MEDIA_WRITE_PROTECTED
))
257 DPRINT1("CC: Failed to flush VACB.\n");
263 /* How many pages did we free? */
264 PagesFreed
= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
265 (*Count
) += PagesFreed
;
267 /* Make sure we don't overflow target! */
268 if (Target
< PagesFreed
)
270 /* If we would have, jump to zero directly */
275 Target
-= PagesFreed
;
279 current_entry
= DirtyVacbListHead
.Flink
;
282 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
283 KeLeaveCriticalRegion();
285 DPRINT("CcRosFlushDirtyPages() finished\n");
286 return STATUS_SUCCESS
;
295 * FUNCTION: Try to free some memory from the file cache.
297 * Target - The number of pages to be freed.
298 * Priority - The priority of free (currently unused).
299 * NrFreed - Points to a variable where the number of pages
300 * actually freed is returned.
303 PLIST_ENTRY current_entry
;
310 BOOLEAN FlushedPages
= FALSE
;
312 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
314 InitializeListHead(&FreeList
);
319 oldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
321 current_entry
= VacbLruListHead
.Flink
;
322 while (current_entry
!= &VacbLruListHead
)
326 current
= CONTAINING_RECORD(current_entry
,
329 current_entry
= current_entry
->Flink
;
331 KeAcquireSpinLockAtDpcLevel(¤t
->SharedCacheMap
->CacheMapLock
);
333 /* Reference the VACB */
334 CcRosVacbIncRefCount(current
);
336 /* Check if it's mapped and not dirty */
337 if (InterlockedCompareExchange((PLONG
)¤t
->MappedCount
, 0, 0) > 0 && !current
->Dirty
)
339 /* We have to break these locks because Cc sucks */
340 KeReleaseSpinLockFromDpcLevel(¤t
->SharedCacheMap
->CacheMapLock
);
341 KeReleaseQueuedSpinLock(LockQueueMasterLock
, oldIrql
);
343 /* Page out the VACB */
344 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++)
346 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
348 MmPageOutPhysicalAddress(Page
);
351 /* Reacquire the locks */
352 oldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
353 KeAcquireSpinLockAtDpcLevel(¤t
->SharedCacheMap
->CacheMapLock
);
356 /* Dereference the VACB */
357 Refs
= CcRosVacbDecRefCount(current
);
359 /* Check if we can free this entry now */
362 ASSERT(!current
->Dirty
);
363 ASSERT(!current
->MappedCount
);
366 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
367 RemoveEntryList(¤t
->VacbLruListEntry
);
368 InitializeListHead(¤t
->VacbLruListEntry
);
369 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
371 /* Calculate how many pages we freed for Mm */
372 PagesFreed
= min(VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
, Target
);
373 Target
-= PagesFreed
;
374 (*NrFreed
) += PagesFreed
;
377 KeReleaseSpinLockFromDpcLevel(¤t
->SharedCacheMap
->CacheMapLock
);
380 KeReleaseQueuedSpinLock(LockQueueMasterLock
, oldIrql
);
382 /* Try flushing pages if we haven't met our target */
383 if ((Target
> 0) && !FlushedPages
)
385 /* Flush dirty pages to disk */
386 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
, FALSE
);
389 /* We can only swap as many pages as we flushed */
390 if (PagesFreed
< Target
) Target
= PagesFreed
;
392 /* Check if we flushed anything */
395 /* Try again after flushing dirty pages */
396 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
401 while (!IsListEmpty(&FreeList
))
405 current_entry
= RemoveHeadList(&FreeList
);
406 current
= CONTAINING_RECORD(current_entry
,
408 CacheMapVacbListEntry
);
409 InitializeListHead(¤t
->CacheMapVacbListEntry
);
410 Refs
= CcRosVacbDecRefCount(current
);
414 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
416 return STATUS_SUCCESS
;
422 PROS_SHARED_CACHE_MAP SharedCacheMap
,
429 ASSERT(SharedCacheMap
);
431 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
432 SharedCacheMap
, Vacb
, Valid
);
436 if (Dirty
&& !Vacb
->Dirty
)
438 CcRosMarkDirtyVacb(Vacb
);
443 if (InterlockedIncrement((PLONG
)&Vacb
->MappedCount
) == 1)
445 CcRosVacbIncRefCount(Vacb
);
449 Refs
= CcRosVacbDecRefCount(Vacb
);
452 return STATUS_SUCCESS
;
455 /* Returns with VACB Lock Held! */
459 PROS_SHARED_CACHE_MAP SharedCacheMap
,
462 PLIST_ENTRY current_entry
;
466 ASSERT(SharedCacheMap
);
468 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
469 SharedCacheMap
, FileOffset
);
471 oldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
472 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap
->CacheMapLock
);
474 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
475 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
477 current
= CONTAINING_RECORD(current_entry
,
479 CacheMapVacbListEntry
);
480 if (IsPointInRange(current
->FileOffset
.QuadPart
,
481 VACB_MAPPING_GRANULARITY
,
484 CcRosVacbIncRefCount(current
);
485 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap
->CacheMapLock
);
486 KeReleaseQueuedSpinLock(LockQueueMasterLock
, oldIrql
);
489 if (current
->FileOffset
.QuadPart
> FileOffset
)
491 current_entry
= current_entry
->Flink
;
494 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap
->CacheMapLock
);
495 KeReleaseQueuedSpinLock(LockQueueMasterLock
, oldIrql
);
506 PROS_SHARED_CACHE_MAP SharedCacheMap
;
508 SharedCacheMap
= Vacb
->SharedCacheMap
;
510 oldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
511 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap
->CacheMapLock
);
513 ASSERT(!Vacb
->Dirty
);
515 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
516 CcTotalDirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
517 Vacb
->SharedCacheMap
->DirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
518 CcRosVacbIncRefCount(Vacb
);
520 /* Move to the tail of the LRU list */
521 RemoveEntryList(&Vacb
->VacbLruListEntry
);
522 InsertTailList(&VacbLruListHead
, &Vacb
->VacbLruListEntry
);
526 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap
->CacheMapLock
);
528 /* Schedule a lazy writer run to now that we have dirty VACB */
529 if (!LazyWriter
.ScanActive
)
531 CcScheduleLazyWriteScan(FALSE
);
533 KeReleaseQueuedSpinLock(LockQueueMasterLock
, oldIrql
);
538 CcRosUnmarkDirtyVacb (
543 PROS_SHARED_CACHE_MAP SharedCacheMap
;
545 SharedCacheMap
= Vacb
->SharedCacheMap
;
549 oldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
550 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap
->CacheMapLock
);
557 RemoveEntryList(&Vacb
->DirtyVacbListEntry
);
558 InitializeListHead(&Vacb
->DirtyVacbListEntry
);
559 CcTotalDirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
560 Vacb
->SharedCacheMap
->DirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
561 CcRosVacbDecRefCount(Vacb
);
565 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap
->CacheMapLock
);
566 KeReleaseQueuedSpinLock(LockQueueMasterLock
, oldIrql
);
573 PROS_SHARED_CACHE_MAP SharedCacheMap
,
578 ASSERT(SharedCacheMap
);
580 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
581 SharedCacheMap
, FileOffset
);
583 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
586 KeBugCheck(CACHE_MANAGER
);
589 CcRosReleaseVacb(SharedCacheMap
, Vacb
, Vacb
->Valid
, TRUE
, FALSE
);
591 return STATUS_SUCCESS
;
595 * Note: this is not the contrary function of
596 * CcRosMapVacbInKernelSpace()
601 PROS_SHARED_CACHE_MAP SharedCacheMap
,
607 ASSERT(SharedCacheMap
);
609 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
610 SharedCacheMap
, FileOffset
, NowDirty
);
612 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
615 return STATUS_UNSUCCESSFUL
;
618 ASSERT(Vacb
->MappedCount
!= 0);
619 if (InterlockedDecrement((PLONG
)&Vacb
->MappedCount
) == 0)
621 CcRosVacbDecRefCount(Vacb
);
624 CcRosReleaseVacb(SharedCacheMap
, Vacb
, Vacb
->Valid
, NowDirty
, FALSE
);
626 return STATUS_SUCCESS
;
631 CcRosMapVacbInKernelSpace(
636 ULONG_PTR NumberOfPages
;
637 PVOID BaseAddress
= NULL
;
639 /* Create a memory area. */
640 MmLockAddressSpace(MmGetKernelAddressSpace());
641 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
642 0, // nothing checks for VACB mareas, so set to 0
644 VACB_MAPPING_GRANULARITY
,
646 (PMEMORY_AREA
*)&Vacb
->MemoryArea
,
649 ASSERT(Vacb
->BaseAddress
== NULL
);
650 Vacb
->BaseAddress
= BaseAddress
;
651 MmUnlockAddressSpace(MmGetKernelAddressSpace());
652 if (!NT_SUCCESS(Status
))
654 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status
, Vacb
);
658 ASSERT(((ULONG_PTR
)Vacb
->BaseAddress
% PAGE_SIZE
) == 0);
659 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
> (ULONG_PTR
)MmSystemRangeStart
);
660 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
+ VACB_MAPPING_GRANULARITY
- 1 > (ULONG_PTR
)MmSystemRangeStart
);
662 /* Create a virtual mapping for this memory area */
663 NumberOfPages
= BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY
);
664 for (i
= 0; i
< NumberOfPages
; i
++)
666 PFN_NUMBER PageFrameNumber
;
668 MI_SET_USAGE(MI_USAGE_CACHE
);
669 Status
= MmRequestPageMemoryConsumer(MC_CACHE
, TRUE
, &PageFrameNumber
);
670 if (PageFrameNumber
== 0)
672 DPRINT1("Unable to allocate page\n");
673 KeBugCheck(MEMORY_MANAGEMENT
);
676 ASSERT(BaseAddress
== Vacb
->BaseAddress
);
677 ASSERT(i
* PAGE_SIZE
< VACB_MAPPING_GRANULARITY
);
678 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
+ (i
* PAGE_SIZE
) >= (ULONG_PTR
)BaseAddress
);
679 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
+ (i
* PAGE_SIZE
) > (ULONG_PTR
)MmSystemRangeStart
);
681 Status
= MmCreateVirtualMapping(NULL
,
682 (PVOID
)((ULONG_PTR
)Vacb
->BaseAddress
+ (i
* PAGE_SIZE
)),
686 if (!NT_SUCCESS(Status
))
688 DPRINT1("Unable to create virtual mapping\n");
689 KeBugCheck(MEMORY_MANAGEMENT
);
693 return STATUS_SUCCESS
;
698 CcRosFreeUnusedVacb (
706 PLIST_ENTRY current_entry
;
710 InitializeListHead(&FreeList
);
712 oldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
714 /* Browse all the available VACB */
715 current_entry
= VacbLruListHead
.Flink
;
716 while (current_entry
!= &VacbLruListHead
)
720 current
= CONTAINING_RECORD(current_entry
,
723 current_entry
= current_entry
->Flink
;
725 KeAcquireSpinLockAtDpcLevel(¤t
->SharedCacheMap
->CacheMapLock
);
727 /* Only deal with unused VACB, we will free them */
728 Refs
= CcRosVacbGetRefCount(current
);
731 ASSERT(!current
->Dirty
);
732 ASSERT(!current
->MappedCount
);
735 /* Reset and move to free list */
736 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
737 RemoveEntryList(¤t
->VacbLruListEntry
);
738 InitializeListHead(¤t
->VacbLruListEntry
);
739 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
742 KeReleaseSpinLockFromDpcLevel(¤t
->SharedCacheMap
->CacheMapLock
);
746 KeReleaseQueuedSpinLock(LockQueueMasterLock
, oldIrql
);
748 /* And now, free any of the found VACB, that'll free memory! */
749 while (!IsListEmpty(&FreeList
))
753 current_entry
= RemoveHeadList(&FreeList
);
754 current
= CONTAINING_RECORD(current_entry
,
756 CacheMapVacbListEntry
);
757 InitializeListHead(¤t
->CacheMapVacbListEntry
);
758 Refs
= CcRosVacbDecRefCount(current
);
763 /* If we freed at least one VACB, return success */
769 /* If caller asked for free count, return it */
781 PROS_SHARED_CACHE_MAP SharedCacheMap
,
787 PLIST_ENTRY current_entry
;
793 ASSERT(SharedCacheMap
);
795 DPRINT("CcRosCreateVacb()\n");
797 if (FileOffset
>= SharedCacheMap
->SectionSize
.QuadPart
)
800 return STATUS_INVALID_PARAMETER
;
803 current
= ExAllocateFromNPagedLookasideList(&VacbLookasideList
);
804 current
->BaseAddress
= NULL
;
805 current
->Valid
= FALSE
;
806 current
->Dirty
= FALSE
;
807 current
->PageOut
= FALSE
;
808 current
->FileOffset
.QuadPart
= ROUND_DOWN(FileOffset
, VACB_MAPPING_GRANULARITY
);
809 current
->SharedCacheMap
= SharedCacheMap
;
811 if (SharedCacheMap
->Trace
)
813 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap
, current
);
816 current
->MappedCount
= 0;
817 current
->ReferenceCount
= 0;
818 InitializeListHead(¤t
->CacheMapVacbListEntry
);
819 InitializeListHead(¤t
->DirtyVacbListEntry
);
820 InitializeListHead(¤t
->VacbLruListEntry
);
822 CcRosVacbIncRefCount(current
);
826 /* Map VACB in kernel space */
827 Status
= CcRosMapVacbInKernelSpace(current
);
828 if (!NT_SUCCESS(Status
))
831 /* If no space left, try to prune unused VACB
832 * to recover space to map our VACB
833 * If it succeed, retry to map, otherwise
836 if (!Retried
&& CcRosFreeUnusedVacb(&Freed
))
838 DPRINT("Prunned %d VACB, trying again\n", Freed
);
843 ExFreeToNPagedLookasideList(&VacbLookasideList
, current
);
847 oldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
850 /* There is window between the call to CcRosLookupVacb
851 * and CcRosCreateVacb. We must check if a VACB for the
852 * file offset exist. If there is a VACB, we release
853 * our newly created VACB and return the existing one.
855 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap
->CacheMapLock
);
856 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
858 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
860 current
= CONTAINING_RECORD(current_entry
,
862 CacheMapVacbListEntry
);
863 if (IsPointInRange(current
->FileOffset
.QuadPart
,
864 VACB_MAPPING_GRANULARITY
,
867 CcRosVacbIncRefCount(current
);
868 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap
->CacheMapLock
);
870 if (SharedCacheMap
->Trace
)
872 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
878 KeReleaseQueuedSpinLock(LockQueueMasterLock
, oldIrql
);
880 Refs
= CcRosVacbDecRefCount(*Vacb
);
884 return STATUS_SUCCESS
;
886 if (current
->FileOffset
.QuadPart
< FileOffset
)
888 ASSERT(previous
== NULL
||
889 previous
->FileOffset
.QuadPart
< current
->FileOffset
.QuadPart
);
892 if (current
->FileOffset
.QuadPart
> FileOffset
)
894 current_entry
= current_entry
->Flink
;
896 /* There was no existing VACB. */
900 InsertHeadList(&previous
->CacheMapVacbListEntry
, ¤t
->CacheMapVacbListEntry
);
904 InsertHeadList(&SharedCacheMap
->CacheMapVacbListHead
, ¤t
->CacheMapVacbListEntry
);
906 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap
->CacheMapLock
);
907 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
908 KeReleaseQueuedSpinLock(LockQueueMasterLock
, oldIrql
);
910 MI_SET_USAGE(MI_USAGE_CACHE
);
912 if ((SharedCacheMap
->FileObject
) && (SharedCacheMap
->FileObject
->FileName
.Buffer
))
916 pos
= wcsrchr(SharedCacheMap
->FileObject
->FileName
.Buffer
, '\\');
919 len
= wcslen(pos
) * sizeof(WCHAR
);
920 snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
924 snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%wZ", &SharedCacheMap
->FileObject
->FileName
);
929 /* Reference it to allow release */
930 CcRosVacbIncRefCount(current
);
938 PROS_SHARED_CACHE_MAP SharedCacheMap
,
940 PLONGLONG BaseOffset
,
950 ASSERT(SharedCacheMap
);
952 DPRINT("CcRosGetVacb()\n");
955 * Look for a VACB already mapping the same data.
957 current
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
961 * Otherwise create a new VACB.
963 Status
= CcRosCreateVacb(SharedCacheMap
, FileOffset
, ¤t
);
964 if (!NT_SUCCESS(Status
))
970 Refs
= CcRosVacbGetRefCount(current
);
972 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
974 /* Move to the tail of the LRU list */
975 RemoveEntryList(¤t
->VacbLruListEntry
);
976 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
978 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
981 * Return information about the VACB to the caller.
983 *UptoDate
= current
->Valid
;
984 *BaseAddress
= current
->BaseAddress
;
985 DPRINT("*BaseAddress %p\n", *BaseAddress
);
987 *BaseOffset
= current
->FileOffset
.QuadPart
;
991 return STATUS_SUCCESS
;
997 PROS_SHARED_CACHE_MAP SharedCacheMap
,
1003 * FUNCTION: Request a page mapping for a shared cache map
1006 LONGLONG BaseOffset
;
1008 ASSERT(SharedCacheMap
);
1010 if (FileOffset
% VACB_MAPPING_GRANULARITY
!= 0)
1012 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
1013 FileOffset
, VACB_MAPPING_GRANULARITY
);
1014 KeBugCheck(CACHE_MANAGER
);
1017 return CcRosGetVacb(SharedCacheMap
,
1029 MEMORY_AREA
* MemoryArea
,
1032 SWAPENTRY SwapEntry
,
1035 ASSERT(SwapEntry
== 0);
1038 ASSERT(MmGetReferenceCountPage(Page
) == 1);
1039 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
1044 CcRosInternalFreeVacb (
1047 * FUNCTION: Releases a VACB associated with a shared cache map
1050 DPRINT("Freeing VACB 0x%p\n", Vacb
);
1052 if (Vacb
->SharedCacheMap
->Trace
)
1054 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb
->SharedCacheMap
, Vacb
);
1058 MmLockAddressSpace(MmGetKernelAddressSpace());
1059 MmFreeMemoryArea(MmGetKernelAddressSpace(),
1063 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1065 if (Vacb
->ReferenceCount
!= 0)
1067 DPRINT1("Invalid free: %ld\n", Vacb
->ReferenceCount
);
1068 if (Vacb
->SharedCacheMap
->FileObject
&& Vacb
->SharedCacheMap
->FileObject
->FileName
.Length
)
1070 DPRINT1("For file: %wZ\n", &Vacb
->SharedCacheMap
->FileObject
->FileName
);
1074 ASSERT(Vacb
->ReferenceCount
== 0);
1075 ASSERT(IsListEmpty(&Vacb
->CacheMapVacbListEntry
));
1076 ASSERT(IsListEmpty(&Vacb
->DirtyVacbListEntry
));
1077 ASSERT(IsListEmpty(&Vacb
->VacbLruListEntry
));
1078 RtlFillMemory(Vacb
, sizeof(*Vacb
), 0xfd);
1079 ExFreeToNPagedLookasideList(&VacbLookasideList
, Vacb
);
1080 return STATUS_SUCCESS
;
1089 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
1090 IN PLARGE_INTEGER FileOffset OPTIONAL
,
1092 OUT PIO_STATUS_BLOCK IoStatus
)
1094 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1095 LARGE_INTEGER Offset
;
1096 LONGLONG RemainingLength
;
1100 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1101 SectionObjectPointers
, FileOffset
, Length
);
1103 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1104 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
1106 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1108 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1109 ASSERT(SharedCacheMap
);
1112 Offset
= *FileOffset
;
1113 RemainingLength
= Length
;
1117 Offset
.QuadPart
= 0;
1118 RemainingLength
= SharedCacheMap
->FileSize
.QuadPart
;
1123 IoStatus
->Status
= STATUS_SUCCESS
;
1124 IoStatus
->Information
= 0;
1127 while (RemainingLength
> 0)
1129 current
= CcRosLookupVacb(SharedCacheMap
, Offset
.QuadPart
);
1130 if (current
!= NULL
)
1134 Status
= CcRosFlushVacb(current
);
1135 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
1137 IoStatus
->Status
= Status
;
1141 CcRosReleaseVacb(SharedCacheMap
, current
, current
->Valid
, FALSE
, FALSE
);
1144 Offset
.QuadPart
+= VACB_MAPPING_GRANULARITY
;
1145 RemainingLength
-= min(RemainingLength
, VACB_MAPPING_GRANULARITY
);
1152 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1159 CcRosDeleteFileCache (
1160 PFILE_OBJECT FileObject
,
1161 PROS_SHARED_CACHE_MAP SharedCacheMap
,
1164 * FUNCTION: Releases the shared cache map associated with a file object
1167 PLIST_ENTRY current_entry
;
1169 LIST_ENTRY FreeList
;
1171 ASSERT(SharedCacheMap
);
1173 SharedCacheMap
->OpenCount
++;
1174 KeReleaseQueuedSpinLock(LockQueueMasterLock
, *OldIrql
);
1176 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1178 *OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1179 SharedCacheMap
->OpenCount
--;
1180 if (SharedCacheMap
->OpenCount
== 0)
1182 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1187 InitializeListHead(&FreeList
);
1188 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap
->CacheMapLock
);
1189 while (!IsListEmpty(&SharedCacheMap
->CacheMapVacbListHead
))
1191 current_entry
= RemoveTailList(&SharedCacheMap
->CacheMapVacbListHead
);
1192 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap
->CacheMapLock
);
1194 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1195 RemoveEntryList(¤t
->VacbLruListEntry
);
1196 InitializeListHead(¤t
->VacbLruListEntry
);
1199 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap
->CacheMapLock
);
1200 CcRosUnmarkDirtyVacb(current
, FALSE
);
1201 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap
->CacheMapLock
);
1202 DPRINT1("Freeing dirty VACB\n");
1204 if (current
->MappedCount
!= 0)
1206 current
->MappedCount
= 0;
1207 NT_VERIFY(CcRosVacbDecRefCount(current
) > 0);
1208 DPRINT1("Freeing mapped VACB\n");
1210 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
1212 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap
->CacheMapLock
);
1215 SharedCacheMap
->Trace
= FALSE
;
1217 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap
->CacheMapLock
);
1219 KeReleaseQueuedSpinLock(LockQueueMasterLock
, *OldIrql
);
1220 ObDereferenceObject(SharedCacheMap
->FileObject
);
1222 while (!IsListEmpty(&FreeList
))
1226 current_entry
= RemoveTailList(&FreeList
);
1227 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1228 InitializeListHead(¤t
->CacheMapVacbListEntry
);
1229 Refs
= CcRosVacbDecRefCount(current
);
1230 #if DBG // CORE-14578
1233 DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", current
, FileObject
, current
->FileOffset
.QuadPart
);
1234 DPRINT1("There are: %d references left\n", Refs
);
1235 DPRINT1("Map: %d\n", current
->MappedCount
);
1236 DPRINT1("Dirty: %d\n", current
->Dirty
);
1237 if (FileObject
->FileName
.Length
!= 0)
1239 DPRINT1("File was: %wZ\n", &FileObject
->FileName
);
1241 else if (FileObject
->FsContext
!= NULL
&&
1242 ((PFSRTL_COMMON_FCB_HEADER
)(FileObject
->FsContext
))->NodeTypeCode
== 0x0502 &&
1243 ((PFSRTL_COMMON_FCB_HEADER
)(FileObject
->FsContext
))->NodeByteSize
== 0x1F8 &&
1244 ((PUNICODE_STRING
)(((PUCHAR
)FileObject
->FsContext
) + 0x100))->Length
!= 0)
1246 DPRINT1("File was: %wZ (FastFAT)\n", (PUNICODE_STRING
)(((PUCHAR
)FileObject
->FsContext
) + 0x100));
1250 DPRINT1("No name for the file\n");
1258 *OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1259 RemoveEntryList(&SharedCacheMap
->SharedCacheMapLinks
);
1260 KeReleaseQueuedSpinLock(LockQueueMasterLock
, *OldIrql
);
1262 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1263 *OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1265 return STATUS_SUCCESS
;
1270 CcRosReferenceCache (
1271 PFILE_OBJECT FileObject
)
1273 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1276 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1277 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1278 ASSERT(SharedCacheMap
);
1279 ASSERT(SharedCacheMap
->OpenCount
!= 0);
1280 SharedCacheMap
->OpenCount
++;
1281 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1286 CcRosRemoveIfClosed (
1287 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1289 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1292 DPRINT("CcRosRemoveIfClosed()\n");
1293 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1294 SharedCacheMap
= SectionObjectPointer
->SharedCacheMap
;
1295 if (SharedCacheMap
&& SharedCacheMap
->OpenCount
== 0)
1297 CcRosDeleteFileCache(SharedCacheMap
->FileObject
, SharedCacheMap
, &OldIrql
);
1299 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1305 CcRosDereferenceCache (
1306 PFILE_OBJECT FileObject
)
1308 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1311 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1312 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1313 ASSERT(SharedCacheMap
);
1314 if (SharedCacheMap
->OpenCount
> 0)
1316 SharedCacheMap
->OpenCount
--;
1317 if (SharedCacheMap
->OpenCount
== 0)
1319 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1320 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1322 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1323 CcRosDeleteFileCache(FileObject
, SharedCacheMap
, &OldIrql
);
1324 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1329 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1334 CcRosReleaseFileCache (
1335 PFILE_OBJECT FileObject
)
1337 * FUNCTION: Called by the file system when a handle to a file object
1342 PPRIVATE_CACHE_MAP PrivateMap
;
1343 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1345 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1347 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1349 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1351 /* Closing the handle, so kill the private cache map
1352 * Before you event try to remove it from FO, always
1353 * lock the master lock, to be sure not to race
1354 * with a potential read ahead ongoing!
1356 PrivateMap
= FileObject
->PrivateCacheMap
;
1357 FileObject
->PrivateCacheMap
= NULL
;
1359 if (PrivateMap
!= NULL
)
1361 /* Remove it from the file */
1362 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap
->CacheMapLock
);
1363 RemoveEntryList(&PrivateMap
->PrivateLinks
);
1364 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap
->CacheMapLock
);
1367 if (PrivateMap
!= &SharedCacheMap
->PrivateCacheMap
)
1369 ExFreePoolWithTag(PrivateMap
, TAG_PRIVATE_CACHE_MAP
);
1373 PrivateMap
->NodeTypeCode
= 0;
1376 if (SharedCacheMap
->OpenCount
> 0)
1378 SharedCacheMap
->OpenCount
--;
1379 if (SharedCacheMap
->OpenCount
== 0)
1381 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1382 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1384 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1385 CcRosDeleteFileCache(FileObject
, SharedCacheMap
, &OldIrql
);
1386 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1388 return STATUS_SUCCESS
;
1393 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1394 return STATUS_SUCCESS
;
1399 CcRosInitializeFileCache (
1400 PFILE_OBJECT FileObject
,
1401 PCC_FILE_SIZES FileSizes
,
1403 PCACHE_MANAGER_CALLBACKS CallBacks
,
1404 PVOID LazyWriterContext
)
1406 * FUNCTION: Initializes a shared cache map for a file object
1411 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1413 DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject
);
1416 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1417 if (SharedCacheMap
== NULL
)
1420 SharedCacheMap
= ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList
);
1421 if (SharedCacheMap
== NULL
)
1423 return STATUS_INSUFFICIENT_RESOURCES
;
1425 RtlZeroMemory(SharedCacheMap
, sizeof(*SharedCacheMap
));
1426 SharedCacheMap
->NodeTypeCode
= NODE_TYPE_SHARED_MAP
;
1427 SharedCacheMap
->NodeByteSize
= sizeof(*SharedCacheMap
);
1428 SharedCacheMap
->FileObject
= FileObject
;
1429 SharedCacheMap
->Callbacks
= CallBacks
;
1430 SharedCacheMap
->LazyWriteContext
= LazyWriterContext
;
1431 SharedCacheMap
->SectionSize
= FileSizes
->AllocationSize
;
1432 SharedCacheMap
->FileSize
= FileSizes
->FileSize
;
1433 SharedCacheMap
->PinAccess
= PinAccess
;
1434 SharedCacheMap
->DirtyPageThreshold
= 0;
1435 SharedCacheMap
->DirtyPages
= 0;
1436 InitializeListHead(&SharedCacheMap
->PrivateList
);
1437 KeInitializeSpinLock(&SharedCacheMap
->CacheMapLock
);
1438 InitializeListHead(&SharedCacheMap
->CacheMapVacbListHead
);
1439 InitializeListHead(&SharedCacheMap
->BcbList
);
1442 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1445 if (FileObject
->SectionObjectPointer
->SharedCacheMap
== NULL
)
1447 ObReferenceObjectByPointer(FileObject
,
1451 FileObject
->SectionObjectPointer
->SharedCacheMap
= SharedCacheMap
;
1453 InsertTailList(&CcCleanSharedCacheMapList
, &SharedCacheMap
->SharedCacheMapLinks
);
1457 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1458 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1461 if (FileObject
->PrivateCacheMap
== NULL
)
1463 PPRIVATE_CACHE_MAP PrivateMap
;
1465 /* Allocate the private cache map for this handle */
1466 if (SharedCacheMap
->PrivateCacheMap
.NodeTypeCode
!= 0)
1468 PrivateMap
= ExAllocatePoolWithTag(NonPagedPool
, sizeof(PRIVATE_CACHE_MAP
), TAG_PRIVATE_CACHE_MAP
);
1472 PrivateMap
= &SharedCacheMap
->PrivateCacheMap
;
1475 if (PrivateMap
== NULL
)
1477 /* If we also allocated the shared cache map for this file, kill it */
1480 RemoveEntryList(&SharedCacheMap
->SharedCacheMapLinks
);
1482 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1483 ObDereferenceObject(FileObject
);
1484 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1487 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1488 return STATUS_INSUFFICIENT_RESOURCES
;
1492 RtlZeroMemory(PrivateMap
, sizeof(PRIVATE_CACHE_MAP
));
1493 PrivateMap
->NodeTypeCode
= NODE_TYPE_PRIVATE_MAP
;
1494 PrivateMap
->ReadAheadMask
= PAGE_SIZE
- 1;
1495 PrivateMap
->FileObject
= FileObject
;
1496 KeInitializeSpinLock(&PrivateMap
->ReadAheadSpinLock
);
1498 /* Link it to the file */
1499 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap
->CacheMapLock
);
1500 InsertTailList(&SharedCacheMap
->PrivateList
, &PrivateMap
->PrivateLinks
);
1501 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap
->CacheMapLock
);
1503 FileObject
->PrivateCacheMap
= PrivateMap
;
1504 SharedCacheMap
->OpenCount
++;
1506 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1508 return STATUS_SUCCESS
;
1516 CcGetFileObjectFromSectionPtrs (
1517 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1519 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1521 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p\n", SectionObjectPointers
);
1523 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1525 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1526 ASSERT(SharedCacheMap
);
1527 return SharedCacheMap
->FileObject
;
/* Body of CcInitView: one-time cache manager initialization (the signature
 * is not visible in this extraction). Sets up the global VACB lists, the
 * deferred-write machinery, and the lookaside lists used by the cache. */
1538 DPRINT("CcInitView()\n");
/* Global dirty/LRU VACB bookkeeping lists */
1540 InitializeListHead(&DirtyVacbListHead
);
1541 InitializeListHead(&VacbLruListHead
);
/* Deferred-write queue, clean shared-map list, and the lock guarding
 * the deferred list */
1542 InitializeListHead(&CcDeferredWrites
);
1543 InitializeListHead(&CcCleanSharedCacheMapList
);
1544 KeInitializeSpinLock(&CcDeferredWriteSpinLock
);
/* Non-paged lookaside lists for BCBs, shared cache maps and VACBs.
 * NOTE(review): several arguments of these calls (allocate/free routines,
 * flags, tags, depth) are missing from this extraction — confirm against
 * the full file before relying on them. */
1545 ExInitializeNPagedLookasideList(&iBcbLookasideList
,
1549 sizeof(INTERNAL_BCB
),
1552 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList
,
1556 sizeof(ROS_SHARED_CACHE_MAP
),
1557 TAG_SHARED_CACHE_MAP
,
1559 ExInitializeNPagedLookasideList(&VacbLookasideList
,
/* Register the cache as a memory consumer so Mm can ask it to trim */
1567 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1569 CcInitCacheZeroPage();
1572 #if DBG && defined(KDBG)
1574 ExpKdbgExtFileCache(ULONG Argc
, PCHAR Argv
[])
1576 PLIST_ENTRY ListEntry
;
1577 UNICODE_STRING NoName
= RTL_CONSTANT_STRING(L
"No name for File");
1579 KdbpPrint(" Usage Summary (in kb)\n");
1580 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1581 /* No need to lock the spin lock here, we're in DBG */
1582 for (ListEntry
= CcCleanSharedCacheMapList
.Flink
;
1583 ListEntry
!= &CcCleanSharedCacheMapList
;
1584 ListEntry
= ListEntry
->Flink
)
1587 ULONG Valid
= 0, Dirty
= 0;
1588 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1589 PUNICODE_STRING FileName
;
1592 SharedCacheMap
= CONTAINING_RECORD(ListEntry
, ROS_SHARED_CACHE_MAP
, SharedCacheMapLinks
);
1595 Dirty
= (SharedCacheMap
->DirtyPages
* PAGE_SIZE
) / 1024;
1597 /* First, count for all the associated VACB */
1598 for (Vacbs
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
1599 Vacbs
!= &SharedCacheMap
->CacheMapVacbListHead
;
1600 Vacbs
= Vacbs
->Flink
)
1604 Vacb
= CONTAINING_RECORD(Vacbs
, ROS_VACB
, CacheMapVacbListEntry
);
1607 Valid
+= VACB_MAPPING_GRANULARITY
/ 1024;
1612 if (SharedCacheMap
->FileObject
!= NULL
&&
1613 SharedCacheMap
->FileObject
->FileName
.Length
!= 0)
1615 FileName
= &SharedCacheMap
->FileObject
->FileName
;
1617 else if (SharedCacheMap
->FileObject
!= NULL
&&
1618 SharedCacheMap
->FileObject
->FsContext
!= NULL
&&
1619 ((PFSRTL_COMMON_FCB_HEADER
)(SharedCacheMap
->FileObject
->FsContext
))->NodeTypeCode
== 0x0502 &&
1620 ((PFSRTL_COMMON_FCB_HEADER
)(SharedCacheMap
->FileObject
->FsContext
))->NodeByteSize
== 0x1F8 &&
1621 ((PUNICODE_STRING
)(((PUCHAR
)SharedCacheMap
->FileObject
->FsContext
) + 0x100))->Length
!= 0)
1623 FileName
= (PUNICODE_STRING
)(((PUCHAR
)SharedCacheMap
->FileObject
->FsContext
) + 0x100);
1624 Extra
= L
" (FastFAT)";
1632 KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap
, Valid
, Dirty
, FileName
, Extra
);
1639 ExpKdbgExtDefWrites(ULONG Argc
, PCHAR Argv
[])
1641 KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages
,
1642 (CcTotalDirtyPages
* PAGE_SIZE
) / 1024);
1643 KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold
,
1644 (CcDirtyPageThreshold
* PAGE_SIZE
) / 1024);
1645 KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages
,
1646 (MmAvailablePages
* PAGE_SIZE
) / 1024);
1647 KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop
,
1648 (MmThrottleTop
* PAGE_SIZE
) / 1024);
1649 KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom
,
1650 (MmThrottleBottom
* PAGE_SIZE
) / 1024);
1651 KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead
.Total
,
1652 (MmModifiedPageListHead
.Total
* PAGE_SIZE
) / 1024);
1654 if (CcTotalDirtyPages
>= CcDirtyPageThreshold
)
1656 KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
1658 else if (CcTotalDirtyPages
+ 64 >= CcDirtyPageThreshold
)
1660 KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
1664 KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");