/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows
 *
 * (1) If caching for the FCB hasn't been initiated then so do by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page
 */
33 /* INCLUDES ******************************************************************/
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
43 /* GLOBALS *******************************************************************/
45 LIST_ENTRY DirtyVacbListHead
;
46 static LIST_ENTRY VacbLruListHead
;
48 KGUARDED_MUTEX ViewLock
;
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList
;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList
;
54 /* Internal vars (MS):
55 * - Threshold above which lazy writer will start action
56 * - Amount of dirty pages
57 * - List for deferred writes
58 * - Spinlock when dealing with the deferred list
59 * - List for "clean" shared cache maps
61 ULONG CcDirtyPageThreshold
= 0;
62 ULONG CcTotalDirtyPages
= 0;
63 LIST_ENTRY CcDeferredWrites
;
64 KSPIN_LOCK CcDeferredWriteSpinLock
;
65 LIST_ENTRY CcCleanSharedCacheMapList
;
68 ULONG
CcRosVacbIncRefCount_(PROS_VACB vacb
, PCSTR file
, INT line
)
72 Refs
= InterlockedIncrement((PLONG
)&vacb
->ReferenceCount
);
73 if (vacb
->SharedCacheMap
->Trace
)
75 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
76 file
, line
, vacb
, Refs
, vacb
->Dirty
, vacb
->PageOut
);
81 ULONG
CcRosVacbDecRefCount_(PROS_VACB vacb
, PCSTR file
, INT line
)
85 Refs
= InterlockedDecrement((PLONG
)&vacb
->ReferenceCount
);
86 ASSERT(!(Refs
== 0 && vacb
->Dirty
));
87 if (vacb
->SharedCacheMap
->Trace
)
89 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
90 file
, line
, vacb
, Refs
, vacb
->Dirty
, vacb
->PageOut
);
95 ULONG
CcRosVacbGetRefCount_(PROS_VACB vacb
, PCSTR file
, INT line
)
99 Refs
= InterlockedCompareExchange((PLONG
)&vacb
->ReferenceCount
, 0, 0);
100 if (vacb
->SharedCacheMap
->Trace
)
102 DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
103 file
, line
, vacb
, Refs
, vacb
->Dirty
, vacb
->PageOut
);
111 CcRosInternalFreeVacb(PROS_VACB Vacb
);
114 /* FUNCTIONS *****************************************************************/
119 PROS_SHARED_CACHE_MAP SharedCacheMap
,
124 PLIST_ENTRY current_entry
;
130 SharedCacheMap
->Trace
= Trace
;
134 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
136 KeAcquireGuardedMutex(&ViewLock
);
137 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldirql
);
139 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
140 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
142 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
143 current_entry
= current_entry
->Flink
;
145 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
146 current
, current
->ReferenceCount
, current
->Dirty
, current
->PageOut
);
148 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
149 KeReleaseGuardedMutex(&ViewLock
);
153 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
157 UNREFERENCED_PARAMETER(SharedCacheMap
);
158 UNREFERENCED_PARAMETER(Trace
);
169 Status
= CcWriteVirtualAddress(Vacb
);
170 if (NT_SUCCESS(Status
))
172 CcRosUnmarkDirtyVacb(Vacb
, TRUE
);
180 CcRosFlushDirtyPages (
184 BOOLEAN CalledFromLazy
)
186 PLIST_ENTRY current_entry
;
190 LARGE_INTEGER ZeroTimeout
;
192 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
195 ZeroTimeout
.QuadPart
= 0;
197 KeEnterCriticalRegion();
198 KeAcquireGuardedMutex(&ViewLock
);
200 current_entry
= DirtyVacbListHead
.Flink
;
201 if (current_entry
== &DirtyVacbListHead
)
203 DPRINT("No Dirty pages\n");
206 while ((current_entry
!= &DirtyVacbListHead
) && (Target
> 0))
208 current
= CONTAINING_RECORD(current_entry
,
211 current_entry
= current_entry
->Flink
;
213 CcRosVacbIncRefCount(current
);
215 /* When performing lazy write, don't handle temporary files */
216 if (CalledFromLazy
&&
217 BooleanFlagOn(current
->SharedCacheMap
->FileObject
->Flags
, FO_TEMPORARY_FILE
))
219 CcRosVacbDecRefCount(current
);
223 Locked
= current
->SharedCacheMap
->Callbacks
->AcquireForLazyWrite(
224 current
->SharedCacheMap
->LazyWriteContext
, Wait
);
227 CcRosVacbDecRefCount(current
);
231 Status
= CcRosAcquireVacbLock(current
,
232 Wait
? NULL
: &ZeroTimeout
);
233 if (Status
!= STATUS_SUCCESS
)
235 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
236 current
->SharedCacheMap
->LazyWriteContext
);
237 CcRosVacbDecRefCount(current
);
241 ASSERT(current
->Dirty
);
243 /* One reference is added above */
244 if (CcRosVacbGetRefCount(current
) > 2)
246 CcRosReleaseVacbLock(current
);
247 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
248 current
->SharedCacheMap
->LazyWriteContext
);
249 CcRosVacbDecRefCount(current
);
253 KeReleaseGuardedMutex(&ViewLock
);
255 Status
= CcRosFlushVacb(current
);
257 CcRosReleaseVacbLock(current
);
258 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
259 current
->SharedCacheMap
->LazyWriteContext
);
261 KeAcquireGuardedMutex(&ViewLock
);
262 CcRosVacbDecRefCount(current
);
264 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
) &&
265 (Status
!= STATUS_MEDIA_WRITE_PROTECTED
))
267 DPRINT1("CC: Failed to flush VACB.\n");
273 /* How many pages did we free? */
274 PagesFreed
= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
275 (*Count
) += PagesFreed
;
277 /* Make sure we don't overflow target! */
278 if (Target
< PagesFreed
)
280 /* If we would have, jump to zero directly */
285 Target
-= PagesFreed
;
289 current_entry
= DirtyVacbListHead
.Flink
;
292 KeReleaseGuardedMutex(&ViewLock
);
293 KeLeaveCriticalRegion();
295 DPRINT("CcRosFlushDirtyPages() finished\n");
296 return STATUS_SUCCESS
;
305 * FUNCTION: Try to free some memory from the file cache.
307 * Target - The number of pages to be freed.
308 * Priority - The priority of free (currently unused).
309 * NrFreed - Points to a variable where the number of pages
310 * actually freed is returned.
313 PLIST_ENTRY current_entry
;
320 BOOLEAN FlushedPages
= FALSE
;
322 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
324 InitializeListHead(&FreeList
);
329 KeAcquireGuardedMutex(&ViewLock
);
331 current_entry
= VacbLruListHead
.Flink
;
332 while (current_entry
!= &VacbLruListHead
)
336 current
= CONTAINING_RECORD(current_entry
,
339 current_entry
= current_entry
->Flink
;
341 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
343 /* Reference the VACB */
344 CcRosVacbIncRefCount(current
);
346 /* Check if it's mapped and not dirty */
347 if (current
->MappedCount
> 0 && !current
->Dirty
)
349 /* We have to break these locks because Cc sucks */
350 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
351 KeReleaseGuardedMutex(&ViewLock
);
353 /* Page out the VACB */
354 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++)
356 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
358 MmPageOutPhysicalAddress(Page
);
361 /* Reacquire the locks */
362 KeAcquireGuardedMutex(&ViewLock
);
363 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
366 /* Dereference the VACB */
367 Refs
= CcRosVacbDecRefCount(current
);
369 /* Check if we can free this entry now */
372 ASSERT(!current
->Dirty
);
373 ASSERT(!current
->MappedCount
);
376 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
377 RemoveEntryList(¤t
->VacbLruListEntry
);
378 InitializeListHead(¤t
->VacbLruListEntry
);
379 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
381 /* Calculate how many pages we freed for Mm */
382 PagesFreed
= min(VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
, Target
);
383 Target
-= PagesFreed
;
384 (*NrFreed
) += PagesFreed
;
387 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
390 KeReleaseGuardedMutex(&ViewLock
);
392 /* Try flushing pages if we haven't met our target */
393 if ((Target
> 0) && !FlushedPages
)
395 /* Flush dirty pages to disk */
396 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
, FALSE
);
399 /* We can only swap as many pages as we flushed */
400 if (PagesFreed
< Target
) Target
= PagesFreed
;
402 /* Check if we flushed anything */
405 /* Try again after flushing dirty pages */
406 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
411 while (!IsListEmpty(&FreeList
))
413 current_entry
= RemoveHeadList(&FreeList
);
414 current
= CONTAINING_RECORD(current_entry
,
416 CacheMapVacbListEntry
);
417 InitializeListHead(¤t
->CacheMapVacbListEntry
);
418 CcRosVacbDecRefCount(current
);
419 CcRosInternalFreeVacb(current
);
422 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
424 return STATUS_SUCCESS
;
430 PROS_SHARED_CACHE_MAP SharedCacheMap
,
437 ASSERT(SharedCacheMap
);
439 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
440 SharedCacheMap
, Vacb
, Valid
);
444 if (Dirty
&& !Vacb
->Dirty
)
446 CcRosMarkDirtyVacb(Vacb
);
453 Refs
= CcRosVacbDecRefCount(Vacb
);
454 if (Mapped
&& (Vacb
->MappedCount
== 1))
456 CcRosVacbIncRefCount(Vacb
);
461 CcRosReleaseVacbLock(Vacb
);
463 return STATUS_SUCCESS
;
466 /* Returns with VACB Lock Held! */
470 PROS_SHARED_CACHE_MAP SharedCacheMap
,
473 PLIST_ENTRY current_entry
;
477 ASSERT(SharedCacheMap
);
479 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
480 SharedCacheMap
, FileOffset
);
482 KeAcquireGuardedMutex(&ViewLock
);
483 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
485 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
486 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
488 current
= CONTAINING_RECORD(current_entry
,
490 CacheMapVacbListEntry
);
491 if (IsPointInRange(current
->FileOffset
.QuadPart
,
492 VACB_MAPPING_GRANULARITY
,
495 CcRosVacbIncRefCount(current
);
496 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
497 KeReleaseGuardedMutex(&ViewLock
);
498 CcRosAcquireVacbLock(current
, NULL
);
501 if (current
->FileOffset
.QuadPart
> FileOffset
)
503 current_entry
= current_entry
->Flink
;
506 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
507 KeReleaseGuardedMutex(&ViewLock
);
518 PROS_SHARED_CACHE_MAP SharedCacheMap
;
520 SharedCacheMap
= Vacb
->SharedCacheMap
;
522 KeAcquireGuardedMutex(&ViewLock
);
523 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
525 ASSERT(!Vacb
->Dirty
);
527 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
528 CcTotalDirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
529 Vacb
->SharedCacheMap
->DirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
530 CcRosVacbIncRefCount(Vacb
);
532 /* Move to the tail of the LRU list */
533 RemoveEntryList(&Vacb
->VacbLruListEntry
);
534 InsertTailList(&VacbLruListHead
, &Vacb
->VacbLruListEntry
);
538 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
539 KeReleaseGuardedMutex(&ViewLock
);
541 /* Schedule a lazy writer run to now that we have dirty VACB */
542 oldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
543 if (!LazyWriter
.ScanActive
)
545 CcScheduleLazyWriteScan(FALSE
);
547 KeReleaseQueuedSpinLock(LockQueueMasterLock
, oldIrql
);
552 CcRosUnmarkDirtyVacb (
557 PROS_SHARED_CACHE_MAP SharedCacheMap
;
559 SharedCacheMap
= Vacb
->SharedCacheMap
;
563 KeAcquireGuardedMutex(&ViewLock
);
564 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
571 RemoveEntryList(&Vacb
->DirtyVacbListEntry
);
572 InitializeListHead(&Vacb
->DirtyVacbListEntry
);
573 CcTotalDirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
574 Vacb
->SharedCacheMap
->DirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
575 CcRosVacbDecRefCount(Vacb
);
579 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
580 KeReleaseGuardedMutex(&ViewLock
);
587 PROS_SHARED_CACHE_MAP SharedCacheMap
,
592 ASSERT(SharedCacheMap
);
594 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
595 SharedCacheMap
, FileOffset
);
597 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
600 KeBugCheck(CACHE_MANAGER
);
603 CcRosReleaseVacb(SharedCacheMap
, Vacb
, Vacb
->Valid
, TRUE
, FALSE
);
605 return STATUS_SUCCESS
;
609 * Note: this is not the contrary function of
610 * CcRosMapVacbInKernelSpace()
615 PROS_SHARED_CACHE_MAP SharedCacheMap
,
621 ASSERT(SharedCacheMap
);
623 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
624 SharedCacheMap
, FileOffset
, NowDirty
);
626 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
629 return STATUS_UNSUCCESSFUL
;
632 ASSERT(Vacb
->MappedCount
!= 0);
635 if (Vacb
->MappedCount
== 0)
637 CcRosVacbDecRefCount(Vacb
);
640 CcRosReleaseVacb(SharedCacheMap
, Vacb
, Vacb
->Valid
, NowDirty
, FALSE
);
642 return STATUS_SUCCESS
;
647 CcRosMapVacbInKernelSpace(
652 ULONG_PTR NumberOfPages
;
653 PVOID BaseAddress
= NULL
;
655 /* Create a memory area. */
656 MmLockAddressSpace(MmGetKernelAddressSpace());
657 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
658 0, // nothing checks for VACB mareas, so set to 0
660 VACB_MAPPING_GRANULARITY
,
662 (PMEMORY_AREA
*)&Vacb
->MemoryArea
,
665 ASSERT(Vacb
->BaseAddress
== NULL
);
666 Vacb
->BaseAddress
= BaseAddress
;
667 MmUnlockAddressSpace(MmGetKernelAddressSpace());
668 if (!NT_SUCCESS(Status
))
670 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status
, Vacb
);
674 ASSERT(((ULONG_PTR
)Vacb
->BaseAddress
% PAGE_SIZE
) == 0);
675 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
> (ULONG_PTR
)MmSystemRangeStart
);
676 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
+ VACB_MAPPING_GRANULARITY
- 1 > (ULONG_PTR
)MmSystemRangeStart
);
678 /* Create a virtual mapping for this memory area */
679 NumberOfPages
= BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY
);
680 for (i
= 0; i
< NumberOfPages
; i
++)
682 PFN_NUMBER PageFrameNumber
;
684 MI_SET_USAGE(MI_USAGE_CACHE
);
685 Status
= MmRequestPageMemoryConsumer(MC_CACHE
, TRUE
, &PageFrameNumber
);
686 if (PageFrameNumber
== 0)
688 DPRINT1("Unable to allocate page\n");
689 KeBugCheck(MEMORY_MANAGEMENT
);
692 ASSERT(BaseAddress
== Vacb
->BaseAddress
);
693 ASSERT(i
* PAGE_SIZE
< VACB_MAPPING_GRANULARITY
);
694 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
+ (i
* PAGE_SIZE
) >= (ULONG_PTR
)BaseAddress
);
695 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
+ (i
* PAGE_SIZE
) > (ULONG_PTR
)MmSystemRangeStart
);
697 Status
= MmCreateVirtualMapping(NULL
,
698 (PVOID
)((ULONG_PTR
)Vacb
->BaseAddress
+ (i
* PAGE_SIZE
)),
702 if (!NT_SUCCESS(Status
))
704 DPRINT1("Unable to create virtual mapping\n");
705 KeBugCheck(MEMORY_MANAGEMENT
);
709 return STATUS_SUCCESS
;
715 PROS_SHARED_CACHE_MAP SharedCacheMap
,
721 PLIST_ENTRY current_entry
;
725 ASSERT(SharedCacheMap
);
727 DPRINT("CcRosCreateVacb()\n");
729 if (FileOffset
>= SharedCacheMap
->SectionSize
.QuadPart
)
732 return STATUS_INVALID_PARAMETER
;
735 current
= ExAllocateFromNPagedLookasideList(&VacbLookasideList
);
736 current
->BaseAddress
= NULL
;
737 current
->Valid
= FALSE
;
738 current
->Dirty
= FALSE
;
739 current
->PageOut
= FALSE
;
740 current
->FileOffset
.QuadPart
= ROUND_DOWN(FileOffset
, VACB_MAPPING_GRANULARITY
);
741 current
->SharedCacheMap
= SharedCacheMap
;
743 if (SharedCacheMap
->Trace
)
745 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap
, current
);
748 current
->MappedCount
= 0;
749 current
->ReferenceCount
= 0;
750 current
->PinCount
= 0;
751 KeInitializeMutex(¤t
->Mutex
, 0);
752 InitializeListHead(¤t
->CacheMapVacbListEntry
);
753 InitializeListHead(¤t
->DirtyVacbListEntry
);
754 InitializeListHead(¤t
->VacbLruListEntry
);
755 CcRosAcquireVacbLock(current
, NULL
);
756 KeAcquireGuardedMutex(&ViewLock
);
759 /* There is window between the call to CcRosLookupVacb
760 * and CcRosCreateVacb. We must check if a VACB for the
761 * file offset exist. If there is a VACB, we release
762 * our newly created VACB and return the existing one.
764 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
765 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
767 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
769 current
= CONTAINING_RECORD(current_entry
,
771 CacheMapVacbListEntry
);
772 if (IsPointInRange(current
->FileOffset
.QuadPart
,
773 VACB_MAPPING_GRANULARITY
,
776 CcRosVacbIncRefCount(current
);
777 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
779 if (SharedCacheMap
->Trace
)
781 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
787 CcRosReleaseVacbLock(*Vacb
);
788 KeReleaseGuardedMutex(&ViewLock
);
789 ExFreeToNPagedLookasideList(&VacbLookasideList
, *Vacb
);
791 CcRosAcquireVacbLock(current
, NULL
);
792 return STATUS_SUCCESS
;
794 if (current
->FileOffset
.QuadPart
< FileOffset
)
796 ASSERT(previous
== NULL
||
797 previous
->FileOffset
.QuadPart
< current
->FileOffset
.QuadPart
);
800 if (current
->FileOffset
.QuadPart
> FileOffset
)
802 current_entry
= current_entry
->Flink
;
804 /* There was no existing VACB. */
808 InsertHeadList(&previous
->CacheMapVacbListEntry
, ¤t
->CacheMapVacbListEntry
);
812 InsertHeadList(&SharedCacheMap
->CacheMapVacbListHead
, ¤t
->CacheMapVacbListEntry
);
814 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
815 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
816 CcRosVacbIncRefCount(current
);
817 KeReleaseGuardedMutex(&ViewLock
);
819 MI_SET_USAGE(MI_USAGE_CACHE
);
821 if ((SharedCacheMap
->FileObject
) && (SharedCacheMap
->FileObject
->FileName
.Buffer
))
825 pos
= wcsrchr(SharedCacheMap
->FileObject
->FileName
.Buffer
, '\\');
828 len
= wcslen(pos
) * sizeof(WCHAR
);
829 snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
833 snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%wZ", &SharedCacheMap
->FileObject
->FileName
);
838 /* Reference it to allow release */
839 CcRosVacbIncRefCount(current
);
841 Status
= CcRosMapVacbInKernelSpace(current
);
842 if (!NT_SUCCESS(Status
))
844 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
845 RemoveEntryList(¤t
->VacbLruListEntry
);
846 CcRosReleaseVacb(SharedCacheMap
, current
, FALSE
,
848 CcRosVacbDecRefCount(current
);
849 ExFreeToNPagedLookasideList(&VacbLookasideList
, current
);
858 PROS_SHARED_CACHE_MAP SharedCacheMap
,
860 PLONGLONG BaseOffset
,
869 ASSERT(SharedCacheMap
);
871 DPRINT("CcRosGetVacb()\n");
874 * Look for a VACB already mapping the same data.
876 current
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
880 * Otherwise create a new VACB.
882 Status
= CcRosCreateVacb(SharedCacheMap
, FileOffset
, ¤t
);
883 if (!NT_SUCCESS(Status
))
889 Refs
= CcRosVacbGetRefCount(current
);
891 KeAcquireGuardedMutex(&ViewLock
);
893 /* Move to the tail of the LRU list */
894 RemoveEntryList(¤t
->VacbLruListEntry
);
895 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
897 KeReleaseGuardedMutex(&ViewLock
);
900 * Return information about the VACB to the caller.
902 *UptoDate
= current
->Valid
;
903 *BaseAddress
= current
->BaseAddress
;
904 DPRINT("*BaseAddress %p\n", *BaseAddress
);
906 *BaseOffset
= current
->FileOffset
.QuadPart
;
910 return STATUS_SUCCESS
;
916 PROS_SHARED_CACHE_MAP SharedCacheMap
,
922 * FUNCTION: Request a page mapping for a shared cache map
927 ASSERT(SharedCacheMap
);
929 if (FileOffset
% VACB_MAPPING_GRANULARITY
!= 0)
931 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
932 FileOffset
, VACB_MAPPING_GRANULARITY
);
933 KeBugCheck(CACHE_MANAGER
);
936 return CcRosGetVacb(SharedCacheMap
,
948 MEMORY_AREA
* MemoryArea
,
954 ASSERT(SwapEntry
== 0);
957 ASSERT(MmGetReferenceCountPage(Page
) == 1);
958 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
963 CcRosInternalFreeVacb (
966 * FUNCTION: Releases a VACB associated with a shared cache map
969 DPRINT("Freeing VACB 0x%p\n", Vacb
);
971 if (Vacb
->SharedCacheMap
->Trace
)
973 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb
->SharedCacheMap
, Vacb
);
977 MmLockAddressSpace(MmGetKernelAddressSpace());
978 MmFreeMemoryArea(MmGetKernelAddressSpace(),
982 MmUnlockAddressSpace(MmGetKernelAddressSpace());
984 if (Vacb
->PinCount
!= 0 || Vacb
->ReferenceCount
!= 0)
986 DPRINT1("Invalid free: %ld, %ld\n", Vacb
->ReferenceCount
, Vacb
->PinCount
);
987 if (Vacb
->SharedCacheMap
->FileObject
&& Vacb
->SharedCacheMap
->FileObject
->FileName
.Length
)
989 DPRINT1("For file: %wZ\n", &Vacb
->SharedCacheMap
->FileObject
->FileName
);
993 ASSERT(Vacb
->PinCount
== 0);
994 ASSERT(Vacb
->ReferenceCount
== 0);
995 ASSERT(IsListEmpty(&Vacb
->CacheMapVacbListEntry
));
996 ASSERT(IsListEmpty(&Vacb
->DirtyVacbListEntry
));
997 ASSERT(IsListEmpty(&Vacb
->VacbLruListEntry
));
998 RtlFillMemory(Vacb
, sizeof(Vacb
), 0xfd);
999 ExFreeToNPagedLookasideList(&VacbLookasideList
, Vacb
);
1000 return STATUS_SUCCESS
;
1009 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
1010 IN PLARGE_INTEGER FileOffset OPTIONAL
,
1012 OUT PIO_STATUS_BLOCK IoStatus
)
1014 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1015 LARGE_INTEGER Offset
;
1016 LONGLONG RemainingLength
;
1020 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1021 SectionObjectPointers
, FileOffset
, Length
);
1023 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1024 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
1026 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1028 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1029 ASSERT(SharedCacheMap
);
1032 Offset
= *FileOffset
;
1033 RemainingLength
= Length
;
1037 Offset
.QuadPart
= 0;
1038 RemainingLength
= SharedCacheMap
->FileSize
.QuadPart
;
1043 IoStatus
->Status
= STATUS_SUCCESS
;
1044 IoStatus
->Information
= 0;
1047 while (RemainingLength
> 0)
1049 current
= CcRosLookupVacb(SharedCacheMap
, Offset
.QuadPart
);
1050 if (current
!= NULL
)
1054 Status
= CcRosFlushVacb(current
);
1055 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
1057 IoStatus
->Status
= Status
;
1061 CcRosReleaseVacb(SharedCacheMap
, current
, current
->Valid
, current
->Dirty
, FALSE
);
1064 Offset
.QuadPart
+= VACB_MAPPING_GRANULARITY
;
1065 RemainingLength
-= min(RemainingLength
, VACB_MAPPING_GRANULARITY
);
1072 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1079 CcRosDeleteFileCache (
1080 PFILE_OBJECT FileObject
,
1081 PROS_SHARED_CACHE_MAP SharedCacheMap
)
1083 * FUNCTION: Releases the shared cache map associated with a file object
1086 PLIST_ENTRY current_entry
;
1088 LIST_ENTRY FreeList
;
1091 ASSERT(SharedCacheMap
);
1093 SharedCacheMap
->OpenCount
++;
1094 KeReleaseGuardedMutex(&ViewLock
);
1096 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1098 KeAcquireGuardedMutex(&ViewLock
);
1099 SharedCacheMap
->OpenCount
--;
1100 if (SharedCacheMap
->OpenCount
== 0)
1104 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1109 InitializeListHead(&FreeList
);
1110 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1111 while (!IsListEmpty(&SharedCacheMap
->CacheMapVacbListHead
))
1113 current_entry
= RemoveTailList(&SharedCacheMap
->CacheMapVacbListHead
);
1114 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1116 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1117 CcRosAcquireVacbLock(current
, NULL
);
1118 RemoveEntryList(¤t
->VacbLruListEntry
);
1119 InitializeListHead(¤t
->VacbLruListEntry
);
1122 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1123 CcRosUnmarkDirtyVacb(current
, FALSE
);
1124 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1125 DPRINT1("Freeing dirty VACB\n");
1127 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
1128 CcRosReleaseVacbLock(current
);
1130 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1133 SharedCacheMap
->Trace
= FALSE
;
1135 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1137 KeReleaseGuardedMutex(&ViewLock
);
1138 ObDereferenceObject(SharedCacheMap
->FileObject
);
1140 while (!IsListEmpty(&FreeList
))
1142 current_entry
= RemoveTailList(&FreeList
);
1143 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1144 InitializeListHead(¤t
->CacheMapVacbListEntry
);
1145 CcRosVacbDecRefCount(current
);
1146 CcRosInternalFreeVacb(current
);
1149 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1150 RemoveEntryList(&SharedCacheMap
->SharedCacheMapLinks
);
1151 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1153 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1154 KeAcquireGuardedMutex(&ViewLock
);
1156 return STATUS_SUCCESS
;
1161 CcRosReferenceCache (
1162 PFILE_OBJECT FileObject
)
1164 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1165 KeAcquireGuardedMutex(&ViewLock
);
1166 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1167 ASSERT(SharedCacheMap
);
1168 ASSERT(SharedCacheMap
->OpenCount
!= 0);
1169 SharedCacheMap
->OpenCount
++;
1170 KeReleaseGuardedMutex(&ViewLock
);
1175 CcRosRemoveIfClosed (
1176 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1178 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1179 DPRINT("CcRosRemoveIfClosed()\n");
1180 KeAcquireGuardedMutex(&ViewLock
);
1181 SharedCacheMap
= SectionObjectPointer
->SharedCacheMap
;
1182 if (SharedCacheMap
&& SharedCacheMap
->OpenCount
== 0)
1184 CcRosDeleteFileCache(SharedCacheMap
->FileObject
, SharedCacheMap
);
1186 KeReleaseGuardedMutex(&ViewLock
);
1192 CcRosDereferenceCache (
1193 PFILE_OBJECT FileObject
)
1195 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1196 KeAcquireGuardedMutex(&ViewLock
);
1197 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1198 ASSERT(SharedCacheMap
);
1199 if (SharedCacheMap
->OpenCount
> 0)
1201 SharedCacheMap
->OpenCount
--;
1202 if (SharedCacheMap
->OpenCount
== 0)
1204 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1205 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1208 KeReleaseGuardedMutex(&ViewLock
);
1213 CcRosReleaseFileCache (
1214 PFILE_OBJECT FileObject
)
1216 * FUNCTION: Called by the file system when a handle to a file object
1221 PPRIVATE_CACHE_MAP PrivateMap
;
1222 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1224 KeAcquireGuardedMutex(&ViewLock
);
1226 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1228 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1230 /* Closing the handle, so kill the private cache map
1231 * Before you event try to remove it from FO, always
1232 * lock the master lock, to be sure not to race
1233 * with a potential read ahead ongoing!
1235 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1236 PrivateMap
= FileObject
->PrivateCacheMap
;
1237 FileObject
->PrivateCacheMap
= NULL
;
1238 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1240 if (PrivateMap
!= NULL
)
1242 /* Remove it from the file */
1243 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &OldIrql
);
1244 RemoveEntryList(&PrivateMap
->PrivateLinks
);
1245 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, OldIrql
);
1248 if (PrivateMap
!= &SharedCacheMap
->PrivateCacheMap
)
1250 ExFreePoolWithTag(PrivateMap
, TAG_PRIVATE_CACHE_MAP
);
1254 PrivateMap
->NodeTypeCode
= 0;
1257 if (SharedCacheMap
->OpenCount
> 0)
1259 SharedCacheMap
->OpenCount
--;
1260 if (SharedCacheMap
->OpenCount
== 0)
1262 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1263 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1268 KeReleaseGuardedMutex(&ViewLock
);
1269 return STATUS_SUCCESS
;
1274 CcRosInitializeFileCache (
1275 PFILE_OBJECT FileObject
,
1276 PCC_FILE_SIZES FileSizes
,
1278 PCACHE_MANAGER_CALLBACKS CallBacks
,
1279 PVOID LazyWriterContext
)
1281 * FUNCTION: Initializes a shared cache map for a file object
1286 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1288 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1289 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1290 FileObject
, SharedCacheMap
);
1293 KeAcquireGuardedMutex(&ViewLock
);
1294 if (SharedCacheMap
== NULL
)
1297 SharedCacheMap
= ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList
);
1298 if (SharedCacheMap
== NULL
)
1300 KeReleaseGuardedMutex(&ViewLock
);
1301 return STATUS_INSUFFICIENT_RESOURCES
;
1303 RtlZeroMemory(SharedCacheMap
, sizeof(*SharedCacheMap
));
1304 ObReferenceObjectByPointer(FileObject
,
1308 SharedCacheMap
->NodeTypeCode
= NODE_TYPE_SHARED_MAP
;
1309 SharedCacheMap
->NodeByteSize
= sizeof(*SharedCacheMap
);
1310 SharedCacheMap
->FileObject
= FileObject
;
1311 SharedCacheMap
->Callbacks
= CallBacks
;
1312 SharedCacheMap
->LazyWriteContext
= LazyWriterContext
;
1313 SharedCacheMap
->SectionSize
= FileSizes
->AllocationSize
;
1314 SharedCacheMap
->FileSize
= FileSizes
->FileSize
;
1315 SharedCacheMap
->PinAccess
= PinAccess
;
1316 SharedCacheMap
->DirtyPageThreshold
= 0;
1317 SharedCacheMap
->DirtyPages
= 0;
1318 InitializeListHead(&SharedCacheMap
->PrivateList
);
1319 KeInitializeSpinLock(&SharedCacheMap
->CacheMapLock
);
1320 InitializeListHead(&SharedCacheMap
->CacheMapVacbListHead
);
1321 FileObject
->SectionObjectPointer
->SharedCacheMap
= SharedCacheMap
;
1323 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1324 InsertTailList(&CcCleanSharedCacheMapList
, &SharedCacheMap
->SharedCacheMapLinks
);
1325 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1327 if (FileObject
->PrivateCacheMap
== NULL
)
1329 PPRIVATE_CACHE_MAP PrivateMap
;
1331 /* Allocate the private cache map for this handle */
1332 if (SharedCacheMap
->PrivateCacheMap
.NodeTypeCode
!= 0)
1334 PrivateMap
= ExAllocatePoolWithTag(NonPagedPool
, sizeof(PRIVATE_CACHE_MAP
), TAG_PRIVATE_CACHE_MAP
);
1338 PrivateMap
= &SharedCacheMap
->PrivateCacheMap
;
1341 if (PrivateMap
== NULL
)
1343 /* If we also allocated the shared cache map for this file, kill it */
1346 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1347 RemoveEntryList(&SharedCacheMap
->SharedCacheMapLinks
);
1348 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1350 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1351 ObDereferenceObject(FileObject
);
1352 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1355 KeReleaseGuardedMutex(&ViewLock
);
1356 return STATUS_INSUFFICIENT_RESOURCES
;
1360 RtlZeroMemory(PrivateMap
, sizeof(PRIVATE_CACHE_MAP
));
1361 PrivateMap
->NodeTypeCode
= NODE_TYPE_PRIVATE_MAP
;
1362 PrivateMap
->ReadAheadMask
= PAGE_SIZE
- 1;
1363 PrivateMap
->FileObject
= FileObject
;
1364 KeInitializeSpinLock(&PrivateMap
->ReadAheadSpinLock
);
1366 /* Link it to the file */
1367 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &OldIrql
);
1368 InsertTailList(&SharedCacheMap
->PrivateList
, &PrivateMap
->PrivateLinks
);
1369 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, OldIrql
);
1371 FileObject
->PrivateCacheMap
= PrivateMap
;
1372 SharedCacheMap
->OpenCount
++;
1374 KeReleaseGuardedMutex(&ViewLock
);
1376 return STATUS_SUCCESS
;
1384 CcGetFileObjectFromSectionPtrs (
1385 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1387 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1389 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p\n", SectionObjectPointers
);
1391 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1393 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1394 ASSERT(SharedCacheMap
);
1395 return SharedCacheMap
->FileObject
;
1406 DPRINT("CcInitView()\n");
1408 InitializeListHead(&DirtyVacbListHead
);
1409 InitializeListHead(&VacbLruListHead
);
1410 InitializeListHead(&CcDeferredWrites
);
1411 InitializeListHead(&CcCleanSharedCacheMapList
);
1412 KeInitializeSpinLock(&CcDeferredWriteSpinLock
);
1413 KeInitializeGuardedMutex(&ViewLock
);
1414 ExInitializeNPagedLookasideList(&iBcbLookasideList
,
1418 sizeof(INTERNAL_BCB
),
1421 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList
,
1425 sizeof(ROS_SHARED_CACHE_MAP
),
1426 TAG_SHARED_CACHE_MAP
,
1428 ExInitializeNPagedLookasideList(&VacbLookasideList
,
1436 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1438 CcInitCacheZeroPage();
1441 #if DBG && defined(KDBG)
1443 ExpKdbgExtFileCache(ULONG Argc
, PCHAR Argv
[])
1445 PLIST_ENTRY ListEntry
;
1446 UNICODE_STRING NoName
= RTL_CONSTANT_STRING(L
"No name for File");
1448 KdbpPrint(" Usage Summary (in kb)\n");
1449 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1450 /* No need to lock the spin lock here, we're in DBG */
1451 for (ListEntry
= CcCleanSharedCacheMapList
.Flink
;
1452 ListEntry
!= &CcCleanSharedCacheMapList
;
1453 ListEntry
= ListEntry
->Flink
)
1456 ULONG Valid
= 0, Dirty
= 0;
1457 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1458 PUNICODE_STRING FileName
;
1460 SharedCacheMap
= CONTAINING_RECORD(ListEntry
, ROS_SHARED_CACHE_MAP
, SharedCacheMapLinks
);
1463 Dirty
= (SharedCacheMap
->DirtyPages
* PAGE_SIZE
) / 1024;
1465 /* First, count for all the associated VACB */
1466 for (Vacbs
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
1467 Vacbs
!= &SharedCacheMap
->CacheMapVacbListHead
;
1468 Vacbs
= Vacbs
->Flink
)
1472 Vacb
= CONTAINING_RECORD(Vacbs
, ROS_VACB
, CacheMapVacbListEntry
);
1475 Valid
+= VACB_MAPPING_GRANULARITY
/ 1024;
1480 if (SharedCacheMap
->FileObject
!= NULL
&&
1481 SharedCacheMap
->FileObject
->FileName
.Length
!= 0)
1483 FileName
= &SharedCacheMap
->FileObject
->FileName
;
1491 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap
, Valid
, Dirty
, FileName
);
1498 ExpKdbgExtDefWrites(ULONG Argc
, PCHAR Argv
[])
1500 KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages
,
1501 (CcTotalDirtyPages
* PAGE_SIZE
) / 1024);
1502 KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold
,
1503 (CcDirtyPageThreshold
* PAGE_SIZE
) / 1024);
1504 KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages
,
1505 (MmAvailablePages
* PAGE_SIZE
) / 1024);
1506 KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop
,
1507 (MmThrottleTop
* PAGE_SIZE
) / 1024);
1508 KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom
,
1509 (MmThrottleBottom
* PAGE_SIZE
) / 1024);
1510 KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead
.Total
,
1511 (MmModifiedPageListHead
.Total
* PAGE_SIZE
) / 1024);
1513 if (CcTotalDirtyPages
>= CcDirtyPageThreshold
)
1515 KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
1517 else if (CcTotalDirtyPages
+ 64 >= CcDirtyPageThreshold
)
1519 KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
1523 KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");