2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
11 /* NOTES **********************************************************************
13 * This is not the NT implementation of a file cache nor anything much like
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
19 * (1) If caching for the FCB hasn't been initiated then so do by calling
20 * CcInitializeFileCache.
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
27 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
29 * (4) Copy the data into or out of the page as necessary.
31 * (5) Release the cache page
33 /* INCLUDES ******************************************************************/
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
43 /* GLOBALS *******************************************************************/
45 LIST_ENTRY DirtyVacbListHead
;
46 static LIST_ENTRY VacbLruListHead
;
48 KGUARDED_MUTEX ViewLock
;
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList
;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList
;
54 /* Internal vars (MS):
55 * - Threshold above which lazy writer will start action
56 * - Amount of dirty pages
57 * - List for deferred writes
58 * - Spinlock when dealing with the deferred list
59 * - List for "clean" shared cache maps
61 ULONG CcDirtyPageThreshold
= 0;
62 ULONG CcTotalDirtyPages
= 0;
63 LIST_ENTRY CcDeferredWrites
;
64 KSPIN_LOCK CcDeferredWriteSpinLock
;
65 LIST_ENTRY CcCleanSharedCacheMapList
;
68 ULONG
CcRosVacbIncRefCount_(PROS_VACB vacb
, PCSTR file
, INT line
)
72 Refs
= InterlockedIncrement((PLONG
)&vacb
->ReferenceCount
);
73 if (vacb
->SharedCacheMap
->Trace
)
75 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
76 file
, line
, vacb
, Refs
, vacb
->Dirty
, vacb
->PageOut
);
81 ULONG
CcRosVacbDecRefCount_(PROS_VACB vacb
, PCSTR file
, INT line
)
85 Refs
= InterlockedDecrement((PLONG
)&vacb
->ReferenceCount
);
86 ASSERT(!(Refs
== 0 && vacb
->Dirty
));
87 if (vacb
->SharedCacheMap
->Trace
)
89 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
90 file
, line
, vacb
, Refs
, vacb
->Dirty
, vacb
->PageOut
);
95 CcRosInternalFreeVacb(vacb
);
100 ULONG
CcRosVacbGetRefCount_(PROS_VACB vacb
, PCSTR file
, INT line
)
104 Refs
= InterlockedCompareExchange((PLONG
)&vacb
->ReferenceCount
, 0, 0);
105 if (vacb
->SharedCacheMap
->Trace
)
107 DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
108 file
, line
, vacb
, Refs
, vacb
->Dirty
, vacb
->PageOut
);
116 /* FUNCTIONS *****************************************************************/
121 PROS_SHARED_CACHE_MAP SharedCacheMap
,
126 PLIST_ENTRY current_entry
;
132 SharedCacheMap
->Trace
= Trace
;
136 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
138 KeAcquireGuardedMutex(&ViewLock
);
139 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldirql
);
141 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
142 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
144 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
145 current_entry
= current_entry
->Flink
;
147 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
148 current
, current
->ReferenceCount
, current
->Dirty
, current
->PageOut
);
150 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
151 KeReleaseGuardedMutex(&ViewLock
);
155 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
159 UNREFERENCED_PARAMETER(SharedCacheMap
);
160 UNREFERENCED_PARAMETER(Trace
);
171 CcRosUnmarkDirtyVacb(Vacb
, TRUE
);
173 Status
= CcWriteVirtualAddress(Vacb
);
174 if (!NT_SUCCESS(Status
))
176 CcRosMarkDirtyVacb(Vacb
);
184 CcRosFlushDirtyPages (
188 BOOLEAN CalledFromLazy
)
190 PLIST_ENTRY current_entry
;
195 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
199 KeEnterCriticalRegion();
200 KeAcquireGuardedMutex(&ViewLock
);
202 current_entry
= DirtyVacbListHead
.Flink
;
203 if (current_entry
== &DirtyVacbListHead
)
205 DPRINT("No Dirty pages\n");
208 while ((current_entry
!= &DirtyVacbListHead
) && (Target
> 0))
212 current
= CONTAINING_RECORD(current_entry
,
215 current_entry
= current_entry
->Flink
;
217 CcRosVacbIncRefCount(current
);
219 /* When performing lazy write, don't handle temporary files */
220 if (CalledFromLazy
&&
221 BooleanFlagOn(current
->SharedCacheMap
->FileObject
->Flags
, FO_TEMPORARY_FILE
))
223 CcRosVacbDecRefCount(current
);
227 Locked
= current
->SharedCacheMap
->Callbacks
->AcquireForLazyWrite(
228 current
->SharedCacheMap
->LazyWriteContext
, Wait
);
231 CcRosVacbDecRefCount(current
);
235 ASSERT(current
->Dirty
);
237 /* One reference is added above */
238 Refs
= CcRosVacbGetRefCount(current
);
239 if ((Refs
> 3 && current
->PinCount
== 0) ||
240 (Refs
> 4 && current
->PinCount
> 1))
242 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
243 current
->SharedCacheMap
->LazyWriteContext
);
244 CcRosVacbDecRefCount(current
);
248 KeReleaseGuardedMutex(&ViewLock
);
250 Status
= CcRosFlushVacb(current
);
252 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
253 current
->SharedCacheMap
->LazyWriteContext
);
255 KeAcquireGuardedMutex(&ViewLock
);
256 CcRosVacbDecRefCount(current
);
258 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
) &&
259 (Status
!= STATUS_MEDIA_WRITE_PROTECTED
))
261 DPRINT1("CC: Failed to flush VACB.\n");
267 /* How many pages did we free? */
268 PagesFreed
= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
269 (*Count
) += PagesFreed
;
271 /* Make sure we don't overflow target! */
272 if (Target
< PagesFreed
)
274 /* If we would have, jump to zero directly */
279 Target
-= PagesFreed
;
283 current_entry
= DirtyVacbListHead
.Flink
;
286 KeReleaseGuardedMutex(&ViewLock
);
287 KeLeaveCriticalRegion();
289 DPRINT("CcRosFlushDirtyPages() finished\n");
290 return STATUS_SUCCESS
;
299 * FUNCTION: Try to free some memory from the file cache.
301 * Target - The number of pages to be freed.
302 * Priority - The priority of free (currently unused).
303 * NrFreed - Points to a variable where the number of pages
304 * actually freed is returned.
307 PLIST_ENTRY current_entry
;
314 BOOLEAN FlushedPages
= FALSE
;
316 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
318 InitializeListHead(&FreeList
);
323 KeAcquireGuardedMutex(&ViewLock
);
325 current_entry
= VacbLruListHead
.Flink
;
326 while (current_entry
!= &VacbLruListHead
)
330 current
= CONTAINING_RECORD(current_entry
,
333 current_entry
= current_entry
->Flink
;
335 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
337 /* Reference the VACB */
338 CcRosVacbIncRefCount(current
);
340 /* Check if it's mapped and not dirty */
341 if (InterlockedCompareExchange((PLONG
)¤t
->MappedCount
, 0, 0) > 0 && !current
->Dirty
)
343 /* We have to break these locks because Cc sucks */
344 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
345 KeReleaseGuardedMutex(&ViewLock
);
347 /* Page out the VACB */
348 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++)
350 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
352 MmPageOutPhysicalAddress(Page
);
355 /* Reacquire the locks */
356 KeAcquireGuardedMutex(&ViewLock
);
357 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
360 /* Dereference the VACB */
361 Refs
= CcRosVacbDecRefCount(current
);
363 /* Check if we can free this entry now */
366 ASSERT(!current
->Dirty
);
367 ASSERT(!current
->MappedCount
);
370 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
371 RemoveEntryList(¤t
->VacbLruListEntry
);
372 InitializeListHead(¤t
->VacbLruListEntry
);
373 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
375 /* Calculate how many pages we freed for Mm */
376 PagesFreed
= min(VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
, Target
);
377 Target
-= PagesFreed
;
378 (*NrFreed
) += PagesFreed
;
381 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
384 KeReleaseGuardedMutex(&ViewLock
);
386 /* Try flushing pages if we haven't met our target */
387 if ((Target
> 0) && !FlushedPages
)
389 /* Flush dirty pages to disk */
390 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
, FALSE
);
393 /* We can only swap as many pages as we flushed */
394 if (PagesFreed
< Target
) Target
= PagesFreed
;
396 /* Check if we flushed anything */
399 /* Try again after flushing dirty pages */
400 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
405 while (!IsListEmpty(&FreeList
))
409 current_entry
= RemoveHeadList(&FreeList
);
410 current
= CONTAINING_RECORD(current_entry
,
412 CacheMapVacbListEntry
);
413 InitializeListHead(¤t
->CacheMapVacbListEntry
);
414 Refs
= CcRosVacbDecRefCount(current
);
418 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
420 return STATUS_SUCCESS
;
426 PROS_SHARED_CACHE_MAP SharedCacheMap
,
433 ASSERT(SharedCacheMap
);
435 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
436 SharedCacheMap
, Vacb
, Valid
);
440 if (Dirty
&& !Vacb
->Dirty
)
442 CcRosMarkDirtyVacb(Vacb
);
447 if (InterlockedIncrement((PLONG
)&Vacb
->MappedCount
) == 1)
449 CcRosVacbIncRefCount(Vacb
);
453 Refs
= CcRosVacbDecRefCount(Vacb
);
456 return STATUS_SUCCESS
;
459 /* Returns with VACB Lock Held! */
463 PROS_SHARED_CACHE_MAP SharedCacheMap
,
466 PLIST_ENTRY current_entry
;
470 ASSERT(SharedCacheMap
);
472 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
473 SharedCacheMap
, FileOffset
);
475 KeAcquireGuardedMutex(&ViewLock
);
476 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
478 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
479 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
481 current
= CONTAINING_RECORD(current_entry
,
483 CacheMapVacbListEntry
);
484 if (IsPointInRange(current
->FileOffset
.QuadPart
,
485 VACB_MAPPING_GRANULARITY
,
488 CcRosVacbIncRefCount(current
);
489 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
490 KeReleaseGuardedMutex(&ViewLock
);
493 if (current
->FileOffset
.QuadPart
> FileOffset
)
495 current_entry
= current_entry
->Flink
;
498 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
499 KeReleaseGuardedMutex(&ViewLock
);
510 PROS_SHARED_CACHE_MAP SharedCacheMap
;
512 SharedCacheMap
= Vacb
->SharedCacheMap
;
514 KeAcquireGuardedMutex(&ViewLock
);
515 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
517 ASSERT(!Vacb
->Dirty
);
519 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
520 CcTotalDirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
521 Vacb
->SharedCacheMap
->DirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
522 CcRosVacbIncRefCount(Vacb
);
524 /* Move to the tail of the LRU list */
525 RemoveEntryList(&Vacb
->VacbLruListEntry
);
526 InsertTailList(&VacbLruListHead
, &Vacb
->VacbLruListEntry
);
530 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
531 KeReleaseGuardedMutex(&ViewLock
);
533 /* Schedule a lazy writer run now that we have a dirty VACB */
534 oldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
535 if (!LazyWriter
.ScanActive
)
537 CcScheduleLazyWriteScan(FALSE
);
539 KeReleaseQueuedSpinLock(LockQueueMasterLock
, oldIrql
);
544 CcRosUnmarkDirtyVacb (
549 PROS_SHARED_CACHE_MAP SharedCacheMap
;
551 SharedCacheMap
= Vacb
->SharedCacheMap
;
555 KeAcquireGuardedMutex(&ViewLock
);
556 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
563 RemoveEntryList(&Vacb
->DirtyVacbListEntry
);
564 InitializeListHead(&Vacb
->DirtyVacbListEntry
);
565 CcTotalDirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
566 Vacb
->SharedCacheMap
->DirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
567 CcRosVacbDecRefCount(Vacb
);
571 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
572 KeReleaseGuardedMutex(&ViewLock
);
579 PROS_SHARED_CACHE_MAP SharedCacheMap
,
584 ASSERT(SharedCacheMap
);
586 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
587 SharedCacheMap
, FileOffset
);
589 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
592 KeBugCheck(CACHE_MANAGER
);
595 CcRosReleaseVacb(SharedCacheMap
, Vacb
, Vacb
->Valid
, TRUE
, FALSE
);
597 return STATUS_SUCCESS
;
601 * Note: this is not the contrary function of
602 * CcRosMapVacbInKernelSpace()
607 PROS_SHARED_CACHE_MAP SharedCacheMap
,
613 ASSERT(SharedCacheMap
);
615 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
616 SharedCacheMap
, FileOffset
, NowDirty
);
618 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
621 return STATUS_UNSUCCESSFUL
;
624 ASSERT(Vacb
->MappedCount
!= 0);
625 if (InterlockedDecrement((PLONG
)&Vacb
->MappedCount
) == 0)
627 CcRosVacbDecRefCount(Vacb
);
630 CcRosReleaseVacb(SharedCacheMap
, Vacb
, Vacb
->Valid
, NowDirty
, FALSE
);
632 return STATUS_SUCCESS
;
637 CcRosMapVacbInKernelSpace(
642 ULONG_PTR NumberOfPages
;
643 PVOID BaseAddress
= NULL
;
645 /* Create a memory area. */
646 MmLockAddressSpace(MmGetKernelAddressSpace());
647 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
648 0, // nothing checks for VACB mareas, so set to 0
650 VACB_MAPPING_GRANULARITY
,
652 (PMEMORY_AREA
*)&Vacb
->MemoryArea
,
655 ASSERT(Vacb
->BaseAddress
== NULL
);
656 Vacb
->BaseAddress
= BaseAddress
;
657 MmUnlockAddressSpace(MmGetKernelAddressSpace());
658 if (!NT_SUCCESS(Status
))
660 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status
, Vacb
);
664 ASSERT(((ULONG_PTR
)Vacb
->BaseAddress
% PAGE_SIZE
) == 0);
665 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
> (ULONG_PTR
)MmSystemRangeStart
);
666 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
+ VACB_MAPPING_GRANULARITY
- 1 > (ULONG_PTR
)MmSystemRangeStart
);
668 /* Create a virtual mapping for this memory area */
669 NumberOfPages
= BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY
);
670 for (i
= 0; i
< NumberOfPages
; i
++)
672 PFN_NUMBER PageFrameNumber
;
674 MI_SET_USAGE(MI_USAGE_CACHE
);
675 Status
= MmRequestPageMemoryConsumer(MC_CACHE
, TRUE
, &PageFrameNumber
);
676 if (PageFrameNumber
== 0)
678 DPRINT1("Unable to allocate page\n");
679 KeBugCheck(MEMORY_MANAGEMENT
);
682 ASSERT(BaseAddress
== Vacb
->BaseAddress
);
683 ASSERT(i
* PAGE_SIZE
< VACB_MAPPING_GRANULARITY
);
684 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
+ (i
* PAGE_SIZE
) >= (ULONG_PTR
)BaseAddress
);
685 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
+ (i
* PAGE_SIZE
) > (ULONG_PTR
)MmSystemRangeStart
);
687 Status
= MmCreateVirtualMapping(NULL
,
688 (PVOID
)((ULONG_PTR
)Vacb
->BaseAddress
+ (i
* PAGE_SIZE
)),
692 if (!NT_SUCCESS(Status
))
694 DPRINT1("Unable to create virtual mapping\n");
695 KeBugCheck(MEMORY_MANAGEMENT
);
699 return STATUS_SUCCESS
;
705 PROS_SHARED_CACHE_MAP SharedCacheMap
,
711 PLIST_ENTRY current_entry
;
716 ASSERT(SharedCacheMap
);
718 DPRINT("CcRosCreateVacb()\n");
720 if (FileOffset
>= SharedCacheMap
->SectionSize
.QuadPart
)
723 return STATUS_INVALID_PARAMETER
;
726 current
= ExAllocateFromNPagedLookasideList(&VacbLookasideList
);
727 current
->BaseAddress
= NULL
;
728 current
->Valid
= FALSE
;
729 current
->Dirty
= FALSE
;
730 current
->PageOut
= FALSE
;
731 current
->FileOffset
.QuadPart
= ROUND_DOWN(FileOffset
, VACB_MAPPING_GRANULARITY
);
732 current
->SharedCacheMap
= SharedCacheMap
;
734 if (SharedCacheMap
->Trace
)
736 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap
, current
);
739 current
->MappedCount
= 0;
740 current
->ReferenceCount
= 0;
741 current
->PinCount
= 0;
742 InitializeListHead(¤t
->CacheMapVacbListEntry
);
743 InitializeListHead(¤t
->DirtyVacbListEntry
);
744 InitializeListHead(¤t
->VacbLruListEntry
);
746 CcRosVacbIncRefCount(current
);
748 Status
= CcRosMapVacbInKernelSpace(current
);
749 if (!NT_SUCCESS(Status
))
751 CcRosVacbDecRefCount(current
);
752 ExFreeToNPagedLookasideList(&VacbLookasideList
, current
);
756 KeAcquireGuardedMutex(&ViewLock
);
759 /* There is window between the call to CcRosLookupVacb
760 * and CcRosCreateVacb. We must check if a VACB for the
761 * file offset exist. If there is a VACB, we release
762 * our newly created VACB and return the existing one.
764 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
765 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
767 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
769 current
= CONTAINING_RECORD(current_entry
,
771 CacheMapVacbListEntry
);
772 if (IsPointInRange(current
->FileOffset
.QuadPart
,
773 VACB_MAPPING_GRANULARITY
,
776 CcRosVacbIncRefCount(current
);
777 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
779 if (SharedCacheMap
->Trace
)
781 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
787 KeReleaseGuardedMutex(&ViewLock
);
789 Refs
= CcRosVacbDecRefCount(*Vacb
);
793 return STATUS_SUCCESS
;
795 if (current
->FileOffset
.QuadPart
< FileOffset
)
797 ASSERT(previous
== NULL
||
798 previous
->FileOffset
.QuadPart
< current
->FileOffset
.QuadPart
);
801 if (current
->FileOffset
.QuadPart
> FileOffset
)
803 current_entry
= current_entry
->Flink
;
805 /* There was no existing VACB. */
809 InsertHeadList(&previous
->CacheMapVacbListEntry
, ¤t
->CacheMapVacbListEntry
);
813 InsertHeadList(&SharedCacheMap
->CacheMapVacbListHead
, ¤t
->CacheMapVacbListEntry
);
815 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
816 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
817 KeReleaseGuardedMutex(&ViewLock
);
819 MI_SET_USAGE(MI_USAGE_CACHE
);
821 if ((SharedCacheMap
->FileObject
) && (SharedCacheMap
->FileObject
->FileName
.Buffer
))
825 pos
= wcsrchr(SharedCacheMap
->FileObject
->FileName
.Buffer
, '\\');
828 len
= wcslen(pos
) * sizeof(WCHAR
);
829 snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
833 snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%wZ", &SharedCacheMap
->FileObject
->FileName
);
838 /* Reference it to allow release */
839 CcRosVacbIncRefCount(current
);
847 PROS_SHARED_CACHE_MAP SharedCacheMap
,
849 PLONGLONG BaseOffset
,
858 ASSERT(SharedCacheMap
);
860 DPRINT("CcRosGetVacb()\n");
863 * Look for a VACB already mapping the same data.
865 current
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
869 * Otherwise create a new VACB.
871 Status
= CcRosCreateVacb(SharedCacheMap
, FileOffset
, ¤t
);
872 if (!NT_SUCCESS(Status
))
878 Refs
= CcRosVacbGetRefCount(current
);
880 KeAcquireGuardedMutex(&ViewLock
);
882 /* Move to the tail of the LRU list */
883 RemoveEntryList(¤t
->VacbLruListEntry
);
884 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
886 KeReleaseGuardedMutex(&ViewLock
);
889 * Return information about the VACB to the caller.
891 *UptoDate
= current
->Valid
;
892 *BaseAddress
= current
->BaseAddress
;
893 DPRINT("*BaseAddress %p\n", *BaseAddress
);
895 *BaseOffset
= current
->FileOffset
.QuadPart
;
899 return STATUS_SUCCESS
;
905 PROS_SHARED_CACHE_MAP SharedCacheMap
,
911 * FUNCTION: Request a page mapping for a shared cache map
916 ASSERT(SharedCacheMap
);
918 if (FileOffset
% VACB_MAPPING_GRANULARITY
!= 0)
920 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
921 FileOffset
, VACB_MAPPING_GRANULARITY
);
922 KeBugCheck(CACHE_MANAGER
);
925 return CcRosGetVacb(SharedCacheMap
,
937 MEMORY_AREA
* MemoryArea
,
943 ASSERT(SwapEntry
== 0);
946 ASSERT(MmGetReferenceCountPage(Page
) == 1);
947 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
952 CcRosInternalFreeVacb (
955 * FUNCTION: Releases a VACB associated with a shared cache map
958 DPRINT("Freeing VACB 0x%p\n", Vacb
);
960 if (Vacb
->SharedCacheMap
->Trace
)
962 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb
->SharedCacheMap
, Vacb
);
966 MmLockAddressSpace(MmGetKernelAddressSpace());
967 MmFreeMemoryArea(MmGetKernelAddressSpace(),
971 MmUnlockAddressSpace(MmGetKernelAddressSpace());
973 if (Vacb
->PinCount
!= 0 || Vacb
->ReferenceCount
!= 0)
975 DPRINT1("Invalid free: %ld, %ld\n", Vacb
->ReferenceCount
, Vacb
->PinCount
);
976 if (Vacb
->SharedCacheMap
->FileObject
&& Vacb
->SharedCacheMap
->FileObject
->FileName
.Length
)
978 DPRINT1("For file: %wZ\n", &Vacb
->SharedCacheMap
->FileObject
->FileName
);
982 ASSERT(Vacb
->PinCount
== 0);
983 ASSERT(Vacb
->ReferenceCount
== 0);
984 ASSERT(IsListEmpty(&Vacb
->CacheMapVacbListEntry
));
985 ASSERT(IsListEmpty(&Vacb
->DirtyVacbListEntry
));
986 ASSERT(IsListEmpty(&Vacb
->VacbLruListEntry
));
987 RtlFillMemory(Vacb
, sizeof(*Vacb
), 0xfd);
988 ExFreeToNPagedLookasideList(&VacbLookasideList
, Vacb
);
989 return STATUS_SUCCESS
;
998 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
999 IN PLARGE_INTEGER FileOffset OPTIONAL
,
1001 OUT PIO_STATUS_BLOCK IoStatus
)
1003 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1004 LARGE_INTEGER Offset
;
1005 LONGLONG RemainingLength
;
1009 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1010 SectionObjectPointers
, FileOffset
, Length
);
1012 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1013 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
1015 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1017 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1018 ASSERT(SharedCacheMap
);
1021 Offset
= *FileOffset
;
1022 RemainingLength
= Length
;
1026 Offset
.QuadPart
= 0;
1027 RemainingLength
= SharedCacheMap
->FileSize
.QuadPart
;
1032 IoStatus
->Status
= STATUS_SUCCESS
;
1033 IoStatus
->Information
= 0;
1036 while (RemainingLength
> 0)
1038 current
= CcRosLookupVacb(SharedCacheMap
, Offset
.QuadPart
);
1039 if (current
!= NULL
)
1043 Status
= CcRosFlushVacb(current
);
1044 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
1046 IoStatus
->Status
= Status
;
1050 CcRosReleaseVacb(SharedCacheMap
, current
, current
->Valid
, current
->Dirty
, FALSE
);
1053 Offset
.QuadPart
+= VACB_MAPPING_GRANULARITY
;
1054 RemainingLength
-= min(RemainingLength
, VACB_MAPPING_GRANULARITY
);
1061 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1068 CcRosDeleteFileCache (
1069 PFILE_OBJECT FileObject
,
1070 PROS_SHARED_CACHE_MAP SharedCacheMap
)
1072 * FUNCTION: Releases the shared cache map associated with a file object
1075 PLIST_ENTRY current_entry
;
1077 LIST_ENTRY FreeList
;
1080 ASSERT(SharedCacheMap
);
1082 SharedCacheMap
->OpenCount
++;
1083 KeReleaseGuardedMutex(&ViewLock
);
1085 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1087 KeAcquireGuardedMutex(&ViewLock
);
1088 SharedCacheMap
->OpenCount
--;
1089 if (SharedCacheMap
->OpenCount
== 0)
1093 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1098 InitializeListHead(&FreeList
);
1099 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1100 while (!IsListEmpty(&SharedCacheMap
->CacheMapVacbListHead
))
1102 current_entry
= RemoveTailList(&SharedCacheMap
->CacheMapVacbListHead
);
1103 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1105 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1106 RemoveEntryList(¤t
->VacbLruListEntry
);
1107 InitializeListHead(¤t
->VacbLruListEntry
);
1110 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1111 CcRosUnmarkDirtyVacb(current
, FALSE
);
1112 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1113 DPRINT1("Freeing dirty VACB\n");
1115 if (current
->MappedCount
!= 0)
1117 current
->MappedCount
= 0;
1118 NT_VERIFY(CcRosVacbDecRefCount(current
) > 0);
1119 DPRINT1("Freeing mapped VACB\n");
1121 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
1123 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1126 SharedCacheMap
->Trace
= FALSE
;
1128 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1130 KeReleaseGuardedMutex(&ViewLock
);
1131 ObDereferenceObject(SharedCacheMap
->FileObject
);
1133 while (!IsListEmpty(&FreeList
))
1137 current_entry
= RemoveTailList(&FreeList
);
1138 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1139 InitializeListHead(¤t
->CacheMapVacbListEntry
);
1140 Refs
= CcRosVacbDecRefCount(current
);
1141 #if DBG // CORE-14578
1144 DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", current
, FileObject
, current
->FileOffset
.QuadPart
);
1145 DPRINT1("There are: %d references left\n", Refs
);
1146 DPRINT1("Pin: %d, Map: %d\n", current
->PinCount
, current
->MappedCount
);
1147 DPRINT1("Dirty: %d\n", current
->Dirty
);
1148 if (FileObject
->FileName
.Length
!= 0)
1150 DPRINT1("File was: %wZ\n", &FileObject
->FileName
);
1152 else if (FileObject
->FsContext
!= NULL
&&
1153 ((PFSRTL_COMMON_FCB_HEADER
)(FileObject
->FsContext
))->NodeTypeCode
== 0x0502 &&
1154 ((PFSRTL_COMMON_FCB_HEADER
)(FileObject
->FsContext
))->NodeByteSize
== 0x1F8 &&
1155 ((PUNICODE_STRING
)(((PUCHAR
)FileObject
->FsContext
) + 0x100))->Length
!= 0)
1157 DPRINT1("File was: %wZ (FastFAT)\n", (PUNICODE_STRING
)(((PUCHAR
)FileObject
->FsContext
) + 0x100));
1161 DPRINT1("No name for the file\n");
1169 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1170 RemoveEntryList(&SharedCacheMap
->SharedCacheMapLinks
);
1171 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1173 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1174 KeAcquireGuardedMutex(&ViewLock
);
1176 return STATUS_SUCCESS
;
1181 CcRosReferenceCache (
1182 PFILE_OBJECT FileObject
)
1184 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1185 KeAcquireGuardedMutex(&ViewLock
);
1186 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1187 ASSERT(SharedCacheMap
);
1188 ASSERT(SharedCacheMap
->OpenCount
!= 0);
1189 SharedCacheMap
->OpenCount
++;
1190 KeReleaseGuardedMutex(&ViewLock
);
1195 CcRosRemoveIfClosed (
1196 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1198 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1199 DPRINT("CcRosRemoveIfClosed()\n");
1200 KeAcquireGuardedMutex(&ViewLock
);
1201 SharedCacheMap
= SectionObjectPointer
->SharedCacheMap
;
1202 if (SharedCacheMap
&& SharedCacheMap
->OpenCount
== 0)
1204 CcRosDeleteFileCache(SharedCacheMap
->FileObject
, SharedCacheMap
);
1206 KeReleaseGuardedMutex(&ViewLock
);
1212 CcRosDereferenceCache (
1213 PFILE_OBJECT FileObject
)
1215 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1216 KeAcquireGuardedMutex(&ViewLock
);
1217 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1218 ASSERT(SharedCacheMap
);
1219 if (SharedCacheMap
->OpenCount
> 0)
1221 SharedCacheMap
->OpenCount
--;
1222 if (SharedCacheMap
->OpenCount
== 0)
1224 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1225 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1228 KeReleaseGuardedMutex(&ViewLock
);
1233 CcRosReleaseFileCache (
1234 PFILE_OBJECT FileObject
)
1236 * FUNCTION: Called by the file system when a handle to a file object
1241 PPRIVATE_CACHE_MAP PrivateMap
;
1242 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1244 KeAcquireGuardedMutex(&ViewLock
);
1246 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1248 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1250 /* Closing the handle, so kill the private cache map
1251 * Before you event try to remove it from FO, always
1252 * lock the master lock, to be sure not to race
1253 * with a potential read ahead ongoing!
1255 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1256 PrivateMap
= FileObject
->PrivateCacheMap
;
1257 FileObject
->PrivateCacheMap
= NULL
;
1258 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1260 if (PrivateMap
!= NULL
)
1262 /* Remove it from the file */
1263 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &OldIrql
);
1264 RemoveEntryList(&PrivateMap
->PrivateLinks
);
1265 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, OldIrql
);
1268 if (PrivateMap
!= &SharedCacheMap
->PrivateCacheMap
)
1270 ExFreePoolWithTag(PrivateMap
, TAG_PRIVATE_CACHE_MAP
);
1274 PrivateMap
->NodeTypeCode
= 0;
1277 if (SharedCacheMap
->OpenCount
> 0)
1279 SharedCacheMap
->OpenCount
--;
1280 if (SharedCacheMap
->OpenCount
== 0)
1282 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1283 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1288 KeReleaseGuardedMutex(&ViewLock
);
1289 return STATUS_SUCCESS
;
1294 CcRosInitializeFileCache (
1295 PFILE_OBJECT FileObject
,
1296 PCC_FILE_SIZES FileSizes
,
1298 PCACHE_MANAGER_CALLBACKS CallBacks
,
1299 PVOID LazyWriterContext
)
1301 * FUNCTION: Initializes a shared cache map for a file object
1306 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1308 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1309 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1310 FileObject
, SharedCacheMap
);
1313 KeAcquireGuardedMutex(&ViewLock
);
1314 if (SharedCacheMap
== NULL
)
1317 SharedCacheMap
= ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList
);
1318 if (SharedCacheMap
== NULL
)
1320 KeReleaseGuardedMutex(&ViewLock
);
1321 return STATUS_INSUFFICIENT_RESOURCES
;
1323 RtlZeroMemory(SharedCacheMap
, sizeof(*SharedCacheMap
));
1324 ObReferenceObjectByPointer(FileObject
,
1328 SharedCacheMap
->NodeTypeCode
= NODE_TYPE_SHARED_MAP
;
1329 SharedCacheMap
->NodeByteSize
= sizeof(*SharedCacheMap
);
1330 SharedCacheMap
->FileObject
= FileObject
;
1331 SharedCacheMap
->Callbacks
= CallBacks
;
1332 SharedCacheMap
->LazyWriteContext
= LazyWriterContext
;
1333 SharedCacheMap
->SectionSize
= FileSizes
->AllocationSize
;
1334 SharedCacheMap
->FileSize
= FileSizes
->FileSize
;
1335 SharedCacheMap
->PinAccess
= PinAccess
;
1336 SharedCacheMap
->DirtyPageThreshold
= 0;
1337 SharedCacheMap
->DirtyPages
= 0;
1338 InitializeListHead(&SharedCacheMap
->PrivateList
);
1339 KeInitializeSpinLock(&SharedCacheMap
->CacheMapLock
);
1340 InitializeListHead(&SharedCacheMap
->CacheMapVacbListHead
);
1341 FileObject
->SectionObjectPointer
->SharedCacheMap
= SharedCacheMap
;
1343 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1344 InsertTailList(&CcCleanSharedCacheMapList
, &SharedCacheMap
->SharedCacheMapLinks
);
1345 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1347 if (FileObject
->PrivateCacheMap
== NULL
)
1349 PPRIVATE_CACHE_MAP PrivateMap
;
1351 /* Allocate the private cache map for this handle */
1352 if (SharedCacheMap
->PrivateCacheMap
.NodeTypeCode
!= 0)
1354 PrivateMap
= ExAllocatePoolWithTag(NonPagedPool
, sizeof(PRIVATE_CACHE_MAP
), TAG_PRIVATE_CACHE_MAP
);
1358 PrivateMap
= &SharedCacheMap
->PrivateCacheMap
;
1361 if (PrivateMap
== NULL
)
1363 /* If we also allocated the shared cache map for this file, kill it */
1366 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
1367 RemoveEntryList(&SharedCacheMap
->SharedCacheMapLinks
);
1368 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
1370 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1371 ObDereferenceObject(FileObject
);
1372 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1375 KeReleaseGuardedMutex(&ViewLock
);
1376 return STATUS_INSUFFICIENT_RESOURCES
;
1380 RtlZeroMemory(PrivateMap
, sizeof(PRIVATE_CACHE_MAP
));
1381 PrivateMap
->NodeTypeCode
= NODE_TYPE_PRIVATE_MAP
;
1382 PrivateMap
->ReadAheadMask
= PAGE_SIZE
- 1;
1383 PrivateMap
->FileObject
= FileObject
;
1384 KeInitializeSpinLock(&PrivateMap
->ReadAheadSpinLock
);
1386 /* Link it to the file */
1387 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &OldIrql
);
1388 InsertTailList(&SharedCacheMap
->PrivateList
, &PrivateMap
->PrivateLinks
);
1389 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, OldIrql
);
1391 FileObject
->PrivateCacheMap
= PrivateMap
;
1392 SharedCacheMap
->OpenCount
++;
1394 KeReleaseGuardedMutex(&ViewLock
);
1396 return STATUS_SUCCESS
;
/*
 * CcGetFileObjectFromSectionPtrs:
 * Maps a SECTION_OBJECT_POINTERS structure back to the FILE_OBJECT the cache
 * manager associated with it, by following SharedCacheMap->FileObject.
 * NOTE(review): this extraction fragment is line-mangled — the function's
 * return type and the path taken when no shared cache map is attached are
 * elided here; confirm against the full source.
 */
1404 CcGetFileObjectFromSectionPtrs (
1405 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
/* Local: the shared cache map currently attached to the section pointers. */
1407 PROS_SHARED_CACHE_MAP SharedCacheMap
;
/* Trace entry into this public Cc API for cache-manager debugging. */
1409 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p\n", SectionObjectPointers
);
/* Only meaningful when the caller's section pointers actually carry a
 * shared cache map, i.e. caching has been initialized for the file. */
1411 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1413 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1414 ASSERT(SharedCacheMap
);
/* The owning file object was recorded in the map when it was created. */
1415 return SharedCacheMap
->FileObject
;
/*
 * CcInitView — one-time (INIT-section) setup of the cache manager's global
 * state: the dirty and LRU VACB lists, the deferred-write queue, the clean
 * shared-cache-map list, their locks, and the nonpaged lookaside lists used
 * to allocate internal BCBs, shared cache maps and VACBs.  Finally registers
 * the cache as a memory consumer (trimmed via CcRosTrimCache) and initializes
 * the cache zero page.
 * NOTE(review): the extraction drops the function header and several
 * ExInitializeNPagedLookasideList arguments (allocate/free routines, flags,
 * pool tags, depth) — see the full source for those values.
 */
1426 DPRINT("CcInitView()\n");
/* Global list heads used throughout cc/view.c. */
1428 InitializeListHead(&DirtyVacbListHead
);
1429 InitializeListHead(&VacbLruListHead
);
1430 InitializeListHead(&CcDeferredWrites
);
1431 InitializeListHead(&CcCleanSharedCacheMapList
);
/* Locks protecting the deferred-write list and the view lists above. */
1432 KeInitializeSpinLock(&CcDeferredWriteSpinLock
);
1433 KeInitializeGuardedMutex(&ViewLock
);
/* Lookaside list for INTERNAL_BCB allocations (pinned-buffer control blocks). */
1434 ExInitializeNPagedLookasideList(&iBcbLookasideList
,
1438 sizeof(INTERNAL_BCB
),
/* Lookaside list for per-file ROS_SHARED_CACHE_MAP structures. */
1441 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList
,
1445 sizeof(ROS_SHARED_CACHE_MAP
),
1446 TAG_SHARED_CACHE_MAP
,
/* Lookaside list for VACB (virtual address control block) allocations. */
1448 ExInitializeNPagedLookasideList(&VacbLookasideList
,
/* Let the balancer reclaim cache pages through CcRosTrimCache. */
1456 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1458 CcInitCacheZeroPage();
/* Built only for debug kernels with kernel-debugger extension support. */
#if DBG && defined(KDBG)
/*
 * ExpKdbgExtFileCache — "!filecache"-style KDBG extension.  Walks the clean
 * shared cache map list and prints, per cached file, the amount of valid
 * (mapped) and dirty cache in kilobytes together with a best-effort file name.
 * NOTE(review): mangled extraction — the return type, local declarations for
 * Vacbs/Vacb/Extra, the NoName fallback branch and several braces are elided
 * here; confirm against the full source.
 */
1463 ExpKdbgExtFileCache(ULONG Argc
, PCHAR Argv
[])
1465 PLIST_ENTRY ListEntry
;
/* Fallback label printed when a file's name cannot be recovered. */
1466 UNICODE_STRING NoName
= RTL_CONSTANT_STRING(L
"No name for File");
1468 KdbpPrint(" Usage Summary (in kb)\n");
1469 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1470 /* No need to lock the spin lock here, we're in DBG */
/* One iteration per shared cache map currently linked on the clean list. */
1471 for (ListEntry
= CcCleanSharedCacheMapList
.Flink
;
1472 ListEntry
!= &CcCleanSharedCacheMapList
;
1473 ListEntry
= ListEntry
->Flink
)
1476 ULONG Valid
= 0, Dirty
= 0;
1477 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1478 PUNICODE_STRING FileName
;
1481 SharedCacheMap
= CONTAINING_RECORD(ListEntry
, ROS_SHARED_CACHE_MAP
, SharedCacheMapLinks
);
/* Dirty size comes straight from the per-map dirty page counter, in kb. */
1484 Dirty
= (SharedCacheMap
->DirtyPages
* PAGE_SIZE
) / 1024;
1486 /* First, count for all the associated VACB */
1487 for (Vacbs
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
1488 Vacbs
!= &SharedCacheMap
->CacheMapVacbListHead
;
1489 Vacbs
= Vacbs
->Flink
)
1493 Vacb
= CONTAINING_RECORD(Vacbs
, ROS_VACB
, CacheMapVacbListEntry
)
;
/* Each counted VACB contributes one mapping granularity's worth of cache.
 * NOTE(review): the per-VACB validity test is elided in this fragment. */
1496 Valid
+= VACB_MAPPING_GRANULARITY
/ 1024;
/* Preferred name source: the file object's own FileName string. */
1501 if (SharedCacheMap
->FileObject
!= NULL
&&
1502 SharedCacheMap
->FileObject
->FileName
.Length
!= 0)
1504 FileName
= &SharedCacheMap
->FileObject
->FileName
;
/* Fallback heuristic: if the FCB header matches FastFAT's node type
 * (0x0502) and size (0x1F8), the long-name UNICODE_STRING is assumed to
 * live at offset 0x100 inside the FCB — a hard-coded layout dependency
 * on the FastFAT driver; verify against drivers/filesystems/fastfat. */
1506 else if (SharedCacheMap
->FileObject
!= NULL
&&
1507 SharedCacheMap
->FileObject
->FsContext
!= NULL
&&
1508 ((PFSRTL_COMMON_FCB_HEADER
)(SharedCacheMap
->FileObject
->FsContext
))->NodeTypeCode
== 0x0502 &&
1509 ((PFSRTL_COMMON_FCB_HEADER
)(SharedCacheMap
->FileObject
->FsContext
))->NodeByteSize
== 0x1F8 &&
1510 ((PUNICODE_STRING
)(((PUCHAR
)SharedCacheMap
->FileObject
->FsContext
) + 0x100))->Length
!= 0)
1512 FileName
= (PUNICODE_STRING
)(((PUCHAR
)SharedCacheMap
->FileObject
->FsContext
) + 0x100);
/* Mark heuristically-recovered names so the output is not mistaken for
 * an authoritative file name. */
1513 Extra
= L
" (FastFAT)";
/* Summary line: map address, valid kb, dirty kb, name (+ optional tag). */
1521 KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap
, Valid
, Dirty
, FileName
, Extra
);
1528 ExpKdbgExtDefWrites(ULONG Argc
, PCHAR Argv
[])
1530 KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages
,
1531 (CcTotalDirtyPages
* PAGE_SIZE
) / 1024);
1532 KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold
,
1533 (CcDirtyPageThreshold
* PAGE_SIZE
) / 1024);
1534 KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages
,
1535 (MmAvailablePages
* PAGE_SIZE
) / 1024);
1536 KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop
,
1537 (MmThrottleTop
* PAGE_SIZE
) / 1024);
1538 KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom
,
1539 (MmThrottleBottom
* PAGE_SIZE
) / 1024);
1540 KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead
.Total
,
1541 (MmModifiedPageListHead
.Total
* PAGE_SIZE
) / 1024);
1543 if (CcTotalDirtyPages
>= CcDirtyPageThreshold
)
1545 KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
1547 else if (CcTotalDirtyPages
+ 64 >= CcDirtyPageThreshold
)
1549 KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
1553 KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");