2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
11 /* NOTES **********************************************************************
13 * This is not the NT implementation of a file cache nor anything much like
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
19 * (1) If caching for the FCB hasn't been initiated then so do by calling
20 * CcInitializeFileCache.
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
27 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
29 * (4) Copy the data into or out of the page as necessary.
31 * (5) Release the cache page
33 /* INCLUDES ******************************************************************/
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
43 /* GLOBALS *******************************************************************/
/* List of VACBs holding dirty data, drained by CcRosFlushDirtyPages */
LIST_ENTRY DirtyVacbListHead;
/* All VACBs in least-recently-used order, scanned by CcRosTrimCache;
 * VACBs are moved to the tail on access */
static LIST_ENTRY VacbLruListHead;
/* Global guarded mutex serializing access to the view lists above and
 * to the per-file shared cache maps */
KGUARDED_MUTEX ViewLock;

/* Non-paged lookaside lists backing BCB, shared-cache-map and VACB
 * allocations (initialized in CcInitView) */
NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;
/* Counters:
 * - Amount of pages flushed by lazy writer
 * - Number of times lazy writer ran
 */
ULONG CcLazyWritePages = 0;
ULONG CcLazyWriteIos = 0;

/* Internal vars (MS):
 * - Threshold above which lazy writer will start action
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock when dealing with the deferred list
 * - List for "clean" shared cache maps
 * - One second delay for lazy writer
 * - System size when system started
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;
/* Relative (negative) time: 1 second in 100ns units */
LARGE_INTEGER CcIdleDelay = RTL_CONSTANT_LARGE_INTEGER((LONGLONG)-1*1000*1000*10);
MM_SYSTEMSIZE CcCapturedSystemSize;
/* Internal vars (ROS):
 * - Event to notify lazy writer to shutdown
 * - Event to inform watchers lazy writer is done for this loop
 * - Lock for the CcCleanSharedCacheMapList list
 */
KEVENT iLazyWriterShutdown;
KEVENT iLazyWriterNotify;
KSPIN_LOCK iSharedCacheMapLock;
88 static void CcRosVacbIncRefCount_(PROS_VACB vacb
, const char* file
, int line
)
90 ++vacb
->ReferenceCount
;
91 if (vacb
->SharedCacheMap
->Trace
)
93 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
94 file
, line
, vacb
, vacb
->ReferenceCount
, vacb
->Dirty
, vacb
->PageOut
);
97 static void CcRosVacbDecRefCount_(PROS_VACB vacb
, const char* file
, int line
)
99 ASSERT(vacb
->ReferenceCount
!= 0);
100 --vacb
->ReferenceCount
;
101 ASSERT(!(vacb
->ReferenceCount
== 0 && vacb
->Dirty
));
102 if (vacb
->SharedCacheMap
->Trace
)
104 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
105 file
, line
, vacb
, vacb
->ReferenceCount
, vacb
->Dirty
, vacb
->PageOut
);
/* Reference-count helpers for VACBs. In debug builds they route through
 * the tracing functions above (capturing call site via __FILE__/__LINE__);
 * in free builds they are plain increments/decrements. The #if DBG guards
 * were lost in this copy: without them the second pair of definitions
 * redefines the first, so the conditional is restored here. */
#if DBG
#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
116 CcRosInternalFreeVacb(PROS_VACB Vacb
);
119 /* FUNCTIONS *****************************************************************/
124 PROS_SHARED_CACHE_MAP SharedCacheMap
,
129 PLIST_ENTRY current_entry
;
135 SharedCacheMap
->Trace
= Trace
;
139 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
141 KeAcquireGuardedMutex(&ViewLock
);
142 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldirql
);
144 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
145 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
147 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
148 current_entry
= current_entry
->Flink
;
150 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
151 current
, current
->ReferenceCount
, current
->Dirty
, current
->PageOut
);
153 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
154 KeReleaseGuardedMutex(&ViewLock
);
158 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
162 UNREFERENCED_PARAMETER(SharedCacheMap
);
163 UNREFERENCED_PARAMETER(Trace
);
174 Status
= CcWriteVirtualAddress(Vacb
);
175 if (NT_SUCCESS(Status
))
177 CcRosUnmarkDirtyVacb(Vacb
, TRUE
);
185 CcRosFlushDirtyPages (
189 BOOLEAN CalledFromLazy
)
191 PLIST_ENTRY current_entry
;
195 LARGE_INTEGER ZeroTimeout
;
197 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
200 ZeroTimeout
.QuadPart
= 0;
202 KeEnterCriticalRegion();
203 KeAcquireGuardedMutex(&ViewLock
);
205 current_entry
= DirtyVacbListHead
.Flink
;
206 if (current_entry
== &DirtyVacbListHead
)
208 DPRINT("No Dirty pages\n");
211 while ((current_entry
!= &DirtyVacbListHead
) && (Target
> 0))
213 current
= CONTAINING_RECORD(current_entry
,
216 current_entry
= current_entry
->Flink
;
218 CcRosVacbIncRefCount(current
);
220 /* When performing lazy write, don't handle temporary files */
221 if (CalledFromLazy
&&
222 BooleanFlagOn(current
->SharedCacheMap
->FileObject
->Flags
, FO_TEMPORARY_FILE
))
224 CcRosVacbDecRefCount(current
);
228 Locked
= current
->SharedCacheMap
->Callbacks
->AcquireForLazyWrite(
229 current
->SharedCacheMap
->LazyWriteContext
, Wait
);
232 CcRosVacbDecRefCount(current
);
236 Status
= CcRosAcquireVacbLock(current
,
237 Wait
? NULL
: &ZeroTimeout
);
238 if (Status
!= STATUS_SUCCESS
)
240 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
241 current
->SharedCacheMap
->LazyWriteContext
);
242 CcRosVacbDecRefCount(current
);
246 ASSERT(current
->Dirty
);
248 /* One reference is added above */
249 if ((current
->ReferenceCount
> 2 && current
->PinCount
== 0) ||
250 (current
->ReferenceCount
> 3 && current
->PinCount
> 1))
252 CcRosReleaseVacbLock(current
);
253 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
254 current
->SharedCacheMap
->LazyWriteContext
);
255 CcRosVacbDecRefCount(current
);
259 KeReleaseGuardedMutex(&ViewLock
);
261 Status
= CcRosFlushVacb(current
);
263 CcRosReleaseVacbLock(current
);
264 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
265 current
->SharedCacheMap
->LazyWriteContext
);
267 KeAcquireGuardedMutex(&ViewLock
);
268 CcRosVacbDecRefCount(current
);
270 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
) &&
271 (Status
!= STATUS_MEDIA_WRITE_PROTECTED
))
273 DPRINT1("CC: Failed to flush VACB.\n");
279 /* How many pages did we free? */
280 PagesFreed
= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
281 (*Count
) += PagesFreed
;
283 /* Make sure we don't overflow target! */
284 if (Target
< PagesFreed
)
286 /* If we would have, jump to zero directly */
291 Target
-= PagesFreed
;
295 current_entry
= DirtyVacbListHead
.Flink
;
298 KeReleaseGuardedMutex(&ViewLock
);
299 KeLeaveCriticalRegion();
301 DPRINT("CcRosFlushDirtyPages() finished\n");
302 return STATUS_SUCCESS
;
305 /* FIXME: Someday this could somewhat implement write-behind/read-ahead */
308 CciLazyWriter(PVOID Unused
)
313 PLIST_ENTRY ListEntry
;
314 ULONG Target
, Count
= 0;
316 /* One per second or until we have to stop */
317 Status
= KeWaitForSingleObject(&iLazyWriterShutdown
,
323 /* If we succeeed, we've to stop running! */
324 if (Status
== STATUS_SUCCESS
)
329 /* We're not sleeping anymore */
330 KeClearEvent(&iLazyWriterNotify
);
332 /* Our target is one-eighth of the dirty pages */
333 Target
= CcTotalDirtyPages
/ 8;
337 DPRINT("Lazy writer starting (%d)\n", Target
);
338 CcRosFlushDirtyPages(Target
, &Count
, FALSE
, TRUE
);
340 /* And update stats */
341 CcLazyWritePages
+= Count
;
343 DPRINT("Lazy writer done (%d)\n", Count
);
346 /* Inform people waiting on us that we're done */
347 KeSetEvent(&iLazyWriterNotify
, IO_DISK_INCREMENT
, FALSE
);
349 /* Likely not optimal, but let's handle one deferred write now! */
350 ListEntry
= ExInterlockedRemoveHeadList(&CcDeferredWrites
, &CcDeferredWriteSpinLock
);
351 if (ListEntry
!= NULL
)
353 PDEFERRED_WRITE Context
;
355 /* Extract the context */
356 Context
= CONTAINING_RECORD(ListEntry
, DEFERRED_WRITE
, DeferredWriteLinks
);
357 ASSERT(Context
->NodeTypeCode
== NODE_TYPE_DEFERRED_WRITE
);
359 /* Can we write now? */
360 if (CcCanIWrite(Context
->FileObject
, Context
->BytesToWrite
, FALSE
, TRUE
))
362 /* Yes! Do it, and destroy the associated context */
363 Context
->PostRoutine(Context
->Context1
, Context
->Context2
);
364 ExFreePoolWithTag(Context
, 'CcDw');
368 /* Otherwise, requeue it, but in tail, so that it doesn't block others
369 * This is clearly to improve, but given the poor algorithm used now
370 * It's better than nothing!
372 ExInterlockedInsertTailList(&CcDeferredWrites
,
373 &Context
->DeferredWriteLinks
,
374 &CcDeferredWriteSpinLock
);
386 * FUNCTION: Try to free some memory from the file cache.
388 * Target - The number of pages to be freed.
389 * Priority - The priority of free (currently unused).
390 * NrFreed - Points to a variable where the number of pages
391 * actually freed is returned.
394 PLIST_ENTRY current_entry
;
401 BOOLEAN FlushedPages
= FALSE
;
403 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
405 InitializeListHead(&FreeList
);
410 KeAcquireGuardedMutex(&ViewLock
);
412 current_entry
= VacbLruListHead
.Flink
;
413 while (current_entry
!= &VacbLruListHead
)
415 current
= CONTAINING_RECORD(current_entry
,
418 current_entry
= current_entry
->Flink
;
420 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
422 /* Reference the VACB */
423 CcRosVacbIncRefCount(current
);
425 /* Check if it's mapped and not dirty */
426 if (current
->MappedCount
> 0 && !current
->Dirty
)
428 /* We have to break these locks because Cc sucks */
429 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
430 KeReleaseGuardedMutex(&ViewLock
);
432 /* Page out the VACB */
433 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++)
435 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
437 MmPageOutPhysicalAddress(Page
);
440 /* Reacquire the locks */
441 KeAcquireGuardedMutex(&ViewLock
);
442 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
445 /* Dereference the VACB */
446 CcRosVacbDecRefCount(current
);
448 /* Check if we can free this entry now */
449 if (current
->ReferenceCount
== 0)
451 ASSERT(!current
->Dirty
);
452 ASSERT(!current
->MappedCount
);
454 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
455 RemoveEntryList(¤t
->VacbLruListEntry
);
456 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
458 /* Calculate how many pages we freed for Mm */
459 PagesFreed
= min(VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
, Target
);
460 Target
-= PagesFreed
;
461 (*NrFreed
) += PagesFreed
;
464 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
467 KeReleaseGuardedMutex(&ViewLock
);
469 /* Try flushing pages if we haven't met our target */
470 if ((Target
> 0) && !FlushedPages
)
472 /* Flush dirty pages to disk */
473 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
, FALSE
);
476 /* We can only swap as many pages as we flushed */
477 if (PagesFreed
< Target
) Target
= PagesFreed
;
479 /* Check if we flushed anything */
482 /* Try again after flushing dirty pages */
483 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
488 while (!IsListEmpty(&FreeList
))
490 current_entry
= RemoveHeadList(&FreeList
);
491 current
= CONTAINING_RECORD(current_entry
,
493 CacheMapVacbListEntry
);
494 CcRosInternalFreeVacb(current
);
497 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
499 return STATUS_SUCCESS
;
505 PROS_SHARED_CACHE_MAP SharedCacheMap
,
511 ASSERT(SharedCacheMap
);
513 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
514 SharedCacheMap
, Vacb
, Valid
);
518 if (Dirty
&& !Vacb
->Dirty
)
520 CcRosMarkDirtyVacb(Vacb
);
527 CcRosVacbDecRefCount(Vacb
);
528 if (Mapped
&& (Vacb
->MappedCount
== 1))
530 CcRosVacbIncRefCount(Vacb
);
533 CcRosReleaseVacbLock(Vacb
);
535 return STATUS_SUCCESS
;
538 /* Returns with VACB Lock Held! */
542 PROS_SHARED_CACHE_MAP SharedCacheMap
,
545 PLIST_ENTRY current_entry
;
549 ASSERT(SharedCacheMap
);
551 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
552 SharedCacheMap
, FileOffset
);
554 KeAcquireGuardedMutex(&ViewLock
);
555 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
557 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
558 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
560 current
= CONTAINING_RECORD(current_entry
,
562 CacheMapVacbListEntry
);
563 if (IsPointInRange(current
->FileOffset
.QuadPart
,
564 VACB_MAPPING_GRANULARITY
,
567 CcRosVacbIncRefCount(current
);
568 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
569 KeReleaseGuardedMutex(&ViewLock
);
570 CcRosAcquireVacbLock(current
, NULL
);
573 if (current
->FileOffset
.QuadPart
> FileOffset
)
575 current_entry
= current_entry
->Flink
;
578 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
579 KeReleaseGuardedMutex(&ViewLock
);
590 PROS_SHARED_CACHE_MAP SharedCacheMap
;
592 SharedCacheMap
= Vacb
->SharedCacheMap
;
594 KeAcquireGuardedMutex(&ViewLock
);
595 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
597 ASSERT(!Vacb
->Dirty
);
599 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
600 CcTotalDirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
601 Vacb
->SharedCacheMap
->DirtyPages
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
602 CcRosVacbIncRefCount(Vacb
);
604 /* Move to the tail of the LRU list */
605 RemoveEntryList(&Vacb
->VacbLruListEntry
);
606 InsertTailList(&VacbLruListHead
, &Vacb
->VacbLruListEntry
);
610 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
611 KeReleaseGuardedMutex(&ViewLock
);
616 CcRosUnmarkDirtyVacb (
621 PROS_SHARED_CACHE_MAP SharedCacheMap
;
623 SharedCacheMap
= Vacb
->SharedCacheMap
;
627 KeAcquireGuardedMutex(&ViewLock
);
628 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
635 RemoveEntryList(&Vacb
->DirtyVacbListEntry
);
636 CcTotalDirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
637 Vacb
->SharedCacheMap
->DirtyPages
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
638 CcRosVacbDecRefCount(Vacb
);
642 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
643 KeReleaseGuardedMutex(&ViewLock
);
650 PROS_SHARED_CACHE_MAP SharedCacheMap
,
655 ASSERT(SharedCacheMap
);
657 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
658 SharedCacheMap
, FileOffset
);
660 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
663 KeBugCheck(CACHE_MANAGER
);
668 CcRosMarkDirtyVacb(Vacb
);
671 CcRosReleaseVacbLock(Vacb
);
673 return STATUS_SUCCESS
;
679 PROS_SHARED_CACHE_MAP SharedCacheMap
,
685 ASSERT(SharedCacheMap
);
687 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
688 SharedCacheMap
, FileOffset
, NowDirty
);
690 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
693 return STATUS_UNSUCCESSFUL
;
696 if (NowDirty
&& !Vacb
->Dirty
)
698 CcRosMarkDirtyVacb(Vacb
);
701 ASSERT(Vacb
->MappedCount
!= 0);
704 CcRosVacbDecRefCount(Vacb
);
705 if (Vacb
->MappedCount
== 0)
707 CcRosVacbDecRefCount(Vacb
);
710 CcRosReleaseVacbLock(Vacb
);
712 return STATUS_SUCCESS
;
722 ULONG_PTR NumberOfPages
;
724 /* Create a memory area. */
725 MmLockAddressSpace(MmGetKernelAddressSpace());
726 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
727 0, // nothing checks for VACB mareas, so set to 0
729 VACB_MAPPING_GRANULARITY
,
731 (PMEMORY_AREA
*)&Vacb
->MemoryArea
,
734 MmUnlockAddressSpace(MmGetKernelAddressSpace());
735 if (!NT_SUCCESS(Status
))
737 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status
, Vacb
);
741 ASSERT(((ULONG_PTR
)Vacb
->BaseAddress
% PAGE_SIZE
) == 0);
742 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
> (ULONG_PTR
)MmSystemRangeStart
);
744 /* Create a virtual mapping for this memory area */
745 NumberOfPages
= BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY
);
746 for (i
= 0; i
< NumberOfPages
; i
++)
748 PFN_NUMBER PageFrameNumber
;
750 MI_SET_USAGE(MI_USAGE_CACHE
);
751 Status
= MmRequestPageMemoryConsumer(MC_CACHE
, TRUE
, &PageFrameNumber
);
752 if (PageFrameNumber
== 0)
754 DPRINT1("Unable to allocate page\n");
755 KeBugCheck(MEMORY_MANAGEMENT
);
758 Status
= MmCreateVirtualMapping(NULL
,
759 (PVOID
)((ULONG_PTR
)Vacb
->BaseAddress
+ (i
* PAGE_SIZE
)),
763 if (!NT_SUCCESS(Status
))
765 DPRINT1("Unable to create virtual mapping\n");
766 KeBugCheck(MEMORY_MANAGEMENT
);
770 return STATUS_SUCCESS
;
776 PROS_SHARED_CACHE_MAP SharedCacheMap
,
782 PLIST_ENTRY current_entry
;
786 ASSERT(SharedCacheMap
);
788 DPRINT("CcRosCreateVacb()\n");
790 if (FileOffset
>= SharedCacheMap
->SectionSize
.QuadPart
)
793 return STATUS_INVALID_PARAMETER
;
796 current
= ExAllocateFromNPagedLookasideList(&VacbLookasideList
);
797 current
->BaseAddress
= NULL
;
798 current
->Valid
= FALSE
;
799 current
->Dirty
= FALSE
;
800 current
->PageOut
= FALSE
;
801 current
->FileOffset
.QuadPart
= ROUND_DOWN(FileOffset
, VACB_MAPPING_GRANULARITY
);
802 current
->SharedCacheMap
= SharedCacheMap
;
804 if (SharedCacheMap
->Trace
)
806 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap
, current
);
809 current
->MappedCount
= 0;
810 current
->DirtyVacbListEntry
.Flink
= NULL
;
811 current
->DirtyVacbListEntry
.Blink
= NULL
;
812 current
->ReferenceCount
= 1;
813 current
->PinCount
= 0;
814 KeInitializeMutex(¤t
->Mutex
, 0);
815 CcRosAcquireVacbLock(current
, NULL
);
816 KeAcquireGuardedMutex(&ViewLock
);
819 /* There is window between the call to CcRosLookupVacb
820 * and CcRosCreateVacb. We must check if a VACB for the
821 * file offset exist. If there is a VACB, we release
822 * our newly created VACB and return the existing one.
824 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
825 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
827 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
829 current
= CONTAINING_RECORD(current_entry
,
831 CacheMapVacbListEntry
);
832 if (IsPointInRange(current
->FileOffset
.QuadPart
,
833 VACB_MAPPING_GRANULARITY
,
836 CcRosVacbIncRefCount(current
);
837 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
839 if (SharedCacheMap
->Trace
)
841 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
847 CcRosReleaseVacbLock(*Vacb
);
848 KeReleaseGuardedMutex(&ViewLock
);
849 ExFreeToNPagedLookasideList(&VacbLookasideList
, *Vacb
);
851 CcRosAcquireVacbLock(current
, NULL
);
852 return STATUS_SUCCESS
;
854 if (current
->FileOffset
.QuadPart
< FileOffset
)
856 ASSERT(previous
== NULL
||
857 previous
->FileOffset
.QuadPart
< current
->FileOffset
.QuadPart
);
860 if (current
->FileOffset
.QuadPart
> FileOffset
)
862 current_entry
= current_entry
->Flink
;
864 /* There was no existing VACB. */
868 InsertHeadList(&previous
->CacheMapVacbListEntry
, ¤t
->CacheMapVacbListEntry
);
872 InsertHeadList(&SharedCacheMap
->CacheMapVacbListHead
, ¤t
->CacheMapVacbListEntry
);
874 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
875 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
876 KeReleaseGuardedMutex(&ViewLock
);
878 MI_SET_USAGE(MI_USAGE_CACHE
);
880 if ((SharedCacheMap
->FileObject
) && (SharedCacheMap
->FileObject
->FileName
.Buffer
))
884 pos
= wcsrchr(SharedCacheMap
->FileObject
->FileName
.Buffer
, '\\');
887 len
= wcslen(pos
) * sizeof(WCHAR
);
888 snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
892 snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%wZ", &SharedCacheMap
->FileObject
->FileName
);
897 Status
= CcRosMapVacb(current
);
898 if (!NT_SUCCESS(Status
))
900 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
901 RemoveEntryList(¤t
->VacbLruListEntry
);
902 CcRosReleaseVacbLock(current
);
903 ExFreeToNPagedLookasideList(&VacbLookasideList
, current
);
912 PROS_SHARED_CACHE_MAP SharedCacheMap
,
914 PLONGLONG BaseOffset
,
922 ASSERT(SharedCacheMap
);
924 DPRINT("CcRosGetVacb()\n");
927 * Look for a VACB already mapping the same data.
929 current
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
933 * Otherwise create a new VACB.
935 Status
= CcRosCreateVacb(SharedCacheMap
, FileOffset
, ¤t
);
936 if (!NT_SUCCESS(Status
))
942 KeAcquireGuardedMutex(&ViewLock
);
944 /* Move to the tail of the LRU list */
945 RemoveEntryList(¤t
->VacbLruListEntry
);
946 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
948 KeReleaseGuardedMutex(&ViewLock
);
951 * Return information about the VACB to the caller.
953 *UptoDate
= current
->Valid
;
954 *BaseAddress
= current
->BaseAddress
;
955 DPRINT("*BaseAddress %p\n", *BaseAddress
);
957 *BaseOffset
= current
->FileOffset
.QuadPart
;
958 return STATUS_SUCCESS
;
964 PROS_SHARED_CACHE_MAP SharedCacheMap
,
970 * FUNCTION: Request a page mapping for a shared cache map
975 ASSERT(SharedCacheMap
);
977 if (FileOffset
% VACB_MAPPING_GRANULARITY
!= 0)
979 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
980 FileOffset
, VACB_MAPPING_GRANULARITY
);
981 KeBugCheck(CACHE_MANAGER
);
984 return CcRosGetVacb(SharedCacheMap
,
996 MEMORY_AREA
* MemoryArea
,
1002 ASSERT(SwapEntry
== 0);
1005 ASSERT(MmGetReferenceCountPage(Page
) == 1);
1006 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
1011 CcRosInternalFreeVacb (
1014 * FUNCTION: Releases a VACB associated with a shared cache map
1017 DPRINT("Freeing VACB 0x%p\n", Vacb
);
1019 if (Vacb
->SharedCacheMap
->Trace
)
1021 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb
->SharedCacheMap
, Vacb
);
1025 MmLockAddressSpace(MmGetKernelAddressSpace());
1026 MmFreeMemoryArea(MmGetKernelAddressSpace(),
1030 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1032 ExFreeToNPagedLookasideList(&VacbLookasideList
, Vacb
);
1033 return STATUS_SUCCESS
;
1042 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
1043 IN PLARGE_INTEGER FileOffset OPTIONAL
,
1045 OUT PIO_STATUS_BLOCK IoStatus
)
1047 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1048 LARGE_INTEGER Offset
;
1049 LONGLONG RemainingLength
;
1054 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1055 SectionObjectPointers
, FileOffset
, Length
);
1057 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1058 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
1060 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1062 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1063 ASSERT(SharedCacheMap
);
1066 Offset
= *FileOffset
;
1067 RemainingLength
= Length
;
1071 Offset
.QuadPart
= 0;
1072 RemainingLength
= SharedCacheMap
->FileSize
.QuadPart
;
1077 IoStatus
->Status
= STATUS_SUCCESS
;
1078 IoStatus
->Information
= 0;
1081 while (RemainingLength
> 0)
1083 current
= CcRosLookupVacb(SharedCacheMap
, Offset
.QuadPart
);
1084 if (current
!= NULL
)
1088 Status
= CcRosFlushVacb(current
);
1089 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
1091 IoStatus
->Status
= Status
;
1095 CcRosReleaseVacbLock(current
);
1097 KeAcquireGuardedMutex(&ViewLock
);
1098 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1099 CcRosVacbDecRefCount(current
);
1100 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1101 KeReleaseGuardedMutex(&ViewLock
);
1104 Offset
.QuadPart
+= VACB_MAPPING_GRANULARITY
;
1105 RemainingLength
-= min(RemainingLength
, VACB_MAPPING_GRANULARITY
);
1112 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1119 CcRosDeleteFileCache (
1120 PFILE_OBJECT FileObject
,
1121 PROS_SHARED_CACHE_MAP SharedCacheMap
)
1123 * FUNCTION: Releases the shared cache map associated with a file object
1126 PLIST_ENTRY current_entry
;
1128 LIST_ENTRY FreeList
;
1131 ASSERT(SharedCacheMap
);
1133 SharedCacheMap
->OpenCount
++;
1134 KeReleaseGuardedMutex(&ViewLock
);
1136 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1138 KeAcquireGuardedMutex(&ViewLock
);
1139 SharedCacheMap
->OpenCount
--;
1140 if (SharedCacheMap
->OpenCount
== 0)
1144 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1149 InitializeListHead(&FreeList
);
1150 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1151 while (!IsListEmpty(&SharedCacheMap
->CacheMapVacbListHead
))
1153 current_entry
= RemoveTailList(&SharedCacheMap
->CacheMapVacbListHead
);
1154 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1156 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1157 CcRosAcquireVacbLock(current
, NULL
);
1158 RemoveEntryList(¤t
->VacbLruListEntry
);
1161 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1162 CcRosUnmarkDirtyVacb(current
, FALSE
);
1163 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1164 DPRINT1("Freeing dirty VACB\n");
1166 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
1167 CcRosReleaseVacbLock(current
);
1169 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1172 SharedCacheMap
->Trace
= FALSE
;
1174 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1176 KeReleaseGuardedMutex(&ViewLock
);
1177 ObDereferenceObject(SharedCacheMap
->FileObject
);
1179 while (!IsListEmpty(&FreeList
))
1181 current_entry
= RemoveTailList(&FreeList
);
1182 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1183 CcRosInternalFreeVacb(current
);
1186 KeAcquireSpinLock(&iSharedCacheMapLock
, &OldIrql
);
1187 RemoveEntryList(&SharedCacheMap
->SharedCacheMapLinks
);
1188 KeReleaseSpinLock(&iSharedCacheMapLock
, OldIrql
);
1190 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1191 KeAcquireGuardedMutex(&ViewLock
);
1193 return STATUS_SUCCESS
;
1198 CcRosReferenceCache (
1199 PFILE_OBJECT FileObject
)
1201 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1202 KeAcquireGuardedMutex(&ViewLock
);
1203 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1204 ASSERT(SharedCacheMap
);
1205 ASSERT(SharedCacheMap
->OpenCount
!= 0);
1206 SharedCacheMap
->OpenCount
++;
1207 KeReleaseGuardedMutex(&ViewLock
);
1212 CcRosRemoveIfClosed (
1213 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1215 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1216 DPRINT("CcRosRemoveIfClosed()\n");
1217 KeAcquireGuardedMutex(&ViewLock
);
1218 SharedCacheMap
= SectionObjectPointer
->SharedCacheMap
;
1219 if (SharedCacheMap
&& SharedCacheMap
->OpenCount
== 0)
1221 CcRosDeleteFileCache(SharedCacheMap
->FileObject
, SharedCacheMap
);
1223 KeReleaseGuardedMutex(&ViewLock
);
1229 CcRosDereferenceCache (
1230 PFILE_OBJECT FileObject
)
1232 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1233 KeAcquireGuardedMutex(&ViewLock
);
1234 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1235 ASSERT(SharedCacheMap
);
1236 if (SharedCacheMap
->OpenCount
> 0)
1238 SharedCacheMap
->OpenCount
--;
1239 if (SharedCacheMap
->OpenCount
== 0)
1241 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1242 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1245 KeReleaseGuardedMutex(&ViewLock
);
1250 CcRosReleaseFileCache (
1251 PFILE_OBJECT FileObject
)
1253 * FUNCTION: Called by the file system when a handle to a file object
1257 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1259 KeAcquireGuardedMutex(&ViewLock
);
1261 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1263 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1264 if (FileObject
->PrivateCacheMap
!= NULL
)
1266 FileObject
->PrivateCacheMap
= NULL
;
1267 if (SharedCacheMap
->OpenCount
> 0)
1269 SharedCacheMap
->OpenCount
--;
1270 if (SharedCacheMap
->OpenCount
== 0)
1272 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1273 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1278 KeReleaseGuardedMutex(&ViewLock
);
1279 return STATUS_SUCCESS
;
1284 CcTryToInitializeFileCache (
1285 PFILE_OBJECT FileObject
)
1287 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1290 KeAcquireGuardedMutex(&ViewLock
);
1292 ASSERT(FileObject
->SectionObjectPointer
);
1293 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1294 if (SharedCacheMap
== NULL
)
1296 Status
= STATUS_UNSUCCESSFUL
;
1300 if (FileObject
->PrivateCacheMap
== NULL
)
1302 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1303 SharedCacheMap
->OpenCount
++;
1305 Status
= STATUS_SUCCESS
;
1307 KeReleaseGuardedMutex(&ViewLock
);
1315 CcRosInitializeFileCache (
1316 PFILE_OBJECT FileObject
,
1317 PCC_FILE_SIZES FileSizes
,
1319 PCACHE_MANAGER_CALLBACKS CallBacks
,
1320 PVOID LazyWriterContext
)
1322 * FUNCTION: Initializes a shared cache map for a file object
1325 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1327 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1328 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1329 FileObject
, SharedCacheMap
);
1331 KeAcquireGuardedMutex(&ViewLock
);
1332 if (SharedCacheMap
== NULL
)
1336 SharedCacheMap
= ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList
);
1337 if (SharedCacheMap
== NULL
)
1339 KeReleaseGuardedMutex(&ViewLock
);
1340 return STATUS_INSUFFICIENT_RESOURCES
;
1342 RtlZeroMemory(SharedCacheMap
, sizeof(*SharedCacheMap
));
1343 ObReferenceObjectByPointer(FileObject
,
1347 SharedCacheMap
->FileObject
= FileObject
;
1348 SharedCacheMap
->Callbacks
= CallBacks
;
1349 SharedCacheMap
->LazyWriteContext
= LazyWriterContext
;
1350 SharedCacheMap
->SectionSize
= FileSizes
->AllocationSize
;
1351 SharedCacheMap
->FileSize
= FileSizes
->FileSize
;
1352 SharedCacheMap
->PinAccess
= PinAccess
;
1353 SharedCacheMap
->DirtyPageThreshold
= 0;
1354 SharedCacheMap
->DirtyPages
= 0;
1355 KeInitializeSpinLock(&SharedCacheMap
->CacheMapLock
);
1356 InitializeListHead(&SharedCacheMap
->CacheMapVacbListHead
);
1357 FileObject
->SectionObjectPointer
->SharedCacheMap
= SharedCacheMap
;
1359 KeAcquireSpinLock(&iSharedCacheMapLock
, &OldIrql
);
1360 InsertTailList(&CcCleanSharedCacheMapList
, &SharedCacheMap
->SharedCacheMapLinks
);
1361 KeReleaseSpinLock(&iSharedCacheMapLock
, OldIrql
);
1363 if (FileObject
->PrivateCacheMap
== NULL
)
1365 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1366 SharedCacheMap
->OpenCount
++;
1368 KeReleaseGuardedMutex(&ViewLock
);
1370 return STATUS_SUCCESS
;
1378 CcGetFileObjectFromSectionPtrs (
1379 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1381 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1383 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p\n", SectionObjectPointers
);
1385 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1387 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1388 ASSERT(SharedCacheMap
);
1389 return SharedCacheMap
->FileObject
;
1396 CcShutdownLazyWriter (
1399 /* Simply set the event, lazy writer will stop when it's done */
1400 KeSetEvent(&iLazyWriterShutdown
, IO_DISK_INCREMENT
, FALSE
);
1412 OBJECT_ATTRIBUTES ObjectAttributes
;
1414 DPRINT("CcInitView()\n");
1416 InitializeListHead(&DirtyVacbListHead
);
1417 InitializeListHead(&VacbLruListHead
);
1418 InitializeListHead(&CcDeferredWrites
);
1419 InitializeListHead(&CcCleanSharedCacheMapList
);
1420 KeInitializeSpinLock(&CcDeferredWriteSpinLock
);
1421 KeInitializeSpinLock(&iSharedCacheMapLock
);
1422 KeInitializeGuardedMutex(&ViewLock
);
1423 ExInitializeNPagedLookasideList(&iBcbLookasideList
,
1427 sizeof(INTERNAL_BCB
),
1430 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList
,
1434 sizeof(ROS_SHARED_CACHE_MAP
),
1435 TAG_SHARED_CACHE_MAP
,
1437 ExInitializeNPagedLookasideList(&VacbLookasideList
,
1445 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1447 /* Initialize lazy writer events */
1448 KeInitializeEvent(&iLazyWriterShutdown
, SynchronizationEvent
, FALSE
);
1449 KeInitializeEvent(&iLazyWriterNotify
, NotificationEvent
, FALSE
);
1451 /* Define lazy writer threshold, depending on system type */
1452 CcCapturedSystemSize
= MmQuerySystemSize();
1453 switch (CcCapturedSystemSize
)
1456 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 8;
1459 case MmMediumSystem
:
1460 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 4;
1464 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 8 + MmNumberOfPhysicalPages
/ 4;
1468 /* Start the lazy writer thread */
1469 InitializeObjectAttributes(&ObjectAttributes
,
1474 Status
= PsCreateSystemThread(&LazyWriter
,
1481 if (!NT_SUCCESS(Status
))
1487 Status
= NtSetInformationThread(LazyWriter
,
1491 ASSERT(NT_SUCCESS(Status
));
1493 /* Handle is not needed */
1494 ObCloseHandle(LazyWriter
, KernelMode
);
1496 CcInitCacheZeroPage();
1501 #if DBG && defined(KDBG)
1503 ExpKdbgExtFileCache(ULONG Argc
, PCHAR Argv
[])
1505 PLIST_ENTRY ListEntry
;
1506 UNICODE_STRING NoName
= RTL_CONSTANT_STRING(L
"No name for File");
1508 KdbpPrint(" Usage Summary (in kb)\n");
1509 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1510 /* No need to lock the spin lock here, we're in DBG */
1511 for (ListEntry
= CcCleanSharedCacheMapList
.Flink
;
1512 ListEntry
!= &CcCleanSharedCacheMapList
;
1513 ListEntry
= ListEntry
->Flink
)
1516 ULONG Valid
= 0, Dirty
= 0;
1517 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1518 PUNICODE_STRING FileName
;
1520 SharedCacheMap
= CONTAINING_RECORD(ListEntry
, ROS_SHARED_CACHE_MAP
, SharedCacheMapLinks
);
1523 Dirty
= (SharedCacheMap
->DirtyPages
* PAGE_SIZE
) / 1024;
1525 /* First, count for all the associated VACB */
1526 for (Vacbs
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
1527 Vacbs
!= &SharedCacheMap
->CacheMapVacbListHead
;
1528 Vacbs
= Vacbs
->Flink
)
1532 Vacb
= CONTAINING_RECORD(Vacbs
, ROS_VACB
, CacheMapVacbListEntry
);
1535 Valid
+= VACB_MAPPING_GRANULARITY
/ 1024;
1540 if (SharedCacheMap
->FileObject
!= NULL
&&
1541 SharedCacheMap
->FileObject
->FileName
.Length
!= 0)
1543 FileName
= &SharedCacheMap
->FileObject
->FileName
;
1551 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap
, Valid
, Dirty
, FileName
);