/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 *                  Pierre Schweitzer (pierre@reactos.org)
 */
/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows:
 *
 * (1) If caching for the FCB hasn't been initiated, then do so by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written, obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date, then read its data from the underlying medium. If the read
 * fails, then call CcReleaseCachePage with VALID as FALSE and return an error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page.
 */
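/*
 * Illustrative sketch of the steps above (kept under #if 0, not part of the
 * build): a minimal read path. The exact prototypes of CcInitializeFileCache,
 * CcRequestCachePage and CcReleaseCachePage, and every Example* helper, are
 * hypothetical and only stand in for whatever a filesystem really uses.
 */
#if 0
static NTSTATUS
ExampleReadDispatch(PVOID Fcb, ULONG FileOffset, ULONG Length, PUCHAR Buffer)
{
    ULONG Region;

    /* (1) Initiate caching for the FCB on first use (hypothetical check) */
    if (!ExampleCacheInitialized(Fcb))
        CcInitializeFileCache(Fcb);

    /* (2) Walk every 4k region touched by the request */
    for (Region = FileOffset - (FileOffset % 4096);
         Region < FileOffset + Length;
         Region += 4096)
    {
        PVOID CachePage;
        BOOLEAN UpToDate;

        CcRequestCachePage(Fcb, Region, &CachePage, &UpToDate);

        /* (3) Page not up to date: read it from the underlying medium */
        if (!UpToDate && !ExampleReadPageFromDisk(Fcb, Region, CachePage))
        {
            CcReleaseCachePage(Fcb, CachePage, FALSE); /* VALID == FALSE */
            return STATUS_UNSUCCESSFUL;
        }

        /* (4) Copy the cached data out to the caller's buffer */
        ExampleCopyFromPage(Buffer, CachePage, Region, FileOffset, Length);

        /* (5) Release the cache page */
        CcReleaseCachePage(Fcb, CachePage, TRUE);
    }

    return STATUS_SUCCESS;
}
#endif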
/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#if defined (ALLOC_PRAGMA)
#pragma alloc_text(INIT, CcInitView)
#endif

/* GLOBALS *******************************************************************/
LIST_ENTRY DirtyVacbListHead;
static LIST_ENTRY VacbLruListHead;

KGUARDED_MUTEX ViewLock;

NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;
/* Counters:
 * - Amount of pages flushed by lazy writer
 * - Number of times lazy writer ran
 */
ULONG CcLazyWritePages = 0;
ULONG CcLazyWriteIos = 0;
/* Internal vars (MS):
 * - Threshold above which lazy writer will start action
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock when dealing with the deferred list
 * - List for "clean" shared cache maps
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;
/* Internal vars (ROS):
 * - Event to notify lazy writer to shutdown
 * - Event to inform watchers lazy writer is done for this loop
 * - Lock for the CcCleanSharedCacheMapList list
 */
KEVENT iLazyWriterShutdown;
KEVENT iLazyWriterNotify;
KSPIN_LOCK iSharedCacheMapLock;
#if DBG
static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
{
    ++vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}

static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
    --vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}

#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
NTSTATUS
NTAPI
CcRosInternalFreeVacb(PROS_VACB Vacb);


/* FUNCTIONS *****************************************************************/
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace)
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut);
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }
#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
NTSTATUS
NTAPI
CcRosFlushVacb (
    PROS_VACB Vacb)
{
    NTSTATUS Status;
    KIRQL oldIrql;

    Status = CcWriteVirtualAddress(Vacb);
    if (NT_SUCCESS(Status))
    {
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);

        Vacb->Dirty = FALSE;
        RemoveEntryList(&Vacb->DirtyVacbListEntry);
        CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        CcRosVacbDecRefCount(Vacb);

        KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }

    return Status;
}
NTSTATUS
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;
    ULONG PagesFreed;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    ZeroTimeout.QuadPart = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        CcRosVacbIncRefCount(current);

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        Status = CcRosAcquireVacbLock(current,
                                      Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above */
        if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
            (current->ReferenceCount > 3 && current->PinCount > 1))
        {
            CcRosReleaseVacbLock(current);
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        CcRosReleaseVacbLock(current);
        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }

        /* How many pages did we free? */
        PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        (*Count) += PagesFreed;

        /* Make sure we don't overflow target! */
        if (Target < PagesFreed)
        {
            /* If we would have, jump to zero directly */
            Target = 0;
        }
        else
        {
            Target -= PagesFreed;
        }

        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
/* FIXME: Someday this could somewhat implement write-behind/read-ahead */
VOID
NTAPI
CciLazyWriter(PVOID Unused)
{
    LARGE_INTEGER OneSecond;

    OneSecond.QuadPart = (LONGLONG)-1*1000*1000*10;

    while (TRUE)
    {
        NTSTATUS Status;
        PLIST_ENTRY ListEntry;
        ULONG Target, Count = 0;

        /* Once per second, or until we have to stop */
        Status = KeWaitForSingleObject(&iLazyWriterShutdown,
                                       Executive,
                                       KernelMode,
                                       FALSE,
                                       &OneSecond);

        /* If the wait succeeded, we have to stop running! */
        if (Status == STATUS_SUCCESS)
        {
            break;
        }

        /* We're not sleeping anymore */
        KeClearEvent(&iLazyWriterNotify);

        /* Our target is one-eighth of the dirty pages */
        Target = CcTotalDirtyPages / 8;

        DPRINT("Lazy writer starting (%d)\n", Target);
        CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);

        /* And update stats */
        CcLazyWritePages += Count;
        ++CcLazyWriteIos;
        DPRINT("Lazy writer done (%d)\n", Count);

        /* Inform people waiting on us that we're done */
        KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);

        /* Likely not optimal, but let's handle one deferred write now! */
        ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
        if (ListEntry != NULL)
        {
            PROS_DEFERRED_WRITE_CONTEXT Context;

            /* Extract the context */
            Context = CONTAINING_RECORD(ListEntry, ROS_DEFERRED_WRITE_CONTEXT, CcDeferredWritesEntry);

            /* Can we write now? */
            if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, Context->Retrying))
            {
                /* Yes! Do it, and destroy the associated context */
                Context->PostRoutine(Context->Context1, Context->Context2);
                ExFreePoolWithTag(Context, 'CcDw');
            }
            else
            {
                /* Otherwise, requeue it at the tail so that it doesn't block others.
                 * This clearly needs improving, but given the poor algorithm used now,
                 * it's better than nothing!
                 */
                ExInterlockedInsertTailList(&CcDeferredWrites,
                                            &Context->CcDeferredWritesEntry,
                                            &CcDeferredWriteSpinLock);
            }
        }
    }
}
NTSTATUS
NTAPI
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

retry:
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        CcRosInternalFreeVacb(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    BOOLEAN WasDirty;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    Vacb->Valid = Valid;

    WasDirty = Vacb->Dirty;
    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    CcRosVacbDecRefCount(Vacb);
    if (Mapped && (Vacb->MappedCount == 1))
    {
        CcRosVacbIncRefCount(Vacb);
    }
    if (!WasDirty && Vacb->Dirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }

    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}
/* Returns with VACB Lock Held! */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            CcRosAcquireVacbLock(current, NULL);
            return current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}
VOID
NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    if (!Vacb->Dirty)
    {
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }
    else
    {
        /* Already dirty, so drop the extra reference */
        CcRosVacbDecRefCount(Vacb);
    }

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
}
NTSTATUS
NTAPI
CcRosMarkDirtyFile (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PROS_VACB Vacb;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }

    CcRosMarkDirtyVacb(Vacb);

    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}
NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    BOOLEAN NowDirty)
{
    PROS_VACB Vacb;
    BOOLEAN WasDirty;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    WasDirty = Vacb->Dirty;
    if (NowDirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    Vacb->MappedCount--;

    CcRosVacbDecRefCount(Vacb);
    if (!WasDirty && NowDirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }
    if (Vacb->MappedCount == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}
static
NTSTATUS
CcRosMapVacb(
    PROS_VACB Vacb)
{
    ULONG i;
    NTSTATUS Status;
    ULONG_PTR NumberOfPages;

    /* Create a memory area. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
                                0, // nothing checks for VACB mareas, so set to 0
                                &Vacb->BaseAddress,
                                VACB_MAPPING_GRANULARITY,
                                PAGE_READWRITE,
                                (PMEMORY_AREA*)&Vacb->MemoryArea,
                                0,
                                PAGE_SIZE);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());
    if (!NT_SUCCESS(Status))
    {
        DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
        return Status;
    }

    ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
    ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);

    /* Create a virtual mapping for this memory area */
    NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
    for (i = 0; i < NumberOfPages; i++)
    {
        PFN_NUMBER PageFrameNumber;

        MI_SET_USAGE(MI_USAGE_CACHE);
        Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
        if (PageFrameNumber == 0)
        {
            DPRINT1("Unable to allocate page\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        Status = MmCreateVirtualMapping(NULL,
                                        (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
                                        PAGE_READWRITE,
                                        &PageFrameNumber,
                                        1);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Unable to create virtual mapping\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }

    return STATUS_SUCCESS;
}
static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
    {
        *Vacb = NULL;
        return STATUS_INVALID_PARAMETER;
    }

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    current->BaseAddress = NULL;
    current->Valid = FALSE;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
    }
#endif
    current->MappedCount = 0;
    current->DirtyVacbListEntry.Flink = NULL;
    current->DirtyVacbListEntry.Blink = NULL;
    current->ReferenceCount = 1;
    current->PinCount = 0;
    KeInitializeMutex(&current->Mutex, 0);
    CcRosAcquireVacbLock(current, NULL);
    KeAcquireGuardedMutex(&ViewLock);

    *Vacb = current;
    /*
     * There is a window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check if a VACB for the
     * file offset exists. If there is a VACB, we release
     * our newly created VACB and return the existing one.
     */
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            CcRosReleaseVacbLock(*Vacb);
            KeReleaseGuardedMutex(&ViewLock);
            ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
            *Vacb = current;
            CcRosAcquireVacbLock(current, NULL);
            return STATUS_SUCCESS;
        }
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
    KeReleaseGuardedMutex(&ViewLock);

    MI_SET_USAGE(MI_USAGE_CACHE);
#if MI_TRACE_PFNS
    if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
    {
        PWCHAR pos;
        size_t len;

        pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
        if (pos)
        {
            len = wcslen(pos) * sizeof(WCHAR);
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
        }
        else
        {
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
        }
    }
#endif

    Status = CcRosMapVacb(current);
    if (!NT_SUCCESS(Status))
    {
        RemoveEntryList(&current->CacheMapVacbListEntry);
        RemoveEntryList(&current->VacbLruListEntry);
        CcRosReleaseVacbLock(current);
        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
    }

    return Status;
}
NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PLONGLONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the VACB to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *Vacb = current;
    *BaseOffset = current->FileOffset.QuadPart;
    return STATUS_SUCCESS;
}
NTSTATUS
NTAPI
CcRosRequestVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
/*
 * FUNCTION: Request a page mapping for a shared cache map
 */
{
    LONGLONG BaseOffset;

    ASSERT(SharedCacheMap);

    if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
    {
        DPRINT1("Bad fileoffset %I64x should be multiple of %x",
                FileOffset, VACB_MAPPING_GRANULARITY);
        KeBugCheck(CACHE_MANAGER);
    }

    return CcRosGetVacb(SharedCacheMap,
                        FileOffset,
                        &BaseOffset,
                        BaseAddress,
                        UptoDate,
                        Vacb);
}
static
VOID
CcFreeCachePage (
    PVOID Context,
    MEMORY_AREA* MemoryArea,
    PVOID Address,
    PFN_NUMBER Page,
    SWAPENTRY SwapEntry,
    BOOLEAN Dirty)
{
    ASSERT(SwapEntry == 0);
    if (Page != 0)
    {
        ASSERT(MmGetReferenceCountPage(Page) == 1);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }
}
NTSTATUS
NTAPI
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;
    KIRQL oldIrql;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
            SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                CcRosReleaseVacbLock(current);

                KeAcquireGuardedMutex(&ViewLock);
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosVacbDecRefCount(current);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                KeReleaseGuardedMutex(&ViewLock);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;
    KIRQL OldIrql;

    ASSERT(SharedCacheMap);

    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosAcquireVacbLock(current, NULL);
            RemoveEntryList(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                RemoveEntryList(&current->DirtyVacbListEntry);
                CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
                current->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
                DPRINT1("Freeing dirty VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
            CcRosReleaseVacbLock(current);

            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosInternalFreeVacb(current);
        }

        KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
VOID
NTAPI
CcRosReferenceCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    ASSERT(SharedCacheMap->OpenCount != 0);
    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);
}
VOID
NTAPI
CcRosRemoveIfClosed (
    PSECTION_OBJECT_POINTERS SectionObjectPointer)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    DPRINT("CcRosRemoveIfClosed()\n");
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
    {
        CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
    }
    KeReleaseGuardedMutex(&ViewLock);
}
VOID
NTAPI
CcRosDereferenceCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    if (SharedCacheMap->OpenCount > 0)
    {
        SharedCacheMap->OpenCount--;
        if (SharedCacheMap->OpenCount == 0)
        {
            MmFreeSectionSegments(SharedCacheMap->FileObject);
            CcRosDeleteFileCache(FileObject, SharedCacheMap);
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
}
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
        if (FileObject->PrivateCacheMap != NULL)
        {
            FileObject->PrivateCacheMap = NULL;
            if (SharedCacheMap->OpenCount > 0)
            {
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}
NTSTATUS
NTAPI
CcTryToInitializeFileCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    NTSTATUS Status;

    KeAcquireGuardedMutex(&ViewLock);

    ASSERT(FileObject->SectionObjectPointer);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap == NULL)
    {
        Status = STATUS_UNSUCCESSFUL;
    }
    else
    {
        if (FileObject->PrivateCacheMap == NULL)
        {
            FileObject->PrivateCacheMap = SharedCacheMap;
            SharedCacheMap->OpenCount++;
        }
        Status = STATUS_SUCCESS;
    }

    KeReleaseGuardedMutex(&ViewLock);

    return Status;
}
NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KIRQL OldIrql;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
    }
    if (FileObject->PrivateCacheMap == NULL)
    {
        FileObject->PrivateCacheMap = SharedCacheMap;
        SharedCacheMap->OpenCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}
PFILE_OBJECT
NTAPI
CcGetFileObjectFromSectionPtrs (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        return SharedCacheMap->FileObject;
    }
    return NULL;
}
VOID
NTAPI
CcShutdownLazyWriter (
    VOID)
{
    /* Simply set the event, lazy writer will stop when it's done */
    KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
}
VOID
NTAPI
CcInitView (
    VOID)
{
    HANDLE LazyWriter;
    NTSTATUS Status;
    KPRIORITY Priority;
    OBJECT_ATTRIBUTES ObjectAttributes;

    DPRINT("CcInitView()\n");

    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    InitializeListHead(&CcDeferredWrites);
    InitializeListHead(&CcCleanSharedCacheMapList);
    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
    KeInitializeSpinLock(&iSharedCacheMapLock);
    KeInitializeGuardedMutex(&ViewLock);
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);

    /* Initialize lazy writer events */
    KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
    KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);

    /* Define lazy writer threshold, depending on system type */
    switch (MmQuerySystemSize())
    {
        case MmSmallSystem:
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
            break;

        case MmMediumSystem:
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
            break;

        case MmLargeSystem:
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
            break;
    }

    /* Start the lazy writer thread */
    InitializeObjectAttributes(&ObjectAttributes,
                               NULL,
                               OBJ_KERNEL_HANDLE,
                               NULL,
                               NULL);
    Status = PsCreateSystemThread(&LazyWriter,
                                  THREAD_ALL_ACCESS,
                                  &ObjectAttributes,
                                  NULL,
                                  NULL,
                                  CciLazyWriter,
                                  NULL);
    if (!NT_SUCCESS(Status))
    {
        KeBugCheck(CACHE_MANAGER);
    }

    Priority = 27;
    Status = NtSetInformationThread(LazyWriter,
                                    ThreadPriority,
                                    &Priority,
                                    sizeof(Priority));
    ASSERT(NT_SUCCESS(Status));

    /* Handle is not needed */
    ObCloseHandle(LazyWriter, KernelMode);

    CcInitCacheZeroPage();
}
#if DBG && defined(KDBG)
BOOLEAN
ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
{
    PLIST_ENTRY ListEntry;
    UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");

    KdbpPrint(" Usage Summary (in kb)\n");
    KdbpPrint("Shared\t\tValid\tDirty\tName\n");
    /* No need to lock the spin lock here, we're in DBG */
    for (ListEntry = CcCleanSharedCacheMapList.Flink;
         ListEntry != &CcCleanSharedCacheMapList;
         ListEntry = ListEntry->Flink)
    {
        PLIST_ENTRY Vacbs;
        ULONG Valid = 0, Dirty = 0;
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PUNICODE_STRING FileName;

        SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);

        /* Dirty size */
        Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;

        /* First, count for all the associated VACB */
        for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
             Vacbs != &SharedCacheMap->CacheMapVacbListHead;
             Vacbs = Vacbs->Flink)
        {
            PROS_VACB Vacb;

            Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
            if (Vacb->Valid)
            {
                Valid += VACB_MAPPING_GRANULARITY / 1024;
            }
        }

        /* Setup name */
        if (SharedCacheMap->FileObject != NULL &&
            SharedCacheMap->FileObject->FileName.Length != 0)
        {
            FileName = &SharedCacheMap->FileObject->FileName;
        }
        else
        {
            FileName = &NoName;
        }

        /* And print */
        KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
    }

    return TRUE;
}
#endif