2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
10 /* NOTES **********************************************************************
12 * This is not the NT implementation of a file cache nor anything much like
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
18 * (1) If caching for the FCB hasn't been initiated then do so by calling
19 * CcInitializeFileCache.
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
26 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 * (4) Copy the data into or out of the page as necessary.
30 * (5) Release the cache page
32 /* INCLUDES ******************************************************************/
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
42 /* GLOBALS *******************************************************************/
44 static LIST_ENTRY DirtyVacbListHead
;
45 static LIST_ENTRY VacbListHead
;
46 static LIST_ENTRY VacbLruListHead
;
47 ULONG DirtyPageCount
= 0;
49 KGUARDED_MUTEX ViewLock
;
51 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
52 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList
;
53 static NPAGED_LOOKASIDE_LIST VacbLookasideList
;
/*
 * Debug-build helper: increments the reference count of a VACB and, when
 * per-cache-map tracing is enabled (SharedCacheMap->Trace), logs the call
 * site (file:line) together with the VACB's new RefCount/Dirty/PageOut state.
 * Callers use it through the CcRosVacbIncRefCount() macro, which supplies
 * __FILE__/__LINE__.
 * NOTE(review): the bare ++ on ReferenceCount is not atomic — presumably
 * every caller holds ViewLock and/or the cache-map spin lock; confirm.
 */
56 static void CcRosVacbIncRefCount_(PROS_VACB vacb
, const char* file
, int line
)
58 ++vacb
->ReferenceCount
;
/* Trace output only when this cache map was enabled via CcRosTraceCacheMap. */
59 if (vacb
->SharedCacheMap
->Trace
)
61 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
62 file
, line
, vacb
, vacb
->ReferenceCount
, vacb
->Dirty
, vacb
->PageOut
);
/*
 * Debug-build counterpart of CcRosVacbIncRefCount_: decrements the VACB
 * reference count and, when SharedCacheMap->Trace is set, logs the call
 * site and resulting state. Used through the CcRosVacbDecRefCount() macro.
 * NOTE(review): no underflow assertion here — a double-release would drive
 * ReferenceCount negative silently; same locking assumption as the
 * increment helper (confirm callers hold ViewLock).
 */
65 static void CcRosVacbDecRefCount_(PROS_VACB vacb
, const char* file
, int line
)
67 --vacb
->ReferenceCount
;
68 if (vacb
->SharedCacheMap
->Trace
)
70 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
71 file
, line
, vacb
, vacb
->ReferenceCount
, vacb
->Dirty
, vacb
->PageOut
);
74 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
75 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
77 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
78 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
82 CcRosInternalFreeVacb(PROS_VACB Vacb
);
85 /* FUNCTIONS *****************************************************************/
90 PROS_SHARED_CACHE_MAP SharedCacheMap
,
95 PLIST_ENTRY current_entry
;
101 SharedCacheMap
->Trace
= Trace
;
105 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
107 KeAcquireGuardedMutex(&ViewLock
);
108 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldirql
);
110 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
111 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
113 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
114 current_entry
= current_entry
->Flink
;
116 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
117 current
, current
->ReferenceCount
, current
->Dirty
, current
->PageOut
);
119 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
120 KeReleaseGuardedMutex(&ViewLock
);
124 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
128 UNREFERENCED_PARAMETER(SharedCacheMap
);
129 UNREFERENCED_PARAMETER(Trace
);
141 Status
= CcWriteVirtualAddress(Vacb
);
142 if (NT_SUCCESS(Status
))
144 KeAcquireGuardedMutex(&ViewLock
);
145 KeAcquireSpinLock(&Vacb
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
148 RemoveEntryList(&Vacb
->DirtyVacbListEntry
);
149 DirtyPageCount
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
150 CcRosVacbDecRefCount(Vacb
);
152 KeReleaseSpinLock(&Vacb
->SharedCacheMap
->CacheMapLock
, oldIrql
);
153 KeReleaseGuardedMutex(&ViewLock
);
161 CcRosFlushDirtyPages (
166 PLIST_ENTRY current_entry
;
170 LARGE_INTEGER ZeroTimeout
;
172 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
175 ZeroTimeout
.QuadPart
= 0;
177 KeEnterCriticalRegion();
178 KeAcquireGuardedMutex(&ViewLock
);
180 current_entry
= DirtyVacbListHead
.Flink
;
181 if (current_entry
== &DirtyVacbListHead
)
183 DPRINT("No Dirty pages\n");
186 while ((current_entry
!= &DirtyVacbListHead
) && (Target
> 0))
188 current
= CONTAINING_RECORD(current_entry
,
191 current_entry
= current_entry
->Flink
;
193 CcRosVacbIncRefCount(current
);
195 Locked
= current
->SharedCacheMap
->Callbacks
->AcquireForLazyWrite(
196 current
->SharedCacheMap
->LazyWriteContext
, Wait
);
199 CcRosVacbDecRefCount(current
);
203 Status
= KeWaitForSingleObject(¤t
->Mutex
,
207 Wait
? NULL
: &ZeroTimeout
);
208 if (Status
!= STATUS_SUCCESS
)
210 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
211 current
->SharedCacheMap
->LazyWriteContext
);
212 CcRosVacbDecRefCount(current
);
216 ASSERT(current
->Dirty
);
218 /* One reference is added above */
219 if (current
->ReferenceCount
> 2)
221 KeReleaseMutex(¤t
->Mutex
, FALSE
);
222 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
223 current
->SharedCacheMap
->LazyWriteContext
);
224 CcRosVacbDecRefCount(current
);
228 KeReleaseGuardedMutex(&ViewLock
);
230 Status
= CcRosFlushVacb(current
);
232 KeReleaseMutex(¤t
->Mutex
, FALSE
);
233 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
234 current
->SharedCacheMap
->LazyWriteContext
);
236 KeAcquireGuardedMutex(&ViewLock
);
237 CcRosVacbDecRefCount(current
);
239 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
241 DPRINT1("CC: Failed to flush VACB.\n");
245 (*Count
) += VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
246 Target
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
249 current_entry
= DirtyVacbListHead
.Flink
;
252 KeReleaseGuardedMutex(&ViewLock
);
253 KeLeaveCriticalRegion();
255 DPRINT("CcRosFlushDirtyPages() finished\n");
256 return STATUS_SUCCESS
;
265 * FUNCTION: Try to free some memory from the file cache.
267 * Target - The number of pages to be freed.
268 * Priority - The priority of free (currently unused).
269 * NrFreed - Points to a variable where the number of pages
270 * actually freed is returned.
273 PLIST_ENTRY current_entry
;
280 BOOLEAN FlushedPages
= FALSE
;
282 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
284 InitializeListHead(&FreeList
);
289 KeAcquireGuardedMutex(&ViewLock
);
291 current_entry
= VacbLruListHead
.Flink
;
292 while (current_entry
!= &VacbLruListHead
)
294 current
= CONTAINING_RECORD(current_entry
,
297 current_entry
= current_entry
->Flink
;
299 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
301 /* Reference the VACB */
302 CcRosVacbIncRefCount(current
);
304 /* Check if it's mapped and not dirty */
305 if (current
->MappedCount
> 0 && !current
->Dirty
)
307 /* We have to break these locks because Cc sucks */
308 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
309 KeReleaseGuardedMutex(&ViewLock
);
311 /* Page out the VACB */
312 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++)
314 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
316 MmPageOutPhysicalAddress(Page
);
319 /* Reacquire the locks */
320 KeAcquireGuardedMutex(&ViewLock
);
321 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
324 /* Dereference the VACB */
325 CcRosVacbDecRefCount(current
);
327 /* Check if we can free this entry now */
328 if (current
->ReferenceCount
== 0)
330 ASSERT(!current
->Dirty
);
331 ASSERT(!current
->MappedCount
);
333 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
334 RemoveEntryList(¤t
->VacbListEntry
);
335 RemoveEntryList(¤t
->VacbLruListEntry
);
336 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
338 /* Calculate how many pages we freed for Mm */
339 PagesFreed
= min(VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
, Target
);
340 Target
-= PagesFreed
;
341 (*NrFreed
) += PagesFreed
;
344 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
347 KeReleaseGuardedMutex(&ViewLock
);
349 /* Try flushing pages if we haven't met our target */
350 if ((Target
> 0) && !FlushedPages
)
352 /* Flush dirty pages to disk */
353 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
);
356 /* We can only swap as many pages as we flushed */
357 if (PagesFreed
< Target
) Target
= PagesFreed
;
359 /* Check if we flushed anything */
362 /* Try again after flushing dirty pages */
363 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
368 while (!IsListEmpty(&FreeList
))
370 current_entry
= RemoveHeadList(&FreeList
);
371 current
= CONTAINING_RECORD(current_entry
,
373 CacheMapVacbListEntry
);
374 CcRosInternalFreeVacb(current
);
377 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
379 return STATUS_SUCCESS
;
385 PROS_SHARED_CACHE_MAP SharedCacheMap
,
394 ASSERT(SharedCacheMap
);
396 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
397 SharedCacheMap
, Vacb
, Valid
);
399 KeAcquireGuardedMutex(&ViewLock
);
400 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
404 WasDirty
= Vacb
->Dirty
;
405 Vacb
->Dirty
= Vacb
->Dirty
|| Dirty
;
407 if (!WasDirty
&& Vacb
->Dirty
)
409 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
410 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
417 CcRosVacbDecRefCount(Vacb
);
418 if (Mapped
&& (Vacb
->MappedCount
== 1))
420 CcRosVacbIncRefCount(Vacb
);
422 if (!WasDirty
&& Vacb
->Dirty
)
424 CcRosVacbIncRefCount(Vacb
);
427 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
428 KeReleaseGuardedMutex(&ViewLock
);
429 KeReleaseMutex(&Vacb
->Mutex
, FALSE
);
431 return STATUS_SUCCESS
;
434 /* Returns with VACB Lock Held! */
438 PROS_SHARED_CACHE_MAP SharedCacheMap
,
441 PLIST_ENTRY current_entry
;
445 ASSERT(SharedCacheMap
);
447 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %lu)\n",
448 SharedCacheMap
, FileOffset
);
450 KeAcquireGuardedMutex(&ViewLock
);
451 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
453 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
454 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
456 current
= CONTAINING_RECORD(current_entry
,
458 CacheMapVacbListEntry
);
459 if (IsPointInRange(current
->FileOffset
, VACB_MAPPING_GRANULARITY
,
462 CcRosVacbIncRefCount(current
);
463 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
464 KeReleaseGuardedMutex(&ViewLock
);
465 KeWaitForSingleObject(¤t
->Mutex
,
472 if (current
->FileOffset
> FileOffset
)
474 current_entry
= current_entry
->Flink
;
477 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
478 KeReleaseGuardedMutex(&ViewLock
);
486 PROS_SHARED_CACHE_MAP SharedCacheMap
,
492 ASSERT(SharedCacheMap
);
494 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %lu)\n",
495 SharedCacheMap
, FileOffset
);
497 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
500 KeBugCheck(CACHE_MANAGER
);
503 KeAcquireGuardedMutex(&ViewLock
);
504 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
508 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
509 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
513 CcRosVacbDecRefCount(Vacb
);
516 /* Move to the tail of the LRU list */
517 RemoveEntryList(&Vacb
->VacbLruListEntry
);
518 InsertTailList(&VacbLruListHead
, &Vacb
->VacbLruListEntry
);
522 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
523 KeReleaseGuardedMutex(&ViewLock
);
524 KeReleaseMutex(&Vacb
->Mutex
, FALSE
);
526 return STATUS_SUCCESS
;
532 PROS_SHARED_CACHE_MAP SharedCacheMap
,
540 ASSERT(SharedCacheMap
);
542 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %lu, NowDirty %u)\n",
543 SharedCacheMap
, FileOffset
, NowDirty
);
545 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
548 return STATUS_UNSUCCESSFUL
;
551 KeAcquireGuardedMutex(&ViewLock
);
552 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
554 WasDirty
= Vacb
->Dirty
;
555 Vacb
->Dirty
= Vacb
->Dirty
|| NowDirty
;
559 if (!WasDirty
&& NowDirty
)
561 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
562 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
565 CcRosVacbDecRefCount(Vacb
);
566 if (!WasDirty
&& NowDirty
)
568 CcRosVacbIncRefCount(Vacb
);
570 if (Vacb
->MappedCount
== 0)
572 CcRosVacbDecRefCount(Vacb
);
575 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
576 KeReleaseGuardedMutex(&ViewLock
);
577 KeReleaseMutex(&Vacb
->Mutex
, FALSE
);
579 return STATUS_SUCCESS
;
585 PROS_SHARED_CACHE_MAP SharedCacheMap
,
591 PLIST_ENTRY current_entry
;
595 ASSERT(SharedCacheMap
);
597 DPRINT("CcRosCreateVacb()\n");
599 if (FileOffset
>= SharedCacheMap
->FileSize
.u
.LowPart
)
602 return STATUS_INVALID_PARAMETER
;
605 current
= ExAllocateFromNPagedLookasideList(&VacbLookasideList
);
606 current
->Valid
= FALSE
;
607 current
->Dirty
= FALSE
;
608 current
->PageOut
= FALSE
;
609 current
->FileOffset
= ROUND_DOWN(FileOffset
, VACB_MAPPING_GRANULARITY
);
610 current
->SharedCacheMap
= SharedCacheMap
;
612 if (SharedCacheMap
->Trace
)
614 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap
, current
);
617 current
->MappedCount
= 0;
618 current
->DirtyVacbListEntry
.Flink
= NULL
;
619 current
->DirtyVacbListEntry
.Blink
= NULL
;
620 current
->ReferenceCount
= 1;
621 KeInitializeMutex(¤t
->Mutex
, 0);
622 KeWaitForSingleObject(¤t
->Mutex
,
627 KeAcquireGuardedMutex(&ViewLock
);
630 /* There is window between the call to CcRosLookupVacb
631 * and CcRosCreateVacb. We must check if a VACB for the
632 * file offset exist. If there is a VACB, we release
633 * our newly created VACB and return the existing one.
635 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
636 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
638 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
640 current
= CONTAINING_RECORD(current_entry
,
642 CacheMapVacbListEntry
);
643 if (IsPointInRange(current
->FileOffset
, VACB_MAPPING_GRANULARITY
,
646 CcRosVacbIncRefCount(current
);
647 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
649 if (SharedCacheMap
->Trace
)
651 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
657 KeReleaseMutex(&(*Vacb
)->Mutex
, FALSE
);
658 KeReleaseGuardedMutex(&ViewLock
);
659 ExFreeToNPagedLookasideList(&VacbLookasideList
, *Vacb
);
661 KeWaitForSingleObject(¤t
->Mutex
,
666 return STATUS_SUCCESS
;
668 if (current
->FileOffset
< FileOffset
)
670 ASSERT(previous
== NULL
||
671 previous
->FileOffset
< current
->FileOffset
);
674 if (current
->FileOffset
> FileOffset
)
676 current_entry
= current_entry
->Flink
;
678 /* There was no existing VACB. */
682 InsertHeadList(&previous
->CacheMapVacbListEntry
, ¤t
->CacheMapVacbListEntry
);
686 InsertHeadList(&SharedCacheMap
->CacheMapVacbListHead
, ¤t
->CacheMapVacbListEntry
);
688 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
689 InsertTailList(&VacbListHead
, ¤t
->VacbListEntry
);
690 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
691 KeReleaseGuardedMutex(&ViewLock
);
693 MmLockAddressSpace(MmGetKernelAddressSpace());
694 current
->BaseAddress
= NULL
;
695 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
696 0, // nothing checks for VACB mareas, so set to 0
697 ¤t
->BaseAddress
,
698 VACB_MAPPING_GRANULARITY
,
700 (PMEMORY_AREA
*)¤t
->MemoryArea
,
704 MmUnlockAddressSpace(MmGetKernelAddressSpace());
705 if (!NT_SUCCESS(Status
))
707 KeBugCheck(CACHE_MANAGER
);
710 /* Create a virtual mapping for this memory area */
/* Tag the pages about to be mapped as cache-manager usage (MI accounting). */
711 MI_SET_USAGE(MI_USAGE_CACHE
);
/* Record the file's base name as the "process name" for PFN debugging. */
715 if ((SharedCacheMap
->FileObject
) && (SharedCacheMap
->FileObject
->FileName
.Buffer
))
717 pos
= wcsrchr(SharedCacheMap
->FileObject
->FileName
.Buffer
, '\\');
/*
 * BUG(review): wcslen(pos) is evaluated BEFORE the NULL check below.
 * wcsrchr returns NULL when FileName contains no '\\', so this line
 * dereferences NULL in that case. The wcslen call should be moved
 * inside the if (pos) guard on the next line.
 * NOTE(review): len is a byte count (wcslen * sizeof(WCHAR)) but is
 * passed to snprintf as a char-count bound via min(16, len) — likely
 * harmless given the 16 cap, but worth confirming the intent.
 */
718 len
= wcslen(pos
) * sizeof(WCHAR
);
719 if (pos
) snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
/* Back the freshly created memory area with pages from the cache consumer. */
723 MmMapMemoryArea(current
->BaseAddress
, VACB_MAPPING_GRANULARITY
,
724 MC_CACHE
, PAGE_READWRITE
);
726 return STATUS_SUCCESS
;
732 PROS_SHARED_CACHE_MAP SharedCacheMap
,
740 PROS_VACB Previous
= NULL
;
742 ASSERT(SharedCacheMap
);
744 DPRINT("CcRosGetVacbChain()\n");
746 Length
= ROUND_UP(Length
, VACB_MAPPING_GRANULARITY
);
748 VacbList
= _alloca(sizeof(PROS_VACB
) *
749 (Length
/ VACB_MAPPING_GRANULARITY
));
752 * Look for a VACB already mapping the same data.
754 for (i
= 0; i
< (Length
/ VACB_MAPPING_GRANULARITY
); i
++)
756 ULONG CurrentOffset
= FileOffset
+ (i
* VACB_MAPPING_GRANULARITY
);
757 current
= CcRosLookupVacb(SharedCacheMap
, CurrentOffset
);
760 KeAcquireGuardedMutex(&ViewLock
);
762 /* Move to tail of LRU list */
763 RemoveEntryList(¤t
->VacbLruListEntry
);
764 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
766 KeReleaseGuardedMutex(&ViewLock
);
768 VacbList
[i
] = current
;
772 CcRosCreateVacb(SharedCacheMap
, CurrentOffset
, ¤t
);
773 VacbList
[i
] = current
;
777 for (i
= 0; i
< Length
/ VACB_MAPPING_GRANULARITY
; i
++)
782 Previous
= VacbList
[i
];
786 Previous
->NextInChain
= VacbList
[i
];
787 Previous
= VacbList
[i
];
791 Previous
->NextInChain
= NULL
;
793 return STATUS_SUCCESS
;
799 PROS_SHARED_CACHE_MAP SharedCacheMap
,
809 ASSERT(SharedCacheMap
);
811 DPRINT("CcRosGetVacb()\n");
814 * Look for a VACB already mapping the same data.
816 current
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
820 * Otherwise create a new VACB.
822 Status
= CcRosCreateVacb(SharedCacheMap
, FileOffset
, ¤t
);
823 if (!NT_SUCCESS(Status
))
829 KeAcquireGuardedMutex(&ViewLock
);
831 /* Move to the tail of the LRU list */
832 RemoveEntryList(¤t
->VacbLruListEntry
);
833 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
835 KeReleaseGuardedMutex(&ViewLock
);
838 * Return information about the VACB to the caller.
840 *UptoDate
= current
->Valid
;
841 *BaseAddress
= current
->BaseAddress
;
842 DPRINT("*BaseAddress %p\n", *BaseAddress
);
844 *BaseOffset
= current
->FileOffset
;
845 return STATUS_SUCCESS
;
851 PROS_SHARED_CACHE_MAP SharedCacheMap
,
857 * FUNCTION: Request a page mapping for a shared cache map
862 ASSERT(SharedCacheMap
);
864 if (FileOffset
% VACB_MAPPING_GRANULARITY
!= 0)
866 DPRINT1("Bad fileoffset %x should be multiple of %x",
867 FileOffset
, VACB_MAPPING_GRANULARITY
);
868 KeBugCheck(CACHE_MANAGER
);
871 return CcRosGetVacb(SharedCacheMap
,
883 MEMORY_AREA
* MemoryArea
,
889 ASSERT(SwapEntry
== 0);
892 ASSERT(MmGetReferenceCountPage(Page
) == 1);
893 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
/*
 * Final teardown of a VACB: frees its kernel-address-space memory area
 * (returning the backing pages via the free-page callback) and returns the
 * VACB structure to its lookaside list. Always returns STATUS_SUCCESS.
 * NOTE(review): callers presumably guarantee the VACB is already unlinked
 * from all lists and has no outstanding references — confirm at call sites.
 */
898 CcRosInternalFreeVacb (
901 * FUNCTION: Releases a VACB associated with a shared cache map
904 DPRINT("Freeing VACB 0x%p\n", Vacb
);
906 if (Vacb
->SharedCacheMap
->Trace
)
908 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb
->SharedCacheMap
, Vacb
);
/* Unmap and release the VACB's view window in kernel address space. */
912 MmLockAddressSpace(MmGetKernelAddressSpace());
913 MmFreeMemoryArea(MmGetKernelAddressSpace(),
917 MmUnlockAddressSpace(MmGetKernelAddressSpace());
919 ExFreeToNPagedLookasideList(&VacbLookasideList
, Vacb
);
920 return STATUS_SUCCESS
;
929 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
930 IN PLARGE_INTEGER FileOffset OPTIONAL
,
932 OUT PIO_STATUS_BLOCK IoStatus
)
934 PROS_SHARED_CACHE_MAP SharedCacheMap
;
935 LARGE_INTEGER Offset
;
940 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
941 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
943 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
945 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
946 ASSERT(SharedCacheMap
);
949 Offset
= *FileOffset
;
953 Offset
.QuadPart
= (LONGLONG
)0;
954 Length
= SharedCacheMap
->FileSize
.u
.LowPart
;
959 IoStatus
->Status
= STATUS_SUCCESS
;
960 IoStatus
->Information
= 0;
965 current
= CcRosLookupVacb(SharedCacheMap
, Offset
.u
.LowPart
);
970 Status
= CcRosFlushVacb(current
);
971 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
973 IoStatus
->Status
= Status
;
976 KeReleaseMutex(¤t
->Mutex
, FALSE
);
978 KeAcquireGuardedMutex(&ViewLock
);
979 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
980 CcRosVacbDecRefCount(current
);
981 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
982 KeReleaseGuardedMutex(&ViewLock
);
985 Offset
.QuadPart
+= VACB_MAPPING_GRANULARITY
;
986 if (Length
> VACB_MAPPING_GRANULARITY
)
988 Length
-= VACB_MAPPING_GRANULARITY
;
1000 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1007 CcRosDeleteFileCache (
1008 PFILE_OBJECT FileObject
,
1009 PROS_SHARED_CACHE_MAP SharedCacheMap
)
1011 * FUNCTION: Releases the shared cache map associated with a file object
1014 PLIST_ENTRY current_entry
;
1016 LIST_ENTRY FreeList
;
1019 ASSERT(SharedCacheMap
);
1021 SharedCacheMap
->RefCount
++;
1022 KeReleaseGuardedMutex(&ViewLock
);
1024 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1026 KeAcquireGuardedMutex(&ViewLock
);
1027 SharedCacheMap
->RefCount
--;
1028 if (SharedCacheMap
->RefCount
== 0)
1030 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1035 InitializeListHead(&FreeList
);
1036 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1037 while (!IsListEmpty(&SharedCacheMap
->CacheMapVacbListHead
))
1039 current_entry
= RemoveTailList(&SharedCacheMap
->CacheMapVacbListHead
);
1040 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1041 RemoveEntryList(¤t
->VacbListEntry
);
1042 RemoveEntryList(¤t
->VacbLruListEntry
);
1045 RemoveEntryList(¤t
->DirtyVacbListEntry
);
1046 DirtyPageCount
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
1047 DPRINT1("Freeing dirty VACB\n");
1049 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
1052 SharedCacheMap
->Trace
= FALSE
;
1054 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1056 KeReleaseGuardedMutex(&ViewLock
);
1057 ObDereferenceObject(SharedCacheMap
->FileObject
);
1059 while (!IsListEmpty(&FreeList
))
1061 current_entry
= RemoveTailList(&FreeList
);
1062 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1063 CcRosInternalFreeVacb(current
);
1065 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1066 KeAcquireGuardedMutex(&ViewLock
);
1068 return STATUS_SUCCESS
;
/*
 * Takes an additional reference on the shared cache map attached to
 * FileObject. Serialized by ViewLock. Asserts that a cache map exists and
 * that it is already referenced (RefCount != 0) — this routine may only be
 * used to add to an existing reference, never to resurrect a dead map.
 */
1073 CcRosReferenceCache (
1074 PFILE_OBJECT FileObject
)
1076 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1077 KeAcquireGuardedMutex(&ViewLock
);
1078 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1079 ASSERT(SharedCacheMap
);
1080 ASSERT(SharedCacheMap
->RefCount
!= 0);
1081 SharedCacheMap
->RefCount
++;
1082 KeReleaseGuardedMutex(&ViewLock
);
/*
 * Tears down the shared cache map behind SectionObjectPointer if (and only
 * if) one exists and its reference count has already dropped to zero.
 * Holds ViewLock across the check and the deletion, so the RefCount == 0
 * test and CcRosDeleteFileCache are performed atomically with respect to
 * other ViewLock holders.
 */
1087 CcRosRemoveIfClosed (
1088 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1090 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1091 DPRINT("CcRosRemoveIfClosed()\n");
1092 KeAcquireGuardedMutex(&ViewLock
);
1093 SharedCacheMap
= SectionObjectPointer
->SharedCacheMap
;
1094 if (SharedCacheMap
&& SharedCacheMap
->RefCount
== 0)
1096 CcRosDeleteFileCache(SharedCacheMap
->FileObject
, SharedCacheMap
);
1098 KeReleaseGuardedMutex(&ViewLock
);
/*
 * Drops one reference on the file object's shared cache map (under
 * ViewLock). When the count reaches zero the map is destroyed: the file's
 * section segments are released first, then CcRosDeleteFileCache frees the
 * map itself. A cache map must exist (asserted).
 * NOTE(review): the "> 0" guard makes an over-dereference a silent no-op
 * rather than an underflow — presumably intentional defensive coding.
 */
1104 CcRosDereferenceCache (
1105 PFILE_OBJECT FileObject
)
1107 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1108 KeAcquireGuardedMutex(&ViewLock
);
1109 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1110 ASSERT(SharedCacheMap
);
1111 if (SharedCacheMap
->RefCount
> 0)
1113 SharedCacheMap
->RefCount
--;
1114 if (SharedCacheMap
->RefCount
== 0)
1116 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1117 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1120 KeReleaseGuardedMutex(&ViewLock
);
/*
 * Called by the file system when a handle to a file object is being closed.
 * Under ViewLock: clears the file object's private cache map (if set),
 * drops one reference on the shared cache map, and destroys the map (after
 * freeing the file's section segments) when the count reaches zero.
 * Always returns STATUS_SUCCESS, including when no shared cache map exists.
 */
1125 CcRosReleaseFileCache (
1126 PFILE_OBJECT FileObject
)
1128 * FUNCTION: Called by the file system when a handle to a file object
1132 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1134 KeAcquireGuardedMutex(&ViewLock
);
1136 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1138 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1139 if (FileObject
->PrivateCacheMap
!= NULL
)
1141 FileObject
->PrivateCacheMap
= NULL
;
1142 if (SharedCacheMap
->RefCount
> 0)
1144 SharedCacheMap
->RefCount
--;
1145 if (SharedCacheMap
->RefCount
== 0)
1147 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1148 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1153 KeReleaseGuardedMutex(&ViewLock
);
1154 return STATUS_SUCCESS
;
/*
 * Attempts to attach an already-existing shared cache map to FileObject.
 * Under ViewLock: fails with STATUS_UNSUCCESSFUL when no shared cache map
 * exists for the file; otherwise sets the file object's PrivateCacheMap to
 * the shared map (taking a reference) if it was not already set, and
 * returns STATUS_SUCCESS. Unlike CcRosInitializeFileCache, this never
 * allocates a new map.
 */
1159 CcTryToInitializeFileCache (
1160 PFILE_OBJECT FileObject
)
1162 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1165 KeAcquireGuardedMutex(&ViewLock
);
1167 ASSERT(FileObject
->SectionObjectPointer
);
1168 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1169 if (SharedCacheMap
== NULL
)
1171 Status
= STATUS_UNSUCCESSFUL
;
/* Only take a reference the first time this file object attaches. */
1175 if (FileObject
->PrivateCacheMap
== NULL
)
1177 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1178 SharedCacheMap
->RefCount
++;
1180 Status
= STATUS_SUCCESS
;
1182 KeReleaseGuardedMutex(&ViewLock
);
1190 CcRosInitializeFileCache (
1191 PFILE_OBJECT FileObject
,
1192 PCACHE_MANAGER_CALLBACKS CallBacks
,
1193 PVOID LazyWriterContext
)
1195 * FUNCTION: Initializes a shared cache map for a file object
1198 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1200 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1201 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1202 FileObject
, SharedCacheMap
);
1204 KeAcquireGuardedMutex(&ViewLock
);
1205 if (SharedCacheMap
== NULL
)
1207 SharedCacheMap
= ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList
);
1208 if (SharedCacheMap
== NULL
)
1210 KeReleaseGuardedMutex(&ViewLock
);
1211 return STATUS_UNSUCCESSFUL
;
1213 RtlZeroMemory(SharedCacheMap
, sizeof(*SharedCacheMap
));
1214 ObReferenceObjectByPointer(FileObject
,
1218 SharedCacheMap
->FileObject
= FileObject
;
1219 SharedCacheMap
->Callbacks
= CallBacks
;
1220 SharedCacheMap
->LazyWriteContext
= LazyWriterContext
;
1221 if (FileObject
->FsContext
)
1223 SharedCacheMap
->AllocationSize
=
1224 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->AllocationSize
;
1225 SharedCacheMap
->FileSize
=
1226 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->FileSize
;
1228 KeInitializeSpinLock(&SharedCacheMap
->CacheMapLock
);
1229 InitializeListHead(&SharedCacheMap
->CacheMapVacbListHead
);
1230 FileObject
->SectionObjectPointer
->SharedCacheMap
= SharedCacheMap
;
1232 if (FileObject
->PrivateCacheMap
== NULL
)
1234 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1235 SharedCacheMap
->RefCount
++;
1237 KeReleaseGuardedMutex(&ViewLock
);
1239 return STATUS_SUCCESS
;
/*
 * Returns the file object backing the given section object pointers, when
 * a shared cache map is attached. No locking is taken here.
 * NOTE(review): only the non-NULL path is visible in this view (orig.
 * lines 1256+ are absent) — presumably the fall-through returns NULL when
 * there is no shared cache map; confirm against the full source.
 */
1247 CcGetFileObjectFromSectionPtrs (
1248 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1250 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1251 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1253 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1254 ASSERT(SharedCacheMap
);
1255 return SharedCacheMap
->FileObject
;
1266 DPRINT("CcInitView()\n");
1268 InitializeListHead(&VacbListHead
);
1269 InitializeListHead(&DirtyVacbListHead
);
1270 InitializeListHead(&VacbLruListHead
);
1271 KeInitializeGuardedMutex(&ViewLock
);
1272 ExInitializeNPagedLookasideList (&iBcbLookasideList
,
1276 sizeof(INTERNAL_BCB
),
1279 ExInitializeNPagedLookasideList (&SharedCacheMapLookasideList
,
1283 sizeof(ROS_SHARED_CACHE_MAP
),
1286 ExInitializeNPagedLookasideList (&VacbLookasideList
,
1294 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1296 CcInitCacheZeroPage();