2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
10 /* NOTES **********************************************************************
12 * This is not the NT implementation of a file cache nor anything much like
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
18 * (1) If caching for the FCB hasn't been initiated then do so by calling
19 * CcInitializeFileCache.
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
26 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 * (4) Copy the data into or out of the page as necessary.
30 * (5) Release the cache page
32 /* INCLUDES ******************************************************************/
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
42 /* GLOBALS *******************************************************************/
44 static LIST_ENTRY DirtyVacbListHead
;
45 static LIST_ENTRY VacbLruListHead
;
46 ULONG DirtyPageCount
= 0;
48 KGUARDED_MUTEX ViewLock
;
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList
;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList
;
55 static void CcRosVacbIncRefCount_(PROS_VACB vacb
, const char* file
, int line
)
57 ++vacb
->ReferenceCount
;
58 if (vacb
->SharedCacheMap
->Trace
)
60 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
61 file
, line
, vacb
, vacb
->ReferenceCount
, vacb
->Dirty
, vacb
->PageOut
);
64 static void CcRosVacbDecRefCount_(PROS_VACB vacb
, const char* file
, int line
)
66 --vacb
->ReferenceCount
;
67 if (vacb
->SharedCacheMap
->Trace
)
69 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
70 file
, line
, vacb
, vacb
->ReferenceCount
, vacb
->Dirty
, vacb
->PageOut
);
73 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
74 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
76 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
77 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
81 CcRosInternalFreeVacb(PROS_VACB Vacb
);
84 /* FUNCTIONS *****************************************************************/
89 PROS_SHARED_CACHE_MAP SharedCacheMap
,
94 PLIST_ENTRY current_entry
;
100 SharedCacheMap
->Trace
= Trace
;
104 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
106 KeAcquireGuardedMutex(&ViewLock
);
107 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldirql
);
109 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
110 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
112 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
113 current_entry
= current_entry
->Flink
;
115 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
116 current
, current
->ReferenceCount
, current
->Dirty
, current
->PageOut
);
118 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
119 KeReleaseGuardedMutex(&ViewLock
);
123 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
127 UNREFERENCED_PARAMETER(SharedCacheMap
);
128 UNREFERENCED_PARAMETER(Trace
);
140 Status
= CcWriteVirtualAddress(Vacb
);
141 if (NT_SUCCESS(Status
))
143 KeAcquireGuardedMutex(&ViewLock
);
144 KeAcquireSpinLock(&Vacb
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
147 RemoveEntryList(&Vacb
->DirtyVacbListEntry
);
148 DirtyPageCount
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
149 CcRosVacbDecRefCount(Vacb
);
151 KeReleaseSpinLock(&Vacb
->SharedCacheMap
->CacheMapLock
, oldIrql
);
152 KeReleaseGuardedMutex(&ViewLock
);
160 CcRosFlushDirtyPages (
165 PLIST_ENTRY current_entry
;
169 LARGE_INTEGER ZeroTimeout
;
171 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
174 ZeroTimeout
.QuadPart
= 0;
176 KeEnterCriticalRegion();
177 KeAcquireGuardedMutex(&ViewLock
);
179 current_entry
= DirtyVacbListHead
.Flink
;
180 if (current_entry
== &DirtyVacbListHead
)
182 DPRINT("No Dirty pages\n");
185 while ((current_entry
!= &DirtyVacbListHead
) && (Target
> 0))
187 current
= CONTAINING_RECORD(current_entry
,
190 current_entry
= current_entry
->Flink
;
192 CcRosVacbIncRefCount(current
);
194 Locked
= current
->SharedCacheMap
->Callbacks
->AcquireForLazyWrite(
195 current
->SharedCacheMap
->LazyWriteContext
, Wait
);
198 CcRosVacbDecRefCount(current
);
202 Status
= KeWaitForSingleObject(¤t
->Mutex
,
206 Wait
? NULL
: &ZeroTimeout
);
207 if (Status
!= STATUS_SUCCESS
)
209 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
210 current
->SharedCacheMap
->LazyWriteContext
);
211 CcRosVacbDecRefCount(current
);
215 ASSERT(current
->Dirty
);
217 /* One reference is added above */
218 if (current
->ReferenceCount
> 2)
220 KeReleaseMutex(¤t
->Mutex
, FALSE
);
221 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
222 current
->SharedCacheMap
->LazyWriteContext
);
223 CcRosVacbDecRefCount(current
);
227 KeReleaseGuardedMutex(&ViewLock
);
229 Status
= CcRosFlushVacb(current
);
231 KeReleaseMutex(¤t
->Mutex
, FALSE
);
232 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
233 current
->SharedCacheMap
->LazyWriteContext
);
235 KeAcquireGuardedMutex(&ViewLock
);
236 CcRosVacbDecRefCount(current
);
238 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
) &&
239 (Status
!= STATUS_MEDIA_WRITE_PROTECTED
))
241 DPRINT1("CC: Failed to flush VACB.\n");
245 (*Count
) += VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
246 Target
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
249 current_entry
= DirtyVacbListHead
.Flink
;
252 KeReleaseGuardedMutex(&ViewLock
);
253 KeLeaveCriticalRegion();
255 DPRINT("CcRosFlushDirtyPages() finished\n");
256 return STATUS_SUCCESS
;
265 * FUNCTION: Try to free some memory from the file cache.
267 * Target - The number of pages to be freed.
268 * Priority - The priority of free (currently unused).
269 * NrFreed - Points to a variable where the number of pages
270 * actually freed is returned.
273 PLIST_ENTRY current_entry
;
280 BOOLEAN FlushedPages
= FALSE
;
282 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
284 InitializeListHead(&FreeList
);
289 KeAcquireGuardedMutex(&ViewLock
);
291 current_entry
= VacbLruListHead
.Flink
;
292 while (current_entry
!= &VacbLruListHead
)
294 current
= CONTAINING_RECORD(current_entry
,
297 current_entry
= current_entry
->Flink
;
299 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
301 /* Reference the VACB */
302 CcRosVacbIncRefCount(current
);
304 /* Check if it's mapped and not dirty */
305 if (current
->MappedCount
> 0 && !current
->Dirty
)
307 /* We have to break these locks because Cc sucks */
308 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
309 KeReleaseGuardedMutex(&ViewLock
);
311 /* Page out the VACB */
312 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++)
314 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
316 MmPageOutPhysicalAddress(Page
);
319 /* Reacquire the locks */
320 KeAcquireGuardedMutex(&ViewLock
);
321 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
324 /* Dereference the VACB */
325 CcRosVacbDecRefCount(current
);
327 /* Check if we can free this entry now */
328 if (current
->ReferenceCount
== 0)
330 ASSERT(!current
->Dirty
);
331 ASSERT(!current
->MappedCount
);
333 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
334 RemoveEntryList(¤t
->VacbLruListEntry
);
335 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
337 /* Calculate how many pages we freed for Mm */
338 PagesFreed
= min(VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
, Target
);
339 Target
-= PagesFreed
;
340 (*NrFreed
) += PagesFreed
;
343 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
346 KeReleaseGuardedMutex(&ViewLock
);
348 /* Try flushing pages if we haven't met our target */
349 if ((Target
> 0) && !FlushedPages
)
351 /* Flush dirty pages to disk */
352 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
);
355 /* We can only swap as many pages as we flushed */
356 if (PagesFreed
< Target
) Target
= PagesFreed
;
358 /* Check if we flushed anything */
361 /* Try again after flushing dirty pages */
362 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
367 while (!IsListEmpty(&FreeList
))
369 current_entry
= RemoveHeadList(&FreeList
);
370 current
= CONTAINING_RECORD(current_entry
,
372 CacheMapVacbListEntry
);
373 CcRosInternalFreeVacb(current
);
376 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
378 return STATUS_SUCCESS
;
384 PROS_SHARED_CACHE_MAP SharedCacheMap
,
393 ASSERT(SharedCacheMap
);
395 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
396 SharedCacheMap
, Vacb
, Valid
);
398 KeAcquireGuardedMutex(&ViewLock
);
399 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
403 WasDirty
= Vacb
->Dirty
;
404 Vacb
->Dirty
= Vacb
->Dirty
|| Dirty
;
406 if (!WasDirty
&& Vacb
->Dirty
)
408 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
409 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
416 CcRosVacbDecRefCount(Vacb
);
417 if (Mapped
&& (Vacb
->MappedCount
== 1))
419 CcRosVacbIncRefCount(Vacb
);
421 if (!WasDirty
&& Vacb
->Dirty
)
423 CcRosVacbIncRefCount(Vacb
);
426 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
427 KeReleaseGuardedMutex(&ViewLock
);
428 if (InterlockedCompareExchange(&Vacb
->PinCount
, 0, 0) == 0)
430 KeReleaseMutex(&Vacb
->Mutex
, FALSE
);
433 return STATUS_SUCCESS
;
436 /* Returns with VACB Lock Held! */
440 PROS_SHARED_CACHE_MAP SharedCacheMap
,
443 PLIST_ENTRY current_entry
;
447 ASSERT(SharedCacheMap
);
449 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
450 SharedCacheMap
, FileOffset
);
452 KeAcquireGuardedMutex(&ViewLock
);
453 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
455 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
456 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
458 current
= CONTAINING_RECORD(current_entry
,
460 CacheMapVacbListEntry
);
461 if (IsPointInRange(current
->FileOffset
.QuadPart
,
462 VACB_MAPPING_GRANULARITY
,
465 CcRosVacbIncRefCount(current
);
466 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
467 KeReleaseGuardedMutex(&ViewLock
);
468 if (InterlockedCompareExchange(¤t
->PinCount
, 0, 0) == 0)
470 KeWaitForSingleObject(¤t
->Mutex
,
478 if (current
->FileOffset
.QuadPart
> FileOffset
)
480 current_entry
= current_entry
->Flink
;
483 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
484 KeReleaseGuardedMutex(&ViewLock
);
492 PROS_SHARED_CACHE_MAP SharedCacheMap
,
498 ASSERT(SharedCacheMap
);
500 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
501 SharedCacheMap
, FileOffset
);
503 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
506 KeBugCheck(CACHE_MANAGER
);
509 KeAcquireGuardedMutex(&ViewLock
);
510 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
514 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
515 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
519 CcRosVacbDecRefCount(Vacb
);
522 /* Move to the tail of the LRU list */
523 RemoveEntryList(&Vacb
->VacbLruListEntry
);
524 InsertTailList(&VacbLruListHead
, &Vacb
->VacbLruListEntry
);
528 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
529 KeReleaseGuardedMutex(&ViewLock
);
530 KeReleaseMutex(&Vacb
->Mutex
, FALSE
);
532 return STATUS_SUCCESS
;
538 PROS_SHARED_CACHE_MAP SharedCacheMap
,
546 ASSERT(SharedCacheMap
);
548 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
549 SharedCacheMap
, FileOffset
, NowDirty
);
551 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
554 return STATUS_UNSUCCESSFUL
;
557 KeAcquireGuardedMutex(&ViewLock
);
558 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
560 WasDirty
= Vacb
->Dirty
;
561 Vacb
->Dirty
= Vacb
->Dirty
|| NowDirty
;
565 if (!WasDirty
&& NowDirty
)
567 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
568 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
571 CcRosVacbDecRefCount(Vacb
);
572 if (!WasDirty
&& NowDirty
)
574 CcRosVacbIncRefCount(Vacb
);
576 if (Vacb
->MappedCount
== 0)
578 CcRosVacbDecRefCount(Vacb
);
581 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
582 KeReleaseGuardedMutex(&ViewLock
);
583 KeReleaseMutex(&Vacb
->Mutex
, FALSE
);
585 return STATUS_SUCCESS
;
595 ULONG_PTR NumberOfPages
;
597 /* Create a memory area. */
598 MmLockAddressSpace(MmGetKernelAddressSpace());
599 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
600 0, // nothing checks for VACB mareas, so set to 0
602 VACB_MAPPING_GRANULARITY
,
604 (PMEMORY_AREA
*)&Vacb
->MemoryArea
,
607 MmUnlockAddressSpace(MmGetKernelAddressSpace());
608 if (!NT_SUCCESS(Status
))
610 KeBugCheck(CACHE_MANAGER
);
613 ASSERT(((ULONG_PTR
)Vacb
->BaseAddress
% PAGE_SIZE
) == 0);
614 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
> (ULONG_PTR
)MmSystemRangeStart
);
616 /* Create a virtual mapping for this memory area */
617 NumberOfPages
= BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY
);
618 for (i
= 0; i
< NumberOfPages
; i
++)
620 PFN_NUMBER PageFrameNumber
;
622 Status
= MmRequestPageMemoryConsumer(MC_CACHE
, TRUE
, &PageFrameNumber
);
623 if (PageFrameNumber
== 0)
625 DPRINT1("Unable to allocate page\n");
626 KeBugCheck(MEMORY_MANAGEMENT
);
629 Status
= MmCreateVirtualMapping(NULL
,
630 (PVOID
)((ULONG_PTR
)Vacb
->BaseAddress
+ (i
* PAGE_SIZE
)),
634 if (!NT_SUCCESS(Status
))
636 DPRINT1("Unable to create virtual mapping\n");
637 KeBugCheck(MEMORY_MANAGEMENT
);
641 return STATUS_SUCCESS
;
647 PROS_SHARED_CACHE_MAP SharedCacheMap
,
653 PLIST_ENTRY current_entry
;
657 ASSERT(SharedCacheMap
);
659 DPRINT("CcRosCreateVacb()\n");
661 if (FileOffset
>= SharedCacheMap
->FileSize
.QuadPart
)
664 return STATUS_INVALID_PARAMETER
;
667 current
= ExAllocateFromNPagedLookasideList(&VacbLookasideList
);
668 current
->BaseAddress
= NULL
;
669 current
->Valid
= FALSE
;
670 current
->Dirty
= FALSE
;
671 current
->PageOut
= FALSE
;
672 current
->FileOffset
.QuadPart
= ROUND_DOWN(FileOffset
, VACB_MAPPING_GRANULARITY
);
673 current
->SharedCacheMap
= SharedCacheMap
;
675 if (SharedCacheMap
->Trace
)
677 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap
, current
);
680 current
->MappedCount
= 0;
681 current
->DirtyVacbListEntry
.Flink
= NULL
;
682 current
->DirtyVacbListEntry
.Blink
= NULL
;
683 current
->ReferenceCount
= 1;
684 current
->PinCount
= 0;
685 KeInitializeMutex(¤t
->Mutex
, 0);
686 KeWaitForSingleObject(¤t
->Mutex
,
691 KeAcquireGuardedMutex(&ViewLock
);
694 /* There is window between the call to CcRosLookupVacb
695 * and CcRosCreateVacb. We must check if a VACB for the
696 * file offset exist. If there is a VACB, we release
697 * our newly created VACB and return the existing one.
699 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
700 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
702 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
704 current
= CONTAINING_RECORD(current_entry
,
706 CacheMapVacbListEntry
);
707 if (IsPointInRange(current
->FileOffset
.QuadPart
,
708 VACB_MAPPING_GRANULARITY
,
711 CcRosVacbIncRefCount(current
);
712 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
714 if (SharedCacheMap
->Trace
)
716 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
722 KeReleaseMutex(&(*Vacb
)->Mutex
, FALSE
);
723 KeReleaseGuardedMutex(&ViewLock
);
724 ExFreeToNPagedLookasideList(&VacbLookasideList
, *Vacb
);
726 if (InterlockedCompareExchange(¤t
->PinCount
, 0, 0) == 0)
728 KeWaitForSingleObject(¤t
->Mutex
,
734 return STATUS_SUCCESS
;
736 if (current
->FileOffset
.QuadPart
< FileOffset
)
738 ASSERT(previous
== NULL
||
739 previous
->FileOffset
.QuadPart
< current
->FileOffset
.QuadPart
);
742 if (current
->FileOffset
.QuadPart
> FileOffset
)
744 current_entry
= current_entry
->Flink
;
746 /* There was no existing VACB. */
750 InsertHeadList(&previous
->CacheMapVacbListEntry
, ¤t
->CacheMapVacbListEntry
);
754 InsertHeadList(&SharedCacheMap
->CacheMapVacbListHead
, ¤t
->CacheMapVacbListEntry
);
756 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
757 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
758 KeReleaseGuardedMutex(&ViewLock
);
760 MI_SET_USAGE(MI_USAGE_CACHE
);
762 if ((SharedCacheMap
->FileObject
) && (SharedCacheMap
->FileObject
->FileName
.Buffer
))
766 pos
= wcsrchr(SharedCacheMap
->FileObject
->FileName
.Buffer
, '\\');
767 len
= wcslen(pos
) * sizeof(WCHAR
);
768 if (pos
) snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
772 Status
= CcRosMapVacb(current
);
780 PROS_SHARED_CACHE_MAP SharedCacheMap
,
782 PLONGLONG BaseOffset
,
790 ASSERT(SharedCacheMap
);
792 DPRINT("CcRosGetVacb()\n");
795 * Look for a VACB already mapping the same data.
797 current
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
801 * Otherwise create a new VACB.
803 Status
= CcRosCreateVacb(SharedCacheMap
, FileOffset
, ¤t
);
804 if (!NT_SUCCESS(Status
))
810 KeAcquireGuardedMutex(&ViewLock
);
812 /* Move to the tail of the LRU list */
813 RemoveEntryList(¤t
->VacbLruListEntry
);
814 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
816 KeReleaseGuardedMutex(&ViewLock
);
819 * Return information about the VACB to the caller.
821 *UptoDate
= current
->Valid
;
822 *BaseAddress
= current
->BaseAddress
;
823 DPRINT("*BaseAddress %p\n", *BaseAddress
);
825 *BaseOffset
= current
->FileOffset
.QuadPart
;
826 return STATUS_SUCCESS
;
832 PROS_SHARED_CACHE_MAP SharedCacheMap
,
838 * FUNCTION: Request a page mapping for a shared cache map
843 ASSERT(SharedCacheMap
);
845 if (FileOffset
% VACB_MAPPING_GRANULARITY
!= 0)
847 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
848 FileOffset
, VACB_MAPPING_GRANULARITY
);
849 KeBugCheck(CACHE_MANAGER
);
852 return CcRosGetVacb(SharedCacheMap
,
864 MEMORY_AREA
* MemoryArea
,
870 ASSERT(SwapEntry
== 0);
873 ASSERT(MmGetReferenceCountPage(Page
) == 1);
874 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
879 CcRosInternalFreeVacb (
882 * FUNCTION: Releases a VACB associated with a shared cache map
885 DPRINT("Freeing VACB 0x%p\n", Vacb
);
887 if (Vacb
->SharedCacheMap
->Trace
)
889 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb
->SharedCacheMap
, Vacb
);
893 MmLockAddressSpace(MmGetKernelAddressSpace());
894 MmFreeMemoryArea(MmGetKernelAddressSpace(),
898 MmUnlockAddressSpace(MmGetKernelAddressSpace());
900 ExFreeToNPagedLookasideList(&VacbLookasideList
, Vacb
);
901 return STATUS_SUCCESS
;
910 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
911 IN PLARGE_INTEGER FileOffset OPTIONAL
,
913 OUT PIO_STATUS_BLOCK IoStatus
)
915 PROS_SHARED_CACHE_MAP SharedCacheMap
;
916 LARGE_INTEGER Offset
;
917 LONGLONG RemainingLength
;
922 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
923 SectionObjectPointers
, FileOffset
, Length
);
925 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
926 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
928 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
930 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
931 ASSERT(SharedCacheMap
);
934 Offset
= *FileOffset
;
935 RemainingLength
= Length
;
940 RemainingLength
= SharedCacheMap
->FileSize
.QuadPart
;
945 IoStatus
->Status
= STATUS_SUCCESS
;
946 IoStatus
->Information
= 0;
949 while (RemainingLength
> 0)
951 current
= CcRosLookupVacb(SharedCacheMap
, Offset
.QuadPart
);
956 Status
= CcRosFlushVacb(current
);
957 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
959 IoStatus
->Status
= Status
;
963 if (InterlockedCompareExchange(¤t
->PinCount
, 0, 0) == 0)
965 KeReleaseMutex(¤t
->Mutex
, FALSE
);
968 KeAcquireGuardedMutex(&ViewLock
);
969 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
970 CcRosVacbDecRefCount(current
);
971 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
972 KeReleaseGuardedMutex(&ViewLock
);
975 Offset
.QuadPart
+= VACB_MAPPING_GRANULARITY
;
976 RemainingLength
-= min(RemainingLength
, VACB_MAPPING_GRANULARITY
);
983 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
990 CcRosDeleteFileCache (
991 PFILE_OBJECT FileObject
,
992 PROS_SHARED_CACHE_MAP SharedCacheMap
)
994 * FUNCTION: Releases the shared cache map associated with a file object
997 PLIST_ENTRY current_entry
;
1002 ASSERT(SharedCacheMap
);
1004 SharedCacheMap
->RefCount
++;
1005 KeReleaseGuardedMutex(&ViewLock
);
1007 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1009 KeAcquireGuardedMutex(&ViewLock
);
1010 SharedCacheMap
->RefCount
--;
1011 if (SharedCacheMap
->RefCount
== 0)
1013 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1018 InitializeListHead(&FreeList
);
1019 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1020 while (!IsListEmpty(&SharedCacheMap
->CacheMapVacbListHead
))
1022 current_entry
= RemoveTailList(&SharedCacheMap
->CacheMapVacbListHead
);
1023 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1024 RemoveEntryList(¤t
->VacbLruListEntry
);
1027 RemoveEntryList(¤t
->DirtyVacbListEntry
);
1028 DirtyPageCount
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
1029 DPRINT1("Freeing dirty VACB\n");
1031 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
1034 SharedCacheMap
->Trace
= FALSE
;
1036 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1038 KeReleaseGuardedMutex(&ViewLock
);
1039 ObDereferenceObject(SharedCacheMap
->FileObject
);
1041 while (!IsListEmpty(&FreeList
))
1043 current_entry
= RemoveTailList(&FreeList
);
1044 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1045 CcRosInternalFreeVacb(current
);
1047 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1048 KeAcquireGuardedMutex(&ViewLock
);
1050 return STATUS_SUCCESS
;
1055 CcRosReferenceCache (
1056 PFILE_OBJECT FileObject
)
1058 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1059 KeAcquireGuardedMutex(&ViewLock
);
1060 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1061 ASSERT(SharedCacheMap
);
1062 ASSERT(SharedCacheMap
->RefCount
!= 0);
1063 SharedCacheMap
->RefCount
++;
1064 KeReleaseGuardedMutex(&ViewLock
);
1069 CcRosRemoveIfClosed (
1070 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1072 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1073 DPRINT("CcRosRemoveIfClosed()\n");
1074 KeAcquireGuardedMutex(&ViewLock
);
1075 SharedCacheMap
= SectionObjectPointer
->SharedCacheMap
;
1076 if (SharedCacheMap
&& SharedCacheMap
->RefCount
== 0)
1078 CcRosDeleteFileCache(SharedCacheMap
->FileObject
, SharedCacheMap
);
1080 KeReleaseGuardedMutex(&ViewLock
);
1086 CcRosDereferenceCache (
1087 PFILE_OBJECT FileObject
)
1089 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1090 KeAcquireGuardedMutex(&ViewLock
);
1091 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1092 ASSERT(SharedCacheMap
);
1093 if (SharedCacheMap
->RefCount
> 0)
1095 SharedCacheMap
->RefCount
--;
1096 if (SharedCacheMap
->RefCount
== 0)
1098 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1099 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1102 KeReleaseGuardedMutex(&ViewLock
);
1107 CcRosReleaseFileCache (
1108 PFILE_OBJECT FileObject
)
1110 * FUNCTION: Called by the file system when a handle to a file object
1114 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1116 KeAcquireGuardedMutex(&ViewLock
);
1118 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1120 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1121 if (FileObject
->PrivateCacheMap
!= NULL
)
1123 FileObject
->PrivateCacheMap
= NULL
;
1124 if (SharedCacheMap
->RefCount
> 0)
1126 SharedCacheMap
->RefCount
--;
1127 if (SharedCacheMap
->RefCount
== 0)
1129 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1130 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1135 KeReleaseGuardedMutex(&ViewLock
);
1136 return STATUS_SUCCESS
;
1141 CcTryToInitializeFileCache (
1142 PFILE_OBJECT FileObject
)
1144 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1147 KeAcquireGuardedMutex(&ViewLock
);
1149 ASSERT(FileObject
->SectionObjectPointer
);
1150 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1151 if (SharedCacheMap
== NULL
)
1153 Status
= STATUS_UNSUCCESSFUL
;
1157 if (FileObject
->PrivateCacheMap
== NULL
)
1159 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1160 SharedCacheMap
->RefCount
++;
1162 Status
= STATUS_SUCCESS
;
1164 KeReleaseGuardedMutex(&ViewLock
);
1172 CcRosInitializeFileCache (
1173 PFILE_OBJECT FileObject
,
1174 PCC_FILE_SIZES FileSizes
,
1176 PCACHE_MANAGER_CALLBACKS CallBacks
,
1177 PVOID LazyWriterContext
)
1179 * FUNCTION: Initializes a shared cache map for a file object
1182 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1184 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1185 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1186 FileObject
, SharedCacheMap
);
1188 KeAcquireGuardedMutex(&ViewLock
);
1189 if (SharedCacheMap
== NULL
)
1191 SharedCacheMap
= ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList
);
1192 if (SharedCacheMap
== NULL
)
1194 KeReleaseGuardedMutex(&ViewLock
);
1195 return STATUS_INSUFFICIENT_RESOURCES
;
1197 RtlZeroMemory(SharedCacheMap
, sizeof(*SharedCacheMap
));
1198 ObReferenceObjectByPointer(FileObject
,
1202 SharedCacheMap
->FileObject
= FileObject
;
1203 SharedCacheMap
->Callbacks
= CallBacks
;
1204 SharedCacheMap
->LazyWriteContext
= LazyWriterContext
;
1205 SharedCacheMap
->SectionSize
= FileSizes
->AllocationSize
;
1206 SharedCacheMap
->FileSize
= FileSizes
->FileSize
;
1207 SharedCacheMap
->PinAccess
= PinAccess
;
1208 KeInitializeSpinLock(&SharedCacheMap
->CacheMapLock
);
1209 InitializeListHead(&SharedCacheMap
->CacheMapVacbListHead
);
1210 FileObject
->SectionObjectPointer
->SharedCacheMap
= SharedCacheMap
;
1212 if (FileObject
->PrivateCacheMap
== NULL
)
1214 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1215 SharedCacheMap
->RefCount
++;
1217 KeReleaseGuardedMutex(&ViewLock
);
1219 return STATUS_SUCCESS
;
1227 CcGetFileObjectFromSectionPtrs (
1228 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1230 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1232 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p\n", SectionObjectPointers
);
1234 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1236 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1237 ASSERT(SharedCacheMap
);
1238 return SharedCacheMap
->FileObject
;
1249 DPRINT("CcInitView()\n");
1251 InitializeListHead(&DirtyVacbListHead
);
1252 InitializeListHead(&VacbLruListHead
);
1253 KeInitializeGuardedMutex(&ViewLock
);
1254 ExInitializeNPagedLookasideList(&iBcbLookasideList
,
1258 sizeof(INTERNAL_BCB
),
1261 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList
,
1265 sizeof(ROS_SHARED_CACHE_MAP
),
1266 TAG_SHARED_CACHE_MAP
,
1268 ExInitializeNPagedLookasideList(&VacbLookasideList
,
1276 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1278 CcInitCacheZeroPage();