/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows
 *
 * (1) If caching for the FCB hasn't been initiated then so do by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page
 */
32 /* INCLUDES ******************************************************************/
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
42 /* GLOBALS *******************************************************************/
44 static LIST_ENTRY DirtyVacbListHead
;
45 static LIST_ENTRY VacbListHead
;
46 static LIST_ENTRY VacbLruListHead
;
47 ULONG DirtyPageCount
= 0;
49 KGUARDED_MUTEX ViewLock
;
51 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
52 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList
;
53 static NPAGED_LOOKASIDE_LIST VacbLookasideList
;
56 static void CcRosVacbIncRefCount_(PROS_VACB vacb
, const char* file
, int line
)
58 ++vacb
->ReferenceCount
;
59 if (vacb
->SharedCacheMap
->Trace
)
61 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
62 file
, line
, vacb
, vacb
->ReferenceCount
, vacb
->Dirty
, vacb
->PageOut
);
65 static void CcRosVacbDecRefCount_(PROS_VACB vacb
, const char* file
, int line
)
67 --vacb
->ReferenceCount
;
68 if (vacb
->SharedCacheMap
->Trace
)
70 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
71 file
, line
, vacb
, vacb
->ReferenceCount
, vacb
->Dirty
, vacb
->PageOut
);
/* On debug builds route refcount changes through the tracing helpers so the
 * call site is recorded; on free builds use the bare increment/decrement.
 * (Without the #if/#else both pairs would be conflicting redefinitions.) */
#if DBG
#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
82 CcRosInternalFreeVacb(PROS_VACB Vacb
);
85 /* FUNCTIONS *****************************************************************/
90 PROS_SHARED_CACHE_MAP SharedCacheMap
,
95 PLIST_ENTRY current_entry
;
101 SharedCacheMap
->Trace
= Trace
;
105 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
107 KeAcquireGuardedMutex(&ViewLock
);
108 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldirql
);
110 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
111 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
113 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
114 current_entry
= current_entry
->Flink
;
116 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
117 current
, current
->ReferenceCount
, current
->Dirty
, current
->PageOut
);
119 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
120 KeReleaseGuardedMutex(&ViewLock
);
124 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
128 UNREFERENCED_PARAMETER(SharedCacheMap
);
129 UNREFERENCED_PARAMETER(Trace
);
141 Status
= CcWriteVirtualAddress(Vacb
);
142 if (NT_SUCCESS(Status
))
144 KeAcquireGuardedMutex(&ViewLock
);
145 KeAcquireSpinLock(&Vacb
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
148 RemoveEntryList(&Vacb
->DirtyVacbListEntry
);
149 DirtyPageCount
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
150 CcRosVacbDecRefCount(Vacb
);
152 KeReleaseSpinLock(&Vacb
->SharedCacheMap
->CacheMapLock
, oldIrql
);
153 KeReleaseGuardedMutex(&ViewLock
);
161 CcRosFlushDirtyPages (
166 PLIST_ENTRY current_entry
;
170 LARGE_INTEGER ZeroTimeout
;
172 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
175 ZeroTimeout
.QuadPart
= 0;
177 KeEnterCriticalRegion();
178 KeAcquireGuardedMutex(&ViewLock
);
180 current_entry
= DirtyVacbListHead
.Flink
;
181 if (current_entry
== &DirtyVacbListHead
)
183 DPRINT("No Dirty pages\n");
186 while ((current_entry
!= &DirtyVacbListHead
) && (Target
> 0))
188 current
= CONTAINING_RECORD(current_entry
,
191 current_entry
= current_entry
->Flink
;
193 CcRosVacbIncRefCount(current
);
195 Locked
= current
->SharedCacheMap
->Callbacks
->AcquireForLazyWrite(
196 current
->SharedCacheMap
->LazyWriteContext
, Wait
);
199 CcRosVacbDecRefCount(current
);
203 Status
= KeWaitForSingleObject(¤t
->Mutex
,
207 Wait
? NULL
: &ZeroTimeout
);
208 if (Status
!= STATUS_SUCCESS
)
210 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
211 current
->SharedCacheMap
->LazyWriteContext
);
212 CcRosVacbDecRefCount(current
);
216 ASSERT(current
->Dirty
);
218 /* One reference is added above */
219 if (current
->ReferenceCount
> 2)
221 KeReleaseMutex(¤t
->Mutex
, FALSE
);
222 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
223 current
->SharedCacheMap
->LazyWriteContext
);
224 CcRosVacbDecRefCount(current
);
228 KeReleaseGuardedMutex(&ViewLock
);
230 Status
= CcRosFlushVacb(current
);
232 KeReleaseMutex(¤t
->Mutex
, FALSE
);
233 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
234 current
->SharedCacheMap
->LazyWriteContext
);
236 KeAcquireGuardedMutex(&ViewLock
);
237 CcRosVacbDecRefCount(current
);
239 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
241 DPRINT1("CC: Failed to flush VACB.\n");
245 (*Count
) += VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
246 Target
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
249 current_entry
= DirtyVacbListHead
.Flink
;
252 KeReleaseGuardedMutex(&ViewLock
);
253 KeLeaveCriticalRegion();
255 DPRINT("CcRosFlushDirtyPages() finished\n");
256 return STATUS_SUCCESS
;
265 * FUNCTION: Try to free some memory from the file cache.
267 * Target - The number of pages to be freed.
268 * Priority - The priority of free (currently unused).
269 * NrFreed - Points to a variable where the number of pages
270 * actually freed is returned.
273 PLIST_ENTRY current_entry
;
280 BOOLEAN FlushedPages
= FALSE
;
282 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
284 InitializeListHead(&FreeList
);
289 KeAcquireGuardedMutex(&ViewLock
);
291 current_entry
= VacbLruListHead
.Flink
;
292 while (current_entry
!= &VacbLruListHead
)
294 current
= CONTAINING_RECORD(current_entry
,
297 current_entry
= current_entry
->Flink
;
299 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
301 /* Reference the VACB */
302 CcRosVacbIncRefCount(current
);
304 /* Check if it's mapped and not dirty */
305 if (current
->MappedCount
> 0 && !current
->Dirty
)
307 /* We have to break these locks because Cc sucks */
308 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
309 KeReleaseGuardedMutex(&ViewLock
);
311 /* Page out the VACB */
312 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++)
314 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
316 MmPageOutPhysicalAddress(Page
);
319 /* Reacquire the locks */
320 KeAcquireGuardedMutex(&ViewLock
);
321 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
324 /* Dereference the VACB */
325 CcRosVacbDecRefCount(current
);
327 /* Check if we can free this entry now */
328 if (current
->ReferenceCount
== 0)
330 ASSERT(!current
->Dirty
);
331 ASSERT(!current
->MappedCount
);
333 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
334 RemoveEntryList(¤t
->VacbListEntry
);
335 RemoveEntryList(¤t
->VacbLruListEntry
);
336 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
338 /* Calculate how many pages we freed for Mm */
339 PagesFreed
= min(VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
, Target
);
340 Target
-= PagesFreed
;
341 (*NrFreed
) += PagesFreed
;
344 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
347 KeReleaseGuardedMutex(&ViewLock
);
349 /* Try flushing pages if we haven't met our target */
350 if ((Target
> 0) && !FlushedPages
)
352 /* Flush dirty pages to disk */
353 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
);
356 /* We can only swap as many pages as we flushed */
357 if (PagesFreed
< Target
) Target
= PagesFreed
;
359 /* Check if we flushed anything */
362 /* Try again after flushing dirty pages */
363 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
368 while (!IsListEmpty(&FreeList
))
370 current_entry
= RemoveHeadList(&FreeList
);
371 current
= CONTAINING_RECORD(current_entry
,
373 CacheMapVacbListEntry
);
374 CcRosInternalFreeVacb(current
);
377 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
379 return STATUS_SUCCESS
;
385 PROS_SHARED_CACHE_MAP SharedCacheMap
,
394 ASSERT(SharedCacheMap
);
396 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
397 SharedCacheMap
, Vacb
, Valid
);
399 KeAcquireGuardedMutex(&ViewLock
);
400 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
404 WasDirty
= Vacb
->Dirty
;
405 Vacb
->Dirty
= Vacb
->Dirty
|| Dirty
;
407 if (!WasDirty
&& Vacb
->Dirty
)
409 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
410 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
417 CcRosVacbDecRefCount(Vacb
);
418 if (Mapped
&& (Vacb
->MappedCount
== 1))
420 CcRosVacbIncRefCount(Vacb
);
422 if (!WasDirty
&& Vacb
->Dirty
)
424 CcRosVacbIncRefCount(Vacb
);
427 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
428 KeReleaseGuardedMutex(&ViewLock
);
429 KeReleaseMutex(&Vacb
->Mutex
, FALSE
);
431 return STATUS_SUCCESS
;
434 /* Returns with VACB Lock Held! */
438 PROS_SHARED_CACHE_MAP SharedCacheMap
,
441 PLIST_ENTRY current_entry
;
445 ASSERT(SharedCacheMap
);
447 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %lu)\n",
448 SharedCacheMap
, FileOffset
);
450 KeAcquireGuardedMutex(&ViewLock
);
451 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
453 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
454 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
456 current
= CONTAINING_RECORD(current_entry
,
458 CacheMapVacbListEntry
);
459 if (IsPointInRange(current
->FileOffset
.QuadPart
,
460 VACB_MAPPING_GRANULARITY
,
463 CcRosVacbIncRefCount(current
);
464 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
465 KeReleaseGuardedMutex(&ViewLock
);
466 KeWaitForSingleObject(¤t
->Mutex
,
473 if (current
->FileOffset
.QuadPart
> FileOffset
)
475 current_entry
= current_entry
->Flink
;
478 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
479 KeReleaseGuardedMutex(&ViewLock
);
487 PROS_SHARED_CACHE_MAP SharedCacheMap
,
493 ASSERT(SharedCacheMap
);
495 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %lu)\n",
496 SharedCacheMap
, FileOffset
);
498 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
501 KeBugCheck(CACHE_MANAGER
);
504 KeAcquireGuardedMutex(&ViewLock
);
505 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
509 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
510 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
514 CcRosVacbDecRefCount(Vacb
);
517 /* Move to the tail of the LRU list */
518 RemoveEntryList(&Vacb
->VacbLruListEntry
);
519 InsertTailList(&VacbLruListHead
, &Vacb
->VacbLruListEntry
);
523 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
524 KeReleaseGuardedMutex(&ViewLock
);
525 KeReleaseMutex(&Vacb
->Mutex
, FALSE
);
527 return STATUS_SUCCESS
;
533 PROS_SHARED_CACHE_MAP SharedCacheMap
,
541 ASSERT(SharedCacheMap
);
543 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %lu, NowDirty %u)\n",
544 SharedCacheMap
, FileOffset
, NowDirty
);
546 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
549 return STATUS_UNSUCCESSFUL
;
552 KeAcquireGuardedMutex(&ViewLock
);
553 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
555 WasDirty
= Vacb
->Dirty
;
556 Vacb
->Dirty
= Vacb
->Dirty
|| NowDirty
;
560 if (!WasDirty
&& NowDirty
)
562 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
563 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
566 CcRosVacbDecRefCount(Vacb
);
567 if (!WasDirty
&& NowDirty
)
569 CcRosVacbIncRefCount(Vacb
);
571 if (Vacb
->MappedCount
== 0)
573 CcRosVacbDecRefCount(Vacb
);
576 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
577 KeReleaseGuardedMutex(&ViewLock
);
578 KeReleaseMutex(&Vacb
->Mutex
, FALSE
);
580 return STATUS_SUCCESS
;
586 PROS_SHARED_CACHE_MAP SharedCacheMap
,
592 PLIST_ENTRY current_entry
;
596 ASSERT(SharedCacheMap
);
598 DPRINT("CcRosCreateVacb()\n");
600 if (FileOffset
>= SharedCacheMap
->FileSize
.u
.LowPart
)
603 return STATUS_INVALID_PARAMETER
;
606 current
= ExAllocateFromNPagedLookasideList(&VacbLookasideList
);
607 current
->Valid
= FALSE
;
608 current
->Dirty
= FALSE
;
609 current
->PageOut
= FALSE
;
610 current
->FileOffset
.QuadPart
= ROUND_DOWN(FileOffset
, VACB_MAPPING_GRANULARITY
);
611 current
->SharedCacheMap
= SharedCacheMap
;
613 if (SharedCacheMap
->Trace
)
615 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap
, current
);
618 current
->MappedCount
= 0;
619 current
->DirtyVacbListEntry
.Flink
= NULL
;
620 current
->DirtyVacbListEntry
.Blink
= NULL
;
621 current
->ReferenceCount
= 1;
622 KeInitializeMutex(¤t
->Mutex
, 0);
623 KeWaitForSingleObject(¤t
->Mutex
,
628 KeAcquireGuardedMutex(&ViewLock
);
631 /* There is window between the call to CcRosLookupVacb
632 * and CcRosCreateVacb. We must check if a VACB for the
633 * file offset exist. If there is a VACB, we release
634 * our newly created VACB and return the existing one.
636 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
637 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
639 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
641 current
= CONTAINING_RECORD(current_entry
,
643 CacheMapVacbListEntry
);
644 if (IsPointInRange(current
->FileOffset
.QuadPart
,
645 VACB_MAPPING_GRANULARITY
,
648 CcRosVacbIncRefCount(current
);
649 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
651 if (SharedCacheMap
->Trace
)
653 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
659 KeReleaseMutex(&(*Vacb
)->Mutex
, FALSE
);
660 KeReleaseGuardedMutex(&ViewLock
);
661 ExFreeToNPagedLookasideList(&VacbLookasideList
, *Vacb
);
663 KeWaitForSingleObject(¤t
->Mutex
,
668 return STATUS_SUCCESS
;
670 if (current
->FileOffset
.QuadPart
< FileOffset
)
672 ASSERT(previous
== NULL
||
673 previous
->FileOffset
.QuadPart
< current
->FileOffset
.QuadPart
);
676 if (current
->FileOffset
.QuadPart
> FileOffset
)
678 current_entry
= current_entry
->Flink
;
680 /* There was no existing VACB. */
684 InsertHeadList(&previous
->CacheMapVacbListEntry
, ¤t
->CacheMapVacbListEntry
);
688 InsertHeadList(&SharedCacheMap
->CacheMapVacbListHead
, ¤t
->CacheMapVacbListEntry
);
690 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
691 InsertTailList(&VacbListHead
, ¤t
->VacbListEntry
);
692 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
693 KeReleaseGuardedMutex(&ViewLock
);
695 MmLockAddressSpace(MmGetKernelAddressSpace());
696 current
->BaseAddress
= NULL
;
697 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
698 0, // nothing checks for VACB mareas, so set to 0
699 ¤t
->BaseAddress
,
700 VACB_MAPPING_GRANULARITY
,
702 (PMEMORY_AREA
*)¤t
->MemoryArea
,
706 MmUnlockAddressSpace(MmGetKernelAddressSpace());
707 if (!NT_SUCCESS(Status
))
709 KeBugCheck(CACHE_MANAGER
);
712 /* Create a virtual mapping for this memory area */
713 MI_SET_USAGE(MI_USAGE_CACHE
);
717 if ((SharedCacheMap
->FileObject
) && (SharedCacheMap
->FileObject
->FileName
.Buffer
))
719 pos
= wcsrchr(SharedCacheMap
->FileObject
->FileName
.Buffer
, '\\');
720 len
= wcslen(pos
) * sizeof(WCHAR
);
721 if (pos
) snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
725 MmMapMemoryArea(current
->BaseAddress
, VACB_MAPPING_GRANULARITY
,
726 MC_CACHE
, PAGE_READWRITE
);
728 return STATUS_SUCCESS
;
734 PROS_SHARED_CACHE_MAP SharedCacheMap
,
742 PROS_VACB Previous
= NULL
;
744 ASSERT(SharedCacheMap
);
746 DPRINT("CcRosGetVacbChain()\n");
748 Length
= ROUND_UP(Length
, VACB_MAPPING_GRANULARITY
);
750 VacbList
= _alloca(sizeof(PROS_VACB
) *
751 (Length
/ VACB_MAPPING_GRANULARITY
));
754 * Look for a VACB already mapping the same data.
756 for (i
= 0; i
< (Length
/ VACB_MAPPING_GRANULARITY
); i
++)
758 ULONG CurrentOffset
= FileOffset
+ (i
* VACB_MAPPING_GRANULARITY
);
759 current
= CcRosLookupVacb(SharedCacheMap
, CurrentOffset
);
762 KeAcquireGuardedMutex(&ViewLock
);
764 /* Move to tail of LRU list */
765 RemoveEntryList(¤t
->VacbLruListEntry
);
766 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
768 KeReleaseGuardedMutex(&ViewLock
);
770 VacbList
[i
] = current
;
774 CcRosCreateVacb(SharedCacheMap
, CurrentOffset
, ¤t
);
775 VacbList
[i
] = current
;
779 for (i
= 0; i
< Length
/ VACB_MAPPING_GRANULARITY
; i
++)
784 Previous
= VacbList
[i
];
788 Previous
->NextInChain
= VacbList
[i
];
789 Previous
= VacbList
[i
];
793 Previous
->NextInChain
= NULL
;
795 return STATUS_SUCCESS
;
801 PROS_SHARED_CACHE_MAP SharedCacheMap
,
803 PULONGLONG BaseOffset
,
811 ASSERT(SharedCacheMap
);
813 DPRINT("CcRosGetVacb()\n");
816 * Look for a VACB already mapping the same data.
818 current
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
822 * Otherwise create a new VACB.
824 Status
= CcRosCreateVacb(SharedCacheMap
, FileOffset
, ¤t
);
825 if (!NT_SUCCESS(Status
))
831 KeAcquireGuardedMutex(&ViewLock
);
833 /* Move to the tail of the LRU list */
834 RemoveEntryList(¤t
->VacbLruListEntry
);
835 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
837 KeReleaseGuardedMutex(&ViewLock
);
840 * Return information about the VACB to the caller.
842 *UptoDate
= current
->Valid
;
843 *BaseAddress
= current
->BaseAddress
;
844 DPRINT("*BaseAddress %p\n", *BaseAddress
);
846 *BaseOffset
= current
->FileOffset
.QuadPart
;
847 return STATUS_SUCCESS
;
853 PROS_SHARED_CACHE_MAP SharedCacheMap
,
859 * FUNCTION: Request a page mapping for a shared cache map
862 ULONGLONG BaseOffset
;
864 ASSERT(SharedCacheMap
);
866 if (FileOffset
% VACB_MAPPING_GRANULARITY
!= 0)
868 DPRINT1("Bad fileoffset %x should be multiple of %x",
869 FileOffset
, VACB_MAPPING_GRANULARITY
);
870 KeBugCheck(CACHE_MANAGER
);
873 return CcRosGetVacb(SharedCacheMap
,
885 MEMORY_AREA
* MemoryArea
,
891 ASSERT(SwapEntry
== 0);
894 ASSERT(MmGetReferenceCountPage(Page
) == 1);
895 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
900 CcRosInternalFreeVacb (
903 * FUNCTION: Releases a VACB associated with a shared cache map
906 DPRINT("Freeing VACB 0x%p\n", Vacb
);
908 if (Vacb
->SharedCacheMap
->Trace
)
910 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb
->SharedCacheMap
, Vacb
);
914 MmLockAddressSpace(MmGetKernelAddressSpace());
915 MmFreeMemoryArea(MmGetKernelAddressSpace(),
919 MmUnlockAddressSpace(MmGetKernelAddressSpace());
921 ExFreeToNPagedLookasideList(&VacbLookasideList
, Vacb
);
922 return STATUS_SUCCESS
;
931 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
932 IN PLARGE_INTEGER FileOffset OPTIONAL
,
934 OUT PIO_STATUS_BLOCK IoStatus
)
936 PROS_SHARED_CACHE_MAP SharedCacheMap
;
937 LARGE_INTEGER Offset
;
942 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
943 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
945 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
947 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
948 ASSERT(SharedCacheMap
);
951 Offset
= *FileOffset
;
955 Offset
.QuadPart
= (LONGLONG
)0;
956 Length
= SharedCacheMap
->FileSize
.u
.LowPart
;
961 IoStatus
->Status
= STATUS_SUCCESS
;
962 IoStatus
->Information
= 0;
967 current
= CcRosLookupVacb(SharedCacheMap
, Offset
.u
.LowPart
);
972 Status
= CcRosFlushVacb(current
);
973 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
975 IoStatus
->Status
= Status
;
978 KeReleaseMutex(¤t
->Mutex
, FALSE
);
980 KeAcquireGuardedMutex(&ViewLock
);
981 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
982 CcRosVacbDecRefCount(current
);
983 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
984 KeReleaseGuardedMutex(&ViewLock
);
987 Offset
.QuadPart
+= VACB_MAPPING_GRANULARITY
;
988 if (Length
> VACB_MAPPING_GRANULARITY
)
990 Length
-= VACB_MAPPING_GRANULARITY
;
1002 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1009 CcRosDeleteFileCache (
1010 PFILE_OBJECT FileObject
,
1011 PROS_SHARED_CACHE_MAP SharedCacheMap
)
1013 * FUNCTION: Releases the shared cache map associated with a file object
1016 PLIST_ENTRY current_entry
;
1018 LIST_ENTRY FreeList
;
1021 ASSERT(SharedCacheMap
);
1023 SharedCacheMap
->RefCount
++;
1024 KeReleaseGuardedMutex(&ViewLock
);
1026 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1028 KeAcquireGuardedMutex(&ViewLock
);
1029 SharedCacheMap
->RefCount
--;
1030 if (SharedCacheMap
->RefCount
== 0)
1032 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1037 InitializeListHead(&FreeList
);
1038 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1039 while (!IsListEmpty(&SharedCacheMap
->CacheMapVacbListHead
))
1041 current_entry
= RemoveTailList(&SharedCacheMap
->CacheMapVacbListHead
);
1042 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1043 RemoveEntryList(¤t
->VacbListEntry
);
1044 RemoveEntryList(¤t
->VacbLruListEntry
);
1047 RemoveEntryList(¤t
->DirtyVacbListEntry
);
1048 DirtyPageCount
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
1049 DPRINT1("Freeing dirty VACB\n");
1051 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
1054 SharedCacheMap
->Trace
= FALSE
;
1056 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1058 KeReleaseGuardedMutex(&ViewLock
);
1059 ObDereferenceObject(SharedCacheMap
->FileObject
);
1061 while (!IsListEmpty(&FreeList
))
1063 current_entry
= RemoveTailList(&FreeList
);
1064 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1065 CcRosInternalFreeVacb(current
);
1067 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1068 KeAcquireGuardedMutex(&ViewLock
);
1070 return STATUS_SUCCESS
;
1075 CcRosReferenceCache (
1076 PFILE_OBJECT FileObject
)
1078 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1079 KeAcquireGuardedMutex(&ViewLock
);
1080 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1081 ASSERT(SharedCacheMap
);
1082 ASSERT(SharedCacheMap
->RefCount
!= 0);
1083 SharedCacheMap
->RefCount
++;
1084 KeReleaseGuardedMutex(&ViewLock
);
1089 CcRosRemoveIfClosed (
1090 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1092 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1093 DPRINT("CcRosRemoveIfClosed()\n");
1094 KeAcquireGuardedMutex(&ViewLock
);
1095 SharedCacheMap
= SectionObjectPointer
->SharedCacheMap
;
1096 if (SharedCacheMap
&& SharedCacheMap
->RefCount
== 0)
1098 CcRosDeleteFileCache(SharedCacheMap
->FileObject
, SharedCacheMap
);
1100 KeReleaseGuardedMutex(&ViewLock
);
1106 CcRosDereferenceCache (
1107 PFILE_OBJECT FileObject
)
1109 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1110 KeAcquireGuardedMutex(&ViewLock
);
1111 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1112 ASSERT(SharedCacheMap
);
1113 if (SharedCacheMap
->RefCount
> 0)
1115 SharedCacheMap
->RefCount
--;
1116 if (SharedCacheMap
->RefCount
== 0)
1118 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1119 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1122 KeReleaseGuardedMutex(&ViewLock
);
1127 CcRosReleaseFileCache (
1128 PFILE_OBJECT FileObject
)
1130 * FUNCTION: Called by the file system when a handle to a file object
1134 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1136 KeAcquireGuardedMutex(&ViewLock
);
1138 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1140 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1141 if (FileObject
->PrivateCacheMap
!= NULL
)
1143 FileObject
->PrivateCacheMap
= NULL
;
1144 if (SharedCacheMap
->RefCount
> 0)
1146 SharedCacheMap
->RefCount
--;
1147 if (SharedCacheMap
->RefCount
== 0)
1149 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1150 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1155 KeReleaseGuardedMutex(&ViewLock
);
1156 return STATUS_SUCCESS
;
1161 CcTryToInitializeFileCache (
1162 PFILE_OBJECT FileObject
)
1164 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1167 KeAcquireGuardedMutex(&ViewLock
);
1169 ASSERT(FileObject
->SectionObjectPointer
);
1170 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1171 if (SharedCacheMap
== NULL
)
1173 Status
= STATUS_UNSUCCESSFUL
;
1177 if (FileObject
->PrivateCacheMap
== NULL
)
1179 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1180 SharedCacheMap
->RefCount
++;
1182 Status
= STATUS_SUCCESS
;
1184 KeReleaseGuardedMutex(&ViewLock
);
1192 CcRosInitializeFileCache (
1193 PFILE_OBJECT FileObject
,
1194 PCACHE_MANAGER_CALLBACKS CallBacks
,
1195 PVOID LazyWriterContext
)
1197 * FUNCTION: Initializes a shared cache map for a file object
1200 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1202 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1203 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1204 FileObject
, SharedCacheMap
);
1206 KeAcquireGuardedMutex(&ViewLock
);
1207 if (SharedCacheMap
== NULL
)
1209 SharedCacheMap
= ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList
);
1210 if (SharedCacheMap
== NULL
)
1212 KeReleaseGuardedMutex(&ViewLock
);
1213 return STATUS_UNSUCCESSFUL
;
1215 RtlZeroMemory(SharedCacheMap
, sizeof(*SharedCacheMap
));
1216 ObReferenceObjectByPointer(FileObject
,
1220 SharedCacheMap
->FileObject
= FileObject
;
1221 SharedCacheMap
->Callbacks
= CallBacks
;
1222 SharedCacheMap
->LazyWriteContext
= LazyWriterContext
;
1223 if (FileObject
->FsContext
)
1225 SharedCacheMap
->SectionSize
=
1226 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->AllocationSize
;
1227 SharedCacheMap
->FileSize
=
1228 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->FileSize
;
1230 KeInitializeSpinLock(&SharedCacheMap
->CacheMapLock
);
1231 InitializeListHead(&SharedCacheMap
->CacheMapVacbListHead
);
1232 FileObject
->SectionObjectPointer
->SharedCacheMap
= SharedCacheMap
;
1234 if (FileObject
->PrivateCacheMap
== NULL
)
1236 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1237 SharedCacheMap
->RefCount
++;
1239 KeReleaseGuardedMutex(&ViewLock
);
1241 return STATUS_SUCCESS
;
1249 CcGetFileObjectFromSectionPtrs (
1250 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1252 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1253 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1255 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1256 ASSERT(SharedCacheMap
);
1257 return SharedCacheMap
->FileObject
;
1268 DPRINT("CcInitView()\n");
1270 InitializeListHead(&VacbListHead
);
1271 InitializeListHead(&DirtyVacbListHead
);
1272 InitializeListHead(&VacbLruListHead
);
1273 KeInitializeGuardedMutex(&ViewLock
);
1274 ExInitializeNPagedLookasideList(&iBcbLookasideList
,
1278 sizeof(INTERNAL_BCB
),
1281 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList
,
1285 sizeof(ROS_SHARED_CACHE_MAP
),
1286 TAG_SHARED_CACHE_MAP
,
1288 ExInitializeNPagedLookasideList(&VacbLookasideList
,
1296 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1298 CcInitCacheZeroPage();