/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows
 *
 * (1) If caching for the FCB hasn't been initiated then so do by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page
 */
32 /* INCLUDES ******************************************************************/
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
42 /* GLOBALS *******************************************************************/
44 static LIST_ENTRY DirtyVacbListHead
;
45 static LIST_ENTRY VacbLruListHead
;
46 ULONG DirtyPageCount
= 0;
48 KGUARDED_MUTEX ViewLock
;
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList
;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList
;
#if DBG
/*
 * Debug-build reference count helpers: besides adjusting the count, they log
 * every change (with call site) when tracing is enabled on the owning
 * shared cache map.  Callers use the CcRosVacb{Inc,Dec}RefCount macros below.
 */
static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
{
    ++vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}

static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
    --vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}

#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
/* Free build: plain, unlogged reference count adjustment. */
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
81 CcRosInternalFreeVacb(PROS_VACB Vacb
);
84 /* FUNCTIONS *****************************************************************/
89 PROS_SHARED_CACHE_MAP SharedCacheMap
,
94 PLIST_ENTRY current_entry
;
100 SharedCacheMap
->Trace
= Trace
;
104 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
106 KeAcquireGuardedMutex(&ViewLock
);
107 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldirql
);
109 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
110 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
112 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
113 current_entry
= current_entry
->Flink
;
115 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
116 current
, current
->ReferenceCount
, current
->Dirty
, current
->PageOut
);
118 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
119 KeReleaseGuardedMutex(&ViewLock
);
123 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
127 UNREFERENCED_PARAMETER(SharedCacheMap
);
128 UNREFERENCED_PARAMETER(Trace
);
140 Status
= CcWriteVirtualAddress(Vacb
);
141 if (NT_SUCCESS(Status
))
143 KeAcquireGuardedMutex(&ViewLock
);
144 KeAcquireSpinLock(&Vacb
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
147 RemoveEntryList(&Vacb
->DirtyVacbListEntry
);
148 DirtyPageCount
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
149 CcRosVacbDecRefCount(Vacb
);
151 KeReleaseSpinLock(&Vacb
->SharedCacheMap
->CacheMapLock
, oldIrql
);
152 KeReleaseGuardedMutex(&ViewLock
);
160 CcRosFlushDirtyPages (
165 PLIST_ENTRY current_entry
;
169 LARGE_INTEGER ZeroTimeout
;
171 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
174 ZeroTimeout
.QuadPart
= 0;
176 KeEnterCriticalRegion();
177 KeAcquireGuardedMutex(&ViewLock
);
179 current_entry
= DirtyVacbListHead
.Flink
;
180 if (current_entry
== &DirtyVacbListHead
)
182 DPRINT("No Dirty pages\n");
185 while ((current_entry
!= &DirtyVacbListHead
) && (Target
> 0))
187 current
= CONTAINING_RECORD(current_entry
,
190 current_entry
= current_entry
->Flink
;
192 CcRosVacbIncRefCount(current
);
194 Locked
= current
->SharedCacheMap
->Callbacks
->AcquireForLazyWrite(
195 current
->SharedCacheMap
->LazyWriteContext
, Wait
);
198 CcRosVacbDecRefCount(current
);
202 Status
= KeWaitForSingleObject(¤t
->Mutex
,
206 Wait
? NULL
: &ZeroTimeout
);
207 if (Status
!= STATUS_SUCCESS
)
209 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
210 current
->SharedCacheMap
->LazyWriteContext
);
211 CcRosVacbDecRefCount(current
);
215 ASSERT(current
->Dirty
);
217 /* One reference is added above */
218 if (current
->ReferenceCount
> 2)
220 KeReleaseMutex(¤t
->Mutex
, FALSE
);
221 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
222 current
->SharedCacheMap
->LazyWriteContext
);
223 CcRosVacbDecRefCount(current
);
227 KeReleaseGuardedMutex(&ViewLock
);
229 Status
= CcRosFlushVacb(current
);
231 KeReleaseMutex(¤t
->Mutex
, FALSE
);
232 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
233 current
->SharedCacheMap
->LazyWriteContext
);
235 KeAcquireGuardedMutex(&ViewLock
);
236 CcRosVacbDecRefCount(current
);
238 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
240 DPRINT1("CC: Failed to flush VACB.\n");
244 (*Count
) += VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
245 Target
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
248 current_entry
= DirtyVacbListHead
.Flink
;
251 KeReleaseGuardedMutex(&ViewLock
);
252 KeLeaveCriticalRegion();
254 DPRINT("CcRosFlushDirtyPages() finished\n");
255 return STATUS_SUCCESS
;
264 * FUNCTION: Try to free some memory from the file cache.
266 * Target - The number of pages to be freed.
267 * Priority - The priority of free (currently unused).
268 * NrFreed - Points to a variable where the number of pages
269 * actually freed is returned.
272 PLIST_ENTRY current_entry
;
279 BOOLEAN FlushedPages
= FALSE
;
281 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
283 InitializeListHead(&FreeList
);
288 KeAcquireGuardedMutex(&ViewLock
);
290 current_entry
= VacbLruListHead
.Flink
;
291 while (current_entry
!= &VacbLruListHead
)
293 current
= CONTAINING_RECORD(current_entry
,
296 current_entry
= current_entry
->Flink
;
298 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
300 /* Reference the VACB */
301 CcRosVacbIncRefCount(current
);
303 /* Check if it's mapped and not dirty */
304 if (current
->MappedCount
> 0 && !current
->Dirty
)
306 /* We have to break these locks because Cc sucks */
307 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
308 KeReleaseGuardedMutex(&ViewLock
);
310 /* Page out the VACB */
311 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++)
313 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
315 MmPageOutPhysicalAddress(Page
);
318 /* Reacquire the locks */
319 KeAcquireGuardedMutex(&ViewLock
);
320 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
323 /* Dereference the VACB */
324 CcRosVacbDecRefCount(current
);
326 /* Check if we can free this entry now */
327 if (current
->ReferenceCount
== 0)
329 ASSERT(!current
->Dirty
);
330 ASSERT(!current
->MappedCount
);
332 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
333 RemoveEntryList(¤t
->VacbLruListEntry
);
334 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
336 /* Calculate how many pages we freed for Mm */
337 PagesFreed
= min(VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
, Target
);
338 Target
-= PagesFreed
;
339 (*NrFreed
) += PagesFreed
;
342 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
345 KeReleaseGuardedMutex(&ViewLock
);
347 /* Try flushing pages if we haven't met our target */
348 if ((Target
> 0) && !FlushedPages
)
350 /* Flush dirty pages to disk */
351 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
);
354 /* We can only swap as many pages as we flushed */
355 if (PagesFreed
< Target
) Target
= PagesFreed
;
357 /* Check if we flushed anything */
360 /* Try again after flushing dirty pages */
361 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
366 while (!IsListEmpty(&FreeList
))
368 current_entry
= RemoveHeadList(&FreeList
);
369 current
= CONTAINING_RECORD(current_entry
,
371 CacheMapVacbListEntry
);
372 CcRosInternalFreeVacb(current
);
375 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
377 return STATUS_SUCCESS
;
383 PROS_SHARED_CACHE_MAP SharedCacheMap
,
392 ASSERT(SharedCacheMap
);
394 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
395 SharedCacheMap
, Vacb
, Valid
);
397 KeAcquireGuardedMutex(&ViewLock
);
398 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
402 WasDirty
= Vacb
->Dirty
;
403 Vacb
->Dirty
= Vacb
->Dirty
|| Dirty
;
405 if (!WasDirty
&& Vacb
->Dirty
)
407 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
408 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
415 CcRosVacbDecRefCount(Vacb
);
416 if (Mapped
&& (Vacb
->MappedCount
== 1))
418 CcRosVacbIncRefCount(Vacb
);
420 if (!WasDirty
&& Vacb
->Dirty
)
422 CcRosVacbIncRefCount(Vacb
);
425 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
426 KeReleaseGuardedMutex(&ViewLock
);
427 KeReleaseMutex(&Vacb
->Mutex
, FALSE
);
429 return STATUS_SUCCESS
;
432 /* Returns with VACB Lock Held! */
436 PROS_SHARED_CACHE_MAP SharedCacheMap
,
439 PLIST_ENTRY current_entry
;
443 ASSERT(SharedCacheMap
);
445 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %lu)\n",
446 SharedCacheMap
, FileOffset
);
448 KeAcquireGuardedMutex(&ViewLock
);
449 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
451 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
452 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
454 current
= CONTAINING_RECORD(current_entry
,
456 CacheMapVacbListEntry
);
457 if (IsPointInRange(current
->FileOffset
.QuadPart
,
458 VACB_MAPPING_GRANULARITY
,
461 CcRosVacbIncRefCount(current
);
462 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
463 KeReleaseGuardedMutex(&ViewLock
);
464 KeWaitForSingleObject(¤t
->Mutex
,
471 if (current
->FileOffset
.QuadPart
> FileOffset
)
473 current_entry
= current_entry
->Flink
;
476 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
477 KeReleaseGuardedMutex(&ViewLock
);
485 PROS_SHARED_CACHE_MAP SharedCacheMap
,
491 ASSERT(SharedCacheMap
);
493 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %lu)\n",
494 SharedCacheMap
, FileOffset
);
496 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
499 KeBugCheck(CACHE_MANAGER
);
502 KeAcquireGuardedMutex(&ViewLock
);
503 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
507 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
508 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
512 CcRosVacbDecRefCount(Vacb
);
515 /* Move to the tail of the LRU list */
516 RemoveEntryList(&Vacb
->VacbLruListEntry
);
517 InsertTailList(&VacbLruListHead
, &Vacb
->VacbLruListEntry
);
521 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
522 KeReleaseGuardedMutex(&ViewLock
);
523 KeReleaseMutex(&Vacb
->Mutex
, FALSE
);
525 return STATUS_SUCCESS
;
531 PROS_SHARED_CACHE_MAP SharedCacheMap
,
539 ASSERT(SharedCacheMap
);
541 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %lu, NowDirty %u)\n",
542 SharedCacheMap
, FileOffset
, NowDirty
);
544 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
547 return STATUS_UNSUCCESSFUL
;
550 KeAcquireGuardedMutex(&ViewLock
);
551 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
553 WasDirty
= Vacb
->Dirty
;
554 Vacb
->Dirty
= Vacb
->Dirty
|| NowDirty
;
558 if (!WasDirty
&& NowDirty
)
560 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
561 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
564 CcRosVacbDecRefCount(Vacb
);
565 if (!WasDirty
&& NowDirty
)
567 CcRosVacbIncRefCount(Vacb
);
569 if (Vacb
->MappedCount
== 0)
571 CcRosVacbDecRefCount(Vacb
);
574 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
575 KeReleaseGuardedMutex(&ViewLock
);
576 KeReleaseMutex(&Vacb
->Mutex
, FALSE
);
578 return STATUS_SUCCESS
;
584 PROS_SHARED_CACHE_MAP SharedCacheMap
,
590 PLIST_ENTRY current_entry
;
594 ASSERT(SharedCacheMap
);
596 DPRINT("CcRosCreateVacb()\n");
598 if (FileOffset
>= SharedCacheMap
->FileSize
.u
.LowPart
)
601 return STATUS_INVALID_PARAMETER
;
604 current
= ExAllocateFromNPagedLookasideList(&VacbLookasideList
);
605 current
->Valid
= FALSE
;
606 current
->Dirty
= FALSE
;
607 current
->PageOut
= FALSE
;
608 current
->FileOffset
.QuadPart
= ROUND_DOWN(FileOffset
, VACB_MAPPING_GRANULARITY
);
609 current
->SharedCacheMap
= SharedCacheMap
;
611 if (SharedCacheMap
->Trace
)
613 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap
, current
);
616 current
->MappedCount
= 0;
617 current
->DirtyVacbListEntry
.Flink
= NULL
;
618 current
->DirtyVacbListEntry
.Blink
= NULL
;
619 current
->ReferenceCount
= 1;
620 KeInitializeMutex(¤t
->Mutex
, 0);
621 KeWaitForSingleObject(¤t
->Mutex
,
626 KeAcquireGuardedMutex(&ViewLock
);
629 /* There is window between the call to CcRosLookupVacb
630 * and CcRosCreateVacb. We must check if a VACB for the
631 * file offset exist. If there is a VACB, we release
632 * our newly created VACB and return the existing one.
634 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
635 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
637 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
639 current
= CONTAINING_RECORD(current_entry
,
641 CacheMapVacbListEntry
);
642 if (IsPointInRange(current
->FileOffset
.QuadPart
,
643 VACB_MAPPING_GRANULARITY
,
646 CcRosVacbIncRefCount(current
);
647 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
649 if (SharedCacheMap
->Trace
)
651 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
657 KeReleaseMutex(&(*Vacb
)->Mutex
, FALSE
);
658 KeReleaseGuardedMutex(&ViewLock
);
659 ExFreeToNPagedLookasideList(&VacbLookasideList
, *Vacb
);
661 KeWaitForSingleObject(¤t
->Mutex
,
666 return STATUS_SUCCESS
;
668 if (current
->FileOffset
.QuadPart
< FileOffset
)
670 ASSERT(previous
== NULL
||
671 previous
->FileOffset
.QuadPart
< current
->FileOffset
.QuadPart
);
674 if (current
->FileOffset
.QuadPart
> FileOffset
)
676 current_entry
= current_entry
->Flink
;
678 /* There was no existing VACB. */
682 InsertHeadList(&previous
->CacheMapVacbListEntry
, ¤t
->CacheMapVacbListEntry
);
686 InsertHeadList(&SharedCacheMap
->CacheMapVacbListHead
, ¤t
->CacheMapVacbListEntry
);
688 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
689 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
690 KeReleaseGuardedMutex(&ViewLock
);
692 MmLockAddressSpace(MmGetKernelAddressSpace());
693 current
->BaseAddress
= NULL
;
694 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
695 0, // nothing checks for VACB mareas, so set to 0
696 ¤t
->BaseAddress
,
697 VACB_MAPPING_GRANULARITY
,
699 (PMEMORY_AREA
*)¤t
->MemoryArea
,
703 MmUnlockAddressSpace(MmGetKernelAddressSpace());
704 if (!NT_SUCCESS(Status
))
706 KeBugCheck(CACHE_MANAGER
);
709 /* Create a virtual mapping for this memory area */
710 MI_SET_USAGE(MI_USAGE_CACHE
);
714 if ((SharedCacheMap
->FileObject
) && (SharedCacheMap
->FileObject
->FileName
.Buffer
))
716 pos
= wcsrchr(SharedCacheMap
->FileObject
->FileName
.Buffer
, '\\');
717 len
= wcslen(pos
) * sizeof(WCHAR
);
718 if (pos
) snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
722 MmMapMemoryArea(current
->BaseAddress
, VACB_MAPPING_GRANULARITY
,
723 MC_CACHE
, PAGE_READWRITE
);
725 return STATUS_SUCCESS
;
731 PROS_SHARED_CACHE_MAP SharedCacheMap
,
739 PROS_VACB Previous
= NULL
;
741 ASSERT(SharedCacheMap
);
743 DPRINT("CcRosGetVacbChain()\n");
745 Length
= ROUND_UP(Length
, VACB_MAPPING_GRANULARITY
);
747 VacbList
= _alloca(sizeof(PROS_VACB
) *
748 (Length
/ VACB_MAPPING_GRANULARITY
));
751 * Look for a VACB already mapping the same data.
753 for (i
= 0; i
< (Length
/ VACB_MAPPING_GRANULARITY
); i
++)
755 ULONG CurrentOffset
= FileOffset
+ (i
* VACB_MAPPING_GRANULARITY
);
756 current
= CcRosLookupVacb(SharedCacheMap
, CurrentOffset
);
759 KeAcquireGuardedMutex(&ViewLock
);
761 /* Move to tail of LRU list */
762 RemoveEntryList(¤t
->VacbLruListEntry
);
763 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
765 KeReleaseGuardedMutex(&ViewLock
);
767 VacbList
[i
] = current
;
771 CcRosCreateVacb(SharedCacheMap
, CurrentOffset
, ¤t
);
772 VacbList
[i
] = current
;
776 for (i
= 0; i
< Length
/ VACB_MAPPING_GRANULARITY
; i
++)
781 Previous
= VacbList
[i
];
785 Previous
->NextInChain
= VacbList
[i
];
786 Previous
= VacbList
[i
];
790 Previous
->NextInChain
= NULL
;
792 return STATUS_SUCCESS
;
798 PROS_SHARED_CACHE_MAP SharedCacheMap
,
800 PULONGLONG BaseOffset
,
808 ASSERT(SharedCacheMap
);
810 DPRINT("CcRosGetVacb()\n");
813 * Look for a VACB already mapping the same data.
815 current
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
819 * Otherwise create a new VACB.
821 Status
= CcRosCreateVacb(SharedCacheMap
, FileOffset
, ¤t
);
822 if (!NT_SUCCESS(Status
))
828 KeAcquireGuardedMutex(&ViewLock
);
830 /* Move to the tail of the LRU list */
831 RemoveEntryList(¤t
->VacbLruListEntry
);
832 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
834 KeReleaseGuardedMutex(&ViewLock
);
837 * Return information about the VACB to the caller.
839 *UptoDate
= current
->Valid
;
840 *BaseAddress
= current
->BaseAddress
;
841 DPRINT("*BaseAddress %p\n", *BaseAddress
);
843 *BaseOffset
= current
->FileOffset
.QuadPart
;
844 return STATUS_SUCCESS
;
850 PROS_SHARED_CACHE_MAP SharedCacheMap
,
856 * FUNCTION: Request a page mapping for a shared cache map
859 ULONGLONG BaseOffset
;
861 ASSERT(SharedCacheMap
);
863 if (FileOffset
% VACB_MAPPING_GRANULARITY
!= 0)
865 DPRINT1("Bad fileoffset %x should be multiple of %x",
866 FileOffset
, VACB_MAPPING_GRANULARITY
);
867 KeBugCheck(CACHE_MANAGER
);
870 return CcRosGetVacb(SharedCacheMap
,
882 MEMORY_AREA
* MemoryArea
,
888 ASSERT(SwapEntry
== 0);
891 ASSERT(MmGetReferenceCountPage(Page
) == 1);
892 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
897 CcRosInternalFreeVacb (
900 * FUNCTION: Releases a VACB associated with a shared cache map
903 DPRINT("Freeing VACB 0x%p\n", Vacb
);
905 if (Vacb
->SharedCacheMap
->Trace
)
907 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb
->SharedCacheMap
, Vacb
);
911 MmLockAddressSpace(MmGetKernelAddressSpace());
912 MmFreeMemoryArea(MmGetKernelAddressSpace(),
916 MmUnlockAddressSpace(MmGetKernelAddressSpace());
918 ExFreeToNPagedLookasideList(&VacbLookasideList
, Vacb
);
919 return STATUS_SUCCESS
;
928 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
929 IN PLARGE_INTEGER FileOffset OPTIONAL
,
931 OUT PIO_STATUS_BLOCK IoStatus
)
933 PROS_SHARED_CACHE_MAP SharedCacheMap
;
934 LARGE_INTEGER Offset
;
939 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
940 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
942 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
944 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
945 ASSERT(SharedCacheMap
);
948 Offset
= *FileOffset
;
952 Offset
.QuadPart
= (LONGLONG
)0;
953 Length
= SharedCacheMap
->FileSize
.u
.LowPart
;
958 IoStatus
->Status
= STATUS_SUCCESS
;
959 IoStatus
->Information
= 0;
964 current
= CcRosLookupVacb(SharedCacheMap
, Offset
.u
.LowPart
);
969 Status
= CcRosFlushVacb(current
);
970 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
972 IoStatus
->Status
= Status
;
975 KeReleaseMutex(¤t
->Mutex
, FALSE
);
977 KeAcquireGuardedMutex(&ViewLock
);
978 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
979 CcRosVacbDecRefCount(current
);
980 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
981 KeReleaseGuardedMutex(&ViewLock
);
984 Offset
.QuadPart
+= VACB_MAPPING_GRANULARITY
;
985 if (Length
> VACB_MAPPING_GRANULARITY
)
987 Length
-= VACB_MAPPING_GRANULARITY
;
999 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1006 CcRosDeleteFileCache (
1007 PFILE_OBJECT FileObject
,
1008 PROS_SHARED_CACHE_MAP SharedCacheMap
)
1010 * FUNCTION: Releases the shared cache map associated with a file object
1013 PLIST_ENTRY current_entry
;
1015 LIST_ENTRY FreeList
;
1018 ASSERT(SharedCacheMap
);
1020 SharedCacheMap
->RefCount
++;
1021 KeReleaseGuardedMutex(&ViewLock
);
1023 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1025 KeAcquireGuardedMutex(&ViewLock
);
1026 SharedCacheMap
->RefCount
--;
1027 if (SharedCacheMap
->RefCount
== 0)
1029 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1034 InitializeListHead(&FreeList
);
1035 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1036 while (!IsListEmpty(&SharedCacheMap
->CacheMapVacbListHead
))
1038 current_entry
= RemoveTailList(&SharedCacheMap
->CacheMapVacbListHead
);
1039 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1040 RemoveEntryList(¤t
->VacbLruListEntry
);
1043 RemoveEntryList(¤t
->DirtyVacbListEntry
);
1044 DirtyPageCount
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
1045 DPRINT1("Freeing dirty VACB\n");
1047 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
1050 SharedCacheMap
->Trace
= FALSE
;
1052 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1054 KeReleaseGuardedMutex(&ViewLock
);
1055 ObDereferenceObject(SharedCacheMap
->FileObject
);
1057 while (!IsListEmpty(&FreeList
))
1059 current_entry
= RemoveTailList(&FreeList
);
1060 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1061 CcRosInternalFreeVacb(current
);
1063 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1064 KeAcquireGuardedMutex(&ViewLock
);
1066 return STATUS_SUCCESS
;
1071 CcRosReferenceCache (
1072 PFILE_OBJECT FileObject
)
1074 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1075 KeAcquireGuardedMutex(&ViewLock
);
1076 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1077 ASSERT(SharedCacheMap
);
1078 ASSERT(SharedCacheMap
->RefCount
!= 0);
1079 SharedCacheMap
->RefCount
++;
1080 KeReleaseGuardedMutex(&ViewLock
);
1085 CcRosRemoveIfClosed (
1086 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1088 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1089 DPRINT("CcRosRemoveIfClosed()\n");
1090 KeAcquireGuardedMutex(&ViewLock
);
1091 SharedCacheMap
= SectionObjectPointer
->SharedCacheMap
;
1092 if (SharedCacheMap
&& SharedCacheMap
->RefCount
== 0)
1094 CcRosDeleteFileCache(SharedCacheMap
->FileObject
, SharedCacheMap
);
1096 KeReleaseGuardedMutex(&ViewLock
);
1102 CcRosDereferenceCache (
1103 PFILE_OBJECT FileObject
)
1105 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1106 KeAcquireGuardedMutex(&ViewLock
);
1107 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1108 ASSERT(SharedCacheMap
);
1109 if (SharedCacheMap
->RefCount
> 0)
1111 SharedCacheMap
->RefCount
--;
1112 if (SharedCacheMap
->RefCount
== 0)
1114 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1115 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1118 KeReleaseGuardedMutex(&ViewLock
);
1123 CcRosReleaseFileCache (
1124 PFILE_OBJECT FileObject
)
1126 * FUNCTION: Called by the file system when a handle to a file object
1130 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1132 KeAcquireGuardedMutex(&ViewLock
);
1134 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1136 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1137 if (FileObject
->PrivateCacheMap
!= NULL
)
1139 FileObject
->PrivateCacheMap
= NULL
;
1140 if (SharedCacheMap
->RefCount
> 0)
1142 SharedCacheMap
->RefCount
--;
1143 if (SharedCacheMap
->RefCount
== 0)
1145 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1146 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1151 KeReleaseGuardedMutex(&ViewLock
);
1152 return STATUS_SUCCESS
;
1157 CcTryToInitializeFileCache (
1158 PFILE_OBJECT FileObject
)
1160 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1163 KeAcquireGuardedMutex(&ViewLock
);
1165 ASSERT(FileObject
->SectionObjectPointer
);
1166 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1167 if (SharedCacheMap
== NULL
)
1169 Status
= STATUS_UNSUCCESSFUL
;
1173 if (FileObject
->PrivateCacheMap
== NULL
)
1175 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1176 SharedCacheMap
->RefCount
++;
1178 Status
= STATUS_SUCCESS
;
1180 KeReleaseGuardedMutex(&ViewLock
);
1188 CcRosInitializeFileCache (
1189 PFILE_OBJECT FileObject
,
1190 PCACHE_MANAGER_CALLBACKS CallBacks
,
1191 PVOID LazyWriterContext
)
1193 * FUNCTION: Initializes a shared cache map for a file object
1196 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1198 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1199 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1200 FileObject
, SharedCacheMap
);
1202 KeAcquireGuardedMutex(&ViewLock
);
1203 if (SharedCacheMap
== NULL
)
1205 SharedCacheMap
= ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList
);
1206 if (SharedCacheMap
== NULL
)
1208 KeReleaseGuardedMutex(&ViewLock
);
1209 return STATUS_UNSUCCESSFUL
;
1211 RtlZeroMemory(SharedCacheMap
, sizeof(*SharedCacheMap
));
1212 ObReferenceObjectByPointer(FileObject
,
1216 SharedCacheMap
->FileObject
= FileObject
;
1217 SharedCacheMap
->Callbacks
= CallBacks
;
1218 SharedCacheMap
->LazyWriteContext
= LazyWriterContext
;
1219 if (FileObject
->FsContext
)
1221 SharedCacheMap
->SectionSize
=
1222 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->AllocationSize
;
1223 SharedCacheMap
->FileSize
=
1224 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->FileSize
;
1226 KeInitializeSpinLock(&SharedCacheMap
->CacheMapLock
);
1227 InitializeListHead(&SharedCacheMap
->CacheMapVacbListHead
);
1228 FileObject
->SectionObjectPointer
->SharedCacheMap
= SharedCacheMap
;
1230 if (FileObject
->PrivateCacheMap
== NULL
)
1232 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1233 SharedCacheMap
->RefCount
++;
1235 KeReleaseGuardedMutex(&ViewLock
);
1237 return STATUS_SUCCESS
;
1245 CcGetFileObjectFromSectionPtrs (
1246 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1248 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1249 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1251 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1252 ASSERT(SharedCacheMap
);
1253 return SharedCacheMap
->FileObject
;
1264 DPRINT("CcInitView()\n");
1266 InitializeListHead(&DirtyVacbListHead
);
1267 InitializeListHead(&VacbLruListHead
);
1268 KeInitializeGuardedMutex(&ViewLock
);
1269 ExInitializeNPagedLookasideList(&iBcbLookasideList
,
1273 sizeof(INTERNAL_BCB
),
1276 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList
,
1280 sizeof(ROS_SHARED_CACHE_MAP
),
1281 TAG_SHARED_CACHE_MAP
,
1283 ExInitializeNPagedLookasideList(&VacbLookasideList
,
1291 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1293 CcInitCacheZeroPage();