2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
10 /* NOTES **********************************************************************
12 * This is not the NT implementation of a file cache nor anything much like
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 * (4) Copy the data into or out of the page as necessary.
30 * (5) Release the cache page
32 /* INCLUDES ******************************************************************/
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
42 /* GLOBALS *******************************************************************/
44 static LIST_ENTRY DirtyVacbListHead
;
45 static LIST_ENTRY VacbLruListHead
;
46 ULONG DirtyPageCount
= 0;
48 KGUARDED_MUTEX ViewLock
;
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList
;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList
;
55 static void CcRosVacbIncRefCount_(PROS_VACB vacb
, const char* file
, int line
)
57 ++vacb
->ReferenceCount
;
58 if (vacb
->SharedCacheMap
->Trace
)
60 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
61 file
, line
, vacb
, vacb
->ReferenceCount
, vacb
->Dirty
, vacb
->PageOut
);
64 static void CcRosVacbDecRefCount_(PROS_VACB vacb
, const char* file
, int line
)
66 --vacb
->ReferenceCount
;
67 if (vacb
->SharedCacheMap
->Trace
)
69 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
70 file
, line
, vacb
, vacb
->ReferenceCount
, vacb
->Dirty
, vacb
->PageOut
);
73 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
74 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
76 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
77 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
81 CcRosInternalFreeVacb(PROS_VACB Vacb
);
84 /* FUNCTIONS *****************************************************************/
89 PROS_SHARED_CACHE_MAP SharedCacheMap
,
94 PLIST_ENTRY current_entry
;
100 SharedCacheMap
->Trace
= Trace
;
104 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
106 KeAcquireGuardedMutex(&ViewLock
);
107 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldirql
);
109 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
110 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
112 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
113 current_entry
= current_entry
->Flink
;
115 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
116 current
, current
->ReferenceCount
, current
->Dirty
, current
->PageOut
);
118 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldirql
);
119 KeReleaseGuardedMutex(&ViewLock
);
123 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap
);
127 UNREFERENCED_PARAMETER(SharedCacheMap
);
128 UNREFERENCED_PARAMETER(Trace
);
140 Status
= CcWriteVirtualAddress(Vacb
);
141 if (NT_SUCCESS(Status
))
143 KeAcquireGuardedMutex(&ViewLock
);
144 KeAcquireSpinLock(&Vacb
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
147 RemoveEntryList(&Vacb
->DirtyVacbListEntry
);
148 DirtyPageCount
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
149 CcRosVacbDecRefCount(Vacb
);
151 KeReleaseSpinLock(&Vacb
->SharedCacheMap
->CacheMapLock
, oldIrql
);
152 KeReleaseGuardedMutex(&ViewLock
);
160 CcRosFlushDirtyPages (
165 PLIST_ENTRY current_entry
;
169 LARGE_INTEGER ZeroTimeout
;
171 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
174 ZeroTimeout
.QuadPart
= 0;
176 KeEnterCriticalRegion();
177 KeAcquireGuardedMutex(&ViewLock
);
179 current_entry
= DirtyVacbListHead
.Flink
;
180 if (current_entry
== &DirtyVacbListHead
)
182 DPRINT("No Dirty pages\n");
185 while ((current_entry
!= &DirtyVacbListHead
) && (Target
> 0))
187 current
= CONTAINING_RECORD(current_entry
,
190 current_entry
= current_entry
->Flink
;
192 CcRosVacbIncRefCount(current
);
194 Locked
= current
->SharedCacheMap
->Callbacks
->AcquireForLazyWrite(
195 current
->SharedCacheMap
->LazyWriteContext
, Wait
);
198 CcRosVacbDecRefCount(current
);
202 Status
= CcRosAcquireVacbLock(current
,
203 Wait
? NULL
: &ZeroTimeout
);
204 if (Status
!= STATUS_SUCCESS
)
206 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
207 current
->SharedCacheMap
->LazyWriteContext
);
208 CcRosVacbDecRefCount(current
);
212 ASSERT(current
->Dirty
);
214 /* One reference is added above */
215 if (current
->ReferenceCount
> 2)
217 CcRosReleaseVacbLock(current
);
218 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
219 current
->SharedCacheMap
->LazyWriteContext
);
220 CcRosVacbDecRefCount(current
);
224 KeReleaseGuardedMutex(&ViewLock
);
226 Status
= CcRosFlushVacb(current
);
228 CcRosReleaseVacbLock(current
);
229 current
->SharedCacheMap
->Callbacks
->ReleaseFromLazyWrite(
230 current
->SharedCacheMap
->LazyWriteContext
);
232 KeAcquireGuardedMutex(&ViewLock
);
233 CcRosVacbDecRefCount(current
);
235 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
) &&
236 (Status
!= STATUS_MEDIA_WRITE_PROTECTED
))
238 DPRINT1("CC: Failed to flush VACB.\n");
242 (*Count
) += VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
243 Target
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
246 current_entry
= DirtyVacbListHead
.Flink
;
249 KeReleaseGuardedMutex(&ViewLock
);
250 KeLeaveCriticalRegion();
252 DPRINT("CcRosFlushDirtyPages() finished\n");
253 return STATUS_SUCCESS
;
262 * FUNCTION: Try to free some memory from the file cache.
264 * Target - The number of pages to be freed.
265 * Priority - The priority of free (currently unused).
266 * NrFreed - Points to a variable where the number of pages
267 * actually freed is returned.
270 PLIST_ENTRY current_entry
;
277 BOOLEAN FlushedPages
= FALSE
;
279 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
281 InitializeListHead(&FreeList
);
286 KeAcquireGuardedMutex(&ViewLock
);
288 current_entry
= VacbLruListHead
.Flink
;
289 while (current_entry
!= &VacbLruListHead
)
291 current
= CONTAINING_RECORD(current_entry
,
294 current_entry
= current_entry
->Flink
;
296 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
298 /* Reference the VACB */
299 CcRosVacbIncRefCount(current
);
301 /* Check if it's mapped and not dirty */
302 if (current
->MappedCount
> 0 && !current
->Dirty
)
304 /* We have to break these locks because Cc sucks */
305 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
306 KeReleaseGuardedMutex(&ViewLock
);
308 /* Page out the VACB */
309 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++)
311 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
313 MmPageOutPhysicalAddress(Page
);
316 /* Reacquire the locks */
317 KeAcquireGuardedMutex(&ViewLock
);
318 KeAcquireSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, &oldIrql
);
321 /* Dereference the VACB */
322 CcRosVacbDecRefCount(current
);
324 /* Check if we can free this entry now */
325 if (current
->ReferenceCount
== 0)
327 ASSERT(!current
->Dirty
);
328 ASSERT(!current
->MappedCount
);
330 RemoveEntryList(¤t
->CacheMapVacbListEntry
);
331 RemoveEntryList(¤t
->VacbLruListEntry
);
332 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
334 /* Calculate how many pages we freed for Mm */
335 PagesFreed
= min(VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
, Target
);
336 Target
-= PagesFreed
;
337 (*NrFreed
) += PagesFreed
;
340 KeReleaseSpinLock(¤t
->SharedCacheMap
->CacheMapLock
, oldIrql
);
343 KeReleaseGuardedMutex(&ViewLock
);
345 /* Try flushing pages if we haven't met our target */
346 if ((Target
> 0) && !FlushedPages
)
348 /* Flush dirty pages to disk */
349 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
);
352 /* We can only swap as many pages as we flushed */
353 if (PagesFreed
< Target
) Target
= PagesFreed
;
355 /* Check if we flushed anything */
358 /* Try again after flushing dirty pages */
359 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
364 while (!IsListEmpty(&FreeList
))
366 current_entry
= RemoveHeadList(&FreeList
);
367 current
= CONTAINING_RECORD(current_entry
,
369 CacheMapVacbListEntry
);
370 CcRosInternalFreeVacb(current
);
373 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
375 return STATUS_SUCCESS
;
381 PROS_SHARED_CACHE_MAP SharedCacheMap
,
390 ASSERT(SharedCacheMap
);
392 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
393 SharedCacheMap
, Vacb
, Valid
);
395 KeAcquireGuardedMutex(&ViewLock
);
396 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
400 WasDirty
= Vacb
->Dirty
;
401 Vacb
->Dirty
= Vacb
->Dirty
|| Dirty
;
403 if (!WasDirty
&& Vacb
->Dirty
)
405 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
406 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
413 CcRosVacbDecRefCount(Vacb
);
414 if (Mapped
&& (Vacb
->MappedCount
== 1))
416 CcRosVacbIncRefCount(Vacb
);
418 if (!WasDirty
&& Vacb
->Dirty
)
420 CcRosVacbIncRefCount(Vacb
);
423 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
424 KeReleaseGuardedMutex(&ViewLock
);
425 if (InterlockedCompareExchange(&Vacb
->PinCount
, 0, 0) == 0)
427 CcRosReleaseVacbLock(Vacb
);
430 return STATUS_SUCCESS
;
433 /* Returns with VACB Lock Held! */
437 PROS_SHARED_CACHE_MAP SharedCacheMap
,
440 PLIST_ENTRY current_entry
;
444 ASSERT(SharedCacheMap
);
446 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
447 SharedCacheMap
, FileOffset
);
449 KeAcquireGuardedMutex(&ViewLock
);
450 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
452 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
453 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
455 current
= CONTAINING_RECORD(current_entry
,
457 CacheMapVacbListEntry
);
458 if (IsPointInRange(current
->FileOffset
.QuadPart
,
459 VACB_MAPPING_GRANULARITY
,
462 CcRosVacbIncRefCount(current
);
463 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
464 KeReleaseGuardedMutex(&ViewLock
);
465 if (InterlockedCompareExchange(¤t
->PinCount
, 0, 0) == 0)
467 CcRosAcquireVacbLock(current
, NULL
);
471 if (current
->FileOffset
.QuadPart
> FileOffset
)
473 current_entry
= current_entry
->Flink
;
476 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
477 KeReleaseGuardedMutex(&ViewLock
);
485 PROS_SHARED_CACHE_MAP SharedCacheMap
,
491 ASSERT(SharedCacheMap
);
493 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
494 SharedCacheMap
, FileOffset
);
496 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
499 KeBugCheck(CACHE_MANAGER
);
502 KeAcquireGuardedMutex(&ViewLock
);
503 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
507 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
508 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
512 CcRosVacbDecRefCount(Vacb
);
515 /* Move to the tail of the LRU list */
516 RemoveEntryList(&Vacb
->VacbLruListEntry
);
517 InsertTailList(&VacbLruListHead
, &Vacb
->VacbLruListEntry
);
521 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
522 KeReleaseGuardedMutex(&ViewLock
);
523 CcRosReleaseVacbLock(Vacb
);
525 return STATUS_SUCCESS
;
531 PROS_SHARED_CACHE_MAP SharedCacheMap
,
539 ASSERT(SharedCacheMap
);
541 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
542 SharedCacheMap
, FileOffset
, NowDirty
);
544 Vacb
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
547 return STATUS_UNSUCCESSFUL
;
550 KeAcquireGuardedMutex(&ViewLock
);
551 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
553 WasDirty
= Vacb
->Dirty
;
554 Vacb
->Dirty
= Vacb
->Dirty
|| NowDirty
;
558 if (!WasDirty
&& NowDirty
)
560 InsertTailList(&DirtyVacbListHead
, &Vacb
->DirtyVacbListEntry
);
561 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
564 CcRosVacbDecRefCount(Vacb
);
565 if (!WasDirty
&& NowDirty
)
567 CcRosVacbIncRefCount(Vacb
);
569 if (Vacb
->MappedCount
== 0)
571 CcRosVacbDecRefCount(Vacb
);
574 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
575 KeReleaseGuardedMutex(&ViewLock
);
576 CcRosReleaseVacbLock(Vacb
);
578 return STATUS_SUCCESS
;
588 ULONG_PTR NumberOfPages
;
590 /* Create a memory area. */
591 MmLockAddressSpace(MmGetKernelAddressSpace());
592 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
593 0, // nothing checks for VACB mareas, so set to 0
595 VACB_MAPPING_GRANULARITY
,
597 (PMEMORY_AREA
*)&Vacb
->MemoryArea
,
600 MmUnlockAddressSpace(MmGetKernelAddressSpace());
601 if (!NT_SUCCESS(Status
))
603 KeBugCheck(CACHE_MANAGER
);
606 ASSERT(((ULONG_PTR
)Vacb
->BaseAddress
% PAGE_SIZE
) == 0);
607 ASSERT((ULONG_PTR
)Vacb
->BaseAddress
> (ULONG_PTR
)MmSystemRangeStart
);
609 /* Create a virtual mapping for this memory area */
610 NumberOfPages
= BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY
);
611 for (i
= 0; i
< NumberOfPages
; i
++)
613 PFN_NUMBER PageFrameNumber
;
615 Status
= MmRequestPageMemoryConsumer(MC_CACHE
, TRUE
, &PageFrameNumber
);
616 if (PageFrameNumber
== 0)
618 DPRINT1("Unable to allocate page\n");
619 KeBugCheck(MEMORY_MANAGEMENT
);
622 Status
= MmCreateVirtualMapping(NULL
,
623 (PVOID
)((ULONG_PTR
)Vacb
->BaseAddress
+ (i
* PAGE_SIZE
)),
627 if (!NT_SUCCESS(Status
))
629 DPRINT1("Unable to create virtual mapping\n");
630 KeBugCheck(MEMORY_MANAGEMENT
);
634 return STATUS_SUCCESS
;
640 PROS_SHARED_CACHE_MAP SharedCacheMap
,
646 PLIST_ENTRY current_entry
;
650 ASSERT(SharedCacheMap
);
652 DPRINT("CcRosCreateVacb()\n");
654 if (FileOffset
>= SharedCacheMap
->FileSize
.QuadPart
)
657 return STATUS_INVALID_PARAMETER
;
660 current
= ExAllocateFromNPagedLookasideList(&VacbLookasideList
);
661 current
->BaseAddress
= NULL
;
662 current
->Valid
= FALSE
;
663 current
->Dirty
= FALSE
;
664 current
->PageOut
= FALSE
;
665 current
->FileOffset
.QuadPart
= ROUND_DOWN(FileOffset
, VACB_MAPPING_GRANULARITY
);
666 current
->SharedCacheMap
= SharedCacheMap
;
668 if (SharedCacheMap
->Trace
)
670 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap
, current
);
673 current
->MappedCount
= 0;
674 current
->DirtyVacbListEntry
.Flink
= NULL
;
675 current
->DirtyVacbListEntry
.Blink
= NULL
;
676 current
->ReferenceCount
= 1;
677 current
->PinCount
= 0;
678 KeInitializeMutex(¤t
->Mutex
, 0);
679 CcRosAcquireVacbLock(current
, NULL
);
680 KeAcquireGuardedMutex(&ViewLock
);
683 /* There is window between the call to CcRosLookupVacb
684 * and CcRosCreateVacb. We must check if a VACB for the
685 * file offset exist. If there is a VACB, we release
686 * our newly created VACB and return the existing one.
688 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
689 current_entry
= SharedCacheMap
->CacheMapVacbListHead
.Flink
;
691 while (current_entry
!= &SharedCacheMap
->CacheMapVacbListHead
)
693 current
= CONTAINING_RECORD(current_entry
,
695 CacheMapVacbListEntry
);
696 if (IsPointInRange(current
->FileOffset
.QuadPart
,
697 VACB_MAPPING_GRANULARITY
,
700 CcRosVacbIncRefCount(current
);
701 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
703 if (SharedCacheMap
->Trace
)
705 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
711 CcRosReleaseVacbLock(*Vacb
);
712 KeReleaseGuardedMutex(&ViewLock
);
713 ExFreeToNPagedLookasideList(&VacbLookasideList
, *Vacb
);
715 if (InterlockedCompareExchange(¤t
->PinCount
, 0, 0) == 0)
717 CcRosAcquireVacbLock(current
, NULL
);
719 return STATUS_SUCCESS
;
721 if (current
->FileOffset
.QuadPart
< FileOffset
)
723 ASSERT(previous
== NULL
||
724 previous
->FileOffset
.QuadPart
< current
->FileOffset
.QuadPart
);
727 if (current
->FileOffset
.QuadPart
> FileOffset
)
729 current_entry
= current_entry
->Flink
;
731 /* There was no existing VACB. */
735 InsertHeadList(&previous
->CacheMapVacbListEntry
, ¤t
->CacheMapVacbListEntry
);
739 InsertHeadList(&SharedCacheMap
->CacheMapVacbListHead
, ¤t
->CacheMapVacbListEntry
);
741 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
742 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
743 KeReleaseGuardedMutex(&ViewLock
);
745 MI_SET_USAGE(MI_USAGE_CACHE
);
747 if ((SharedCacheMap
->FileObject
) && (SharedCacheMap
->FileObject
->FileName
.Buffer
))
751 pos
= wcsrchr(SharedCacheMap
->FileObject
->FileName
.Buffer
, '\\');
752 len
= wcslen(pos
) * sizeof(WCHAR
);
753 if (pos
) snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
757 Status
= CcRosMapVacb(current
);
765 PROS_SHARED_CACHE_MAP SharedCacheMap
,
767 PLONGLONG BaseOffset
,
775 ASSERT(SharedCacheMap
);
777 DPRINT("CcRosGetVacb()\n");
780 * Look for a VACB already mapping the same data.
782 current
= CcRosLookupVacb(SharedCacheMap
, FileOffset
);
786 * Otherwise create a new VACB.
788 Status
= CcRosCreateVacb(SharedCacheMap
, FileOffset
, ¤t
);
789 if (!NT_SUCCESS(Status
))
795 KeAcquireGuardedMutex(&ViewLock
);
797 /* Move to the tail of the LRU list */
798 RemoveEntryList(¤t
->VacbLruListEntry
);
799 InsertTailList(&VacbLruListHead
, ¤t
->VacbLruListEntry
);
801 KeReleaseGuardedMutex(&ViewLock
);
804 * Return information about the VACB to the caller.
806 *UptoDate
= current
->Valid
;
807 *BaseAddress
= current
->BaseAddress
;
808 DPRINT("*BaseAddress %p\n", *BaseAddress
);
810 *BaseOffset
= current
->FileOffset
.QuadPart
;
811 return STATUS_SUCCESS
;
817 PROS_SHARED_CACHE_MAP SharedCacheMap
,
823 * FUNCTION: Request a page mapping for a shared cache map
828 ASSERT(SharedCacheMap
);
830 if (FileOffset
% VACB_MAPPING_GRANULARITY
!= 0)
832 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
833 FileOffset
, VACB_MAPPING_GRANULARITY
);
834 KeBugCheck(CACHE_MANAGER
);
837 return CcRosGetVacb(SharedCacheMap
,
849 MEMORY_AREA
* MemoryArea
,
855 ASSERT(SwapEntry
== 0);
858 ASSERT(MmGetReferenceCountPage(Page
) == 1);
859 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
864 CcRosInternalFreeVacb (
867 * FUNCTION: Releases a VACB associated with a shared cache map
870 DPRINT("Freeing VACB 0x%p\n", Vacb
);
872 if (Vacb
->SharedCacheMap
->Trace
)
874 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb
->SharedCacheMap
, Vacb
);
878 MmLockAddressSpace(MmGetKernelAddressSpace());
879 MmFreeMemoryArea(MmGetKernelAddressSpace(),
883 MmUnlockAddressSpace(MmGetKernelAddressSpace());
885 ExFreeToNPagedLookasideList(&VacbLookasideList
, Vacb
);
886 return STATUS_SUCCESS
;
895 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
896 IN PLARGE_INTEGER FileOffset OPTIONAL
,
898 OUT PIO_STATUS_BLOCK IoStatus
)
900 PROS_SHARED_CACHE_MAP SharedCacheMap
;
901 LARGE_INTEGER Offset
;
902 LONGLONG RemainingLength
;
907 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
908 SectionObjectPointers
, FileOffset
, Length
);
910 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
911 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
913 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
915 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
916 ASSERT(SharedCacheMap
);
919 Offset
= *FileOffset
;
920 RemainingLength
= Length
;
925 RemainingLength
= SharedCacheMap
->FileSize
.QuadPart
;
930 IoStatus
->Status
= STATUS_SUCCESS
;
931 IoStatus
->Information
= 0;
934 while (RemainingLength
> 0)
936 current
= CcRosLookupVacb(SharedCacheMap
, Offset
.QuadPart
);
941 Status
= CcRosFlushVacb(current
);
942 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
944 IoStatus
->Status
= Status
;
948 if (InterlockedCompareExchange(¤t
->PinCount
, 0, 0) == 0)
950 CcRosReleaseVacbLock(current
);
953 KeAcquireGuardedMutex(&ViewLock
);
954 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
955 CcRosVacbDecRefCount(current
);
956 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
957 KeReleaseGuardedMutex(&ViewLock
);
960 Offset
.QuadPart
+= VACB_MAPPING_GRANULARITY
;
961 RemainingLength
-= min(RemainingLength
, VACB_MAPPING_GRANULARITY
);
968 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
975 CcRosDeleteFileCache (
976 PFILE_OBJECT FileObject
,
977 PROS_SHARED_CACHE_MAP SharedCacheMap
)
979 * FUNCTION: Releases the shared cache map associated with a file object
982 PLIST_ENTRY current_entry
;
987 ASSERT(SharedCacheMap
);
989 SharedCacheMap
->RefCount
++;
990 KeReleaseGuardedMutex(&ViewLock
);
992 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
994 KeAcquireGuardedMutex(&ViewLock
);
995 SharedCacheMap
->RefCount
--;
996 if (SharedCacheMap
->RefCount
== 0)
998 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1003 InitializeListHead(&FreeList
);
1004 KeAcquireSpinLock(&SharedCacheMap
->CacheMapLock
, &oldIrql
);
1005 while (!IsListEmpty(&SharedCacheMap
->CacheMapVacbListHead
))
1007 current_entry
= RemoveTailList(&SharedCacheMap
->CacheMapVacbListHead
);
1008 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1009 RemoveEntryList(¤t
->VacbLruListEntry
);
1012 RemoveEntryList(¤t
->DirtyVacbListEntry
);
1013 DirtyPageCount
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
1014 DPRINT1("Freeing dirty VACB\n");
1016 InsertHeadList(&FreeList
, ¤t
->CacheMapVacbListEntry
);
1019 SharedCacheMap
->Trace
= FALSE
;
1021 KeReleaseSpinLock(&SharedCacheMap
->CacheMapLock
, oldIrql
);
1023 KeReleaseGuardedMutex(&ViewLock
);
1024 ObDereferenceObject(SharedCacheMap
->FileObject
);
1026 while (!IsListEmpty(&FreeList
))
1028 current_entry
= RemoveTailList(&FreeList
);
1029 current
= CONTAINING_RECORD(current_entry
, ROS_VACB
, CacheMapVacbListEntry
);
1030 CcRosInternalFreeVacb(current
);
1032 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList
, SharedCacheMap
);
1033 KeAcquireGuardedMutex(&ViewLock
);
1035 return STATUS_SUCCESS
;
1040 CcRosReferenceCache (
1041 PFILE_OBJECT FileObject
)
1043 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1044 KeAcquireGuardedMutex(&ViewLock
);
1045 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1046 ASSERT(SharedCacheMap
);
1047 ASSERT(SharedCacheMap
->RefCount
!= 0);
1048 SharedCacheMap
->RefCount
++;
1049 KeReleaseGuardedMutex(&ViewLock
);
1054 CcRosRemoveIfClosed (
1055 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1057 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1058 DPRINT("CcRosRemoveIfClosed()\n");
1059 KeAcquireGuardedMutex(&ViewLock
);
1060 SharedCacheMap
= SectionObjectPointer
->SharedCacheMap
;
1061 if (SharedCacheMap
&& SharedCacheMap
->RefCount
== 0)
1063 CcRosDeleteFileCache(SharedCacheMap
->FileObject
, SharedCacheMap
);
1065 KeReleaseGuardedMutex(&ViewLock
);
1071 CcRosDereferenceCache (
1072 PFILE_OBJECT FileObject
)
1074 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1075 KeAcquireGuardedMutex(&ViewLock
);
1076 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1077 ASSERT(SharedCacheMap
);
1078 if (SharedCacheMap
->RefCount
> 0)
1080 SharedCacheMap
->RefCount
--;
1081 if (SharedCacheMap
->RefCount
== 0)
1083 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1084 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1087 KeReleaseGuardedMutex(&ViewLock
);
1092 CcRosReleaseFileCache (
1093 PFILE_OBJECT FileObject
)
1095 * FUNCTION: Called by the file system when a handle to a file object
1099 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1101 KeAcquireGuardedMutex(&ViewLock
);
1103 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1105 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1106 if (FileObject
->PrivateCacheMap
!= NULL
)
1108 FileObject
->PrivateCacheMap
= NULL
;
1109 if (SharedCacheMap
->RefCount
> 0)
1111 SharedCacheMap
->RefCount
--;
1112 if (SharedCacheMap
->RefCount
== 0)
1114 MmFreeSectionSegments(SharedCacheMap
->FileObject
);
1115 CcRosDeleteFileCache(FileObject
, SharedCacheMap
);
1120 KeReleaseGuardedMutex(&ViewLock
);
1121 return STATUS_SUCCESS
;
1126 CcTryToInitializeFileCache (
1127 PFILE_OBJECT FileObject
)
1129 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1132 KeAcquireGuardedMutex(&ViewLock
);
1134 ASSERT(FileObject
->SectionObjectPointer
);
1135 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1136 if (SharedCacheMap
== NULL
)
1138 Status
= STATUS_UNSUCCESSFUL
;
1142 if (FileObject
->PrivateCacheMap
== NULL
)
1144 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1145 SharedCacheMap
->RefCount
++;
1147 Status
= STATUS_SUCCESS
;
1149 KeReleaseGuardedMutex(&ViewLock
);
1157 CcRosInitializeFileCache (
1158 PFILE_OBJECT FileObject
,
1159 PCC_FILE_SIZES FileSizes
,
1161 PCACHE_MANAGER_CALLBACKS CallBacks
,
1162 PVOID LazyWriterContext
)
1164 * FUNCTION: Initializes a shared cache map for a file object
1167 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1169 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1170 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1171 FileObject
, SharedCacheMap
);
1173 KeAcquireGuardedMutex(&ViewLock
);
1174 if (SharedCacheMap
== NULL
)
1176 SharedCacheMap
= ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList
);
1177 if (SharedCacheMap
== NULL
)
1179 KeReleaseGuardedMutex(&ViewLock
);
1180 return STATUS_INSUFFICIENT_RESOURCES
;
1182 RtlZeroMemory(SharedCacheMap
, sizeof(*SharedCacheMap
));
1183 ObReferenceObjectByPointer(FileObject
,
1187 SharedCacheMap
->FileObject
= FileObject
;
1188 SharedCacheMap
->Callbacks
= CallBacks
;
1189 SharedCacheMap
->LazyWriteContext
= LazyWriterContext
;
1190 SharedCacheMap
->SectionSize
= FileSizes
->AllocationSize
;
1191 SharedCacheMap
->FileSize
= FileSizes
->FileSize
;
1192 SharedCacheMap
->PinAccess
= PinAccess
;
1193 KeInitializeSpinLock(&SharedCacheMap
->CacheMapLock
);
1194 InitializeListHead(&SharedCacheMap
->CacheMapVacbListHead
);
1195 FileObject
->SectionObjectPointer
->SharedCacheMap
= SharedCacheMap
;
1197 if (FileObject
->PrivateCacheMap
== NULL
)
1199 FileObject
->PrivateCacheMap
= SharedCacheMap
;
1200 SharedCacheMap
->RefCount
++;
1202 KeReleaseGuardedMutex(&ViewLock
);
1204 return STATUS_SUCCESS
;
1212 CcGetFileObjectFromSectionPtrs (
1213 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1215 PROS_SHARED_CACHE_MAP SharedCacheMap
;
1217 CCTRACE(CC_API_DEBUG
, "SectionObjectPointers=%p\n", SectionObjectPointers
);
1219 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1221 SharedCacheMap
= SectionObjectPointers
->SharedCacheMap
;
1222 ASSERT(SharedCacheMap
);
1223 return SharedCacheMap
->FileObject
;
1234 DPRINT("CcInitView()\n");
1236 InitializeListHead(&DirtyVacbListHead
);
1237 InitializeListHead(&VacbLruListHead
);
1238 KeInitializeGuardedMutex(&ViewLock
);
1239 ExInitializeNPagedLookasideList(&iBcbLookasideList
,
1243 sizeof(INTERNAL_BCB
),
1246 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList
,
1250 sizeof(ROS_SHARED_CACHE_MAP
),
1251 TAG_SHARED_CACHE_MAP
,
1253 ExInitializeNPagedLookasideList(&VacbLookasideList
,
1261 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1263 CcInitCacheZeroPage();