/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 */
/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows
 *
 * (1) If caching for the FCB hasn't been initiated then so do by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page
 */
32 /* INCLUDES ******************************************************************/
/* Place CcInitView in the INIT (discardable) section when the compiler
 * supports alloc_text.  NOTE(review): the closing #endif was missing in the
 * corrupted source and has been restored. */
#if defined (ALLOC_PRAGMA)
#pragma alloc_text(INIT, CcInitView)
#endif
42 /* GLOBALS *******************************************************************/
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocate/deallocate space from this block
47 * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
50 //#define CACHE_BITMAP
52 static LIST_ENTRY DirtySegmentListHead
;
53 static LIST_ENTRY CacheSegmentListHead
;
54 static LIST_ENTRY CacheSegmentLRUListHead
;
55 static LIST_ENTRY ClosedListHead
;
56 ULONG DirtyPageCount
= 0;
58 KGUARDED_MUTEX ViewLock
;
61 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
63 static PVOID CiCacheSegMappingRegionBase
= NULL
;
64 static RTL_BITMAP CiCacheSegMappingRegionAllocMap
;
65 static ULONG CiCacheSegMappingRegionHint
;
66 static KSPIN_LOCK CiCacheSegMappingRegionLock
;
69 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
70 static NPAGED_LOOKASIDE_LIST BcbLookasideList
;
71 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList
;
/* Reference-count helpers.  The DBG variants log every transition when the
 * owning BCB has tracing enabled; the free build uses plain inc/dec macros.
 * NOTE(review): the #if DBG / #else / #endif structure was missing from the
 * corrupted source and has been restored — verify against the repository. */
#if DBG
static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    ++cs->ReferenceCount;
    if ( cs->Bcb->Trace )
    {
        DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}
static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    --cs->ReferenceCount;
    if ( cs->Bcb->Trace )
    {
        DbgPrint("(%s:%i) CacheSegment %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}
#define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
#define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
#else
#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
#endif
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg
);
103 /* FUNCTIONS *****************************************************************/
#if DBG
/* Enable or disable per-segment trace logging for one cache map (BCB).
 * Walks the BCB's segment list under ViewLock + BcbLock and dumps each
 * segment's state when enabling.  Debug builds only.
 * NOTE(review): signature and brace structure reconstructed — the corrupted
 * source had dropped them; verify against the repository. */
VOID
NTAPI
CcRosTraceCacheMap (
    PBCB Bcb,
    BOOLEAN Trace )
{
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;

    if ( !Bcb )
        return;

    Bcb->Trace = Trace;

    if ( Trace )
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );

        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);

        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (current_entry != &Bcb->BcbSegmentListHead)
        {
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  CacheSegment 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
    }
}
#endif /* DBG */
153 CcRosFlushCacheSegment (
154 PCACHE_SEGMENT CacheSegment
)
159 Status
= WriteCacheSegment(CacheSegment
);
160 if (NT_SUCCESS(Status
))
162 KeAcquireGuardedMutex(&ViewLock
);
163 KeAcquireSpinLock(&CacheSegment
->Bcb
->BcbLock
, &oldIrql
);
165 CacheSegment
->Dirty
= FALSE
;
166 RemoveEntryList(&CacheSegment
->DirtySegmentListEntry
);
167 DirtyPageCount
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
168 CcRosCacheSegmentDecRefCount(CacheSegment
);
170 KeReleaseSpinLock(&CacheSegment
->Bcb
->BcbLock
, oldIrql
);
171 KeReleaseGuardedMutex(&ViewLock
);
179 CcRosFlushDirtyPages (
184 PLIST_ENTRY current_entry
;
185 PCACHE_SEGMENT current
;
186 ULONG PagesPerSegment
;
189 LARGE_INTEGER ZeroTimeout
;
191 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
194 ZeroTimeout
.QuadPart
= 0;
196 KeEnterCriticalRegion();
197 KeAcquireGuardedMutex(&ViewLock
);
199 current_entry
= DirtySegmentListHead
.Flink
;
200 if (current_entry
== &DirtySegmentListHead
)
202 DPRINT("No Dirty pages\n");
205 while ((current_entry
!= &DirtySegmentListHead
) && (Target
> 0))
207 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
208 DirtySegmentListEntry
);
209 current_entry
= current_entry
->Flink
;
211 CcRosCacheSegmentIncRefCount(current
);
213 Locked
= current
->Bcb
->Callbacks
->AcquireForLazyWrite(
214 current
->Bcb
->LazyWriteContext
, Wait
);
217 CcRosCacheSegmentDecRefCount(current
);
221 Status
= KeWaitForSingleObject(¤t
->Mutex
,
225 Wait
? NULL
: &ZeroTimeout
);
226 if (Status
!= STATUS_SUCCESS
)
228 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
229 current
->Bcb
->LazyWriteContext
);
230 CcRosCacheSegmentDecRefCount(current
);
234 ASSERT(current
->Dirty
);
236 /* One reference is added above */
237 if (current
->ReferenceCount
> 2)
239 KeReleaseMutex(¤t
->Mutex
, FALSE
);
240 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
241 current
->Bcb
->LazyWriteContext
);
242 CcRosCacheSegmentDecRefCount(current
);
246 PagesPerSegment
= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
248 KeReleaseGuardedMutex(&ViewLock
);
250 Status
= CcRosFlushCacheSegment(current
);
252 KeReleaseMutex(¤t
->Mutex
, FALSE
);
253 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
254 current
->Bcb
->LazyWriteContext
);
256 KeAcquireGuardedMutex(&ViewLock
);
257 CcRosCacheSegmentDecRefCount(current
);
259 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
261 DPRINT1("CC: Failed to flush cache segment.\n");
265 (*Count
) += PagesPerSegment
;
266 Target
-= PagesPerSegment
;
269 current_entry
= DirtySegmentListHead
.Flink
;
272 KeReleaseGuardedMutex(&ViewLock
);
273 KeLeaveCriticalRegion();
275 DPRINT("CcRosFlushDirtyPages() finished\n");
276 return STATUS_SUCCESS
;
285 * FUNCTION: Try to free some memory from the file cache.
287 * Target - The number of pages to be freed.
288 * Priority - The priority of free (currently unused).
289 * NrFreed - Points to a variable where the number of pages
290 * actually freed is returned.
293 PLIST_ENTRY current_entry
;
294 PCACHE_SEGMENT current
;
295 ULONG PagesPerSegment
;
301 BOOLEAN FlushedPages
= FALSE
;
303 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
305 InitializeListHead(&FreeList
);
310 KeAcquireGuardedMutex(&ViewLock
);
312 current_entry
= CacheSegmentLRUListHead
.Flink
;
313 while (current_entry
!= &CacheSegmentLRUListHead
)
315 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
316 CacheSegmentLRUListEntry
);
317 current_entry
= current_entry
->Flink
;
319 KeAcquireSpinLock(¤t
->Bcb
->BcbLock
, &oldIrql
);
321 /* Reference the cache segment */
322 CcRosCacheSegmentIncRefCount(current
);
324 /* Check if it's mapped and not dirty */
325 if (current
->MappedCount
> 0 && !current
->Dirty
)
327 /* We have to break these locks because Cc sucks */
328 KeReleaseSpinLock(¤t
->Bcb
->BcbLock
, oldIrql
);
329 KeReleaseGuardedMutex(&ViewLock
);
331 /* Page out the segment */
332 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++)
334 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
336 MmPageOutPhysicalAddress(Page
);
339 /* Reacquire the locks */
340 KeAcquireGuardedMutex(&ViewLock
);
341 KeAcquireSpinLock(¤t
->Bcb
->BcbLock
, &oldIrql
);
344 /* Dereference the cache segment */
345 CcRosCacheSegmentDecRefCount(current
);
347 /* Check if we can free this entry now */
348 if (current
->ReferenceCount
== 0)
350 ASSERT(!current
->Dirty
);
351 ASSERT(!current
->MappedCount
);
353 RemoveEntryList(¤t
->BcbSegmentListEntry
);
354 RemoveEntryList(¤t
->CacheSegmentListEntry
);
355 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
356 InsertHeadList(&FreeList
, ¤t
->BcbSegmentListEntry
);
358 /* Calculate how many pages we freed for Mm */
359 PagesPerSegment
= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
360 PagesFreed
= min(PagesPerSegment
, Target
);
361 Target
-= PagesFreed
;
362 (*NrFreed
) += PagesFreed
;
365 KeReleaseSpinLock(¤t
->Bcb
->BcbLock
, oldIrql
);
368 KeReleaseGuardedMutex(&ViewLock
);
370 /* Try flushing pages if we haven't met our target */
371 if ((Target
> 0) && !FlushedPages
)
373 /* Flush dirty pages to disk */
374 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
);
377 /* We can only swap as many pages as we flushed */
378 if (PagesFreed
< Target
) Target
= PagesFreed
;
380 /* Check if we flushed anything */
383 /* Try again after flushing dirty pages */
384 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
389 while (!IsListEmpty(&FreeList
))
391 current_entry
= RemoveHeadList(&FreeList
);
392 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
393 BcbSegmentListEntry
);
394 CcRosInternalFreeCacheSegment(current
);
397 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
399 return STATUS_SUCCESS
;
404 CcRosReleaseCacheSegment (
406 PCACHE_SEGMENT CacheSeg
,
416 DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %u)\n",
417 Bcb
, CacheSeg
, Valid
);
419 KeAcquireGuardedMutex(&ViewLock
);
420 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
422 CacheSeg
->Valid
= Valid
;
424 WasDirty
= CacheSeg
->Dirty
;
425 CacheSeg
->Dirty
= CacheSeg
->Dirty
|| Dirty
;
427 if (!WasDirty
&& CacheSeg
->Dirty
)
429 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
430 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
435 CacheSeg
->MappedCount
++;
437 CcRosCacheSegmentDecRefCount(CacheSeg
);
438 if (Mapped
&& (CacheSeg
->MappedCount
== 1))
440 CcRosCacheSegmentIncRefCount(CacheSeg
);
442 if (!WasDirty
&& CacheSeg
->Dirty
)
444 CcRosCacheSegmentIncRefCount(CacheSeg
);
447 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
448 KeReleaseGuardedMutex(&ViewLock
);
449 KeReleaseMutex(&CacheSeg
->Mutex
, FALSE
);
451 return STATUS_SUCCESS
;
454 /* Returns with Cache Segment Lock Held! */
457 CcRosLookupCacheSegment (
461 PLIST_ENTRY current_entry
;
462 PCACHE_SEGMENT current
;
467 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %lu)\n", Bcb
, FileOffset
);
469 KeAcquireGuardedMutex(&ViewLock
);
470 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
472 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
473 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
475 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
476 BcbSegmentListEntry
);
477 if (IsPointInSegment(current
->FileOffset
, VACB_MAPPING_GRANULARITY
,
480 CcRosCacheSegmentIncRefCount(current
);
481 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
482 KeReleaseGuardedMutex(&ViewLock
);
483 KeWaitForSingleObject(¤t
->Mutex
,
490 if (current
->FileOffset
> FileOffset
)
492 current_entry
= current_entry
->Flink
;
495 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
496 KeReleaseGuardedMutex(&ViewLock
);
503 CcRosMarkDirtyCacheSegment (
507 PCACHE_SEGMENT CacheSeg
;
512 DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %lu)\n", Bcb
, FileOffset
);
514 CacheSeg
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
515 if (CacheSeg
== NULL
)
517 KeBugCheck(CACHE_MANAGER
);
520 KeAcquireGuardedMutex(&ViewLock
);
521 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
523 if (!CacheSeg
->Dirty
)
525 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
526 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
530 CcRosCacheSegmentDecRefCount(CacheSeg
);
533 /* Move to the tail of the LRU list */
534 RemoveEntryList(&CacheSeg
->CacheSegmentLRUListEntry
);
535 InsertTailList(&CacheSegmentLRUListHead
, &CacheSeg
->CacheSegmentLRUListEntry
);
537 CacheSeg
->Dirty
= TRUE
;
539 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
540 KeReleaseGuardedMutex(&ViewLock
);
541 KeReleaseMutex(&CacheSeg
->Mutex
, FALSE
);
543 return STATUS_SUCCESS
;
548 CcRosUnmapCacheSegment (
553 PCACHE_SEGMENT CacheSeg
;
559 DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %lu, NowDirty %u)\n",
560 Bcb
, FileOffset
, NowDirty
);
562 CacheSeg
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
563 if (CacheSeg
== NULL
)
565 return STATUS_UNSUCCESSFUL
;
568 KeAcquireGuardedMutex(&ViewLock
);
569 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
571 WasDirty
= CacheSeg
->Dirty
;
572 CacheSeg
->Dirty
= CacheSeg
->Dirty
|| NowDirty
;
574 CacheSeg
->MappedCount
--;
576 if (!WasDirty
&& NowDirty
)
578 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
579 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
582 CcRosCacheSegmentDecRefCount(CacheSeg
);
583 if (!WasDirty
&& NowDirty
)
585 CcRosCacheSegmentIncRefCount(CacheSeg
);
587 if (CacheSeg
->MappedCount
== 0)
589 CcRosCacheSegmentDecRefCount(CacheSeg
);
592 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
593 KeReleaseGuardedMutex(&ViewLock
);
594 KeReleaseMutex(&CacheSeg
->Mutex
, FALSE
);
596 return STATUS_SUCCESS
;
601 CcRosCreateCacheSegment (
604 PCACHE_SEGMENT
* CacheSeg
)
606 PCACHE_SEGMENT current
;
607 PCACHE_SEGMENT previous
;
608 PLIST_ENTRY current_entry
;
612 ULONG StartingOffset
;
614 PHYSICAL_ADDRESS BoundaryAddressMultiple
;
618 DPRINT("CcRosCreateCacheSegment()\n");
620 BoundaryAddressMultiple
.QuadPart
= 0;
621 if (FileOffset
>= Bcb
->FileSize
.u
.LowPart
)
624 return STATUS_INVALID_PARAMETER
;
627 current
= ExAllocateFromNPagedLookasideList(&CacheSegLookasideList
);
628 current
->Valid
= FALSE
;
629 current
->Dirty
= FALSE
;
630 current
->PageOut
= FALSE
;
631 current
->FileOffset
= ROUND_DOWN(FileOffset
, VACB_MAPPING_GRANULARITY
);
636 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb
, current
);
639 current
->MappedCount
= 0;
640 current
->DirtySegmentListEntry
.Flink
= NULL
;
641 current
->DirtySegmentListEntry
.Blink
= NULL
;
642 current
->ReferenceCount
= 1;
643 KeInitializeMutex(¤t
->Mutex
, 0);
644 KeWaitForSingleObject(¤t
->Mutex
,
649 KeAcquireGuardedMutex(&ViewLock
);
652 /* There is window between the call to CcRosLookupCacheSegment
653 * and CcRosCreateCacheSegment. We must check if a segment on
654 * the fileoffset exist. If there exist a segment, we release
655 * our new created segment and return the existing one.
657 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
658 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
660 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
662 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
663 BcbSegmentListEntry
);
664 if (IsPointInSegment(current
->FileOffset
, VACB_MAPPING_GRANULARITY
,
667 CcRosCacheSegmentIncRefCount(current
);
668 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
672 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
678 KeReleaseMutex(&(*CacheSeg
)->Mutex
, FALSE
);
679 KeReleaseGuardedMutex(&ViewLock
);
680 ExFreeToNPagedLookasideList(&CacheSegLookasideList
, *CacheSeg
);
682 KeWaitForSingleObject(¤t
->Mutex
,
687 return STATUS_SUCCESS
;
689 if (current
->FileOffset
< FileOffset
)
691 ASSERT(previous
== NULL
||
692 previous
->FileOffset
< current
->FileOffset
);
695 if (current
->FileOffset
> FileOffset
)
697 current_entry
= current_entry
->Flink
;
699 /* There was no existing segment. */
703 InsertHeadList(&previous
->BcbSegmentListEntry
, ¤t
->BcbSegmentListEntry
);
707 InsertHeadList(&Bcb
->BcbSegmentListHead
, ¤t
->BcbSegmentListEntry
);
709 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
710 InsertTailList(&CacheSegmentListHead
, ¤t
->CacheSegmentListEntry
);
711 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
712 KeReleaseGuardedMutex(&ViewLock
);
714 KeAcquireSpinLock(&CiCacheSegMappingRegionLock
, &oldIrql
);
716 StartingOffset
= RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap
,
717 VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
,
718 CiCacheSegMappingRegionHint
);
720 if (StartingOffset
== 0xffffffff)
722 DPRINT1("Out of CacheSeg mapping space\n");
723 KeBugCheck(CACHE_MANAGER
);
726 current
->BaseAddress
= CiCacheSegMappingRegionBase
+ StartingOffset
* PAGE_SIZE
;
728 if (CiCacheSegMappingRegionHint
== StartingOffset
)
730 CiCacheSegMappingRegionHint
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
733 KeReleaseSpinLock(&CiCacheSegMappingRegionLock
, oldIrql
);
735 MmLockAddressSpace(MmGetKernelAddressSpace());
736 current
->BaseAddress
= NULL
;
737 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
738 0, // nothing checks for cache_segment mareas, so set to 0
739 ¤t
->BaseAddress
,
740 VACB_MAPPING_GRANULARITY
,
742 (PMEMORY_AREA
*)¤t
->MemoryArea
,
745 BoundaryAddressMultiple
);
746 MmUnlockAddressSpace(MmGetKernelAddressSpace());
747 if (!NT_SUCCESS(Status
))
749 KeBugCheck(CACHE_MANAGER
);
753 /* Create a virtual mapping for this memory area */
754 MI_SET_USAGE(MI_USAGE_CACHE
);
758 if ((Bcb
->FileObject
) && (Bcb
->FileObject
->FileName
.Buffer
))
760 pos
= wcsrchr(Bcb
->FileObject
->FileName
.Buffer
, '\\');
761 len
= wcslen(pos
) * sizeof(WCHAR
);
762 if (pos
) snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
766 MmMapMemoryArea(current
->BaseAddress
, VACB_MAPPING_GRANULARITY
,
767 MC_CACHE
, PAGE_READWRITE
);
769 return STATUS_SUCCESS
;
774 CcRosGetCacheSegmentChain (
778 PCACHE_SEGMENT
* CacheSeg
)
780 PCACHE_SEGMENT current
;
782 PCACHE_SEGMENT
* CacheSegList
;
783 PCACHE_SEGMENT Previous
= NULL
;
787 DPRINT("CcRosGetCacheSegmentChain()\n");
789 Length
= ROUND_UP(Length
, VACB_MAPPING_GRANULARITY
);
791 CacheSegList
= _alloca(sizeof(PCACHE_SEGMENT
) *
792 (Length
/ VACB_MAPPING_GRANULARITY
));
795 * Look for a cache segment already mapping the same data.
797 for (i
= 0; i
< (Length
/ VACB_MAPPING_GRANULARITY
); i
++)
799 ULONG CurrentOffset
= FileOffset
+ (i
* VACB_MAPPING_GRANULARITY
);
800 current
= CcRosLookupCacheSegment(Bcb
, CurrentOffset
);
803 KeAcquireGuardedMutex(&ViewLock
);
805 /* Move to tail of LRU list */
806 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
807 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
809 KeReleaseGuardedMutex(&ViewLock
);
811 CacheSegList
[i
] = current
;
815 CcRosCreateCacheSegment(Bcb
, CurrentOffset
, ¤t
);
816 CacheSegList
[i
] = current
;
820 for (i
= 0; i
< Length
/ VACB_MAPPING_GRANULARITY
; i
++)
824 *CacheSeg
= CacheSegList
[i
];
825 Previous
= CacheSegList
[i
];
829 Previous
->NextInChain
= CacheSegList
[i
];
830 Previous
= CacheSegList
[i
];
834 Previous
->NextInChain
= NULL
;
836 return STATUS_SUCCESS
;
841 CcRosGetCacheSegment (
847 PCACHE_SEGMENT
* CacheSeg
)
849 PCACHE_SEGMENT current
;
854 DPRINT("CcRosGetCacheSegment()\n");
857 * Look for a cache segment already mapping the same data.
859 current
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
863 * Otherwise create a new segment.
865 Status
= CcRosCreateCacheSegment(Bcb
, FileOffset
, ¤t
);
866 if (!NT_SUCCESS(Status
))
872 KeAcquireGuardedMutex(&ViewLock
);
874 /* Move to the tail of the LRU list */
875 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
876 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
878 KeReleaseGuardedMutex(&ViewLock
);
881 * Return information about the segment to the caller.
883 *UptoDate
= current
->Valid
;
884 *BaseAddress
= current
->BaseAddress
;
885 DPRINT("*BaseAddress %p\n", *BaseAddress
);
887 *BaseOffset
= current
->FileOffset
;
888 return STATUS_SUCCESS
;
893 CcRosRequestCacheSegment (
898 PCACHE_SEGMENT
* CacheSeg
)
900 * FUNCTION: Request a page mapping for a BCB
907 if (FileOffset
% VACB_MAPPING_GRANULARITY
!= 0)
909 DPRINT1("Bad fileoffset %x should be multiple of %x",
910 FileOffset
, VACB_MAPPING_GRANULARITY
);
911 KeBugCheck(CACHE_MANAGER
);
914 return CcRosGetCacheSegment(Bcb
,
927 MEMORY_AREA
* MemoryArea
,
933 ASSERT(SwapEntry
== 0);
936 ASSERT(MmGetReferenceCountPage(Page
) == 1);
937 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
942 CcRosInternalFreeCacheSegment (
943 PCACHE_SEGMENT CacheSeg
)
945 * FUNCTION: Releases a cache segment associated with a BCB
955 DPRINT("Freeing cache segment 0x%p\n", CacheSeg
);
957 if ( CacheSeg
->Bcb
->Trace
)
959 DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg
->Bcb
, CacheSeg
);
963 RegionSize
= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
965 /* Unmap all the pages. */
966 for (i
= 0; i
< RegionSize
; i
++)
968 MmDeleteVirtualMapping(NULL
,
969 CacheSeg
->BaseAddress
+ (i
* PAGE_SIZE
),
973 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
976 KeAcquireSpinLock(&CiCacheSegMappingRegionLock
, &oldIrql
);
977 /* Deallocate all the pages used. */
978 Base
= (ULONG
)(CacheSeg
->BaseAddress
- CiCacheSegMappingRegionBase
) / PAGE_SIZE
;
980 RtlClearBits(&CiCacheSegMappingRegionAllocMap
, Base
, RegionSize
);
982 CiCacheSegMappingRegionHint
= min(CiCacheSegMappingRegionHint
, Base
);
984 KeReleaseSpinLock(&CiCacheSegMappingRegionLock
, oldIrql
);
986 MmLockAddressSpace(MmGetKernelAddressSpace());
987 MmFreeMemoryArea(MmGetKernelAddressSpace(),
988 CacheSeg
->MemoryArea
,
991 MmUnlockAddressSpace(MmGetKernelAddressSpace());
993 ExFreeToNPagedLookasideList(&CacheSegLookasideList
, CacheSeg
);
994 return STATUS_SUCCESS
;
1003 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
1004 IN PLARGE_INTEGER FileOffset OPTIONAL
,
1006 OUT PIO_STATUS_BLOCK IoStatus
)
1009 LARGE_INTEGER Offset
;
1010 PCACHE_SEGMENT current
;
1014 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1015 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
1017 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1019 Bcb
= (PBCB
)SectionObjectPointers
->SharedCacheMap
;
1023 Offset
= *FileOffset
;
1027 Offset
.QuadPart
= (LONGLONG
)0;
1028 Length
= Bcb
->FileSize
.u
.LowPart
;
1033 IoStatus
->Status
= STATUS_SUCCESS
;
1034 IoStatus
->Information
= 0;
1039 current
= CcRosLookupCacheSegment (Bcb
, Offset
.u
.LowPart
);
1040 if (current
!= NULL
)
1044 Status
= CcRosFlushCacheSegment(current
);
1045 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
1047 IoStatus
->Status
= Status
;
1050 KeReleaseMutex(¤t
->Mutex
, FALSE
);
1052 KeAcquireGuardedMutex(&ViewLock
);
1053 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
1054 CcRosCacheSegmentDecRefCount(current
);
1055 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
1056 KeReleaseGuardedMutex(&ViewLock
);
1059 Offset
.QuadPart
+= VACB_MAPPING_GRANULARITY
;
1060 if (Length
> VACB_MAPPING_GRANULARITY
)
1062 Length
-= VACB_MAPPING_GRANULARITY
;
1074 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1081 CcRosDeleteFileCache (
1082 PFILE_OBJECT FileObject
,
1085 * FUNCTION: Releases the BCB associated with a file object
1088 PLIST_ENTRY current_entry
;
1089 PCACHE_SEGMENT current
;
1090 LIST_ENTRY FreeList
;
1096 KeReleaseGuardedMutex(&ViewLock
);
1098 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1100 KeAcquireGuardedMutex(&ViewLock
);
1102 if (Bcb
->RefCount
== 0)
1104 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1106 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1107 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1110 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1113 * Release all cache segments.
1115 InitializeListHead(&FreeList
);
1116 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
1117 while (!IsListEmpty(&Bcb
->BcbSegmentListHead
))
1119 current_entry
= RemoveTailList(&Bcb
->BcbSegmentListHead
);
1120 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
, BcbSegmentListEntry
);
1121 RemoveEntryList(¤t
->CacheSegmentListEntry
);
1122 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
1125 RemoveEntryList(¤t
->DirtySegmentListEntry
);
1126 DirtyPageCount
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
1127 DPRINT1("Freeing dirty segment\n");
1129 InsertHeadList(&FreeList
, ¤t
->BcbSegmentListEntry
);
1134 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
1136 KeReleaseGuardedMutex(&ViewLock
);
1137 ObDereferenceObject (Bcb
->FileObject
);
1139 while (!IsListEmpty(&FreeList
))
1141 current_entry
= RemoveTailList(&FreeList
);
1142 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
, BcbSegmentListEntry
);
1143 CcRosInternalFreeCacheSegment(current
);
1145 ExFreeToNPagedLookasideList(&BcbLookasideList
, Bcb
);
1146 KeAcquireGuardedMutex(&ViewLock
);
1148 return STATUS_SUCCESS
;
1153 CcRosReferenceCache (
1154 PFILE_OBJECT FileObject
)
1157 KeAcquireGuardedMutex(&ViewLock
);
1158 Bcb
= (PBCB
)FileObject
->SectionObjectPointer
->SharedCacheMap
;
1160 if (Bcb
->RefCount
== 0)
1162 ASSERT(Bcb
->BcbRemoveListEntry
.Flink
!= NULL
);
1163 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1164 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1169 ASSERT(Bcb
->BcbRemoveListEntry
.Flink
== NULL
);
1172 KeReleaseGuardedMutex(&ViewLock
);
1177 CcRosSetRemoveOnClose (
1178 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1181 DPRINT("CcRosSetRemoveOnClose()\n");
1182 KeAcquireGuardedMutex(&ViewLock
);
1183 Bcb
= (PBCB
)SectionObjectPointer
->SharedCacheMap
;
1186 Bcb
->RemoveOnClose
= TRUE
;
1187 if (Bcb
->RefCount
== 0)
1189 CcRosDeleteFileCache(Bcb
->FileObject
, Bcb
);
1192 KeReleaseGuardedMutex(&ViewLock
);
1198 CcRosDereferenceCache (
1199 PFILE_OBJECT FileObject
)
1202 KeAcquireGuardedMutex(&ViewLock
);
1203 Bcb
= (PBCB
)FileObject
->SectionObjectPointer
->SharedCacheMap
;
1205 if (Bcb
->RefCount
> 0)
1208 if (Bcb
->RefCount
== 0)
1210 MmFreeSectionSegments(Bcb
->FileObject
);
1211 CcRosDeleteFileCache(FileObject
, Bcb
);
1214 KeReleaseGuardedMutex(&ViewLock
);
1219 CcRosReleaseFileCache (
1220 PFILE_OBJECT FileObject
)
1222 * FUNCTION: Called by the file system when a handle to a file object
1228 KeAcquireGuardedMutex(&ViewLock
);
1230 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1232 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1233 if (FileObject
->PrivateCacheMap
!= NULL
)
1235 FileObject
->PrivateCacheMap
= NULL
;
1236 if (Bcb
->RefCount
> 0)
1239 if (Bcb
->RefCount
== 0)
1241 MmFreeSectionSegments(Bcb
->FileObject
);
1242 CcRosDeleteFileCache(FileObject
, Bcb
);
1247 KeReleaseGuardedMutex(&ViewLock
);
1248 return STATUS_SUCCESS
;
1253 CcTryToInitializeFileCache (
1254 PFILE_OBJECT FileObject
)
1259 KeAcquireGuardedMutex(&ViewLock
);
1261 ASSERT(FileObject
->SectionObjectPointer
);
1262 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1265 Status
= STATUS_UNSUCCESSFUL
;
1269 if (FileObject
->PrivateCacheMap
== NULL
)
1271 FileObject
->PrivateCacheMap
= Bcb
;
1274 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1276 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1277 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1279 Status
= STATUS_SUCCESS
;
1281 KeReleaseGuardedMutex(&ViewLock
);
1289 CcRosInitializeFileCache (
1290 PFILE_OBJECT FileObject
,
1291 PCACHE_MANAGER_CALLBACKS CallBacks
,
1292 PVOID LazyWriterContext
)
1294 * FUNCTION: Initializes a BCB for a file object
1299 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1300 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p)\n",
1303 KeAcquireGuardedMutex(&ViewLock
);
1306 Bcb
= ExAllocateFromNPagedLookasideList(&BcbLookasideList
);
1309 KeReleaseGuardedMutex(&ViewLock
);
1310 return STATUS_UNSUCCESSFUL
;
1312 RtlZeroMemory(Bcb
, sizeof(*Bcb
));
1313 ObReferenceObjectByPointer(FileObject
,
1317 Bcb
->FileObject
= FileObject
;
1318 Bcb
->Callbacks
= CallBacks
;
1319 Bcb
->LazyWriteContext
= LazyWriterContext
;
1320 if (FileObject
->FsContext
)
1322 Bcb
->AllocationSize
=
1323 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->AllocationSize
;
1325 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->FileSize
;
1327 KeInitializeSpinLock(&Bcb
->BcbLock
);
1328 InitializeListHead(&Bcb
->BcbSegmentListHead
);
1329 FileObject
->SectionObjectPointer
->SharedCacheMap
= Bcb
;
1331 if (FileObject
->PrivateCacheMap
== NULL
)
1333 FileObject
->PrivateCacheMap
= Bcb
;
1336 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1338 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1339 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1341 KeReleaseGuardedMutex(&ViewLock
);
1343 return STATUS_SUCCESS
;
1351 CcGetFileObjectFromSectionPtrs (
1352 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1355 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1357 Bcb
= (PBCB
)SectionObjectPointers
->SharedCacheMap
;
1359 return Bcb
->FileObject
;
1373 PHYSICAL_ADDRESS BoundaryAddressMultiple
;
1376 DPRINT("CcInitView()\n");
1378 BoundaryAddressMultiple
.QuadPart
= 0;
1379 CiCacheSegMappingRegionHint
= 0;
1380 CiCacheSegMappingRegionBase
= NULL
;
1382 MmLockAddressSpace(MmGetKernelAddressSpace());
1384 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
1385 MEMORY_AREA_CACHE_SEGMENT
,
1386 &CiCacheSegMappingRegionBase
,
1387 CI_CACHESEG_MAPPING_REGION_SIZE
,
1392 BoundaryAddressMultiple
);
1393 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1394 if (!NT_SUCCESS(Status
))
1396 KeBugCheck(CACHE_MANAGER
);
1399 Buffer
= ExAllocatePoolWithTag(NonPagedPool
,
1400 CI_CACHESEG_MAPPING_REGION_SIZE
/ (PAGE_SIZE
* 8),
1404 KeBugCheck(CACHE_MANAGER
);
1407 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap
,
1409 CI_CACHESEG_MAPPING_REGION_SIZE
/ PAGE_SIZE
);
1410 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap
);
1412 KeInitializeSpinLock(&CiCacheSegMappingRegionLock
);
1414 InitializeListHead(&CacheSegmentListHead
);
1415 InitializeListHead(&DirtySegmentListHead
);
1416 InitializeListHead(&CacheSegmentLRUListHead
);
1417 InitializeListHead(&ClosedListHead
);
1418 KeInitializeGuardedMutex(&ViewLock
);
1419 ExInitializeNPagedLookasideList (&iBcbLookasideList
,
1423 sizeof(INTERNAL_BCB
),
1426 ExInitializeNPagedLookasideList (&BcbLookasideList
,
1433 ExInitializeNPagedLookasideList (&CacheSegLookasideList
,
1437 sizeof(CACHE_SEGMENT
),
1441 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1443 CcInitCacheZeroPage();