2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
10 /* NOTES **********************************************************************
12 * This is not the NT implementation of a file cache nor anything much like
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
18 * (1) If caching for the FCB hasn't been initiated then do so by calling
19 * CcInitializeFileCache.
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
26 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 * (4) Copy the data into or out of the page as necessary.
30 * (5) Release the cache page
32 /* INCLUDES ******************************************************************/
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
42 /* GLOBALS *******************************************************************/
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocate/deallocate space from this block
47 * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
50 //#define CACHE_BITMAP
/* Segments that currently hold dirty data, in the order they were dirtied */
static LIST_ENTRY DirtySegmentListHead;
/* All cache segments, in creation order */
static LIST_ENTRY CacheSegmentListHead;
/* All cache segments, least-recently-used first */
static LIST_ENTRY CacheSegmentLRUListHead;
/* BCBs whose reference count dropped to zero, awaiting final deletion */
static LIST_ENTRY ClosedListHead;
/* Total pages currently marked dirty in the cache */
ULONG DirtyPageCount = 0;

/* Guards the global segment lists and BCB bookkeeping above */
KGUARDED_MUTEX ViewLock;

/* Size of the dedicated kernel VA region used to map cache segments */
#define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)

/* Base of the mapping region, reserved at init time */
static PVOID CiCacheSegMappingRegionBase = NULL;
/* One bit per page of the mapping region: set = page in use */
static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
/* Search hint to speed up free-run lookups in the bitmap */
static ULONG CiCacheSegMappingRegionHint;
/* Protects the bitmap and the hint */
static KSPIN_LOCK CiCacheSegMappingRegionLock;

/* Lookaside lists for the cache manager's fixed-size allocations */
NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST BcbLookasideList;
static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
74 static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs
, const char* file
, int line
)
79 DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
80 file
, line
, cs
, cs
->ReferenceCount
, cs
->Dirty
, cs
->PageOut
);
83 static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs
, const char* file
, int line
)
88 DbgPrint("(%s:%i) CacheSegment %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
89 file
, line
, cs
, cs
->ReferenceCount
, cs
->Dirty
, cs
->PageOut
);
92 #define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
93 #define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
95 #define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
96 #define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg
);
103 /* FUNCTIONS *****************************************************************/
113 PLIST_ENTRY current_entry
;
114 PCACHE_SEGMENT current
;
123 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb
);
125 KeAcquireGuardedMutex(&ViewLock
);
126 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldirql
);
128 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
129 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
131 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
, BcbSegmentListEntry
);
132 current_entry
= current_entry
->Flink
;
134 DPRINT1(" CacheSegment 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
135 current
, current
->ReferenceCount
, current
->Dirty
, current
->PageOut
);
137 KeReleaseSpinLock(&Bcb
->BcbLock
, oldirql
);
138 KeReleaseGuardedMutex(&ViewLock
);
142 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb
);
153 CcRosFlushCacheSegment (
154 PCACHE_SEGMENT CacheSegment
)
159 Status
= WriteCacheSegment(CacheSegment
);
160 if (NT_SUCCESS(Status
))
162 KeAcquireGuardedMutex(&ViewLock
);
163 KeAcquireSpinLock(&CacheSegment
->Bcb
->BcbLock
, &oldIrql
);
165 CacheSegment
->Dirty
= FALSE
;
166 RemoveEntryList(&CacheSegment
->DirtySegmentListEntry
);
167 DirtyPageCount
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
168 CcRosCacheSegmentDecRefCount(CacheSegment
);
170 KeReleaseSpinLock(&CacheSegment
->Bcb
->BcbLock
, oldIrql
);
171 KeReleaseGuardedMutex(&ViewLock
);
179 CcRosFlushDirtyPages (
184 PLIST_ENTRY current_entry
;
185 PCACHE_SEGMENT current
;
186 ULONG PagesPerSegment
;
189 LARGE_INTEGER ZeroTimeout
;
191 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
194 ZeroTimeout
.QuadPart
= 0;
196 KeEnterCriticalRegion();
197 KeAcquireGuardedMutex(&ViewLock
);
199 current_entry
= DirtySegmentListHead
.Flink
;
200 if (current_entry
== &DirtySegmentListHead
)
202 DPRINT("No Dirty pages\n");
205 while ((current_entry
!= &DirtySegmentListHead
) && (Target
> 0))
207 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
208 DirtySegmentListEntry
);
209 current_entry
= current_entry
->Flink
;
211 CcRosCacheSegmentIncRefCount(current
);
213 Locked
= current
->Bcb
->Callbacks
->AcquireForLazyWrite(
214 current
->Bcb
->LazyWriteContext
, Wait
);
217 CcRosCacheSegmentDecRefCount(current
);
221 Status
= KeWaitForSingleObject(¤t
->Mutex
,
225 Wait
? NULL
: &ZeroTimeout
);
226 if (Status
!= STATUS_SUCCESS
)
228 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
229 current
->Bcb
->LazyWriteContext
);
230 CcRosCacheSegmentDecRefCount(current
);
234 ASSERT(current
->Dirty
);
236 /* One reference is added above */
237 if (current
->ReferenceCount
> 2)
239 KeReleaseMutex(¤t
->Mutex
, FALSE
);
240 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
241 current
->Bcb
->LazyWriteContext
);
242 CcRosCacheSegmentDecRefCount(current
);
246 PagesPerSegment
= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
248 KeReleaseGuardedMutex(&ViewLock
);
250 Status
= CcRosFlushCacheSegment(current
);
252 KeReleaseMutex(¤t
->Mutex
, FALSE
);
253 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
254 current
->Bcb
->LazyWriteContext
);
256 KeAcquireGuardedMutex(&ViewLock
);
257 CcRosCacheSegmentDecRefCount(current
);
259 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
261 DPRINT1("CC: Failed to flush cache segment.\n");
265 (*Count
) += PagesPerSegment
;
266 Target
-= PagesPerSegment
;
269 current_entry
= DirtySegmentListHead
.Flink
;
272 KeReleaseGuardedMutex(&ViewLock
);
273 KeLeaveCriticalRegion();
275 DPRINT("CcRosFlushDirtyPages() finished\n");
276 return STATUS_SUCCESS
;
285 * FUNCTION: Try to free some memory from the file cache.
287 * Target - The number of pages to be freed.
288 * Priority - The priority of free (currently unused).
289 * NrFreed - Points to a variable where the number of pages
290 * actually freed is returned.
293 PLIST_ENTRY current_entry
;
294 PCACHE_SEGMENT current
;
295 ULONG PagesPerSegment
;
301 BOOLEAN FlushedPages
= FALSE
;
303 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
305 InitializeListHead(&FreeList
);
310 KeAcquireGuardedMutex(&ViewLock
);
312 current_entry
= CacheSegmentLRUListHead
.Flink
;
313 while (current_entry
!= &CacheSegmentLRUListHead
)
315 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
316 CacheSegmentLRUListEntry
);
317 current_entry
= current_entry
->Flink
;
319 KeAcquireSpinLock(¤t
->Bcb
->BcbLock
, &oldIrql
);
321 /* Reference the cache segment */
322 CcRosCacheSegmentIncRefCount(current
);
324 /* Check if it's mapped and not dirty */
325 if (current
->MappedCount
> 0 && !current
->Dirty
)
327 /* We have to break these locks because Cc sucks */
328 KeReleaseSpinLock(¤t
->Bcb
->BcbLock
, oldIrql
);
329 KeReleaseGuardedMutex(&ViewLock
);
331 /* Page out the segment */
332 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++)
334 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
336 MmPageOutPhysicalAddress(Page
);
339 /* Reacquire the locks */
340 KeAcquireGuardedMutex(&ViewLock
);
341 KeAcquireSpinLock(¤t
->Bcb
->BcbLock
, &oldIrql
);
344 /* Dereference the cache segment */
345 CcRosCacheSegmentDecRefCount(current
);
347 /* Check if we can free this entry now */
348 if (current
->ReferenceCount
== 0)
350 ASSERT(!current
->Dirty
);
351 ASSERT(!current
->MappedCount
);
353 RemoveEntryList(¤t
->BcbSegmentListEntry
);
354 RemoveEntryList(¤t
->CacheSegmentListEntry
);
355 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
356 InsertHeadList(&FreeList
, ¤t
->BcbSegmentListEntry
);
358 /* Calculate how many pages we freed for Mm */
359 PagesPerSegment
= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
360 PagesFreed
= min(PagesPerSegment
, Target
);
361 Target
-= PagesFreed
;
362 (*NrFreed
) += PagesFreed
;
365 KeReleaseSpinLock(¤t
->Bcb
->BcbLock
, oldIrql
);
368 KeReleaseGuardedMutex(&ViewLock
);
370 /* Try flushing pages if we haven't met our target */
371 if ((Target
> 0) && !FlushedPages
)
373 /* Flush dirty pages to disk */
374 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
);
377 /* We can only swap as many pages as we flushed */
378 if (PagesFreed
< Target
) Target
= PagesFreed
;
380 /* Check if we flushed anything */
383 /* Try again after flushing dirty pages */
384 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
389 while (!IsListEmpty(&FreeList
))
391 current_entry
= RemoveHeadList(&FreeList
);
392 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
393 BcbSegmentListEntry
);
394 CcRosInternalFreeCacheSegment(current
);
397 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
399 return STATUS_SUCCESS
;
404 CcRosReleaseCacheSegment (
406 PCACHE_SEGMENT CacheSeg
,
416 DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %u)\n",
417 Bcb
, CacheSeg
, Valid
);
419 KeAcquireGuardedMutex(&ViewLock
);
420 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
422 CacheSeg
->Valid
= Valid
;
424 WasDirty
= CacheSeg
->Dirty
;
425 CacheSeg
->Dirty
= CacheSeg
->Dirty
|| Dirty
;
427 if (!WasDirty
&& CacheSeg
->Dirty
)
429 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
430 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
435 CacheSeg
->MappedCount
++;
437 CcRosCacheSegmentDecRefCount(CacheSeg
);
438 if (Mapped
&& (CacheSeg
->MappedCount
== 1))
440 CcRosCacheSegmentIncRefCount(CacheSeg
);
442 if (!WasDirty
&& CacheSeg
->Dirty
)
444 CcRosCacheSegmentIncRefCount(CacheSeg
);
447 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
448 KeReleaseGuardedMutex(&ViewLock
);
449 KeReleaseMutex(&CacheSeg
->Mutex
, FALSE
);
451 return STATUS_SUCCESS
;
454 /* Returns with Cache Segment Lock Held! */
457 CcRosLookupCacheSegment (
461 PLIST_ENTRY current_entry
;
462 PCACHE_SEGMENT current
;
467 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %lu)\n", Bcb
, FileOffset
);
469 KeAcquireGuardedMutex(&ViewLock
);
470 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
472 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
473 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
475 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
476 BcbSegmentListEntry
);
477 if (IsPointInSegment(current
->FileOffset
, VACB_MAPPING_GRANULARITY
,
480 CcRosCacheSegmentIncRefCount(current
);
481 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
482 KeReleaseGuardedMutex(&ViewLock
);
483 KeWaitForSingleObject(¤t
->Mutex
,
490 if (current
->FileOffset
> FileOffset
)
492 current_entry
= current_entry
->Flink
;
495 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
496 KeReleaseGuardedMutex(&ViewLock
);
503 CcRosMarkDirtyCacheSegment (
507 PCACHE_SEGMENT CacheSeg
;
512 DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %lu)\n", Bcb
, FileOffset
);
514 CacheSeg
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
515 if (CacheSeg
== NULL
)
517 KeBugCheck(CACHE_MANAGER
);
520 KeAcquireGuardedMutex(&ViewLock
);
521 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
523 if (!CacheSeg
->Dirty
)
525 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
526 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
530 CcRosCacheSegmentDecRefCount(CacheSeg
);
533 /* Move to the tail of the LRU list */
534 RemoveEntryList(&CacheSeg
->CacheSegmentLRUListEntry
);
535 InsertTailList(&CacheSegmentLRUListHead
, &CacheSeg
->CacheSegmentLRUListEntry
);
537 CacheSeg
->Dirty
= TRUE
;
539 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
540 KeReleaseGuardedMutex(&ViewLock
);
541 KeReleaseMutex(&CacheSeg
->Mutex
, FALSE
);
543 return STATUS_SUCCESS
;
548 CcRosUnmapCacheSegment (
553 PCACHE_SEGMENT CacheSeg
;
559 DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %lu, NowDirty %u)\n",
560 Bcb
, FileOffset
, NowDirty
);
562 CacheSeg
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
563 if (CacheSeg
== NULL
)
565 return STATUS_UNSUCCESSFUL
;
568 KeAcquireGuardedMutex(&ViewLock
);
569 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
571 WasDirty
= CacheSeg
->Dirty
;
572 CacheSeg
->Dirty
= CacheSeg
->Dirty
|| NowDirty
;
574 CacheSeg
->MappedCount
--;
576 if (!WasDirty
&& NowDirty
)
578 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
579 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
582 CcRosCacheSegmentDecRefCount(CacheSeg
);
583 if (!WasDirty
&& NowDirty
)
585 CcRosCacheSegmentIncRefCount(CacheSeg
);
587 if (CacheSeg
->MappedCount
== 0)
589 CcRosCacheSegmentDecRefCount(CacheSeg
);
592 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
593 KeReleaseGuardedMutex(&ViewLock
);
594 KeReleaseMutex(&CacheSeg
->Mutex
, FALSE
);
596 return STATUS_SUCCESS
;
601 CcRosCreateCacheSegment (
604 PCACHE_SEGMENT
* CacheSeg
)
606 PCACHE_SEGMENT current
;
607 PCACHE_SEGMENT previous
;
608 PLIST_ENTRY current_entry
;
612 ULONG StartingOffset
;
617 DPRINT("CcRosCreateCacheSegment()\n");
619 if (FileOffset
>= Bcb
->FileSize
.u
.LowPart
)
622 return STATUS_INVALID_PARAMETER
;
625 current
= ExAllocateFromNPagedLookasideList(&CacheSegLookasideList
);
626 current
->Valid
= FALSE
;
627 current
->Dirty
= FALSE
;
628 current
->PageOut
= FALSE
;
629 current
->FileOffset
= ROUND_DOWN(FileOffset
, VACB_MAPPING_GRANULARITY
);
634 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb
, current
);
637 current
->MappedCount
= 0;
638 current
->DirtySegmentListEntry
.Flink
= NULL
;
639 current
->DirtySegmentListEntry
.Blink
= NULL
;
640 current
->ReferenceCount
= 1;
641 KeInitializeMutex(¤t
->Mutex
, 0);
642 KeWaitForSingleObject(¤t
->Mutex
,
647 KeAcquireGuardedMutex(&ViewLock
);
650 /* There is window between the call to CcRosLookupCacheSegment
651 * and CcRosCreateCacheSegment. We must check if a segment on
652 * the fileoffset exist. If there exist a segment, we release
653 * our new created segment and return the existing one.
655 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
656 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
658 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
660 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
661 BcbSegmentListEntry
);
662 if (IsPointInSegment(current
->FileOffset
, VACB_MAPPING_GRANULARITY
,
665 CcRosCacheSegmentIncRefCount(current
);
666 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
670 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
676 KeReleaseMutex(&(*CacheSeg
)->Mutex
, FALSE
);
677 KeReleaseGuardedMutex(&ViewLock
);
678 ExFreeToNPagedLookasideList(&CacheSegLookasideList
, *CacheSeg
);
680 KeWaitForSingleObject(¤t
->Mutex
,
685 return STATUS_SUCCESS
;
687 if (current
->FileOffset
< FileOffset
)
689 ASSERT(previous
== NULL
||
690 previous
->FileOffset
< current
->FileOffset
);
693 if (current
->FileOffset
> FileOffset
)
695 current_entry
= current_entry
->Flink
;
697 /* There was no existing segment. */
701 InsertHeadList(&previous
->BcbSegmentListEntry
, ¤t
->BcbSegmentListEntry
);
705 InsertHeadList(&Bcb
->BcbSegmentListHead
, ¤t
->BcbSegmentListEntry
);
707 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
708 InsertTailList(&CacheSegmentListHead
, ¤t
->CacheSegmentListEntry
);
709 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
710 KeReleaseGuardedMutex(&ViewLock
);
712 KeAcquireSpinLock(&CiCacheSegMappingRegionLock
, &oldIrql
);
714 StartingOffset
= RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap
,
715 VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
,
716 CiCacheSegMappingRegionHint
);
718 if (StartingOffset
== 0xffffffff)
720 DPRINT1("Out of CacheSeg mapping space\n");
721 KeBugCheck(CACHE_MANAGER
);
724 current
->BaseAddress
= CiCacheSegMappingRegionBase
+ StartingOffset
* PAGE_SIZE
;
726 if (CiCacheSegMappingRegionHint
== StartingOffset
)
728 CiCacheSegMappingRegionHint
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
731 KeReleaseSpinLock(&CiCacheSegMappingRegionLock
, oldIrql
);
733 MmLockAddressSpace(MmGetKernelAddressSpace());
734 current
->BaseAddress
= NULL
;
735 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
736 0, // nothing checks for cache_segment mareas, so set to 0
737 ¤t
->BaseAddress
,
738 VACB_MAPPING_GRANULARITY
,
740 (PMEMORY_AREA
*)¤t
->MemoryArea
,
744 MmUnlockAddressSpace(MmGetKernelAddressSpace());
745 if (!NT_SUCCESS(Status
))
747 KeBugCheck(CACHE_MANAGER
);
751 /* Create a virtual mapping for this memory area */
752 MI_SET_USAGE(MI_USAGE_CACHE
);
756 if ((Bcb
->FileObject
) && (Bcb
->FileObject
->FileName
.Buffer
))
758 pos
= wcsrchr(Bcb
->FileObject
->FileName
.Buffer
, '\\');
759 len
= wcslen(pos
) * sizeof(WCHAR
);
760 if (pos
) snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
764 MmMapMemoryArea(current
->BaseAddress
, VACB_MAPPING_GRANULARITY
,
765 MC_CACHE
, PAGE_READWRITE
);
767 return STATUS_SUCCESS
;
772 CcRosGetCacheSegmentChain (
776 PCACHE_SEGMENT
* CacheSeg
)
778 PCACHE_SEGMENT current
;
780 PCACHE_SEGMENT
* CacheSegList
;
781 PCACHE_SEGMENT Previous
= NULL
;
785 DPRINT("CcRosGetCacheSegmentChain()\n");
787 Length
= ROUND_UP(Length
, VACB_MAPPING_GRANULARITY
);
789 CacheSegList
= _alloca(sizeof(PCACHE_SEGMENT
) *
790 (Length
/ VACB_MAPPING_GRANULARITY
));
793 * Look for a cache segment already mapping the same data.
795 for (i
= 0; i
< (Length
/ VACB_MAPPING_GRANULARITY
); i
++)
797 ULONG CurrentOffset
= FileOffset
+ (i
* VACB_MAPPING_GRANULARITY
);
798 current
= CcRosLookupCacheSegment(Bcb
, CurrentOffset
);
801 KeAcquireGuardedMutex(&ViewLock
);
803 /* Move to tail of LRU list */
804 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
805 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
807 KeReleaseGuardedMutex(&ViewLock
);
809 CacheSegList
[i
] = current
;
813 CcRosCreateCacheSegment(Bcb
, CurrentOffset
, ¤t
);
814 CacheSegList
[i
] = current
;
818 for (i
= 0; i
< Length
/ VACB_MAPPING_GRANULARITY
; i
++)
822 *CacheSeg
= CacheSegList
[i
];
823 Previous
= CacheSegList
[i
];
827 Previous
->NextInChain
= CacheSegList
[i
];
828 Previous
= CacheSegList
[i
];
832 Previous
->NextInChain
= NULL
;
834 return STATUS_SUCCESS
;
839 CcRosGetCacheSegment (
845 PCACHE_SEGMENT
* CacheSeg
)
847 PCACHE_SEGMENT current
;
852 DPRINT("CcRosGetCacheSegment()\n");
855 * Look for a cache segment already mapping the same data.
857 current
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
861 * Otherwise create a new segment.
863 Status
= CcRosCreateCacheSegment(Bcb
, FileOffset
, ¤t
);
864 if (!NT_SUCCESS(Status
))
870 KeAcquireGuardedMutex(&ViewLock
);
872 /* Move to the tail of the LRU list */
873 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
874 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
876 KeReleaseGuardedMutex(&ViewLock
);
879 * Return information about the segment to the caller.
881 *UptoDate
= current
->Valid
;
882 *BaseAddress
= current
->BaseAddress
;
883 DPRINT("*BaseAddress %p\n", *BaseAddress
);
885 *BaseOffset
= current
->FileOffset
;
886 return STATUS_SUCCESS
;
891 CcRosRequestCacheSegment (
896 PCACHE_SEGMENT
* CacheSeg
)
898 * FUNCTION: Request a page mapping for a BCB
905 if (FileOffset
% VACB_MAPPING_GRANULARITY
!= 0)
907 DPRINT1("Bad fileoffset %x should be multiple of %x",
908 FileOffset
, VACB_MAPPING_GRANULARITY
);
909 KeBugCheck(CACHE_MANAGER
);
912 return CcRosGetCacheSegment(Bcb
,
925 MEMORY_AREA
* MemoryArea
,
931 ASSERT(SwapEntry
== 0);
934 ASSERT(MmGetReferenceCountPage(Page
) == 1);
935 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
940 CcRosInternalFreeCacheSegment (
941 PCACHE_SEGMENT CacheSeg
)
943 * FUNCTION: Releases a cache segment associated with a BCB
953 DPRINT("Freeing cache segment 0x%p\n", CacheSeg
);
955 if ( CacheSeg
->Bcb
->Trace
)
957 DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg
->Bcb
, CacheSeg
);
961 RegionSize
= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
963 /* Unmap all the pages. */
964 for (i
= 0; i
< RegionSize
; i
++)
966 MmDeleteVirtualMapping(NULL
,
967 CacheSeg
->BaseAddress
+ (i
* PAGE_SIZE
),
971 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
974 KeAcquireSpinLock(&CiCacheSegMappingRegionLock
, &oldIrql
);
975 /* Deallocate all the pages used. */
976 Base
= (ULONG
)(CacheSeg
->BaseAddress
- CiCacheSegMappingRegionBase
) / PAGE_SIZE
;
978 RtlClearBits(&CiCacheSegMappingRegionAllocMap
, Base
, RegionSize
);
980 CiCacheSegMappingRegionHint
= min(CiCacheSegMappingRegionHint
, Base
);
982 KeReleaseSpinLock(&CiCacheSegMappingRegionLock
, oldIrql
);
984 MmLockAddressSpace(MmGetKernelAddressSpace());
985 MmFreeMemoryArea(MmGetKernelAddressSpace(),
986 CacheSeg
->MemoryArea
,
989 MmUnlockAddressSpace(MmGetKernelAddressSpace());
991 ExFreeToNPagedLookasideList(&CacheSegLookasideList
, CacheSeg
);
992 return STATUS_SUCCESS
;
1001 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
1002 IN PLARGE_INTEGER FileOffset OPTIONAL
,
1004 OUT PIO_STATUS_BLOCK IoStatus
)
1007 LARGE_INTEGER Offset
;
1008 PCACHE_SEGMENT current
;
1012 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1013 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
1015 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1017 Bcb
= (PBCB
)SectionObjectPointers
->SharedCacheMap
;
1021 Offset
= *FileOffset
;
1025 Offset
.QuadPart
= (LONGLONG
)0;
1026 Length
= Bcb
->FileSize
.u
.LowPart
;
1031 IoStatus
->Status
= STATUS_SUCCESS
;
1032 IoStatus
->Information
= 0;
1037 current
= CcRosLookupCacheSegment (Bcb
, Offset
.u
.LowPart
);
1038 if (current
!= NULL
)
1042 Status
= CcRosFlushCacheSegment(current
);
1043 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
1045 IoStatus
->Status
= Status
;
1048 KeReleaseMutex(¤t
->Mutex
, FALSE
);
1050 KeAcquireGuardedMutex(&ViewLock
);
1051 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
1052 CcRosCacheSegmentDecRefCount(current
);
1053 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
1054 KeReleaseGuardedMutex(&ViewLock
);
1057 Offset
.QuadPart
+= VACB_MAPPING_GRANULARITY
;
1058 if (Length
> VACB_MAPPING_GRANULARITY
)
1060 Length
-= VACB_MAPPING_GRANULARITY
;
1072 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1079 CcRosDeleteFileCache (
1080 PFILE_OBJECT FileObject
,
1083 * FUNCTION: Releases the BCB associated with a file object
1086 PLIST_ENTRY current_entry
;
1087 PCACHE_SEGMENT current
;
1088 LIST_ENTRY FreeList
;
1094 KeReleaseGuardedMutex(&ViewLock
);
1096 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1098 KeAcquireGuardedMutex(&ViewLock
);
1100 if (Bcb
->RefCount
== 0)
1102 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1104 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1105 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1108 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1111 * Release all cache segments.
1113 InitializeListHead(&FreeList
);
1114 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
1115 while (!IsListEmpty(&Bcb
->BcbSegmentListHead
))
1117 current_entry
= RemoveTailList(&Bcb
->BcbSegmentListHead
);
1118 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
, BcbSegmentListEntry
);
1119 RemoveEntryList(¤t
->CacheSegmentListEntry
);
1120 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
1123 RemoveEntryList(¤t
->DirtySegmentListEntry
);
1124 DirtyPageCount
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
1125 DPRINT1("Freeing dirty segment\n");
1127 InsertHeadList(&FreeList
, ¤t
->BcbSegmentListEntry
);
1132 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
1134 KeReleaseGuardedMutex(&ViewLock
);
1135 ObDereferenceObject (Bcb
->FileObject
);
1137 while (!IsListEmpty(&FreeList
))
1139 current_entry
= RemoveTailList(&FreeList
);
1140 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
, BcbSegmentListEntry
);
1141 CcRosInternalFreeCacheSegment(current
);
1143 ExFreeToNPagedLookasideList(&BcbLookasideList
, Bcb
);
1144 KeAcquireGuardedMutex(&ViewLock
);
1146 return STATUS_SUCCESS
;
1151 CcRosReferenceCache (
1152 PFILE_OBJECT FileObject
)
1155 KeAcquireGuardedMutex(&ViewLock
);
1156 Bcb
= (PBCB
)FileObject
->SectionObjectPointer
->SharedCacheMap
;
1158 if (Bcb
->RefCount
== 0)
1160 ASSERT(Bcb
->BcbRemoveListEntry
.Flink
!= NULL
);
1161 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1162 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1167 ASSERT(Bcb
->BcbRemoveListEntry
.Flink
== NULL
);
1170 KeReleaseGuardedMutex(&ViewLock
);
1175 CcRosSetRemoveOnClose (
1176 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1179 DPRINT("CcRosSetRemoveOnClose()\n");
1180 KeAcquireGuardedMutex(&ViewLock
);
1181 Bcb
= (PBCB
)SectionObjectPointer
->SharedCacheMap
;
1184 Bcb
->RemoveOnClose
= TRUE
;
1185 if (Bcb
->RefCount
== 0)
1187 CcRosDeleteFileCache(Bcb
->FileObject
, Bcb
);
1190 KeReleaseGuardedMutex(&ViewLock
);
1196 CcRosDereferenceCache (
1197 PFILE_OBJECT FileObject
)
1200 KeAcquireGuardedMutex(&ViewLock
);
1201 Bcb
= (PBCB
)FileObject
->SectionObjectPointer
->SharedCacheMap
;
1203 if (Bcb
->RefCount
> 0)
1206 if (Bcb
->RefCount
== 0)
1208 MmFreeSectionSegments(Bcb
->FileObject
);
1209 CcRosDeleteFileCache(FileObject
, Bcb
);
1212 KeReleaseGuardedMutex(&ViewLock
);
1217 CcRosReleaseFileCache (
1218 PFILE_OBJECT FileObject
)
1220 * FUNCTION: Called by the file system when a handle to a file object
1226 KeAcquireGuardedMutex(&ViewLock
);
1228 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1230 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1231 if (FileObject
->PrivateCacheMap
!= NULL
)
1233 FileObject
->PrivateCacheMap
= NULL
;
1234 if (Bcb
->RefCount
> 0)
1237 if (Bcb
->RefCount
== 0)
1239 MmFreeSectionSegments(Bcb
->FileObject
);
1240 CcRosDeleteFileCache(FileObject
, Bcb
);
1245 KeReleaseGuardedMutex(&ViewLock
);
1246 return STATUS_SUCCESS
;
1251 CcTryToInitializeFileCache (
1252 PFILE_OBJECT FileObject
)
1257 KeAcquireGuardedMutex(&ViewLock
);
1259 ASSERT(FileObject
->SectionObjectPointer
);
1260 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1263 Status
= STATUS_UNSUCCESSFUL
;
1267 if (FileObject
->PrivateCacheMap
== NULL
)
1269 FileObject
->PrivateCacheMap
= Bcb
;
1272 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1274 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1275 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1277 Status
= STATUS_SUCCESS
;
1279 KeReleaseGuardedMutex(&ViewLock
);
1287 CcRosInitializeFileCache (
1288 PFILE_OBJECT FileObject
,
1289 PCACHE_MANAGER_CALLBACKS CallBacks
,
1290 PVOID LazyWriterContext
)
1292 * FUNCTION: Initializes a BCB for a file object
1297 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1298 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p)\n",
1301 KeAcquireGuardedMutex(&ViewLock
);
1304 Bcb
= ExAllocateFromNPagedLookasideList(&BcbLookasideList
);
1307 KeReleaseGuardedMutex(&ViewLock
);
1308 return STATUS_UNSUCCESSFUL
;
1310 RtlZeroMemory(Bcb
, sizeof(*Bcb
));
1311 ObReferenceObjectByPointer(FileObject
,
1315 Bcb
->FileObject
= FileObject
;
1316 Bcb
->Callbacks
= CallBacks
;
1317 Bcb
->LazyWriteContext
= LazyWriterContext
;
1318 if (FileObject
->FsContext
)
1320 Bcb
->AllocationSize
=
1321 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->AllocationSize
;
1323 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->FileSize
;
1325 KeInitializeSpinLock(&Bcb
->BcbLock
);
1326 InitializeListHead(&Bcb
->BcbSegmentListHead
);
1327 FileObject
->SectionObjectPointer
->SharedCacheMap
= Bcb
;
1329 if (FileObject
->PrivateCacheMap
== NULL
)
1331 FileObject
->PrivateCacheMap
= Bcb
;
1334 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1336 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1337 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1339 KeReleaseGuardedMutex(&ViewLock
);
1341 return STATUS_SUCCESS
;
1349 CcGetFileObjectFromSectionPtrs (
1350 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1353 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1355 Bcb
= (PBCB
)SectionObjectPointers
->SharedCacheMap
;
1357 return Bcb
->FileObject
;
1373 DPRINT("CcInitView()\n");
1375 CiCacheSegMappingRegionHint
= 0;
1376 CiCacheSegMappingRegionBase
= NULL
;
1378 MmLockAddressSpace(MmGetKernelAddressSpace());
1380 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
1381 MEMORY_AREA_CACHE_SEGMENT
,
1382 &CiCacheSegMappingRegionBase
,
1383 CI_CACHESEG_MAPPING_REGION_SIZE
,
1389 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1390 if (!NT_SUCCESS(Status
))
1392 KeBugCheck(CACHE_MANAGER
);
1395 Buffer
= ExAllocatePoolWithTag(NonPagedPool
,
1396 CI_CACHESEG_MAPPING_REGION_SIZE
/ (PAGE_SIZE
* 8),
1400 KeBugCheck(CACHE_MANAGER
);
1403 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap
,
1405 CI_CACHESEG_MAPPING_REGION_SIZE
/ PAGE_SIZE
);
1406 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap
);
1408 KeInitializeSpinLock(&CiCacheSegMappingRegionLock
);
1410 InitializeListHead(&CacheSegmentListHead
);
1411 InitializeListHead(&DirtySegmentListHead
);
1412 InitializeListHead(&CacheSegmentLRUListHead
);
1413 InitializeListHead(&ClosedListHead
);
1414 KeInitializeGuardedMutex(&ViewLock
);
1415 ExInitializeNPagedLookasideList (&iBcbLookasideList
,
1419 sizeof(INTERNAL_BCB
),
1422 ExInitializeNPagedLookasideList (&BcbLookasideList
,
1429 ExInitializeNPagedLookasideList (&CacheSegLookasideList
,
1433 sizeof(CACHE_SEGMENT
),
1437 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1439 CcInitCacheZeroPage();