/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows
 *
 * (1) If caching for the FCB hasn't been initiated then so do by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page
 */
32 /* INCLUDES ******************************************************************/
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
42 /* GLOBALS *******************************************************************/
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocate/deallocate space from this block
47 * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
50 //#define CACHE_BITMAP
52 static LIST_ENTRY DirtySegmentListHead
;
53 static LIST_ENTRY CacheSegmentListHead
;
54 static LIST_ENTRY CacheSegmentLRUListHead
;
55 static LIST_ENTRY ClosedListHead
;
56 ULONG DirtyPageCount
=0;
58 KGUARDED_MUTEX ViewLock
;
61 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
63 static PVOID CiCacheSegMappingRegionBase
= NULL
;
64 static RTL_BITMAP CiCacheSegMappingRegionAllocMap
;
65 static ULONG CiCacheSegMappingRegionHint
;
66 static KSPIN_LOCK CiCacheSegMappingRegionLock
;
69 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
70 static NPAGED_LOOKASIDE_LIST BcbLookasideList
;
71 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList
;
#if DBG
/* Debug build: adjust the reference count and trace the change when the
 * owning cache map has tracing enabled. */
static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    ++cs->ReferenceCount;
    if ( cs->Bcb->Trace )
    {
        DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}
static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    --cs->ReferenceCount;
    if ( cs->Bcb->Trace )
    {
        DbgPrint("(%s:%i) CacheSegment %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}
#define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
#define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
#else
/* Free build: plain increments/decrements, no tracing overhead. */
#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
#endif
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg
);
103 /* FUNCTIONS *****************************************************************/
#if DBG
/*
 * Enable or disable reference-count tracing for a cache map. When enabling,
 * dumps the current state of every cache segment attached to the BCB.
 * NOTE(review): signature reconstructed — the original declaration lines are
 * missing from this chunk; confirm against the Cc header.
 */
VOID
CcRosTraceCacheMap (
    PBCB Bcb,
    BOOLEAN Trace )
{
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;

    if ( !Bcb )
        return;

    Bcb->Trace = Trace;

    if ( Trace )
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );

        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);

        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (current_entry != &Bcb->BcbSegmentListHead)
        {
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  CacheSegment 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
    }
}
#endif
153 CcRosFlushCacheSegment (
154 PCACHE_SEGMENT CacheSegment
)
159 Status
= WriteCacheSegment(CacheSegment
);
160 if (NT_SUCCESS(Status
))
162 KeAcquireGuardedMutex(&ViewLock
);
163 KeAcquireSpinLock(&CacheSegment
->Bcb
->BcbLock
, &oldIrql
);
165 CacheSegment
->Dirty
= FALSE
;
166 RemoveEntryList(&CacheSegment
->DirtySegmentListEntry
);
167 DirtyPageCount
-= CacheSegment
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
168 CcRosCacheSegmentDecRefCount(CacheSegment
);
170 KeReleaseSpinLock(&CacheSegment
->Bcb
->BcbLock
, oldIrql
);
171 KeReleaseGuardedMutex(&ViewLock
);
179 CcRosFlushDirtyPages (
184 PLIST_ENTRY current_entry
;
185 PCACHE_SEGMENT current
;
186 ULONG PagesPerSegment
;
189 LARGE_INTEGER ZeroTimeout
;
191 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
194 ZeroTimeout
.QuadPart
= 0;
196 KeEnterCriticalRegion();
197 KeAcquireGuardedMutex(&ViewLock
);
199 current_entry
= DirtySegmentListHead
.Flink
;
200 if (current_entry
== &DirtySegmentListHead
)
202 DPRINT("No Dirty pages\n");
205 while ((current_entry
!= &DirtySegmentListHead
) && (Target
> 0))
207 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
208 DirtySegmentListEntry
);
209 current_entry
= current_entry
->Flink
;
211 CcRosCacheSegmentIncRefCount(current
);
213 Locked
= current
->Bcb
->Callbacks
->AcquireForLazyWrite(
214 current
->Bcb
->LazyWriteContext
, Wait
);
217 CcRosCacheSegmentDecRefCount(current
);
221 Status
= KeWaitForSingleObject(¤t
->Mutex
,
225 Wait
? NULL
: &ZeroTimeout
);
226 if (Status
!= STATUS_SUCCESS
)
228 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
229 current
->Bcb
->LazyWriteContext
);
230 CcRosCacheSegmentDecRefCount(current
);
234 ASSERT(current
->Dirty
);
236 /* One reference is added above */
237 if (current
->ReferenceCount
> 2)
239 KeReleaseMutex(¤t
->Mutex
, 0);
240 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
241 current
->Bcb
->LazyWriteContext
);
242 CcRosCacheSegmentDecRefCount(current
);
246 PagesPerSegment
= current
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
248 KeReleaseGuardedMutex(&ViewLock
);
250 Status
= CcRosFlushCacheSegment(current
);
252 KeReleaseMutex(¤t
->Mutex
, 0);
253 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
254 current
->Bcb
->LazyWriteContext
);
256 KeAcquireGuardedMutex(&ViewLock
);
257 CcRosCacheSegmentDecRefCount(current
);
259 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
261 DPRINT1("CC: Failed to flush cache segment.\n");
265 (*Count
) += PagesPerSegment
;
266 Target
-= PagesPerSegment
;
269 current_entry
= DirtySegmentListHead
.Flink
;
272 KeReleaseGuardedMutex(&ViewLock
);
273 KeLeaveCriticalRegion();
275 DPRINT("CcRosFlushDirtyPages() finished\n");
276 return(STATUS_SUCCESS
);
285 * FUNCTION: Try to free some memory from the file cache.
287 * Target - The number of pages to be freed.
288 * Priority - The priority of free (currently unused).
289 * NrFreed - Points to a variable where the number of pages
290 * actually freed is returned.
293 PLIST_ENTRY current_entry
;
294 PCACHE_SEGMENT current
;
295 ULONG PagesPerSegment
;
301 BOOLEAN FlushedPages
= FALSE
;
303 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
305 InitializeListHead(&FreeList
);
310 KeAcquireGuardedMutex(&ViewLock
);
312 current_entry
= CacheSegmentLRUListHead
.Flink
;
313 while (current_entry
!= &CacheSegmentLRUListHead
)
315 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
316 CacheSegmentLRUListEntry
);
317 current_entry
= current_entry
->Flink
;
319 KeAcquireSpinLock(¤t
->Bcb
->BcbLock
, &oldIrql
);
321 /* Reference the cache segment */
322 CcRosCacheSegmentIncRefCount(current
);
324 /* Check if it's mapped and not dirty */
325 if (current
->MappedCount
> 0 && !current
->Dirty
)
327 /* We have to break these locks because Cc sucks */
328 KeReleaseSpinLock(¤t
->Bcb
->BcbLock
, oldIrql
);
329 KeReleaseGuardedMutex(&ViewLock
);
331 /* Page out the segment */
332 for (i
= 0; i
< current
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
; i
++)
334 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
336 MmPageOutPhysicalAddress(Page
);
339 /* Reacquire the locks */
340 KeAcquireGuardedMutex(&ViewLock
);
341 KeAcquireSpinLock(¤t
->Bcb
->BcbLock
, &oldIrql
);
344 /* Dereference the cache segment */
345 CcRosCacheSegmentDecRefCount(current
);
347 /* Check if we can free this entry now */
348 if (current
->ReferenceCount
== 0)
350 ASSERT(!current
->Dirty
);
351 ASSERT(!current
->MappedCount
);
353 RemoveEntryList(¤t
->BcbSegmentListEntry
);
354 RemoveEntryList(¤t
->CacheSegmentListEntry
);
355 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
356 InsertHeadList(&FreeList
, ¤t
->BcbSegmentListEntry
);
358 /* Calculate how many pages we freed for Mm */
359 PagesPerSegment
= current
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
360 PagesFreed
= min(PagesPerSegment
, Target
);
361 Target
-= PagesFreed
;
362 (*NrFreed
) += PagesFreed
;
365 KeReleaseSpinLock(¤t
->Bcb
->BcbLock
, oldIrql
);
368 KeReleaseGuardedMutex(&ViewLock
);
370 /* Try flushing pages if we haven't met our target */
371 if ((Target
> 0) && !FlushedPages
)
373 /* Flush dirty pages to disk */
374 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
);
377 /* We can only swap as many pages as we flushed */
378 if (PagesFreed
< Target
) Target
= PagesFreed
;
380 /* Check if we flushed anything */
383 /* Try again after flushing dirty pages */
384 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
389 while (!IsListEmpty(&FreeList
))
391 current_entry
= RemoveHeadList(&FreeList
);
392 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
393 BcbSegmentListEntry
);
394 CcRosInternalFreeCacheSegment(current
);
397 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
399 return(STATUS_SUCCESS
);
404 CcRosReleaseCacheSegment (
406 PCACHE_SEGMENT CacheSeg
,
416 DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %u)\n",
417 Bcb
, CacheSeg
, Valid
);
419 KeAcquireGuardedMutex(&ViewLock
);
420 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
422 CacheSeg
->Valid
= Valid
;
424 WasDirty
= CacheSeg
->Dirty
;
425 CacheSeg
->Dirty
= CacheSeg
->Dirty
|| Dirty
;
427 if (!WasDirty
&& CacheSeg
->Dirty
)
429 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
430 DirtyPageCount
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
435 CacheSeg
->MappedCount
++;
437 CcRosCacheSegmentDecRefCount(CacheSeg
);
438 if (Mapped
&& (CacheSeg
->MappedCount
== 1))
440 CcRosCacheSegmentIncRefCount(CacheSeg
);
442 if (!WasDirty
&& CacheSeg
->Dirty
)
444 CcRosCacheSegmentIncRefCount(CacheSeg
);
447 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
448 KeReleaseGuardedMutex(&ViewLock
);
449 KeReleaseMutex(&CacheSeg
->Mutex
, 0);
451 return(STATUS_SUCCESS
);
454 /* Returns with Cache Segment Lock Held! */
457 CcRosLookupCacheSegment (
461 PLIST_ENTRY current_entry
;
462 PCACHE_SEGMENT current
;
467 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %lu)\n", Bcb
, FileOffset
);
469 KeAcquireGuardedMutex(&ViewLock
);
470 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
472 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
473 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
475 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
476 BcbSegmentListEntry
);
477 if ((current
->FileOffset
<= FileOffset
) &&
478 ((current
->FileOffset
+ Bcb
->CacheSegmentSize
) > FileOffset
))
480 CcRosCacheSegmentIncRefCount(current
);
481 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
482 KeReleaseGuardedMutex(&ViewLock
);
483 KeWaitForSingleObject(¤t
->Mutex
,
490 current_entry
= current_entry
->Flink
;
493 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
494 KeReleaseGuardedMutex(&ViewLock
);
501 CcRosMarkDirtyCacheSegment (
505 PCACHE_SEGMENT CacheSeg
;
510 DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %lu)\n", Bcb
, FileOffset
);
512 CacheSeg
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
513 if (CacheSeg
== NULL
)
515 KeBugCheck(CACHE_MANAGER
);
518 KeAcquireGuardedMutex(&ViewLock
);
519 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
521 if (!CacheSeg
->Dirty
)
523 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
524 DirtyPageCount
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
528 CcRosCacheSegmentDecRefCount(CacheSeg
);
531 /* Move to the tail of the LRU list */
532 RemoveEntryList(&CacheSeg
->CacheSegmentLRUListEntry
);
533 InsertTailList(&CacheSegmentLRUListHead
, &CacheSeg
->CacheSegmentLRUListEntry
);
535 CacheSeg
->Dirty
= TRUE
;
537 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
538 KeReleaseGuardedMutex(&ViewLock
);
539 KeReleaseMutex(&CacheSeg
->Mutex
, 0);
541 return(STATUS_SUCCESS
);
546 CcRosUnmapCacheSegment (
551 PCACHE_SEGMENT CacheSeg
;
557 DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %lu, NowDirty %u)\n",
558 Bcb
, FileOffset
, NowDirty
);
560 CacheSeg
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
561 if (CacheSeg
== NULL
)
563 return(STATUS_UNSUCCESSFUL
);
566 KeAcquireGuardedMutex(&ViewLock
);
567 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
569 WasDirty
= CacheSeg
->Dirty
;
570 CacheSeg
->Dirty
= CacheSeg
->Dirty
|| NowDirty
;
572 CacheSeg
->MappedCount
--;
574 if (!WasDirty
&& NowDirty
)
576 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
577 DirtyPageCount
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
580 CcRosCacheSegmentDecRefCount(CacheSeg
);
581 if (!WasDirty
&& NowDirty
)
583 CcRosCacheSegmentIncRefCount(CacheSeg
);
585 if (CacheSeg
->MappedCount
== 0)
587 CcRosCacheSegmentDecRefCount(CacheSeg
);
590 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
591 KeReleaseGuardedMutex(&ViewLock
);
592 KeReleaseMutex(&CacheSeg
->Mutex
, 0);
594 return(STATUS_SUCCESS
);
599 CcRosCreateCacheSegment (
602 PCACHE_SEGMENT
* CacheSeg
)
604 PCACHE_SEGMENT current
;
605 PCACHE_SEGMENT previous
;
606 PLIST_ENTRY current_entry
;
610 ULONG StartingOffset
;
612 PHYSICAL_ADDRESS BoundaryAddressMultiple
;
616 DPRINT("CcRosCreateCacheSegment()\n");
618 BoundaryAddressMultiple
.QuadPart
= 0;
619 if (FileOffset
>= Bcb
->FileSize
.u
.LowPart
)
622 return STATUS_INVALID_PARAMETER
;
625 current
= ExAllocateFromNPagedLookasideList(&CacheSegLookasideList
);
626 current
->Valid
= FALSE
;
627 current
->Dirty
= FALSE
;
628 current
->PageOut
= FALSE
;
629 current
->FileOffset
= ROUND_DOWN(FileOffset
, Bcb
->CacheSegmentSize
);
634 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb
, current
);
637 current
->MappedCount
= 0;
638 current
->DirtySegmentListEntry
.Flink
= NULL
;
639 current
->DirtySegmentListEntry
.Blink
= NULL
;
640 current
->ReferenceCount
= 1;
641 KeInitializeMutex(¤t
->Mutex
, 0);
642 KeWaitForSingleObject(¤t
->Mutex
,
647 KeAcquireGuardedMutex(&ViewLock
);
650 /* There is window between the call to CcRosLookupCacheSegment
651 * and CcRosCreateCacheSegment. We must check if a segment on
652 * the fileoffset exist. If there exist a segment, we release
653 * our new created segment and return the existing one.
655 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
656 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
658 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
660 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
661 BcbSegmentListEntry
);
662 if (current
->FileOffset
<= FileOffset
&&
663 (current
->FileOffset
+ Bcb
->CacheSegmentSize
) > FileOffset
)
665 CcRosCacheSegmentIncRefCount(current
);
666 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
670 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
676 KeReleaseMutex(&(*CacheSeg
)->Mutex
, 0);
677 KeReleaseGuardedMutex(&ViewLock
);
678 ExFreeToNPagedLookasideList(&CacheSegLookasideList
, *CacheSeg
);
680 KeWaitForSingleObject(¤t
->Mutex
,
685 return STATUS_SUCCESS
;
687 if (current
->FileOffset
< FileOffset
)
689 if (previous
== NULL
)
695 if (previous
->FileOffset
< current
->FileOffset
)
701 current_entry
= current_entry
->Flink
;
703 /* There was no existing segment. */
707 InsertHeadList(&previous
->BcbSegmentListEntry
, ¤t
->BcbSegmentListEntry
);
711 InsertHeadList(&Bcb
->BcbSegmentListHead
, ¤t
->BcbSegmentListEntry
);
713 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
714 InsertTailList(&CacheSegmentListHead
, ¤t
->CacheSegmentListEntry
);
715 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
716 KeReleaseGuardedMutex(&ViewLock
);
718 KeAcquireSpinLock(&CiCacheSegMappingRegionLock
, &oldIrql
);
720 StartingOffset
= RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap
, Bcb
->CacheSegmentSize
/ PAGE_SIZE
, CiCacheSegMappingRegionHint
);
722 if (StartingOffset
== 0xffffffff)
724 DPRINT1("Out of CacheSeg mapping space\n");
725 KeBugCheck(CACHE_MANAGER
);
728 current
->BaseAddress
= CiCacheSegMappingRegionBase
+ StartingOffset
* PAGE_SIZE
;
730 if (CiCacheSegMappingRegionHint
== StartingOffset
)
732 CiCacheSegMappingRegionHint
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
735 KeReleaseSpinLock(&CiCacheSegMappingRegionLock
, oldIrql
);
737 MmLockAddressSpace(MmGetKernelAddressSpace());
738 current
->BaseAddress
= NULL
;
739 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
740 0, // nothing checks for cache_segment mareas, so set to 0
741 ¤t
->BaseAddress
,
742 Bcb
->CacheSegmentSize
,
744 (PMEMORY_AREA
*)¤t
->MemoryArea
,
747 BoundaryAddressMultiple
);
748 MmUnlockAddressSpace(MmGetKernelAddressSpace());
749 if (!NT_SUCCESS(Status
))
751 KeBugCheck(CACHE_MANAGER
);
755 /* Create a virtual mapping for this memory area */
756 MI_SET_USAGE(MI_USAGE_CACHE
);
760 if ((Bcb
->FileObject
) && (Bcb
->FileObject
->FileName
.Buffer
))
762 pos
= wcsrchr(Bcb
->FileObject
->FileName
.Buffer
, '\\');
763 len
= wcslen(pos
) * sizeof(WCHAR
);
764 if (pos
) snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
768 MmMapMemoryArea(current
->BaseAddress
, Bcb
->CacheSegmentSize
,
769 MC_CACHE
, PAGE_READWRITE
);
771 return(STATUS_SUCCESS
);
776 CcRosGetCacheSegmentChain (
780 PCACHE_SEGMENT
* CacheSeg
)
782 PCACHE_SEGMENT current
;
784 PCACHE_SEGMENT
* CacheSegList
;
785 PCACHE_SEGMENT Previous
= NULL
;
789 DPRINT("CcRosGetCacheSegmentChain()\n");
791 Length
= ROUND_UP(Length
, Bcb
->CacheSegmentSize
);
793 CacheSegList
= _alloca(sizeof(PCACHE_SEGMENT
) *
794 (Length
/ Bcb
->CacheSegmentSize
));
797 * Look for a cache segment already mapping the same data.
799 for (i
= 0; i
< (Length
/ Bcb
->CacheSegmentSize
); i
++)
801 ULONG CurrentOffset
= FileOffset
+ (i
* Bcb
->CacheSegmentSize
);
802 current
= CcRosLookupCacheSegment(Bcb
, CurrentOffset
);
805 KeAcquireGuardedMutex(&ViewLock
);
807 /* Move to tail of LRU list */
808 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
809 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
811 KeReleaseGuardedMutex(&ViewLock
);
813 CacheSegList
[i
] = current
;
817 CcRosCreateCacheSegment(Bcb
, CurrentOffset
, ¤t
);
818 CacheSegList
[i
] = current
;
822 for (i
= 0; i
< (Length
/ Bcb
->CacheSegmentSize
); i
++)
826 *CacheSeg
= CacheSegList
[i
];
827 Previous
= CacheSegList
[i
];
831 Previous
->NextInChain
= CacheSegList
[i
];
832 Previous
= CacheSegList
[i
];
836 Previous
->NextInChain
= NULL
;
838 return(STATUS_SUCCESS
);
843 CcRosGetCacheSegment (
849 PCACHE_SEGMENT
* CacheSeg
)
851 PCACHE_SEGMENT current
;
856 DPRINT("CcRosGetCacheSegment()\n");
859 * Look for a cache segment already mapping the same data.
861 current
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
865 * Otherwise create a new segment.
867 Status
= CcRosCreateCacheSegment(Bcb
, FileOffset
, ¤t
);
868 if (!NT_SUCCESS(Status
))
874 KeAcquireGuardedMutex(&ViewLock
);
876 /* Move to the tail of the LRU list */
877 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
878 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
880 KeReleaseGuardedMutex(&ViewLock
);
883 * Return information about the segment to the caller.
885 *UptoDate
= current
->Valid
;
886 *BaseAddress
= current
->BaseAddress
;
887 DPRINT("*BaseAddress %p\n", *BaseAddress
);
889 *BaseOffset
= current
->FileOffset
;
890 return(STATUS_SUCCESS
);
895 CcRosRequestCacheSegment (
900 PCACHE_SEGMENT
* CacheSeg
)
902 * FUNCTION: Request a page mapping for a BCB
909 if ((FileOffset
% Bcb
->CacheSegmentSize
) != 0)
911 DPRINT1("Bad fileoffset %x should be multiple of %x",
912 FileOffset
, Bcb
->CacheSegmentSize
);
913 KeBugCheck(CACHE_MANAGER
);
916 return(CcRosGetCacheSegment(Bcb
,
#ifdef CACHE_BITMAP
/*
 * Per-page callback used when freeing a cache segment's memory area:
 * returns each backing page to the MC_CACHE memory consumer.
 * NOTE(review): signature reconstructed — the surrounding declaration
 * lines are missing from this chunk; confirm against MmFreeMemoryArea's
 * free-page callback prototype.
 */
static
VOID
CcFreeCachePage (
    PVOID Context,
    MEMORY_AREA* MemoryArea,
    PVOID Address,
    PFN_NUMBER Page,
    SWAPENTRY SwapEntry,
    BOOLEAN Dirty)
{
    ASSERT(SwapEntry == 0);
    if (Page != 0)
    {
        ASSERT(MmGetReferenceCountPage(Page) == 1);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }
}
#endif
944 CcRosInternalFreeCacheSegment (
945 PCACHE_SEGMENT CacheSeg
)
947 * FUNCTION: Releases a cache segment associated with a BCB
957 DPRINT("Freeing cache segment 0x%p\n", CacheSeg
);
959 if ( CacheSeg
->Bcb
->Trace
)
961 DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg
->Bcb
, CacheSeg
);
965 RegionSize
= CacheSeg
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
967 /* Unmap all the pages. */
968 for (i
= 0; i
< RegionSize
; i
++)
970 MmDeleteVirtualMapping(NULL
,
971 CacheSeg
->BaseAddress
+ (i
* PAGE_SIZE
),
975 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
978 KeAcquireSpinLock(&CiCacheSegMappingRegionLock
, &oldIrql
);
979 /* Deallocate all the pages used. */
980 Base
= (ULONG
)(CacheSeg
->BaseAddress
- CiCacheSegMappingRegionBase
) / PAGE_SIZE
;
982 RtlClearBits(&CiCacheSegMappingRegionAllocMap
, Base
, RegionSize
);
984 CiCacheSegMappingRegionHint
= min (CiCacheSegMappingRegionHint
, Base
);
986 KeReleaseSpinLock(&CiCacheSegMappingRegionLock
, oldIrql
);
988 MmLockAddressSpace(MmGetKernelAddressSpace());
989 MmFreeMemoryArea(MmGetKernelAddressSpace(),
990 CacheSeg
->MemoryArea
,
993 MmUnlockAddressSpace(MmGetKernelAddressSpace());
995 ExFreeToNPagedLookasideList(&CacheSegLookasideList
, CacheSeg
);
996 return(STATUS_SUCCESS
);
1001 CcRosFreeCacheSegment (
1003 PCACHE_SEGMENT CacheSeg
)
1010 DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
1013 KeAcquireGuardedMutex(&ViewLock
);
1014 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
1015 RemoveEntryList(&CacheSeg
->BcbSegmentListEntry
);
1016 RemoveEntryList(&CacheSeg
->CacheSegmentListEntry
);
1017 RemoveEntryList(&CacheSeg
->CacheSegmentLRUListEntry
);
1018 if (CacheSeg
->Dirty
)
1020 RemoveEntryList(&CacheSeg
->DirtySegmentListEntry
);
1021 DirtyPageCount
-= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
1024 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
1025 KeReleaseGuardedMutex(&ViewLock
);
1027 Status
= CcRosInternalFreeCacheSegment(CacheSeg
);
1037 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
1038 IN PLARGE_INTEGER FileOffset OPTIONAL
,
1040 OUT PIO_STATUS_BLOCK IoStatus
)
1043 LARGE_INTEGER Offset
;
1044 PCACHE_SEGMENT current
;
1048 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1049 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
1051 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1053 Bcb
= (PBCB
)SectionObjectPointers
->SharedCacheMap
;
1057 Offset
= *FileOffset
;
1061 Offset
.QuadPart
= (LONGLONG
)0;
1062 Length
= Bcb
->FileSize
.u
.LowPart
;
1067 IoStatus
->Status
= STATUS_SUCCESS
;
1068 IoStatus
->Information
= 0;
1073 current
= CcRosLookupCacheSegment (Bcb
, Offset
.u
.LowPart
);
1074 if (current
!= NULL
)
1078 Status
= CcRosFlushCacheSegment(current
);
1079 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
1081 IoStatus
->Status
= Status
;
1084 KeReleaseMutex(¤t
->Mutex
, 0);
1086 KeAcquireGuardedMutex(&ViewLock
);
1087 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
1088 CcRosCacheSegmentDecRefCount(current
);
1089 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
1090 KeReleaseGuardedMutex(&ViewLock
);
1093 Offset
.QuadPart
+= Bcb
->CacheSegmentSize
;
1094 if (Length
> Bcb
->CacheSegmentSize
)
1096 Length
-= Bcb
->CacheSegmentSize
;
1108 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1115 CcRosDeleteFileCache (
1116 PFILE_OBJECT FileObject
,
1119 * FUNCTION: Releases the BCB associated with a file object
1122 PLIST_ENTRY current_entry
;
1123 PCACHE_SEGMENT current
;
1124 LIST_ENTRY FreeList
;
1130 KeReleaseGuardedMutex(&ViewLock
);
1132 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1134 KeAcquireGuardedMutex(&ViewLock
);
1136 if (Bcb
->RefCount
== 0)
1138 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1140 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1141 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1144 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1147 * Release all cache segments.
1149 InitializeListHead(&FreeList
);
1150 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
1151 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
1152 while (!IsListEmpty(&Bcb
->BcbSegmentListHead
))
1154 current_entry
= RemoveTailList(&Bcb
->BcbSegmentListHead
);
1155 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
, BcbSegmentListEntry
);
1156 RemoveEntryList(¤t
->CacheSegmentListEntry
);
1157 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
1160 RemoveEntryList(¤t
->DirtySegmentListEntry
);
1161 DirtyPageCount
-= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
1162 DPRINT1("Freeing dirty segment\n");
1164 InsertHeadList(&FreeList
, ¤t
->BcbSegmentListEntry
);
1169 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
1171 KeReleaseGuardedMutex(&ViewLock
);
1172 ObDereferenceObject (Bcb
->FileObject
);
1174 while (!IsListEmpty(&FreeList
))
1176 current_entry
= RemoveTailList(&FreeList
);
1177 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
, BcbSegmentListEntry
);
1178 CcRosInternalFreeCacheSegment(current
);
1180 ExFreeToNPagedLookasideList(&BcbLookasideList
, Bcb
);
1181 KeAcquireGuardedMutex(&ViewLock
);
1183 return(STATUS_SUCCESS
);
1188 CcRosReferenceCache (
1189 PFILE_OBJECT FileObject
)
1192 KeAcquireGuardedMutex(&ViewLock
);
1193 Bcb
= (PBCB
)FileObject
->SectionObjectPointer
->SharedCacheMap
;
1195 if (Bcb
->RefCount
== 0)
1197 ASSERT(Bcb
->BcbRemoveListEntry
.Flink
!= NULL
);
1198 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1199 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1204 ASSERT(Bcb
->BcbRemoveListEntry
.Flink
== NULL
);
1207 KeReleaseGuardedMutex(&ViewLock
);
1212 CcRosSetRemoveOnClose (
1213 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1216 DPRINT("CcRosSetRemoveOnClose()\n");
1217 KeAcquireGuardedMutex(&ViewLock
);
1218 Bcb
= (PBCB
)SectionObjectPointer
->SharedCacheMap
;
1221 Bcb
->RemoveOnClose
= TRUE
;
1222 if (Bcb
->RefCount
== 0)
1224 CcRosDeleteFileCache(Bcb
->FileObject
, Bcb
);
1227 KeReleaseGuardedMutex(&ViewLock
);
1233 CcRosDereferenceCache (
1234 PFILE_OBJECT FileObject
)
1237 KeAcquireGuardedMutex(&ViewLock
);
1238 Bcb
= (PBCB
)FileObject
->SectionObjectPointer
->SharedCacheMap
;
1240 if (Bcb
->RefCount
> 0)
1243 if (Bcb
->RefCount
== 0)
1245 MmFreeSectionSegments(Bcb
->FileObject
);
1246 CcRosDeleteFileCache(FileObject
, Bcb
);
1249 KeReleaseGuardedMutex(&ViewLock
);
1254 CcRosReleaseFileCache (
1255 PFILE_OBJECT FileObject
)
1257 * FUNCTION: Called by the file system when a handle to a file object
1263 KeAcquireGuardedMutex(&ViewLock
);
1265 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1267 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1268 if (FileObject
->PrivateCacheMap
!= NULL
)
1270 FileObject
->PrivateCacheMap
= NULL
;
1271 if (Bcb
->RefCount
> 0)
1274 if (Bcb
->RefCount
== 0)
1276 MmFreeSectionSegments(Bcb
->FileObject
);
1277 CcRosDeleteFileCache(FileObject
, Bcb
);
1282 KeReleaseGuardedMutex(&ViewLock
);
1283 return(STATUS_SUCCESS
);
1288 CcTryToInitializeFileCache (
1289 PFILE_OBJECT FileObject
)
1294 KeAcquireGuardedMutex(&ViewLock
);
1296 ASSERT(FileObject
->SectionObjectPointer
);
1297 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1300 Status
= STATUS_UNSUCCESSFUL
;
1304 if (FileObject
->PrivateCacheMap
== NULL
)
1306 FileObject
->PrivateCacheMap
= Bcb
;
1309 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1311 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1312 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1314 Status
= STATUS_SUCCESS
;
1316 KeReleaseGuardedMutex(&ViewLock
);
1324 CcRosInitializeFileCache (
1325 PFILE_OBJECT FileObject
,
1326 ULONG CacheSegmentSize
,
1327 PCACHE_MANAGER_CALLBACKS CallBacks
,
1328 PVOID LazyWriterContext
)
1330 * FUNCTION: Initializes a BCB for a file object
1335 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1336 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %lu)\n",
1337 FileObject
, Bcb
, CacheSegmentSize
);
1339 KeAcquireGuardedMutex(&ViewLock
);
1342 Bcb
= ExAllocateFromNPagedLookasideList(&BcbLookasideList
);
1345 KeReleaseGuardedMutex(&ViewLock
);
1346 return(STATUS_UNSUCCESSFUL
);
1348 memset(Bcb
, 0, sizeof(BCB
));
1349 ObReferenceObjectByPointer(FileObject
,
1353 Bcb
->FileObject
= FileObject
;
1354 Bcb
->CacheSegmentSize
= CacheSegmentSize
;
1355 Bcb
->Callbacks
= CallBacks
;
1356 Bcb
->LazyWriteContext
= LazyWriterContext
;
1357 if (FileObject
->FsContext
)
1359 Bcb
->AllocationSize
=
1360 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->AllocationSize
;
1362 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->FileSize
;
1364 KeInitializeSpinLock(&Bcb
->BcbLock
);
1365 InitializeListHead(&Bcb
->BcbSegmentListHead
);
1366 FileObject
->SectionObjectPointer
->SharedCacheMap
= Bcb
;
1368 if (FileObject
->PrivateCacheMap
== NULL
)
1370 FileObject
->PrivateCacheMap
= Bcb
;
1373 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1375 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1376 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1378 KeReleaseGuardedMutex(&ViewLock
);
1380 return(STATUS_SUCCESS
);
1388 CcGetFileObjectFromSectionPtrs (
1389 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1392 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1394 Bcb
= (PBCB
)SectionObjectPointers
->SharedCacheMap
;
1396 return Bcb
->FileObject
;
1410 PHYSICAL_ADDRESS BoundaryAddressMultiple
;
1413 DPRINT("CcInitView()\n");
1415 BoundaryAddressMultiple
.QuadPart
= 0;
1416 CiCacheSegMappingRegionHint
= 0;
1417 CiCacheSegMappingRegionBase
= NULL
;
1419 MmLockAddressSpace(MmGetKernelAddressSpace());
1421 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
1422 MEMORY_AREA_CACHE_SEGMENT
,
1423 &CiCacheSegMappingRegionBase
,
1424 CI_CACHESEG_MAPPING_REGION_SIZE
,
1429 BoundaryAddressMultiple
);
1430 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1431 if (!NT_SUCCESS(Status
))
1433 KeBugCheck(CACHE_MANAGER
);
1436 Buffer
= ExAllocatePool(NonPagedPool
, CI_CACHESEG_MAPPING_REGION_SIZE
/ (PAGE_SIZE
* 8));
1439 KeBugCheck(CACHE_MANAGER
);
1442 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap
, Buffer
, CI_CACHESEG_MAPPING_REGION_SIZE
/ PAGE_SIZE
);
1443 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap
);
1445 KeInitializeSpinLock(&CiCacheSegMappingRegionLock
);
1447 InitializeListHead(&CacheSegmentListHead
);
1448 InitializeListHead(&DirtySegmentListHead
);
1449 InitializeListHead(&CacheSegmentLRUListHead
);
1450 InitializeListHead(&ClosedListHead
);
1451 KeInitializeGuardedMutex(&ViewLock
);
1452 ExInitializeNPagedLookasideList (&iBcbLookasideList
,
1456 sizeof(INTERNAL_BCB
),
1459 ExInitializeNPagedLookasideList (&BcbLookasideList
,
1466 ExInitializeNPagedLookasideList (&CacheSegLookasideList
,
1470 sizeof(CACHE_SEGMENT
),
1474 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1476 CcInitCacheZeroPage();