2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
10 /* NOTES **********************************************************************
12 * This is not the NT implementation of a file cache nor anything much like
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
18 * (1) If caching for the FCB hasn't been initiated then so do by calling
19 * CcInitializeFileCache.
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
26 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
28 * (4) Copy the data into or out of the page as necessary.
30 * (5) Release the cache page
32 /* INCLUDES ******************************************************************/
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
42 /* GLOBALS *******************************************************************/
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocate/deallocate space from this block
47 * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
50 //#define CACHE_BITMAP
/* Segments with modified data awaiting the lazy writer (guarded by ViewLock). */
static LIST_ENTRY DirtySegmentListHead;
/* Every cache segment in the system (guarded by ViewLock). */
static LIST_ENTRY CacheSegmentListHead;
/* LRU ordering of segments; CcRosTrimCache evicts from the head. */
static LIST_ENTRY CacheSegmentLRUListHead;
/* BCBs whose last handle is closed but that are kept for possible reuse. */
static LIST_ENTRY ClosedListHead;

/* Number of pages currently on the dirty list (guarded by ViewLock). */
ULONG DirtyPageCount = 0;

/* Serializes access to the global lists and counters above. */
KGUARDED_MUTEX ViewLock;

#define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)

/* NOTE(review): the Ci* variables below belong to the CACHE_BITMAP
 * configuration described above; the #ifdef CACHE_BITMAP guards appear to
 * have been lost in this copy -- confirm against the original file. */
static PVOID CiCacheSegMappingRegionBase = NULL;
static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
static ULONG CiCacheSegMappingRegionHint;
static KSPIN_LOCK CiCacheSegMappingRegionLock;

/* Lookaside lists for fast non-paged allocation of internal BCBs, BCBs and
 * cache segments. iBcbLookasideList is shared with other Cc source files. */
NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST BcbLookasideList;
static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
#if DBG
/*
 * Debug helper behind the CcRosCacheSegmentIncRefCount macro: increments the
 * segment's reference count and, when tracing is enabled for the owning
 * cache map, logs the transition with the caller's file/line.
 * NOTE(review): in this copy the increment itself had been lost -- the
 * routine logged "++RefCount" without ever modifying ReferenceCount.
 */
static void CcRosCacheSegmentIncRefCount_(PCACHE_SEGMENT cs, const char* file, int line)
{
    ++cs->ReferenceCount;
    if (cs->Bcb->Trace)
    {
        DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut);
    }
}
#endif
#if DBG
/*
 * Debug helper behind the CcRosCacheSegmentDecRefCount macro: decrements the
 * segment's reference count and, when tracing is enabled for the owning
 * cache map, logs the transition with the caller's file/line.
 * NOTE(review): in this copy the decrement itself had been lost -- the
 * routine logged "--RefCount" without ever modifying ReferenceCount.
 */
static void CcRosCacheSegmentDecRefCount_(PCACHE_SEGMENT cs, const char* file, int line)
{
    --cs->ReferenceCount;
    if (cs->Bcb->Trace)
    {
        DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut);
    }
}
#endif
/*
 * Reference-count helpers. Debug builds route through the traced _ helpers
 * above; release builds inline a bare increment/decrement.
 * NOTE(review): both macro pairs were previously defined unconditionally,
 * so the release pair silently redefined (and disabled) the traced pair.
 */
#if DBG
#define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
#define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
#else
#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
#endif
/* Forward declaration: frees the mapping and memory backing a cache segment
 * (defined later in this file).
 * NOTE(review): the return-type line (NTSTATUS) appears to have been lost
 * from this copy -- confirm against the original file. */
CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
103 /* FUNCTIONS *****************************************************************/
#if DBG
/*
 * Enables or disables per-cache-map tracing. When enabling, walks the BCB's
 * segment list under ViewLock + BcbLock and dumps each segment's state.
 * NOTE(review): the signature and branching of this routine were lost in
 * this copy and have been reconstructed -- confirm against the original.
 */
VOID
CcRosTraceCacheMap(PBCB Bcb, BOOLEAN Trace)
{
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;

    if (!Bcb)
        return;

    Bcb->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb);

        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);

        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (current_entry != &Bcb->BcbSegmentListHead)
        {
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut);
        }

        KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb);
    }
}
#endif
153 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment
)
158 Status
= WriteCacheSegment(CacheSegment
);
159 if (NT_SUCCESS(Status
))
161 KeAcquireGuardedMutex(&ViewLock
);
162 KeAcquireSpinLock(&CacheSegment
->Bcb
->BcbLock
, &oldIrql
);
164 CacheSegment
->Dirty
= FALSE
;
165 RemoveEntryList(&CacheSegment
->DirtySegmentListEntry
);
166 DirtyPageCount
-= CacheSegment
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
167 CcRosCacheSegmentDecRefCount ( CacheSegment
);
169 KeReleaseSpinLock(&CacheSegment
->Bcb
->BcbLock
, oldIrql
);
170 KeReleaseGuardedMutex(&ViewLock
);
178 CcRosFlushDirtyPages(ULONG Target
, PULONG Count
)
180 PLIST_ENTRY current_entry
;
181 PCACHE_SEGMENT current
;
182 ULONG PagesPerSegment
;
185 static ULONG WriteCount
[4] = {0, 0, 0, 0};
188 DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target
);
192 KeEnterCriticalRegion();
193 KeAcquireGuardedMutex(&ViewLock
);
195 WriteCount
[0] = WriteCount
[1];
196 WriteCount
[1] = WriteCount
[2];
197 WriteCount
[2] = WriteCount
[3];
200 NewTarget
= WriteCount
[0] + WriteCount
[1] + WriteCount
[2];
202 if (NewTarget
< DirtyPageCount
)
204 NewTarget
= (DirtyPageCount
- NewTarget
+ 3) / 4;
205 WriteCount
[0] += NewTarget
;
206 WriteCount
[1] += NewTarget
;
207 WriteCount
[2] += NewTarget
;
208 WriteCount
[3] += NewTarget
;
211 NewTarget
= WriteCount
[0];
213 Target
= max(NewTarget
, Target
);
215 current_entry
= DirtySegmentListHead
.Flink
;
216 if (current_entry
== &DirtySegmentListHead
)
218 DPRINT("No Dirty pages\n");
221 while (current_entry
!= &DirtySegmentListHead
&& Target
> 0)
223 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
224 DirtySegmentListEntry
);
225 current_entry
= current_entry
->Flink
;
227 Locked
= current
->Bcb
->Callbacks
->AcquireForLazyWrite(
228 current
->Bcb
->LazyWriteContext
, TRUE
);
234 ExAcquirePushLockExclusive(¤t
->Lock
);
236 ASSERT(current
->Dirty
);
237 if (current
->ReferenceCount
> 1)
239 ExReleasePushLock(¤t
->Lock
);
240 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
241 current
->Bcb
->LazyWriteContext
);
245 PagesPerSegment
= current
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
247 KeReleaseGuardedMutex(&ViewLock
);
249 Status
= CcRosFlushCacheSegment(current
);
251 ExReleasePushLock(¤t
->Lock
);
252 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
253 current
->Bcb
->LazyWriteContext
);
255 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
257 DPRINT1("CC: Failed to flush cache segment.\n");
261 (*Count
) += PagesPerSegment
;
262 Target
-= PagesPerSegment
;
265 KeAcquireGuardedMutex(&ViewLock
);
266 current_entry
= DirtySegmentListHead
.Flink
;
269 if (*Count
< NewTarget
)
271 WriteCount
[1] += (NewTarget
- *Count
);
274 KeReleaseGuardedMutex(&ViewLock
);
275 KeLeaveCriticalRegion();
277 DPRINT("CcRosFlushDirtyPages() finished\n");
278 return(STATUS_SUCCESS
);
282 CcRosTrimCache(ULONG Target
, ULONG Priority
, PULONG NrFreed
)
284 * FUNCTION: Try to free some memory from the file cache.
286 * Target - The number of pages to be freed.
287 * Priority - The priority of free (currently unused).
288 * NrFreed - Points to a variable where the number of pages
289 * actually freed is returned.
292 PLIST_ENTRY current_entry
;
293 PCACHE_SEGMENT current
;
294 ULONG PagesPerSegment
;
300 BOOLEAN FlushedPages
= FALSE
;
302 DPRINT("CcRosTrimCache(Target %d)\n", Target
);
304 InitializeListHead(&FreeList
);
309 KeAcquireGuardedMutex(&ViewLock
);
311 current_entry
= CacheSegmentLRUListHead
.Flink
;
312 while (current_entry
!= &CacheSegmentLRUListHead
)
314 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
315 CacheSegmentLRUListEntry
);
316 current_entry
= current_entry
->Flink
;
318 KeAcquireSpinLock(¤t
->Bcb
->BcbLock
, &oldIrql
);
320 /* Reference the cache segment */
321 CcRosCacheSegmentIncRefCount(current
);
323 /* Check if it's mapped and not dirty */
324 if (current
->MappedCount
> 0 && !current
->Dirty
)
326 /* We have to break these locks because Cc sucks */
327 KeReleaseSpinLock(¤t
->Bcb
->BcbLock
, oldIrql
);
328 KeReleaseGuardedMutex(&ViewLock
);
330 /* Page out the segment */
331 for (i
= 0; i
< current
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
; i
++)
333 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
335 MmPageOutPhysicalAddress(Page
);
338 /* Reacquire the locks */
339 KeAcquireGuardedMutex(&ViewLock
);
340 KeAcquireSpinLock(¤t
->Bcb
->BcbLock
, &oldIrql
);
343 /* Dereference the cache segment */
344 CcRosCacheSegmentDecRefCount(current
);
346 /* Check if we can free this entry now */
347 if (current
->ReferenceCount
== 0)
349 ASSERT(!current
->Dirty
);
350 ASSERT(!current
->MappedCount
);
352 RemoveEntryList(¤t
->BcbSegmentListEntry
);
353 RemoveEntryList(¤t
->CacheSegmentListEntry
);
354 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
355 InsertHeadList(&FreeList
, ¤t
->BcbSegmentListEntry
);
357 /* Calculate how many pages we freed for Mm */
358 PagesPerSegment
= current
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
359 PagesFreed
= min(PagesPerSegment
, Target
);
360 Target
-= PagesFreed
;
361 (*NrFreed
) += PagesFreed
;
364 KeReleaseSpinLock(¤t
->Bcb
->BcbLock
, oldIrql
);
367 KeReleaseGuardedMutex(&ViewLock
);
369 /* Try flushing pages if we haven't met our target */
370 if (Target
> 0 && !FlushedPages
)
372 /* Flush dirty pages to disk */
373 CcRosFlushDirtyPages(Target
, &PagesFreed
);
376 /* We can only swap as many pages as we flushed */
377 if (PagesFreed
< Target
) Target
= PagesFreed
;
379 /* Check if we flushed anything */
382 /* Try again after flushing dirty pages */
383 DPRINT("Flushed %d dirty cache pages to disk\n", PagesFreed
);
388 while (!IsListEmpty(&FreeList
))
390 current_entry
= RemoveHeadList(&FreeList
);
391 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
392 BcbSegmentListEntry
);
393 CcRosInternalFreeCacheSegment(current
);
396 DPRINT("Evicted %d cache pages\n", (*NrFreed
));
398 return(STATUS_SUCCESS
);
403 CcRosReleaseCacheSegment(PBCB Bcb
,
404 PCACHE_SEGMENT CacheSeg
,
409 BOOLEAN WasDirty
= CacheSeg
->Dirty
;
414 DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
415 Bcb
, CacheSeg
, Valid
);
417 CacheSeg
->Valid
= Valid
;
418 CacheSeg
->Dirty
= CacheSeg
->Dirty
|| Dirty
;
420 KeAcquireGuardedMutex(&ViewLock
);
421 if (!WasDirty
&& CacheSeg
->Dirty
)
423 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
424 DirtyPageCount
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
429 CacheSeg
->MappedCount
++;
431 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
432 CcRosCacheSegmentDecRefCount(CacheSeg
);
433 if (Mapped
&& CacheSeg
->MappedCount
== 1)
435 CcRosCacheSegmentIncRefCount(CacheSeg
);
437 if (!WasDirty
&& CacheSeg
->Dirty
)
439 CcRosCacheSegmentIncRefCount(CacheSeg
);
441 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
442 KeReleaseGuardedMutex(&ViewLock
);
443 ExReleasePushLock(&CacheSeg
->Lock
);
444 KeLeaveCriticalRegion();
446 return(STATUS_SUCCESS
);
449 /* Returns with Cache Segment Lock Held! */
452 CcRosLookupCacheSegment(PBCB Bcb
, ULONG FileOffset
)
454 PLIST_ENTRY current_entry
;
455 PCACHE_SEGMENT current
;
460 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb
, FileOffset
);
462 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
463 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
464 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
466 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
467 BcbSegmentListEntry
);
468 if (current
->FileOffset
<= FileOffset
&&
469 (current
->FileOffset
+ Bcb
->CacheSegmentSize
) > FileOffset
)
471 CcRosCacheSegmentIncRefCount(current
);
472 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
473 KeEnterCriticalRegion();
474 ExAcquirePushLockExclusive(¤t
->Lock
);
477 current_entry
= current_entry
->Flink
;
479 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
485 CcRosMarkDirtyCacheSegment(PBCB Bcb
, ULONG FileOffset
)
487 PCACHE_SEGMENT CacheSeg
;
492 DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb
, FileOffset
);
494 CacheSeg
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
495 if (CacheSeg
== NULL
)
497 KeBugCheck(CACHE_MANAGER
);
499 if (!CacheSeg
->Dirty
)
501 KeAcquireGuardedMutex(&ViewLock
);
502 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
503 DirtyPageCount
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
504 KeReleaseGuardedMutex(&ViewLock
);
508 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
509 CcRosCacheSegmentDecRefCount(CacheSeg
);
510 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
513 KeAcquireGuardedMutex(&ViewLock
);
515 /* Move to the tail of the LRU list */
516 RemoveEntryList(&CacheSeg
->CacheSegmentLRUListEntry
);
517 InsertTailList(&CacheSegmentLRUListHead
, &CacheSeg
->CacheSegmentLRUListEntry
);
519 KeReleaseGuardedMutex(&ViewLock
);
521 CacheSeg
->Dirty
= TRUE
;
522 ExReleasePushLock(&CacheSeg
->Lock
);
523 KeLeaveCriticalRegion();
525 return(STATUS_SUCCESS
);
530 CcRosUnmapCacheSegment(PBCB Bcb
, ULONG FileOffset
, BOOLEAN NowDirty
)
532 PCACHE_SEGMENT CacheSeg
;
538 DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
539 Bcb
, FileOffset
, NowDirty
);
541 CacheSeg
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
542 if (CacheSeg
== NULL
)
544 return(STATUS_UNSUCCESSFUL
);
547 WasDirty
= CacheSeg
->Dirty
;
548 CacheSeg
->Dirty
= CacheSeg
->Dirty
|| NowDirty
;
550 CacheSeg
->MappedCount
--;
552 if (!WasDirty
&& NowDirty
)
554 KeAcquireGuardedMutex(&ViewLock
);
555 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
556 DirtyPageCount
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
557 KeReleaseGuardedMutex(&ViewLock
);
560 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
561 CcRosCacheSegmentDecRefCount(CacheSeg
);
562 if (!WasDirty
&& NowDirty
)
564 CcRosCacheSegmentIncRefCount(CacheSeg
);
566 if (CacheSeg
->MappedCount
== 0)
568 CcRosCacheSegmentDecRefCount(CacheSeg
);
570 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
572 ExReleasePushLock(&CacheSeg
->Lock
);
573 KeLeaveCriticalRegion();
575 return(STATUS_SUCCESS
);
580 CcRosCreateCacheSegment(PBCB Bcb
,
582 PCACHE_SEGMENT
* CacheSeg
)
584 PCACHE_SEGMENT current
;
585 PCACHE_SEGMENT previous
;
586 PLIST_ENTRY current_entry
;
590 ULONG StartingOffset
;
592 PHYSICAL_ADDRESS BoundaryAddressMultiple
;
596 DPRINT("CcRosCreateCacheSegment()\n");
598 BoundaryAddressMultiple
.QuadPart
= 0;
599 if (FileOffset
>= Bcb
->FileSize
.u
.LowPart
)
602 return STATUS_INVALID_PARAMETER
;
605 current
= ExAllocateFromNPagedLookasideList(&CacheSegLookasideList
);
606 current
->Valid
= FALSE
;
607 current
->Dirty
= FALSE
;
608 current
->PageOut
= FALSE
;
609 current
->FileOffset
= ROUND_DOWN(FileOffset
, Bcb
->CacheSegmentSize
);
614 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb
, current
);
617 current
->MappedCount
= 0;
618 current
->DirtySegmentListEntry
.Flink
= NULL
;
619 current
->DirtySegmentListEntry
.Blink
= NULL
;
620 current
->ReferenceCount
= 1;
621 ExInitializePushLock(¤t
->Lock
);
622 KeEnterCriticalRegion();
623 ExAcquirePushLockExclusive(¤t
->Lock
);
624 KeAcquireGuardedMutex(&ViewLock
);
627 /* There is window between the call to CcRosLookupCacheSegment
628 * and CcRosCreateCacheSegment. We must check if a segment on
629 * the fileoffset exist. If there exist a segment, we release
630 * our new created segment and return the existing one.
632 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
633 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
635 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
637 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
638 BcbSegmentListEntry
);
639 if (current
->FileOffset
<= FileOffset
&&
640 (current
->FileOffset
+ Bcb
->CacheSegmentSize
) > FileOffset
)
642 CcRosCacheSegmentIncRefCount(current
);
643 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
647 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
653 ExReleasePushLock(&(*CacheSeg
)->Lock
);
654 KeReleaseGuardedMutex(&ViewLock
);
655 ExFreeToNPagedLookasideList(&CacheSegLookasideList
, *CacheSeg
);
657 /* We're still in the critical region from above */
658 ExAcquirePushLockExclusive(¤t
->Lock
);
659 return STATUS_SUCCESS
;
661 if (current
->FileOffset
< FileOffset
)
663 if (previous
== NULL
)
669 if (previous
->FileOffset
< current
->FileOffset
)
675 current_entry
= current_entry
->Flink
;
677 /* There was no existing segment. */
681 InsertHeadList(&previous
->BcbSegmentListEntry
, ¤t
->BcbSegmentListEntry
);
685 InsertHeadList(&Bcb
->BcbSegmentListHead
, ¤t
->BcbSegmentListEntry
);
687 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
688 InsertTailList(&CacheSegmentListHead
, ¤t
->CacheSegmentListEntry
);
689 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
690 KeReleaseGuardedMutex(&ViewLock
);
692 KeAcquireSpinLock(&CiCacheSegMappingRegionLock
, &oldIrql
);
694 StartingOffset
= RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap
, Bcb
->CacheSegmentSize
/ PAGE_SIZE
, CiCacheSegMappingRegionHint
);
696 if (StartingOffset
== 0xffffffff)
698 DPRINT1("Out of CacheSeg mapping space\n");
699 KeBugCheck(CACHE_MANAGER
);
702 current
->BaseAddress
= CiCacheSegMappingRegionBase
+ StartingOffset
* PAGE_SIZE
;
704 if (CiCacheSegMappingRegionHint
== StartingOffset
)
706 CiCacheSegMappingRegionHint
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
709 KeReleaseSpinLock(&CiCacheSegMappingRegionLock
, oldIrql
);
711 MmLockAddressSpace(MmGetKernelAddressSpace());
712 current
->BaseAddress
= NULL
;
713 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
714 0, // nothing checks for cache_segment mareas, so set to 0
715 ¤t
->BaseAddress
,
716 Bcb
->CacheSegmentSize
,
718 (PMEMORY_AREA
*)¤t
->MemoryArea
,
721 BoundaryAddressMultiple
);
722 MmUnlockAddressSpace(MmGetKernelAddressSpace());
723 if (!NT_SUCCESS(Status
))
725 KeBugCheck(CACHE_MANAGER
);
729 /* Create a virtual mapping for this memory area */
730 MI_SET_USAGE(MI_USAGE_CACHE
);
734 if ((Bcb
->FileObject
) && (Bcb
->FileObject
->FileName
.Buffer
))
736 pos
= wcsrchr(Bcb
->FileObject
->FileName
.Buffer
, '\\');
737 len
= wcslen(pos
) * sizeof(WCHAR
);
738 if (pos
) snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
742 MmMapMemoryArea(current
->BaseAddress
, Bcb
->CacheSegmentSize
,
743 MC_CACHE
, PAGE_READWRITE
);
745 return(STATUS_SUCCESS
);
750 CcRosGetCacheSegmentChain(PBCB Bcb
,
753 PCACHE_SEGMENT
* CacheSeg
)
755 PCACHE_SEGMENT current
;
757 PCACHE_SEGMENT
* CacheSegList
;
758 PCACHE_SEGMENT Previous
= NULL
;
762 DPRINT("CcRosGetCacheSegmentChain()\n");
764 Length
= ROUND_UP(Length
, Bcb
->CacheSegmentSize
);
766 CacheSegList
= _alloca(sizeof(PCACHE_SEGMENT
) *
767 (Length
/ Bcb
->CacheSegmentSize
));
770 * Look for a cache segment already mapping the same data.
772 for (i
= 0; i
< (Length
/ Bcb
->CacheSegmentSize
); i
++)
774 ULONG CurrentOffset
= FileOffset
+ (i
* Bcb
->CacheSegmentSize
);
775 current
= CcRosLookupCacheSegment(Bcb
, CurrentOffset
);
778 KeAcquireGuardedMutex(&ViewLock
);
780 /* Move to tail of LRU list */
781 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
782 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
784 KeReleaseGuardedMutex(&ViewLock
);
786 CacheSegList
[i
] = current
;
790 CcRosCreateCacheSegment(Bcb
, CurrentOffset
, ¤t
);
791 CacheSegList
[i
] = current
;
795 for (i
= 0; i
< (Length
/ Bcb
->CacheSegmentSize
); i
++)
799 *CacheSeg
= CacheSegList
[i
];
800 Previous
= CacheSegList
[i
];
804 Previous
->NextInChain
= CacheSegList
[i
];
805 Previous
= CacheSegList
[i
];
809 Previous
->NextInChain
= NULL
;
811 return(STATUS_SUCCESS
);
816 CcRosGetCacheSegment(PBCB Bcb
,
821 PCACHE_SEGMENT
* CacheSeg
)
823 PCACHE_SEGMENT current
;
828 DPRINT("CcRosGetCacheSegment()\n");
831 * Look for a cache segment already mapping the same data.
833 current
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
837 * Otherwise create a new segment.
839 Status
= CcRosCreateCacheSegment(Bcb
, FileOffset
, ¤t
);
840 if (!NT_SUCCESS(Status
))
846 KeAcquireGuardedMutex(&ViewLock
);
848 /* Move to the tail of the LRU list */
849 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
850 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
852 KeReleaseGuardedMutex(&ViewLock
);
855 * Return information about the segment to the caller.
857 *UptoDate
= current
->Valid
;
858 *BaseAddress
= current
->BaseAddress
;
859 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress
);
861 *BaseOffset
= current
->FileOffset
;
862 return(STATUS_SUCCESS
);
866 CcRosRequestCacheSegment(PBCB Bcb
,
870 PCACHE_SEGMENT
* CacheSeg
)
872 * FUNCTION: Request a page mapping for a BCB
879 if ((FileOffset
% Bcb
->CacheSegmentSize
) != 0)
881 DPRINT1("Bad fileoffset %x should be multiple of %x",
882 FileOffset
, Bcb
->CacheSegmentSize
);
883 KeBugCheck(CACHE_MANAGER
);
886 return(CcRosGetCacheSegment(Bcb
,
896 CcFreeCachePage(PVOID Context
, MEMORY_AREA
* MemoryArea
, PVOID Address
,
897 PFN_NUMBER Page
, SWAPENTRY SwapEntry
, BOOLEAN Dirty
)
899 ASSERT(SwapEntry
== 0);
902 ASSERT(MmGetReferenceCountPage(Page
) == 1);
903 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
908 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg
)
910 * FUNCTION: Releases a cache segment associated with a BCB
920 DPRINT("Freeing cache segment 0x%p\n", CacheSeg
);
922 if ( CacheSeg
->Bcb
->Trace
)
924 DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg
->Bcb
, CacheSeg
);
928 RegionSize
= CacheSeg
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
930 /* Unmap all the pages. */
931 for (i
= 0; i
< RegionSize
; i
++)
933 MmDeleteVirtualMapping(NULL
,
934 CacheSeg
->BaseAddress
+ (i
* PAGE_SIZE
),
938 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
941 KeAcquireSpinLock(&CiCacheSegMappingRegionLock
, &oldIrql
);
942 /* Deallocate all the pages used. */
943 Base
= (ULONG
)(CacheSeg
->BaseAddress
- CiCacheSegMappingRegionBase
) / PAGE_SIZE
;
945 RtlClearBits(&CiCacheSegMappingRegionAllocMap
, Base
, RegionSize
);
947 CiCacheSegMappingRegionHint
= min (CiCacheSegMappingRegionHint
, Base
);
949 KeReleaseSpinLock(&CiCacheSegMappingRegionLock
, oldIrql
);
951 MmLockAddressSpace(MmGetKernelAddressSpace());
952 MmFreeMemoryArea(MmGetKernelAddressSpace(),
953 CacheSeg
->MemoryArea
,
956 MmUnlockAddressSpace(MmGetKernelAddressSpace());
958 ExFreeToNPagedLookasideList(&CacheSegLookasideList
, CacheSeg
);
959 return(STATUS_SUCCESS
);
964 CcRosFreeCacheSegment(PBCB Bcb
, PCACHE_SEGMENT CacheSeg
)
971 DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
974 KeAcquireGuardedMutex(&ViewLock
);
975 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
976 RemoveEntryList(&CacheSeg
->BcbSegmentListEntry
);
977 RemoveEntryList(&CacheSeg
->CacheSegmentListEntry
);
978 RemoveEntryList(&CacheSeg
->CacheSegmentLRUListEntry
);
981 RemoveEntryList(&CacheSeg
->DirtySegmentListEntry
);
982 DirtyPageCount
-= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
985 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
986 KeReleaseGuardedMutex(&ViewLock
);
988 Status
= CcRosInternalFreeCacheSegment(CacheSeg
);
996 CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
997 IN PLARGE_INTEGER FileOffset OPTIONAL
,
999 OUT PIO_STATUS_BLOCK IoStatus
)
1002 LARGE_INTEGER Offset
;
1003 PCACHE_SEGMENT current
;
1007 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
1008 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
1010 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1012 Bcb
= (PBCB
)SectionObjectPointers
->SharedCacheMap
;
1016 Offset
= *FileOffset
;
1020 Offset
.QuadPart
= (LONGLONG
)0;
1021 Length
= Bcb
->FileSize
.u
.LowPart
;
1026 IoStatus
->Status
= STATUS_SUCCESS
;
1027 IoStatus
->Information
= 0;
1032 current
= CcRosLookupCacheSegment (Bcb
, Offset
.u
.LowPart
);
1033 if (current
!= NULL
)
1037 Status
= CcRosFlushCacheSegment(current
);
1038 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
1040 IoStatus
->Status
= Status
;
1043 ExReleasePushLock(¤t
->Lock
);
1044 KeLeaveCriticalRegion();
1045 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
1046 CcRosCacheSegmentDecRefCount(current
);
1047 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
1050 Offset
.QuadPart
+= Bcb
->CacheSegmentSize
;
1051 if (Length
> Bcb
->CacheSegmentSize
)
1053 Length
-= Bcb
->CacheSegmentSize
;
1065 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1072 CcRosDeleteFileCache(PFILE_OBJECT FileObject
, PBCB Bcb
)
1074 * FUNCTION: Releases the BCB associated with a file object
1077 PLIST_ENTRY current_entry
;
1078 PCACHE_SEGMENT current
;
1079 LIST_ENTRY FreeList
;
1085 KeReleaseGuardedMutex(&ViewLock
);
1087 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1089 KeAcquireGuardedMutex(&ViewLock
);
1091 if (Bcb
->RefCount
== 0)
1093 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1095 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1096 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1099 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1102 * Release all cache segments.
1104 InitializeListHead(&FreeList
);
1105 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
1106 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
1107 while (!IsListEmpty(&Bcb
->BcbSegmentListHead
))
1109 current_entry
= RemoveTailList(&Bcb
->BcbSegmentListHead
);
1110 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
, BcbSegmentListEntry
);
1111 RemoveEntryList(¤t
->CacheSegmentListEntry
);
1112 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
1115 RemoveEntryList(¤t
->DirtySegmentListEntry
);
1116 DirtyPageCount
-= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
1117 DPRINT1("Freeing dirty segment\n");
1119 InsertHeadList(&FreeList
, ¤t
->BcbSegmentListEntry
);
1124 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
1126 KeReleaseGuardedMutex(&ViewLock
);
1127 ObDereferenceObject (Bcb
->FileObject
);
1129 while (!IsListEmpty(&FreeList
))
1131 current_entry
= RemoveTailList(&FreeList
);
1132 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
, BcbSegmentListEntry
);
1133 CcRosInternalFreeCacheSegment(current
);
1135 ExFreeToNPagedLookasideList(&BcbLookasideList
, Bcb
);
1136 KeAcquireGuardedMutex(&ViewLock
);
1138 return(STATUS_SUCCESS
);
1143 CcRosReferenceCache(PFILE_OBJECT FileObject
)
1146 KeAcquireGuardedMutex(&ViewLock
);
1147 Bcb
= (PBCB
)FileObject
->SectionObjectPointer
->SharedCacheMap
;
1149 if (Bcb
->RefCount
== 0)
1151 ASSERT(Bcb
->BcbRemoveListEntry
.Flink
!= NULL
);
1152 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1153 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1158 ASSERT(Bcb
->BcbRemoveListEntry
.Flink
== NULL
);
1161 KeReleaseGuardedMutex(&ViewLock
);
1166 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1169 DPRINT("CcRosSetRemoveOnClose()\n");
1170 KeAcquireGuardedMutex(&ViewLock
);
1171 Bcb
= (PBCB
)SectionObjectPointer
->SharedCacheMap
;
1174 Bcb
->RemoveOnClose
= TRUE
;
1175 if (Bcb
->RefCount
== 0)
1177 CcRosDeleteFileCache(Bcb
->FileObject
, Bcb
);
1180 KeReleaseGuardedMutex(&ViewLock
);
1186 CcRosDereferenceCache(PFILE_OBJECT FileObject
)
1189 KeAcquireGuardedMutex(&ViewLock
);
1190 Bcb
= (PBCB
)FileObject
->SectionObjectPointer
->SharedCacheMap
;
1192 if (Bcb
->RefCount
> 0)
1195 if (Bcb
->RefCount
== 0)
1197 MmFreeSectionSegments(Bcb
->FileObject
);
1198 CcRosDeleteFileCache(FileObject
, Bcb
);
1201 KeReleaseGuardedMutex(&ViewLock
);
1205 CcRosReleaseFileCache(PFILE_OBJECT FileObject
)
1207 * FUNCTION: Called by the file system when a handle to a file object
1213 KeAcquireGuardedMutex(&ViewLock
);
1215 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1217 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1218 if (FileObject
->PrivateCacheMap
!= NULL
)
1220 FileObject
->PrivateCacheMap
= NULL
;
1221 if (Bcb
->RefCount
> 0)
1224 if (Bcb
->RefCount
== 0)
1226 MmFreeSectionSegments(Bcb
->FileObject
);
1227 CcRosDeleteFileCache(FileObject
, Bcb
);
1232 KeReleaseGuardedMutex(&ViewLock
);
1233 return(STATUS_SUCCESS
);
1238 CcTryToInitializeFileCache(PFILE_OBJECT FileObject
)
1243 KeAcquireGuardedMutex(&ViewLock
);
1245 ASSERT(FileObject
->SectionObjectPointer
);
1246 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1249 Status
= STATUS_UNSUCCESSFUL
;
1253 if (FileObject
->PrivateCacheMap
== NULL
)
1255 FileObject
->PrivateCacheMap
= Bcb
;
1258 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1260 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1261 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1263 Status
= STATUS_SUCCESS
;
1265 KeReleaseGuardedMutex(&ViewLock
);
1272 CcRosInitializeFileCache(PFILE_OBJECT FileObject
,
1273 ULONG CacheSegmentSize
,
1274 PCACHE_MANAGER_CALLBACKS CallBacks
,
1275 PVOID LazyWriterContext
)
1277 * FUNCTION: Initializes a BCB for a file object
1282 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1283 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1284 FileObject
, Bcb
, CacheSegmentSize
);
1286 KeAcquireGuardedMutex(&ViewLock
);
1289 Bcb
= ExAllocateFromNPagedLookasideList(&BcbLookasideList
);
1292 KeReleaseGuardedMutex(&ViewLock
);
1293 return(STATUS_UNSUCCESSFUL
);
1295 memset(Bcb
, 0, sizeof(BCB
));
1296 ObReferenceObjectByPointer(FileObject
,
1300 Bcb
->FileObject
= FileObject
;
1301 Bcb
->CacheSegmentSize
= CacheSegmentSize
;
1302 Bcb
->Callbacks
= CallBacks
;
1303 Bcb
->LazyWriteContext
= LazyWriterContext
;
1304 if (FileObject
->FsContext
)
1306 Bcb
->AllocationSize
=
1307 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->AllocationSize
;
1309 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->FileSize
;
1311 KeInitializeSpinLock(&Bcb
->BcbLock
);
1312 InitializeListHead(&Bcb
->BcbSegmentListHead
);
1313 FileObject
->SectionObjectPointer
->SharedCacheMap
= Bcb
;
1315 if (FileObject
->PrivateCacheMap
== NULL
)
1317 FileObject
->PrivateCacheMap
= Bcb
;
1320 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1322 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1323 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1325 KeReleaseGuardedMutex(&ViewLock
);
1327 return(STATUS_SUCCESS
);
1334 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1337 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1339 Bcb
= (PBCB
)SectionObjectPointers
->SharedCacheMap
;
1341 return Bcb
->FileObject
;
1354 PHYSICAL_ADDRESS BoundaryAddressMultiple
;
1357 DPRINT("CcInitView()\n");
1359 BoundaryAddressMultiple
.QuadPart
= 0;
1360 CiCacheSegMappingRegionHint
= 0;
1361 CiCacheSegMappingRegionBase
= NULL
;
1363 MmLockAddressSpace(MmGetKernelAddressSpace());
1365 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
1366 MEMORY_AREA_CACHE_SEGMENT
,
1367 &CiCacheSegMappingRegionBase
,
1368 CI_CACHESEG_MAPPING_REGION_SIZE
,
1373 BoundaryAddressMultiple
);
1374 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1375 if (!NT_SUCCESS(Status
))
1377 KeBugCheck(CACHE_MANAGER
);
1380 Buffer
= ExAllocatePool(NonPagedPool
, CI_CACHESEG_MAPPING_REGION_SIZE
/ (PAGE_SIZE
* 8));
1383 KeBugCheck(CACHE_MANAGER
);
1386 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap
, Buffer
, CI_CACHESEG_MAPPING_REGION_SIZE
/ PAGE_SIZE
);
1387 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap
);
1389 KeInitializeSpinLock(&CiCacheSegMappingRegionLock
);
1391 InitializeListHead(&CacheSegmentListHead
);
1392 InitializeListHead(&DirtySegmentListHead
);
1393 InitializeListHead(&CacheSegmentLRUListHead
);
1394 InitializeListHead(&ClosedListHead
);
1395 KeInitializeGuardedMutex(&ViewLock
);
1396 ExInitializeNPagedLookasideList (&iBcbLookasideList
,
1400 sizeof(INTERNAL_BCB
),
1403 ExInitializeNPagedLookasideList (&BcbLookasideList
,
1410 ExInitializeNPagedLookasideList (&CacheSegLookasideList
,
1414 sizeof(CACHE_SEGMENT
),
1418 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1420 CcInitCacheZeroPage();