/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 */
/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows
 *
 * (1) If caching for the FCB hasn't been initiated then so do by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page
 */
/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
42 /* GLOBALS *******************************************************************/
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocate/deallocate space from this block
47 * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
50 //#define CACHE_BITMAP
52 static LIST_ENTRY DirtySegmentListHead
;
53 static LIST_ENTRY CacheSegmentListHead
;
54 static LIST_ENTRY CacheSegmentLRUListHead
;
55 static LIST_ENTRY ClosedListHead
;
56 ULONG DirtyPageCount
=0;
58 KGUARDED_MUTEX ViewLock
;
61 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
63 static PVOID CiCacheSegMappingRegionBase
= NULL
;
64 static RTL_BITMAP CiCacheSegMappingRegionAllocMap
;
65 static ULONG CiCacheSegMappingRegionHint
;
66 static KSPIN_LOCK CiCacheSegMappingRegionLock
;
69 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
70 static NPAGED_LOOKASIDE_LIST BcbLookasideList
;
71 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList
;
#if defined(DBG) || defined(KDBG)
/* Debug wrapper: bump the segment reference count, optionally tracing the
 * transition when tracing is enabled for the owning cache map. */
static void CcRosCacheSegmentIncRefCount_(PCACHE_SEGMENT cs, const char* file, int line)
{
    ++cs->ReferenceCount;
    if (cs->Bcb->Trace)
    {
        DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut);
    }
}
/* Debug wrapper: drop the segment reference count with optional tracing. */
static void CcRosCacheSegmentDecRefCount_(PCACHE_SEGMENT cs, const char* file, int line)
{
    --cs->ReferenceCount;
    if (cs->Bcb->Trace)
    {
        DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut);
    }
}
#define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
#define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
#else
/* Free build: plain unsynchronized increment/decrement; callers hold the
 * owning Bcb's BcbLock when touching ReferenceCount. */
#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
#endif
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg
);
103 /* FUNCTIONS *****************************************************************/
#if defined(DBG) || defined(KDBG)
/*
 * Enable or disable debug tracing for a cache map (Bcb) and dump the state
 * of all its cache segments when tracing is turned on.
 * NOTE(review): signature reconstructed from usage — TODO confirm against the
 * original header; the body only reads Bcb's segment list under BcbLock.
 */
VOID
NTAPI
CcRosTraceCacheMap(PBCB Bcb, BOOLEAN Trace)
{
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;

    if (!Bcb)
        return;

    Bcb->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb);

        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);

        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (current_entry != &Bcb->BcbSegmentListHead)
        {
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut);
        }
        KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb);
    }
}
#else
#define CcRosTraceCacheMap(x,y) (void)0
#endif
153 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment
)
158 Status
= WriteCacheSegment(CacheSegment
);
159 if (NT_SUCCESS(Status
))
161 KeAcquireGuardedMutex(&ViewLock
);
162 KeAcquireSpinLock(&CacheSegment
->Bcb
->BcbLock
, &oldIrql
);
164 CacheSegment
->Dirty
= FALSE
;
165 RemoveEntryList(&CacheSegment
->DirtySegmentListEntry
);
166 DirtyPageCount
-= CacheSegment
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
167 CcRosCacheSegmentDecRefCount ( CacheSegment
);
169 KeReleaseSpinLock(&CacheSegment
->Bcb
->BcbLock
, oldIrql
);
170 KeReleaseGuardedMutex(&ViewLock
);
178 CcRosFlushDirtyPages(ULONG Target
, PULONG Count
)
180 PLIST_ENTRY current_entry
;
181 PCACHE_SEGMENT current
;
182 ULONG PagesPerSegment
;
185 static ULONG WriteCount
[4] = {0, 0, 0, 0};
188 DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target
);
192 KeEnterCriticalRegion();
193 KeAcquireGuardedMutex(&ViewLock
);
195 WriteCount
[0] = WriteCount
[1];
196 WriteCount
[1] = WriteCount
[2];
197 WriteCount
[2] = WriteCount
[3];
200 NewTarget
= WriteCount
[0] + WriteCount
[1] + WriteCount
[2];
202 if (NewTarget
< DirtyPageCount
)
204 NewTarget
= (DirtyPageCount
- NewTarget
+ 3) / 4;
205 WriteCount
[0] += NewTarget
;
206 WriteCount
[1] += NewTarget
;
207 WriteCount
[2] += NewTarget
;
208 WriteCount
[3] += NewTarget
;
211 NewTarget
= WriteCount
[0];
213 Target
= max(NewTarget
, Target
);
215 current_entry
= DirtySegmentListHead
.Flink
;
216 if (current_entry
== &DirtySegmentListHead
)
218 DPRINT("No Dirty pages\n");
221 while (current_entry
!= &DirtySegmentListHead
&& Target
> 0)
223 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
224 DirtySegmentListEntry
);
225 current_entry
= current_entry
->Flink
;
227 Locked
= current
->Bcb
->Callbacks
->AcquireForLazyWrite(
228 current
->Bcb
->LazyWriteContext
, FALSE
);
234 Locked
= ExTryToAcquirePushLockExclusive(¤t
->Lock
);
237 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
238 current
->Bcb
->LazyWriteContext
);
243 ASSERT(current
->Dirty
);
244 if (current
->ReferenceCount
> 1)
246 ExReleasePushLock(¤t
->Lock
);
247 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
248 current
->Bcb
->LazyWriteContext
);
252 PagesPerSegment
= current
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
254 KeReleaseGuardedMutex(&ViewLock
);
256 Status
= CcRosFlushCacheSegment(current
);
258 ExReleasePushLock(¤t
->Lock
);
259 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
260 current
->Bcb
->LazyWriteContext
);
262 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
264 DPRINT1("CC: Failed to flush cache segment.\n");
268 (*Count
) += PagesPerSegment
;
269 Target
-= PagesPerSegment
;
272 KeAcquireGuardedMutex(&ViewLock
);
273 current_entry
= DirtySegmentListHead
.Flink
;
276 if (*Count
< NewTarget
)
278 WriteCount
[1] += (NewTarget
- *Count
);
281 KeReleaseGuardedMutex(&ViewLock
);
282 KeLeaveCriticalRegion();
284 DPRINT("CcRosFlushDirtyPages() finished\n");
285 return(STATUS_SUCCESS
);
289 CcRosTrimCache(ULONG Target
, ULONG Priority
, PULONG NrFreed
)
291 * FUNCTION: Try to free some memory from the file cache.
293 * Target - The number of pages to be freed.
294 * Priority - The priority of free (currently unused).
295 * NrFreed - Points to a variable where the number of pages
296 * actually freed is returned.
299 PLIST_ENTRY current_entry
;
300 PCACHE_SEGMENT current
;
301 ULONG PagesPerSegment
;
307 BOOLEAN FlushedPages
= FALSE
;
309 DPRINT("CcRosTrimCache(Target %d)\n", Target
);
311 InitializeListHead(&FreeList
);
316 KeAcquireGuardedMutex(&ViewLock
);
318 current_entry
= CacheSegmentLRUListHead
.Flink
;
319 while (current_entry
!= &CacheSegmentLRUListHead
)
321 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
322 CacheSegmentLRUListEntry
);
323 current_entry
= current_entry
->Flink
;
325 KeAcquireSpinLock(¤t
->Bcb
->BcbLock
, &oldIrql
);
327 /* Reference the cache segment */
328 CcRosCacheSegmentIncRefCount(current
);
330 /* Check if it's mapped and not dirty */
331 if (current
->MappedCount
> 0 && !current
->Dirty
)
333 /* We have to break these locks because Cc sucks */
334 KeReleaseSpinLock(¤t
->Bcb
->BcbLock
, oldIrql
);
335 KeReleaseGuardedMutex(&ViewLock
);
337 /* Page out the segment */
338 for (i
= 0; i
< current
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
; i
++)
340 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
342 MmPageOutPhysicalAddress(Page
);
345 /* Reacquire the locks */
346 KeAcquireGuardedMutex(&ViewLock
);
347 KeAcquireSpinLock(¤t
->Bcb
->BcbLock
, &oldIrql
);
350 /* Dereference the cache segment */
351 CcRosCacheSegmentDecRefCount(current
);
353 /* Check if we can free this entry now */
354 if (current
->ReferenceCount
== 0)
356 ASSERT(!current
->Dirty
);
357 ASSERT(!current
->MappedCount
);
359 RemoveEntryList(¤t
->BcbSegmentListEntry
);
360 RemoveEntryList(¤t
->CacheSegmentListEntry
);
361 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
362 InsertHeadList(&FreeList
, ¤t
->BcbSegmentListEntry
);
364 /* Calculate how many pages we freed for Mm */
365 PagesPerSegment
= current
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
366 PagesFreed
= min(PagesPerSegment
, Target
);
367 Target
-= PagesFreed
;
368 (*NrFreed
) += PagesFreed
;
371 KeReleaseSpinLock(¤t
->Bcb
->BcbLock
, oldIrql
);
374 KeReleaseGuardedMutex(&ViewLock
);
376 /* Try flushing pages if we haven't met our target */
377 if (Target
> 0 && !FlushedPages
)
379 /* Flush dirty pages to disk */
380 CcRosFlushDirtyPages(Target
, &PagesFreed
);
383 /* We can only swap as many pages as we flushed */
384 if (PagesFreed
< Target
) Target
= PagesFreed
;
386 /* Check if we flushed anything */
389 /* Try again after flushing dirty pages */
390 DPRINT("Flushed %d dirty cache pages to disk\n", PagesFreed
);
395 while (!IsListEmpty(&FreeList
))
397 current_entry
= RemoveHeadList(&FreeList
);
398 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
399 BcbSegmentListEntry
);
400 CcRosInternalFreeCacheSegment(current
);
403 DPRINT("Evicted %d cache pages\n", (*NrFreed
));
405 return(STATUS_SUCCESS
);
410 CcRosReleaseCacheSegment(PBCB Bcb
,
411 PCACHE_SEGMENT CacheSeg
,
416 BOOLEAN WasDirty
= CacheSeg
->Dirty
;
421 DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
422 Bcb
, CacheSeg
, Valid
);
424 CacheSeg
->Valid
= Valid
;
425 CacheSeg
->Dirty
= CacheSeg
->Dirty
|| Dirty
;
427 KeAcquireGuardedMutex(&ViewLock
);
428 if (!WasDirty
&& CacheSeg
->Dirty
)
430 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
431 DirtyPageCount
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
436 CacheSeg
->MappedCount
++;
438 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
439 CcRosCacheSegmentDecRefCount(CacheSeg
);
440 if (Mapped
&& CacheSeg
->MappedCount
== 1)
442 CcRosCacheSegmentIncRefCount(CacheSeg
);
444 if (!WasDirty
&& CacheSeg
->Dirty
)
446 CcRosCacheSegmentIncRefCount(CacheSeg
);
448 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
449 KeReleaseGuardedMutex(&ViewLock
);
450 ExReleasePushLock(&CacheSeg
->Lock
);
452 return(STATUS_SUCCESS
);
455 /* Returns with Cache Segment Lock Held! */
458 CcRosLookupCacheSegment(PBCB Bcb
, ULONG FileOffset
)
460 PLIST_ENTRY current_entry
;
461 PCACHE_SEGMENT current
;
466 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb
, FileOffset
);
468 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
469 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
470 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
472 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
473 BcbSegmentListEntry
);
474 if (current
->FileOffset
<= FileOffset
&&
475 (current
->FileOffset
+ Bcb
->CacheSegmentSize
) > FileOffset
)
477 CcRosCacheSegmentIncRefCount(current
);
478 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
479 ExAcquirePushLockExclusive(¤t
->Lock
);
482 current_entry
= current_entry
->Flink
;
484 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
490 CcRosMarkDirtyCacheSegment(PBCB Bcb
, ULONG FileOffset
)
492 PCACHE_SEGMENT CacheSeg
;
497 DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb
, FileOffset
);
499 CacheSeg
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
500 if (CacheSeg
== NULL
)
502 KeBugCheck(CACHE_MANAGER
);
504 if (!CacheSeg
->Dirty
)
506 KeAcquireGuardedMutex(&ViewLock
);
507 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
508 DirtyPageCount
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
509 KeReleaseGuardedMutex(&ViewLock
);
513 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
514 CcRosCacheSegmentDecRefCount(CacheSeg
);
515 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
518 KeAcquireGuardedMutex(&ViewLock
);
520 /* Move to the tail of the LRU list */
521 RemoveEntryList(&CacheSeg
->CacheSegmentLRUListEntry
);
522 InsertTailList(&CacheSegmentLRUListHead
, &CacheSeg
->CacheSegmentLRUListEntry
);
524 KeReleaseGuardedMutex(&ViewLock
);
526 CacheSeg
->Dirty
= TRUE
;
527 ExReleasePushLock(&CacheSeg
->Lock
);
529 return(STATUS_SUCCESS
);
534 CcRosUnmapCacheSegment(PBCB Bcb
, ULONG FileOffset
, BOOLEAN NowDirty
)
536 PCACHE_SEGMENT CacheSeg
;
542 DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
543 Bcb
, FileOffset
, NowDirty
);
545 CacheSeg
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
546 if (CacheSeg
== NULL
)
548 return(STATUS_UNSUCCESSFUL
);
551 WasDirty
= CacheSeg
->Dirty
;
552 CacheSeg
->Dirty
= CacheSeg
->Dirty
|| NowDirty
;
554 CacheSeg
->MappedCount
--;
556 if (!WasDirty
&& NowDirty
)
558 KeAcquireGuardedMutex(&ViewLock
);
559 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
560 DirtyPageCount
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
561 KeReleaseGuardedMutex(&ViewLock
);
564 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
565 CcRosCacheSegmentDecRefCount(CacheSeg
);
566 if (!WasDirty
&& NowDirty
)
568 CcRosCacheSegmentIncRefCount(CacheSeg
);
570 if (CacheSeg
->MappedCount
== 0)
572 CcRosCacheSegmentDecRefCount(CacheSeg
);
574 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
576 ExReleasePushLock(&CacheSeg
->Lock
);
577 return(STATUS_SUCCESS
);
582 CcRosCreateCacheSegment(PBCB Bcb
,
584 PCACHE_SEGMENT
* CacheSeg
)
586 PCACHE_SEGMENT current
;
587 PCACHE_SEGMENT previous
;
588 PLIST_ENTRY current_entry
;
592 ULONG StartingOffset
;
594 PHYSICAL_ADDRESS BoundaryAddressMultiple
;
598 DPRINT("CcRosCreateCacheSegment()\n");
600 BoundaryAddressMultiple
.QuadPart
= 0;
601 if (FileOffset
>= Bcb
->FileSize
.u
.LowPart
)
604 return STATUS_INVALID_PARAMETER
;
607 current
= ExAllocateFromNPagedLookasideList(&CacheSegLookasideList
);
608 current
->Valid
= FALSE
;
609 current
->Dirty
= FALSE
;
610 current
->PageOut
= FALSE
;
611 current
->FileOffset
= ROUND_DOWN(FileOffset
, Bcb
->CacheSegmentSize
);
616 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb
, current
);
619 current
->MappedCount
= 0;
620 current
->DirtySegmentListEntry
.Flink
= NULL
;
621 current
->DirtySegmentListEntry
.Blink
= NULL
;
622 current
->ReferenceCount
= 1;
623 ExInitializePushLock(¤t
->Lock
);
624 ExAcquirePushLockExclusive(¤t
->Lock
);
625 KeAcquireGuardedMutex(&ViewLock
);
628 /* There is window between the call to CcRosLookupCacheSegment
629 * and CcRosCreateCacheSegment. We must check if a segment on
630 * the fileoffset exist. If there exist a segment, we release
631 * our new created segment and return the existing one.
633 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
634 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
636 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
638 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
639 BcbSegmentListEntry
);
640 if (current
->FileOffset
<= FileOffset
&&
641 (current
->FileOffset
+ Bcb
->CacheSegmentSize
) > FileOffset
)
643 CcRosCacheSegmentIncRefCount(current
);
644 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
648 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
654 ExReleasePushLock(&(*CacheSeg
)->Lock
);
655 KeReleaseGuardedMutex(&ViewLock
);
656 ExFreeToNPagedLookasideList(&CacheSegLookasideList
, *CacheSeg
);
658 ExAcquirePushLockExclusive(¤t
->Lock
);
659 return STATUS_SUCCESS
;
661 if (current
->FileOffset
< FileOffset
)
663 if (previous
== NULL
)
669 if (previous
->FileOffset
< current
->FileOffset
)
675 current_entry
= current_entry
->Flink
;
677 /* There was no existing segment. */
681 InsertHeadList(&previous
->BcbSegmentListEntry
, ¤t
->BcbSegmentListEntry
);
685 InsertHeadList(&Bcb
->BcbSegmentListHead
, ¤t
->BcbSegmentListEntry
);
687 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
688 InsertTailList(&CacheSegmentListHead
, ¤t
->CacheSegmentListEntry
);
689 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
690 KeReleaseGuardedMutex(&ViewLock
);
692 KeAcquireSpinLock(&CiCacheSegMappingRegionLock
, &oldIrql
);
694 StartingOffset
= RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap
, Bcb
->CacheSegmentSize
/ PAGE_SIZE
, CiCacheSegMappingRegionHint
);
696 if (StartingOffset
== 0xffffffff)
698 DPRINT1("Out of CacheSeg mapping space\n");
699 KeBugCheck(CACHE_MANAGER
);
702 current
->BaseAddress
= CiCacheSegMappingRegionBase
+ StartingOffset
* PAGE_SIZE
;
704 if (CiCacheSegMappingRegionHint
== StartingOffset
)
706 CiCacheSegMappingRegionHint
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
709 KeReleaseSpinLock(&CiCacheSegMappingRegionLock
, oldIrql
);
711 MmLockAddressSpace(MmGetKernelAddressSpace());
712 current
->BaseAddress
= NULL
;
713 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
714 0, // nothing checks for cache_segment mareas, so set to 0
715 ¤t
->BaseAddress
,
716 Bcb
->CacheSegmentSize
,
718 (PMEMORY_AREA
*)¤t
->MemoryArea
,
721 BoundaryAddressMultiple
);
722 MmUnlockAddressSpace(MmGetKernelAddressSpace());
723 if (!NT_SUCCESS(Status
))
725 KeBugCheck(CACHE_MANAGER
);
729 /* Create a virtual mapping for this memory area */
730 MI_SET_USAGE(MI_USAGE_CACHE
);
734 if ((Bcb
->FileObject
) && (Bcb
->FileObject
->FileName
.Buffer
))
736 pos
= wcsrchr(Bcb
->FileObject
->FileName
.Buffer
, '\\');
737 len
= wcslen(pos
) * sizeof(WCHAR
);
738 if (pos
) snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
742 MmMapMemoryArea(current
->BaseAddress
, Bcb
->CacheSegmentSize
,
743 MC_CACHE
, PAGE_READWRITE
);
745 return(STATUS_SUCCESS
);
750 CcRosGetCacheSegmentChain(PBCB Bcb
,
753 PCACHE_SEGMENT
* CacheSeg
)
755 PCACHE_SEGMENT current
;
757 PCACHE_SEGMENT
* CacheSegList
;
758 PCACHE_SEGMENT Previous
= NULL
;
762 DPRINT("CcRosGetCacheSegmentChain()\n");
764 Length
= ROUND_UP(Length
, Bcb
->CacheSegmentSize
);
766 CacheSegList
= _alloca(sizeof(PCACHE_SEGMENT
) *
767 (Length
/ Bcb
->CacheSegmentSize
));
770 * Look for a cache segment already mapping the same data.
772 for (i
= 0; i
< (Length
/ Bcb
->CacheSegmentSize
); i
++)
774 ULONG CurrentOffset
= FileOffset
+ (i
* Bcb
->CacheSegmentSize
);
775 current
= CcRosLookupCacheSegment(Bcb
, CurrentOffset
);
778 KeAcquireGuardedMutex(&ViewLock
);
780 /* Move to tail of LRU list */
781 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
782 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
784 KeReleaseGuardedMutex(&ViewLock
);
786 CacheSegList
[i
] = current
;
790 CcRosCreateCacheSegment(Bcb
, CurrentOffset
, ¤t
);
791 CacheSegList
[i
] = current
;
795 for (i
= 0; i
< (Length
/ Bcb
->CacheSegmentSize
); i
++)
799 *CacheSeg
= CacheSegList
[i
];
800 Previous
= CacheSegList
[i
];
804 Previous
->NextInChain
= CacheSegList
[i
];
805 Previous
= CacheSegList
[i
];
809 Previous
->NextInChain
= NULL
;
811 return(STATUS_SUCCESS
);
816 CcRosGetCacheSegment(PBCB Bcb
,
821 PCACHE_SEGMENT
* CacheSeg
)
823 PCACHE_SEGMENT current
;
828 DPRINT("CcRosGetCacheSegment()\n");
831 * Look for a cache segment already mapping the same data.
833 current
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
837 * Otherwise create a new segment.
839 Status
= CcRosCreateCacheSegment(Bcb
, FileOffset
, ¤t
);
840 if (!NT_SUCCESS(Status
))
846 KeAcquireGuardedMutex(&ViewLock
);
848 /* Move to the tail of the LRU list */
849 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
850 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
852 KeReleaseGuardedMutex(&ViewLock
);
855 * Return information about the segment to the caller.
857 *UptoDate
= current
->Valid
;
858 *BaseAddress
= current
->BaseAddress
;
859 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress
);
861 *BaseOffset
= current
->FileOffset
;
862 return(STATUS_SUCCESS
);
866 CcRosRequestCacheSegment(PBCB Bcb
,
870 PCACHE_SEGMENT
* CacheSeg
)
872 * FUNCTION: Request a page mapping for a BCB
879 if ((FileOffset
% Bcb
->CacheSegmentSize
) != 0)
881 DPRINT1("Bad fileoffset %x should be multiple of %x",
882 FileOffset
, Bcb
->CacheSegmentSize
);
883 KeBugCheck(CACHE_MANAGER
);
886 return(CcRosGetCacheSegment(Bcb
,
896 CcFreeCachePage(PVOID Context
, MEMORY_AREA
* MemoryArea
, PVOID Address
,
897 PFN_NUMBER Page
, SWAPENTRY SwapEntry
, BOOLEAN Dirty
)
899 ASSERT(SwapEntry
== 0);
902 ASSERT(MmGetReferenceCountPage(Page
) == 1);
903 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
908 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg
)
910 * FUNCTION: Releases a cache segment associated with a BCB
920 DPRINT("Freeing cache segment 0x%p\n", CacheSeg
);
922 if ( CacheSeg
->Bcb
->Trace
)
924 DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg
->Bcb
, CacheSeg
);
928 RegionSize
= CacheSeg
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
930 /* Unmap all the pages. */
931 for (i
= 0; i
< RegionSize
; i
++)
933 MmDeleteVirtualMapping(NULL
,
934 CacheSeg
->BaseAddress
+ (i
* PAGE_SIZE
),
938 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
941 KeAcquireSpinLock(&CiCacheSegMappingRegionLock
, &oldIrql
);
942 /* Deallocate all the pages used. */
943 Base
= (ULONG
)(CacheSeg
->BaseAddress
- CiCacheSegMappingRegionBase
) / PAGE_SIZE
;
945 RtlClearBits(&CiCacheSegMappingRegionAllocMap
, Base
, RegionSize
);
947 CiCacheSegMappingRegionHint
= min (CiCacheSegMappingRegionHint
, Base
);
949 KeReleaseSpinLock(&CiCacheSegMappingRegionLock
, oldIrql
);
951 MmLockAddressSpace(MmGetKernelAddressSpace());
952 MmFreeMemoryArea(MmGetKernelAddressSpace(),
953 CacheSeg
->MemoryArea
,
956 MmUnlockAddressSpace(MmGetKernelAddressSpace());
958 ExFreeToNPagedLookasideList(&CacheSegLookasideList
, CacheSeg
);
959 return(STATUS_SUCCESS
);
964 CcRosFreeCacheSegment(PBCB Bcb
, PCACHE_SEGMENT CacheSeg
)
971 DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
974 KeAcquireGuardedMutex(&ViewLock
);
975 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
976 RemoveEntryList(&CacheSeg
->BcbSegmentListEntry
);
977 RemoveEntryList(&CacheSeg
->CacheSegmentListEntry
);
978 RemoveEntryList(&CacheSeg
->CacheSegmentLRUListEntry
);
981 RemoveEntryList(&CacheSeg
->DirtySegmentListEntry
);
982 DirtyPageCount
-= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
985 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
986 KeReleaseGuardedMutex(&ViewLock
);
988 Status
= CcRosInternalFreeCacheSegment(CacheSeg
);
996 CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
997 IN PLARGE_INTEGER FileOffset OPTIONAL
,
999 OUT PIO_STATUS_BLOCK IoStatus
)
1002 LARGE_INTEGER Offset
;
1003 PCACHE_SEGMENT current
;
1007 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
1008 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
1010 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1012 Bcb
= (PBCB
)SectionObjectPointers
->SharedCacheMap
;
1016 Offset
= *FileOffset
;
1020 Offset
.QuadPart
= (LONGLONG
)0;
1021 Length
= Bcb
->FileSize
.u
.LowPart
;
1026 IoStatus
->Status
= STATUS_SUCCESS
;
1027 IoStatus
->Information
= 0;
1032 current
= CcRosLookupCacheSegment (Bcb
, Offset
.u
.LowPart
);
1033 if (current
!= NULL
)
1037 Status
= CcRosFlushCacheSegment(current
);
1038 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
1040 IoStatus
->Status
= Status
;
1043 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
1044 ExReleasePushLock(¤t
->Lock
);
1045 CcRosCacheSegmentDecRefCount(current
);
1046 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
1049 Offset
.QuadPart
+= Bcb
->CacheSegmentSize
;
1050 if (Length
> Bcb
->CacheSegmentSize
)
1052 Length
-= Bcb
->CacheSegmentSize
;
1064 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1071 CcRosDeleteFileCache(PFILE_OBJECT FileObject
, PBCB Bcb
)
1073 * FUNCTION: Releases the BCB associated with a file object
1076 PLIST_ENTRY current_entry
;
1077 PCACHE_SEGMENT current
;
1078 LIST_ENTRY FreeList
;
1084 KeReleaseGuardedMutex(&ViewLock
);
1086 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1088 KeAcquireGuardedMutex(&ViewLock
);
1090 if (Bcb
->RefCount
== 0)
1092 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1094 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1095 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1098 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1101 * Release all cache segments.
1103 InitializeListHead(&FreeList
);
1104 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
1105 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
1106 while (!IsListEmpty(&Bcb
->BcbSegmentListHead
))
1108 current_entry
= RemoveTailList(&Bcb
->BcbSegmentListHead
);
1109 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
, BcbSegmentListEntry
);
1110 RemoveEntryList(¤t
->CacheSegmentListEntry
);
1111 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
1114 RemoveEntryList(¤t
->DirtySegmentListEntry
);
1115 DirtyPageCount
-= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
1116 DPRINT1("Freeing dirty segment\n");
1118 InsertHeadList(&FreeList
, ¤t
->BcbSegmentListEntry
);
1123 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
1125 KeReleaseGuardedMutex(&ViewLock
);
1126 ObDereferenceObject (Bcb
->FileObject
);
1128 while (!IsListEmpty(&FreeList
))
1130 current_entry
= RemoveTailList(&FreeList
);
1131 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
, BcbSegmentListEntry
);
1132 CcRosInternalFreeCacheSegment(current
);
1134 ExFreeToNPagedLookasideList(&BcbLookasideList
, Bcb
);
1135 KeAcquireGuardedMutex(&ViewLock
);
1137 return(STATUS_SUCCESS
);
1142 CcRosReferenceCache(PFILE_OBJECT FileObject
)
1145 KeAcquireGuardedMutex(&ViewLock
);
1146 Bcb
= (PBCB
)FileObject
->SectionObjectPointer
->SharedCacheMap
;
1148 if (Bcb
->RefCount
== 0)
1150 ASSERT(Bcb
->BcbRemoveListEntry
.Flink
!= NULL
);
1151 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1152 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1157 ASSERT(Bcb
->BcbRemoveListEntry
.Flink
== NULL
);
1160 KeReleaseGuardedMutex(&ViewLock
);
1165 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1168 DPRINT("CcRosSetRemoveOnClose()\n");
1169 KeAcquireGuardedMutex(&ViewLock
);
1170 Bcb
= (PBCB
)SectionObjectPointer
->SharedCacheMap
;
1173 Bcb
->RemoveOnClose
= TRUE
;
1174 if (Bcb
->RefCount
== 0)
1176 CcRosDeleteFileCache(Bcb
->FileObject
, Bcb
);
1179 KeReleaseGuardedMutex(&ViewLock
);
1185 CcRosDereferenceCache(PFILE_OBJECT FileObject
)
1188 KeAcquireGuardedMutex(&ViewLock
);
1189 Bcb
= (PBCB
)FileObject
->SectionObjectPointer
->SharedCacheMap
;
1191 if (Bcb
->RefCount
> 0)
1194 if (Bcb
->RefCount
== 0)
1196 MmFreeSectionSegments(Bcb
->FileObject
);
1197 CcRosDeleteFileCache(FileObject
, Bcb
);
1200 KeReleaseGuardedMutex(&ViewLock
);
1204 CcRosReleaseFileCache(PFILE_OBJECT FileObject
)
1206 * FUNCTION: Called by the file system when a handle to a file object
1212 KeAcquireGuardedMutex(&ViewLock
);
1214 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1216 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1217 if (FileObject
->PrivateCacheMap
!= NULL
)
1219 FileObject
->PrivateCacheMap
= NULL
;
1220 if (Bcb
->RefCount
> 0)
1223 if (Bcb
->RefCount
== 0)
1225 MmFreeSectionSegments(Bcb
->FileObject
);
1226 CcRosDeleteFileCache(FileObject
, Bcb
);
1231 KeReleaseGuardedMutex(&ViewLock
);
1232 return(STATUS_SUCCESS
);
1237 CcTryToInitializeFileCache(PFILE_OBJECT FileObject
)
1242 KeAcquireGuardedMutex(&ViewLock
);
1244 ASSERT(FileObject
->SectionObjectPointer
);
1245 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1248 Status
= STATUS_UNSUCCESSFUL
;
1252 if (FileObject
->PrivateCacheMap
== NULL
)
1254 FileObject
->PrivateCacheMap
= Bcb
;
1257 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1259 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1260 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1262 Status
= STATUS_SUCCESS
;
1264 KeReleaseGuardedMutex(&ViewLock
);
1271 CcRosInitializeFileCache(PFILE_OBJECT FileObject
,
1272 ULONG CacheSegmentSize
,
1273 PCACHE_MANAGER_CALLBACKS CallBacks
,
1274 PVOID LazyWriterContext
)
1276 * FUNCTION: Initializes a BCB for a file object
1281 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1282 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1283 FileObject
, Bcb
, CacheSegmentSize
);
1285 KeAcquireGuardedMutex(&ViewLock
);
1288 Bcb
= ExAllocateFromNPagedLookasideList(&BcbLookasideList
);
1291 KeReleaseGuardedMutex(&ViewLock
);
1292 return(STATUS_UNSUCCESSFUL
);
1294 memset(Bcb
, 0, sizeof(BCB
));
1295 ObReferenceObjectByPointer(FileObject
,
1299 Bcb
->FileObject
= FileObject
;
1300 Bcb
->CacheSegmentSize
= CacheSegmentSize
;
1301 Bcb
->Callbacks
= CallBacks
;
1302 Bcb
->LazyWriteContext
= LazyWriterContext
;
1303 if (FileObject
->FsContext
)
1305 Bcb
->AllocationSize
=
1306 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->AllocationSize
;
1308 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->FileSize
;
1310 KeInitializeSpinLock(&Bcb
->BcbLock
);
1311 InitializeListHead(&Bcb
->BcbSegmentListHead
);
1312 FileObject
->SectionObjectPointer
->SharedCacheMap
= Bcb
;
1314 if (FileObject
->PrivateCacheMap
== NULL
)
1316 FileObject
->PrivateCacheMap
= Bcb
;
1319 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1321 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1322 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1324 KeReleaseGuardedMutex(&ViewLock
);
1326 return(STATUS_SUCCESS
);
1333 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1336 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1338 Bcb
= (PBCB
)SectionObjectPointers
->SharedCacheMap
;
1340 return Bcb
->FileObject
;
1353 PHYSICAL_ADDRESS BoundaryAddressMultiple
;
1356 DPRINT("CcInitView()\n");
1358 BoundaryAddressMultiple
.QuadPart
= 0;
1359 CiCacheSegMappingRegionHint
= 0;
1360 CiCacheSegMappingRegionBase
= NULL
;
1362 MmLockAddressSpace(MmGetKernelAddressSpace());
1364 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
1365 MEMORY_AREA_CACHE_SEGMENT
,
1366 &CiCacheSegMappingRegionBase
,
1367 CI_CACHESEG_MAPPING_REGION_SIZE
,
1372 BoundaryAddressMultiple
);
1373 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1374 if (!NT_SUCCESS(Status
))
1376 KeBugCheck(CACHE_MANAGER
);
1379 Buffer
= ExAllocatePool(NonPagedPool
, CI_CACHESEG_MAPPING_REGION_SIZE
/ (PAGE_SIZE
* 8));
1382 KeBugCheck(CACHE_MANAGER
);
1385 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap
, Buffer
, CI_CACHESEG_MAPPING_REGION_SIZE
/ PAGE_SIZE
);
1386 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap
);
1388 KeInitializeSpinLock(&CiCacheSegMappingRegionLock
);
1390 InitializeListHead(&CacheSegmentListHead
);
1391 InitializeListHead(&DirtySegmentListHead
);
1392 InitializeListHead(&CacheSegmentLRUListHead
);
1393 InitializeListHead(&ClosedListHead
);
1394 KeInitializeGuardedMutex(&ViewLock
);
1395 ExInitializeNPagedLookasideList (&iBcbLookasideList
,
1399 sizeof(INTERNAL_BCB
),
1402 ExInitializeNPagedLookasideList (&BcbLookasideList
,
1409 ExInitializeNPagedLookasideList (&CacheSegLookasideList
,
1413 sizeof(CACHE_SEGMENT
),
1417 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1419 CcInitCacheZeroPage();