/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 */
/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows
 *
 * (1) If caching for the FCB hasn't been initiated then so do by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page
 */
32 /* INCLUDES ******************************************************************/
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
42 /* GLOBALS *******************************************************************/
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocate/deallocate space from this block
47 * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
50 //#define CACHE_BITMAP
52 static LIST_ENTRY DirtySegmentListHead
;
53 static LIST_ENTRY CacheSegmentListHead
;
54 static LIST_ENTRY CacheSegmentLRUListHead
;
55 static LIST_ENTRY ClosedListHead
;
56 ULONG DirtyPageCount
=0;
58 KGUARDED_MUTEX ViewLock
;
61 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
63 static PVOID CiCacheSegMappingRegionBase
= NULL
;
64 static RTL_BITMAP CiCacheSegMappingRegionAllocMap
;
65 static ULONG CiCacheSegMappingRegionHint
;
66 static KSPIN_LOCK CiCacheSegMappingRegionLock
;
69 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
70 static NPAGED_LOOKASIDE_LIST BcbLookasideList
;
71 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList
;
74 static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs
, const char* file
, int line
)
79 DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
80 file
, line
, cs
, cs
->ReferenceCount
, cs
->Dirty
, cs
->PageOut
);
83 static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs
, const char* file
, int line
)
88 DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
89 file
, line
, cs
, cs
->ReferenceCount
, cs
->Dirty
, cs
->PageOut
);
92 #define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
93 #define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
95 #define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
96 #define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg
);
103 /* FUNCTIONS *****************************************************************/
113 PLIST_ENTRY current_entry
;
114 PCACHE_SEGMENT current
;
123 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb
);
125 KeAcquireGuardedMutex(&ViewLock
);
126 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldirql
);
128 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
129 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
131 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
, BcbSegmentListEntry
);
132 current_entry
= current_entry
->Flink
;
134 DPRINT1(" CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
135 current
, current
->ReferenceCount
, current
->Dirty
, current
->PageOut
);
137 KeReleaseSpinLock(&Bcb
->BcbLock
, oldirql
);
138 KeReleaseGuardedMutex(&ViewLock
);
142 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb
);
153 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment
)
158 Status
= WriteCacheSegment(CacheSegment
);
159 if (NT_SUCCESS(Status
))
161 KeAcquireGuardedMutex(&ViewLock
);
162 KeAcquireSpinLock(&CacheSegment
->Bcb
->BcbLock
, &oldIrql
);
164 CacheSegment
->Dirty
= FALSE
;
165 RemoveEntryList(&CacheSegment
->DirtySegmentListEntry
);
166 DirtyPageCount
-= CacheSegment
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
167 CcRosCacheSegmentDecRefCount ( CacheSegment
);
169 KeReleaseSpinLock(&CacheSegment
->Bcb
->BcbLock
, oldIrql
);
170 KeReleaseGuardedMutex(&ViewLock
);
178 CcRosFlushDirtyPages(ULONG Target
, PULONG Count
)
180 PLIST_ENTRY current_entry
;
181 PCACHE_SEGMENT current
;
182 ULONG PagesPerSegment
;
185 static ULONG WriteCount
[4] = {0, 0, 0, 0};
188 DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target
);
192 KeEnterCriticalRegion();
193 KeAcquireGuardedMutex(&ViewLock
);
195 WriteCount
[0] = WriteCount
[1];
196 WriteCount
[1] = WriteCount
[2];
197 WriteCount
[2] = WriteCount
[3];
200 NewTarget
= WriteCount
[0] + WriteCount
[1] + WriteCount
[2];
202 if (NewTarget
< DirtyPageCount
)
204 NewTarget
= (DirtyPageCount
- NewTarget
+ 3) / 4;
205 WriteCount
[0] += NewTarget
;
206 WriteCount
[1] += NewTarget
;
207 WriteCount
[2] += NewTarget
;
208 WriteCount
[3] += NewTarget
;
211 NewTarget
= WriteCount
[0];
213 Target
= max(NewTarget
, Target
);
215 current_entry
= DirtySegmentListHead
.Flink
;
216 if (current_entry
== &DirtySegmentListHead
)
218 DPRINT("No Dirty pages\n");
221 while (current_entry
!= &DirtySegmentListHead
&& Target
> 0)
223 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
224 DirtySegmentListEntry
);
225 current_entry
= current_entry
->Flink
;
227 Locked
= current
->Bcb
->Callbacks
->AcquireForLazyWrite(
228 current
->Bcb
->LazyWriteContext
, FALSE
);
234 Locked
= ExTryToAcquirePushLockExclusive(¤t
->Lock
);
237 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
238 current
->Bcb
->LazyWriteContext
);
243 ASSERT(current
->Dirty
);
244 if (current
->ReferenceCount
> 1)
246 ExReleasePushLock(¤t
->Lock
);
247 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
248 current
->Bcb
->LazyWriteContext
);
252 PagesPerSegment
= current
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
254 KeReleaseGuardedMutex(&ViewLock
);
256 Status
= CcRosFlushCacheSegment(current
);
258 ExReleasePushLock(¤t
->Lock
);
259 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
260 current
->Bcb
->LazyWriteContext
);
262 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
264 DPRINT1("CC: Failed to flush cache segment.\n");
268 (*Count
) += PagesPerSegment
;
269 Target
-= PagesPerSegment
;
272 KeAcquireGuardedMutex(&ViewLock
);
273 current_entry
= DirtySegmentListHead
.Flink
;
276 if (*Count
< NewTarget
)
278 WriteCount
[1] += (NewTarget
- *Count
);
281 KeReleaseGuardedMutex(&ViewLock
);
282 KeLeaveCriticalRegion();
284 DPRINT("CcRosFlushDirtyPages() finished\n");
285 return(STATUS_SUCCESS
);
289 CcRosTrimCache(ULONG Target
, ULONG Priority
, PULONG NrFreed
)
291 * FUNCTION: Try to free some memory from the file cache.
293 * Target - The number of pages to be freed.
294 * Priority - The priority of free (currently unused).
295 * NrFreed - Points to a variable where the number of pages
296 * actually freed is returned.
299 PLIST_ENTRY current_entry
;
300 PCACHE_SEGMENT current
;
301 ULONG PagesPerSegment
;
307 BOOLEAN FlushedPages
= FALSE
;
309 DPRINT("CcRosTrimCache(Target %d)\n", Target
);
311 InitializeListHead(&FreeList
);
316 KeAcquireGuardedMutex(&ViewLock
);
318 current_entry
= CacheSegmentLRUListHead
.Flink
;
319 while (current_entry
!= &CacheSegmentLRUListHead
)
321 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
322 CacheSegmentLRUListEntry
);
323 current_entry
= current_entry
->Flink
;
325 KeAcquireSpinLock(¤t
->Bcb
->BcbLock
, &oldIrql
);
327 /* Reference the cache segment */
328 CcRosCacheSegmentIncRefCount(current
);
330 /* Check if it's mapped and not dirty */
331 if (current
->MappedCount
> 0 && !current
->Dirty
)
333 /* We have to break these locks because Cc sucks */
334 KeReleaseSpinLock(¤t
->Bcb
->BcbLock
, oldIrql
);
335 KeReleaseGuardedMutex(&ViewLock
);
337 /* Page out the segment */
338 for (i
= 0; i
< current
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
; i
++)
340 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
342 MmPageOutPhysicalAddress(Page
);
345 /* Reacquire the locks */
346 KeAcquireGuardedMutex(&ViewLock
);
347 KeAcquireSpinLock(¤t
->Bcb
->BcbLock
, &oldIrql
);
350 /* Dereference the cache segment */
351 CcRosCacheSegmentDecRefCount(current
);
353 /* Check if we can free this entry now */
354 if (current
->ReferenceCount
== 0)
356 ASSERT(!current
->Dirty
);
357 ASSERT(!current
->MappedCount
);
359 RemoveEntryList(¤t
->BcbSegmentListEntry
);
360 RemoveEntryList(¤t
->CacheSegmentListEntry
);
361 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
362 InsertHeadList(&FreeList
, ¤t
->BcbSegmentListEntry
);
364 /* Calculate how many pages we freed for Mm */
365 PagesPerSegment
= current
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
366 PagesFreed
= min(PagesPerSegment
, Target
);
367 Target
-= PagesFreed
;
368 (*NrFreed
) += PagesFreed
;
371 KeReleaseSpinLock(¤t
->Bcb
->BcbLock
, oldIrql
);
374 KeReleaseGuardedMutex(&ViewLock
);
376 /* Try flushing pages if we haven't met our target */
377 if (Target
> 0 && !FlushedPages
)
379 /* Flush dirty pages to disk */
380 CcRosFlushDirtyPages(Target
, &PagesFreed
);
383 /* We can only swap as many pages as we flushed */
384 if (PagesFreed
< Target
) Target
= PagesFreed
;
386 /* Check if we flushed anything */
389 /* Try again after flushing dirty pages */
390 DPRINT("Flushed %d dirty cache pages to disk\n", PagesFreed
);
395 while (!IsListEmpty(&FreeList
))
397 current_entry
= RemoveHeadList(&FreeList
);
398 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
399 BcbSegmentListEntry
);
400 CcRosInternalFreeCacheSegment(current
);
403 DPRINT("Evicted %d cache pages\n", (*NrFreed
));
405 return(STATUS_SUCCESS
);
410 CcRosReleaseCacheSegment(PBCB Bcb
,
411 PCACHE_SEGMENT CacheSeg
,
416 BOOLEAN WasDirty
= CacheSeg
->Dirty
;
421 DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
422 Bcb
, CacheSeg
, Valid
);
424 CacheSeg
->Valid
= Valid
;
425 CacheSeg
->Dirty
= CacheSeg
->Dirty
|| Dirty
;
427 KeAcquireGuardedMutex(&ViewLock
);
428 if (!WasDirty
&& CacheSeg
->Dirty
)
430 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
431 DirtyPageCount
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
436 CacheSeg
->MappedCount
++;
438 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
439 CcRosCacheSegmentDecRefCount(CacheSeg
);
440 if (Mapped
&& CacheSeg
->MappedCount
== 1)
442 CcRosCacheSegmentIncRefCount(CacheSeg
);
444 if (!WasDirty
&& CacheSeg
->Dirty
)
446 CcRosCacheSegmentIncRefCount(CacheSeg
);
448 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
449 KeReleaseGuardedMutex(&ViewLock
);
450 ExReleasePushLock(&CacheSeg
->Lock
);
451 KeLeaveCriticalRegion();
453 return(STATUS_SUCCESS
);
456 /* Returns with Cache Segment Lock Held! */
459 CcRosLookupCacheSegment(PBCB Bcb
, ULONG FileOffset
)
461 PLIST_ENTRY current_entry
;
462 PCACHE_SEGMENT current
;
467 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb
, FileOffset
);
469 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
470 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
471 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
473 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
474 BcbSegmentListEntry
);
475 if (current
->FileOffset
<= FileOffset
&&
476 (current
->FileOffset
+ Bcb
->CacheSegmentSize
) > FileOffset
)
478 CcRosCacheSegmentIncRefCount(current
);
479 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
480 KeEnterCriticalRegion();
481 ExAcquirePushLockExclusive(¤t
->Lock
);
484 current_entry
= current_entry
->Flink
;
486 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
492 CcRosMarkDirtyCacheSegment(PBCB Bcb
, ULONG FileOffset
)
494 PCACHE_SEGMENT CacheSeg
;
499 DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb
, FileOffset
);
501 CacheSeg
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
502 if (CacheSeg
== NULL
)
504 KeBugCheck(CACHE_MANAGER
);
506 if (!CacheSeg
->Dirty
)
508 KeAcquireGuardedMutex(&ViewLock
);
509 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
510 DirtyPageCount
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
511 KeReleaseGuardedMutex(&ViewLock
);
515 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
516 CcRosCacheSegmentDecRefCount(CacheSeg
);
517 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
520 KeAcquireGuardedMutex(&ViewLock
);
522 /* Move to the tail of the LRU list */
523 RemoveEntryList(&CacheSeg
->CacheSegmentLRUListEntry
);
524 InsertTailList(&CacheSegmentLRUListHead
, &CacheSeg
->CacheSegmentLRUListEntry
);
526 KeReleaseGuardedMutex(&ViewLock
);
528 CacheSeg
->Dirty
= TRUE
;
529 ExReleasePushLock(&CacheSeg
->Lock
);
530 KeLeaveCriticalRegion();
532 return(STATUS_SUCCESS
);
537 CcRosUnmapCacheSegment(PBCB Bcb
, ULONG FileOffset
, BOOLEAN NowDirty
)
539 PCACHE_SEGMENT CacheSeg
;
545 DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
546 Bcb
, FileOffset
, NowDirty
);
548 CacheSeg
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
549 if (CacheSeg
== NULL
)
551 return(STATUS_UNSUCCESSFUL
);
554 WasDirty
= CacheSeg
->Dirty
;
555 CacheSeg
->Dirty
= CacheSeg
->Dirty
|| NowDirty
;
557 CacheSeg
->MappedCount
--;
559 if (!WasDirty
&& NowDirty
)
561 KeAcquireGuardedMutex(&ViewLock
);
562 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
563 DirtyPageCount
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
564 KeReleaseGuardedMutex(&ViewLock
);
567 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
568 CcRosCacheSegmentDecRefCount(CacheSeg
);
569 if (!WasDirty
&& NowDirty
)
571 CcRosCacheSegmentIncRefCount(CacheSeg
);
573 if (CacheSeg
->MappedCount
== 0)
575 CcRosCacheSegmentDecRefCount(CacheSeg
);
577 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
579 ExReleasePushLock(&CacheSeg
->Lock
);
580 KeLeaveCriticalRegion();
582 return(STATUS_SUCCESS
);
587 CcRosCreateCacheSegment(PBCB Bcb
,
589 PCACHE_SEGMENT
* CacheSeg
)
591 PCACHE_SEGMENT current
;
592 PCACHE_SEGMENT previous
;
593 PLIST_ENTRY current_entry
;
597 ULONG StartingOffset
;
599 PHYSICAL_ADDRESS BoundaryAddressMultiple
;
603 DPRINT("CcRosCreateCacheSegment()\n");
605 BoundaryAddressMultiple
.QuadPart
= 0;
606 if (FileOffset
>= Bcb
->FileSize
.u
.LowPart
)
609 return STATUS_INVALID_PARAMETER
;
612 current
= ExAllocateFromNPagedLookasideList(&CacheSegLookasideList
);
613 current
->Valid
= FALSE
;
614 current
->Dirty
= FALSE
;
615 current
->PageOut
= FALSE
;
616 current
->FileOffset
= ROUND_DOWN(FileOffset
, Bcb
->CacheSegmentSize
);
621 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb
, current
);
624 current
->MappedCount
= 0;
625 current
->DirtySegmentListEntry
.Flink
= NULL
;
626 current
->DirtySegmentListEntry
.Blink
= NULL
;
627 current
->ReferenceCount
= 1;
628 ExInitializePushLock(¤t
->Lock
);
629 KeEnterCriticalRegion();
630 ExAcquirePushLockExclusive(¤t
->Lock
);
631 KeAcquireGuardedMutex(&ViewLock
);
634 /* There is window between the call to CcRosLookupCacheSegment
635 * and CcRosCreateCacheSegment. We must check if a segment on
636 * the fileoffset exist. If there exist a segment, we release
637 * our new created segment and return the existing one.
639 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
640 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
642 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
644 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
645 BcbSegmentListEntry
);
646 if (current
->FileOffset
<= FileOffset
&&
647 (current
->FileOffset
+ Bcb
->CacheSegmentSize
) > FileOffset
)
649 CcRosCacheSegmentIncRefCount(current
);
650 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
654 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
660 ExReleasePushLock(&(*CacheSeg
)->Lock
);
661 KeReleaseGuardedMutex(&ViewLock
);
662 ExFreeToNPagedLookasideList(&CacheSegLookasideList
, *CacheSeg
);
664 /* We're still in the critical region from above */
665 ExAcquirePushLockExclusive(¤t
->Lock
);
666 return STATUS_SUCCESS
;
668 if (current
->FileOffset
< FileOffset
)
670 if (previous
== NULL
)
676 if (previous
->FileOffset
< current
->FileOffset
)
682 current_entry
= current_entry
->Flink
;
684 /* There was no existing segment. */
688 InsertHeadList(&previous
->BcbSegmentListEntry
, ¤t
->BcbSegmentListEntry
);
692 InsertHeadList(&Bcb
->BcbSegmentListHead
, ¤t
->BcbSegmentListEntry
);
694 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
695 InsertTailList(&CacheSegmentListHead
, ¤t
->CacheSegmentListEntry
);
696 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
697 KeReleaseGuardedMutex(&ViewLock
);
699 KeAcquireSpinLock(&CiCacheSegMappingRegionLock
, &oldIrql
);
701 StartingOffset
= RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap
, Bcb
->CacheSegmentSize
/ PAGE_SIZE
, CiCacheSegMappingRegionHint
);
703 if (StartingOffset
== 0xffffffff)
705 DPRINT1("Out of CacheSeg mapping space\n");
706 KeBugCheck(CACHE_MANAGER
);
709 current
->BaseAddress
= CiCacheSegMappingRegionBase
+ StartingOffset
* PAGE_SIZE
;
711 if (CiCacheSegMappingRegionHint
== StartingOffset
)
713 CiCacheSegMappingRegionHint
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
716 KeReleaseSpinLock(&CiCacheSegMappingRegionLock
, oldIrql
);
718 MmLockAddressSpace(MmGetKernelAddressSpace());
719 current
->BaseAddress
= NULL
;
720 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
721 0, // nothing checks for cache_segment mareas, so set to 0
722 ¤t
->BaseAddress
,
723 Bcb
->CacheSegmentSize
,
725 (PMEMORY_AREA
*)¤t
->MemoryArea
,
728 BoundaryAddressMultiple
);
729 MmUnlockAddressSpace(MmGetKernelAddressSpace());
730 if (!NT_SUCCESS(Status
))
732 KeBugCheck(CACHE_MANAGER
);
736 /* Create a virtual mapping for this memory area */
737 MI_SET_USAGE(MI_USAGE_CACHE
);
741 if ((Bcb
->FileObject
) && (Bcb
->FileObject
->FileName
.Buffer
))
743 pos
= wcsrchr(Bcb
->FileObject
->FileName
.Buffer
, '\\');
744 len
= wcslen(pos
) * sizeof(WCHAR
);
745 if (pos
) snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
749 MmMapMemoryArea(current
->BaseAddress
, Bcb
->CacheSegmentSize
,
750 MC_CACHE
, PAGE_READWRITE
);
752 return(STATUS_SUCCESS
);
757 CcRosGetCacheSegmentChain(PBCB Bcb
,
760 PCACHE_SEGMENT
* CacheSeg
)
762 PCACHE_SEGMENT current
;
764 PCACHE_SEGMENT
* CacheSegList
;
765 PCACHE_SEGMENT Previous
= NULL
;
769 DPRINT("CcRosGetCacheSegmentChain()\n");
771 Length
= ROUND_UP(Length
, Bcb
->CacheSegmentSize
);
773 CacheSegList
= _alloca(sizeof(PCACHE_SEGMENT
) *
774 (Length
/ Bcb
->CacheSegmentSize
));
777 * Look for a cache segment already mapping the same data.
779 for (i
= 0; i
< (Length
/ Bcb
->CacheSegmentSize
); i
++)
781 ULONG CurrentOffset
= FileOffset
+ (i
* Bcb
->CacheSegmentSize
);
782 current
= CcRosLookupCacheSegment(Bcb
, CurrentOffset
);
785 KeAcquireGuardedMutex(&ViewLock
);
787 /* Move to tail of LRU list */
788 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
789 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
791 KeReleaseGuardedMutex(&ViewLock
);
793 CacheSegList
[i
] = current
;
797 CcRosCreateCacheSegment(Bcb
, CurrentOffset
, ¤t
);
798 CacheSegList
[i
] = current
;
802 for (i
= 0; i
< (Length
/ Bcb
->CacheSegmentSize
); i
++)
806 *CacheSeg
= CacheSegList
[i
];
807 Previous
= CacheSegList
[i
];
811 Previous
->NextInChain
= CacheSegList
[i
];
812 Previous
= CacheSegList
[i
];
816 Previous
->NextInChain
= NULL
;
818 return(STATUS_SUCCESS
);
823 CcRosGetCacheSegment(PBCB Bcb
,
828 PCACHE_SEGMENT
* CacheSeg
)
830 PCACHE_SEGMENT current
;
835 DPRINT("CcRosGetCacheSegment()\n");
838 * Look for a cache segment already mapping the same data.
840 current
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
844 * Otherwise create a new segment.
846 Status
= CcRosCreateCacheSegment(Bcb
, FileOffset
, ¤t
);
847 if (!NT_SUCCESS(Status
))
853 KeAcquireGuardedMutex(&ViewLock
);
855 /* Move to the tail of the LRU list */
856 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
857 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
859 KeReleaseGuardedMutex(&ViewLock
);
862 * Return information about the segment to the caller.
864 *UptoDate
= current
->Valid
;
865 *BaseAddress
= current
->BaseAddress
;
866 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress
);
868 *BaseOffset
= current
->FileOffset
;
869 return(STATUS_SUCCESS
);
873 CcRosRequestCacheSegment(PBCB Bcb
,
877 PCACHE_SEGMENT
* CacheSeg
)
879 * FUNCTION: Request a page mapping for a BCB
886 if ((FileOffset
% Bcb
->CacheSegmentSize
) != 0)
888 DPRINT1("Bad fileoffset %x should be multiple of %x",
889 FileOffset
, Bcb
->CacheSegmentSize
);
890 KeBugCheck(CACHE_MANAGER
);
893 return(CcRosGetCacheSegment(Bcb
,
903 CcFreeCachePage(PVOID Context
, MEMORY_AREA
* MemoryArea
, PVOID Address
,
904 PFN_NUMBER Page
, SWAPENTRY SwapEntry
, BOOLEAN Dirty
)
906 ASSERT(SwapEntry
== 0);
909 ASSERT(MmGetReferenceCountPage(Page
) == 1);
910 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
915 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg
)
917 * FUNCTION: Releases a cache segment associated with a BCB
927 DPRINT("Freeing cache segment 0x%p\n", CacheSeg
);
929 if ( CacheSeg
->Bcb
->Trace
)
931 DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg
->Bcb
, CacheSeg
);
935 RegionSize
= CacheSeg
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
937 /* Unmap all the pages. */
938 for (i
= 0; i
< RegionSize
; i
++)
940 MmDeleteVirtualMapping(NULL
,
941 CacheSeg
->BaseAddress
+ (i
* PAGE_SIZE
),
945 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
948 KeAcquireSpinLock(&CiCacheSegMappingRegionLock
, &oldIrql
);
949 /* Deallocate all the pages used. */
950 Base
= (ULONG
)(CacheSeg
->BaseAddress
- CiCacheSegMappingRegionBase
) / PAGE_SIZE
;
952 RtlClearBits(&CiCacheSegMappingRegionAllocMap
, Base
, RegionSize
);
954 CiCacheSegMappingRegionHint
= min (CiCacheSegMappingRegionHint
, Base
);
956 KeReleaseSpinLock(&CiCacheSegMappingRegionLock
, oldIrql
);
958 MmLockAddressSpace(MmGetKernelAddressSpace());
959 MmFreeMemoryArea(MmGetKernelAddressSpace(),
960 CacheSeg
->MemoryArea
,
963 MmUnlockAddressSpace(MmGetKernelAddressSpace());
965 ExFreeToNPagedLookasideList(&CacheSegLookasideList
, CacheSeg
);
966 return(STATUS_SUCCESS
);
971 CcRosFreeCacheSegment(PBCB Bcb
, PCACHE_SEGMENT CacheSeg
)
978 DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
981 KeAcquireGuardedMutex(&ViewLock
);
982 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
983 RemoveEntryList(&CacheSeg
->BcbSegmentListEntry
);
984 RemoveEntryList(&CacheSeg
->CacheSegmentListEntry
);
985 RemoveEntryList(&CacheSeg
->CacheSegmentLRUListEntry
);
988 RemoveEntryList(&CacheSeg
->DirtySegmentListEntry
);
989 DirtyPageCount
-= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
992 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
993 KeReleaseGuardedMutex(&ViewLock
);
995 Status
= CcRosInternalFreeCacheSegment(CacheSeg
);
1003 CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
1004 IN PLARGE_INTEGER FileOffset OPTIONAL
,
1006 OUT PIO_STATUS_BLOCK IoStatus
)
1009 LARGE_INTEGER Offset
;
1010 PCACHE_SEGMENT current
;
1014 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
1015 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
1017 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1019 Bcb
= (PBCB
)SectionObjectPointers
->SharedCacheMap
;
1023 Offset
= *FileOffset
;
1027 Offset
.QuadPart
= (LONGLONG
)0;
1028 Length
= Bcb
->FileSize
.u
.LowPart
;
1033 IoStatus
->Status
= STATUS_SUCCESS
;
1034 IoStatus
->Information
= 0;
1039 current
= CcRosLookupCacheSegment (Bcb
, Offset
.u
.LowPart
);
1040 if (current
!= NULL
)
1044 Status
= CcRosFlushCacheSegment(current
);
1045 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
1047 IoStatus
->Status
= Status
;
1050 ExReleasePushLock(¤t
->Lock
);
1051 KeLeaveCriticalRegion();
1052 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
1053 CcRosCacheSegmentDecRefCount(current
);
1054 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
1057 Offset
.QuadPart
+= Bcb
->CacheSegmentSize
;
1058 if (Length
> Bcb
->CacheSegmentSize
)
1060 Length
-= Bcb
->CacheSegmentSize
;
1072 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1079 CcRosDeleteFileCache(PFILE_OBJECT FileObject
, PBCB Bcb
)
1081 * FUNCTION: Releases the BCB associated with a file object
1084 PLIST_ENTRY current_entry
;
1085 PCACHE_SEGMENT current
;
1086 LIST_ENTRY FreeList
;
1092 KeReleaseGuardedMutex(&ViewLock
);
1094 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1096 KeAcquireGuardedMutex(&ViewLock
);
1098 if (Bcb
->RefCount
== 0)
1100 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1102 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1103 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1106 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1109 * Release all cache segments.
1111 InitializeListHead(&FreeList
);
1112 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
1113 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
1114 while (!IsListEmpty(&Bcb
->BcbSegmentListHead
))
1116 current_entry
= RemoveTailList(&Bcb
->BcbSegmentListHead
);
1117 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
, BcbSegmentListEntry
);
1118 RemoveEntryList(¤t
->CacheSegmentListEntry
);
1119 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
1122 RemoveEntryList(¤t
->DirtySegmentListEntry
);
1123 DirtyPageCount
-= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
1124 DPRINT1("Freeing dirty segment\n");
1126 InsertHeadList(&FreeList
, ¤t
->BcbSegmentListEntry
);
1131 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
1133 KeReleaseGuardedMutex(&ViewLock
);
1134 ObDereferenceObject (Bcb
->FileObject
);
1136 while (!IsListEmpty(&FreeList
))
1138 current_entry
= RemoveTailList(&FreeList
);
1139 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
, BcbSegmentListEntry
);
1140 CcRosInternalFreeCacheSegment(current
);
1142 ExFreeToNPagedLookasideList(&BcbLookasideList
, Bcb
);
1143 KeAcquireGuardedMutex(&ViewLock
);
1145 return(STATUS_SUCCESS
);
1150 CcRosReferenceCache(PFILE_OBJECT FileObject
)
1153 KeAcquireGuardedMutex(&ViewLock
);
1154 Bcb
= (PBCB
)FileObject
->SectionObjectPointer
->SharedCacheMap
;
1156 if (Bcb
->RefCount
== 0)
1158 ASSERT(Bcb
->BcbRemoveListEntry
.Flink
!= NULL
);
1159 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1160 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1165 ASSERT(Bcb
->BcbRemoveListEntry
.Flink
== NULL
);
1168 KeReleaseGuardedMutex(&ViewLock
);
1173 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1176 DPRINT("CcRosSetRemoveOnClose()\n");
1177 KeAcquireGuardedMutex(&ViewLock
);
1178 Bcb
= (PBCB
)SectionObjectPointer
->SharedCacheMap
;
1181 Bcb
->RemoveOnClose
= TRUE
;
1182 if (Bcb
->RefCount
== 0)
1184 CcRosDeleteFileCache(Bcb
->FileObject
, Bcb
);
1187 KeReleaseGuardedMutex(&ViewLock
);
1193 CcRosDereferenceCache(PFILE_OBJECT FileObject
)
1196 KeAcquireGuardedMutex(&ViewLock
);
1197 Bcb
= (PBCB
)FileObject
->SectionObjectPointer
->SharedCacheMap
;
1199 if (Bcb
->RefCount
> 0)
1202 if (Bcb
->RefCount
== 0)
1204 MmFreeSectionSegments(Bcb
->FileObject
);
1205 CcRosDeleteFileCache(FileObject
, Bcb
);
1208 KeReleaseGuardedMutex(&ViewLock
);
1212 CcRosReleaseFileCache(PFILE_OBJECT FileObject
)
1214 * FUNCTION: Called by the file system when a handle to a file object
1220 KeAcquireGuardedMutex(&ViewLock
);
1222 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1224 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1225 if (FileObject
->PrivateCacheMap
!= NULL
)
1227 FileObject
->PrivateCacheMap
= NULL
;
1228 if (Bcb
->RefCount
> 0)
1231 if (Bcb
->RefCount
== 0)
1233 MmFreeSectionSegments(Bcb
->FileObject
);
1234 CcRosDeleteFileCache(FileObject
, Bcb
);
1239 KeReleaseGuardedMutex(&ViewLock
);
1240 return(STATUS_SUCCESS
);
1245 CcTryToInitializeFileCache(PFILE_OBJECT FileObject
)
1250 KeAcquireGuardedMutex(&ViewLock
);
1252 ASSERT(FileObject
->SectionObjectPointer
);
1253 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1256 Status
= STATUS_UNSUCCESSFUL
;
1260 if (FileObject
->PrivateCacheMap
== NULL
)
1262 FileObject
->PrivateCacheMap
= Bcb
;
1265 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1267 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1268 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1270 Status
= STATUS_SUCCESS
;
1272 KeReleaseGuardedMutex(&ViewLock
);
1279 CcRosInitializeFileCache(PFILE_OBJECT FileObject
,
1280 ULONG CacheSegmentSize
,
1281 PCACHE_MANAGER_CALLBACKS CallBacks
,
1282 PVOID LazyWriterContext
)
1284 * FUNCTION: Initializes a BCB for a file object
1289 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1290 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1291 FileObject
, Bcb
, CacheSegmentSize
);
1293 KeAcquireGuardedMutex(&ViewLock
);
1296 Bcb
= ExAllocateFromNPagedLookasideList(&BcbLookasideList
);
1299 KeReleaseGuardedMutex(&ViewLock
);
1300 return(STATUS_UNSUCCESSFUL
);
1302 memset(Bcb
, 0, sizeof(BCB
));
1303 ObReferenceObjectByPointer(FileObject
,
1307 Bcb
->FileObject
= FileObject
;
1308 Bcb
->CacheSegmentSize
= CacheSegmentSize
;
1309 Bcb
->Callbacks
= CallBacks
;
1310 Bcb
->LazyWriteContext
= LazyWriterContext
;
1311 if (FileObject
->FsContext
)
1313 Bcb
->AllocationSize
=
1314 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->AllocationSize
;
1316 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->FileSize
;
1318 KeInitializeSpinLock(&Bcb
->BcbLock
);
1319 InitializeListHead(&Bcb
->BcbSegmentListHead
);
1320 FileObject
->SectionObjectPointer
->SharedCacheMap
= Bcb
;
1322 if (FileObject
->PrivateCacheMap
== NULL
)
1324 FileObject
->PrivateCacheMap
= Bcb
;
1327 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1329 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1330 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1332 KeReleaseGuardedMutex(&ViewLock
);
1334 return(STATUS_SUCCESS
);
1341 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1344 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1346 Bcb
= (PBCB
)SectionObjectPointers
->SharedCacheMap
;
1348 return Bcb
->FileObject
;
1361 PHYSICAL_ADDRESS BoundaryAddressMultiple
;
1364 DPRINT("CcInitView()\n");
1366 BoundaryAddressMultiple
.QuadPart
= 0;
1367 CiCacheSegMappingRegionHint
= 0;
1368 CiCacheSegMappingRegionBase
= NULL
;
1370 MmLockAddressSpace(MmGetKernelAddressSpace());
1372 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
1373 MEMORY_AREA_CACHE_SEGMENT
,
1374 &CiCacheSegMappingRegionBase
,
1375 CI_CACHESEG_MAPPING_REGION_SIZE
,
1380 BoundaryAddressMultiple
);
1381 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1382 if (!NT_SUCCESS(Status
))
1384 KeBugCheck(CACHE_MANAGER
);
1387 Buffer
= ExAllocatePool(NonPagedPool
, CI_CACHESEG_MAPPING_REGION_SIZE
/ (PAGE_SIZE
* 8));
1390 KeBugCheck(CACHE_MANAGER
);
1393 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap
, Buffer
, CI_CACHESEG_MAPPING_REGION_SIZE
/ PAGE_SIZE
);
1394 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap
);
1396 KeInitializeSpinLock(&CiCacheSegMappingRegionLock
);
1398 InitializeListHead(&CacheSegmentListHead
);
1399 InitializeListHead(&DirtySegmentListHead
);
1400 InitializeListHead(&CacheSegmentLRUListHead
);
1401 InitializeListHead(&ClosedListHead
);
1402 KeInitializeGuardedMutex(&ViewLock
);
1403 ExInitializeNPagedLookasideList (&iBcbLookasideList
,
1407 sizeof(INTERNAL_BCB
),
1410 ExInitializeNPagedLookasideList (&BcbLookasideList
,
1417 ExInitializeNPagedLookasideList (&CacheSegLookasideList
,
1421 sizeof(CACHE_SEGMENT
),
1425 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1427 CcInitCacheZeroPage();