/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows
 *
 * (1) If caching for the FCB hasn't been initiated then so do by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page
 */
32 /* INCLUDES ******************************************************************/
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
42 /* GLOBALS *******************************************************************/
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocate/deallocate space from this block
47 * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
50 //#define CACHE_BITMAP
52 static LIST_ENTRY DirtySegmentListHead
;
53 static LIST_ENTRY CacheSegmentListHead
;
54 static LIST_ENTRY CacheSegmentLRUListHead
;
55 static LIST_ENTRY ClosedListHead
;
56 ULONG DirtyPageCount
=0;
58 KGUARDED_MUTEX ViewLock
;
61 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
63 static PVOID CiCacheSegMappingRegionBase
= NULL
;
64 static RTL_BITMAP CiCacheSegMappingRegionAllocMap
;
65 static ULONG CiCacheSegMappingRegionHint
;
66 static KSPIN_LOCK CiCacheSegMappingRegionLock
;
69 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
70 static NPAGED_LOOKASIDE_LIST BcbLookasideList
;
71 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList
;
#if DBG
/* Traced reference-count increment: logs caller site when Bcb tracing is on. */
static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    ++cs->ReferenceCount;
    if ( cs->Bcb->Trace )
    {
        DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}

/* Traced reference-count decrement: logs caller site when Bcb tracing is on. */
static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    --cs->ReferenceCount;
    if ( cs->Bcb->Trace )
    {
        DbgPrint("(%s:%i) CacheSegment %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}

#define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
#define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
#else
/* Release builds: plain increment/decrement, no tracing overhead. */
#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
#endif
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg
);
103 /* FUNCTIONS *****************************************************************/
#if DBG
/*
 * Enable or disable debug tracing for a cache map (BCB). When enabling,
 * dumps the current state of every cache segment attached to the BCB.
 */
VOID
CcRosTraceCacheMap (
    PBCB Bcb,
    BOOLEAN Trace )
{
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;

    if ( !Bcb )
        return;

    Bcb->Trace = Trace;

    if ( Trace )
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );

        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);

        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (current_entry != &Bcb->BcbSegmentListHead)
        {
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  CacheSegment 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
    }
}
#else
/* Release builds: tracing compiles away to nothing. */
#define CcRosTraceCacheMap(x,y) (0)
#endif
153 CcRosFlushCacheSegment (
154 PCACHE_SEGMENT CacheSegment
)
159 Status
= WriteCacheSegment(CacheSegment
);
160 if (NT_SUCCESS(Status
))
162 KeAcquireGuardedMutex(&ViewLock
);
163 KeAcquireSpinLock(&CacheSegment
->Bcb
->BcbLock
, &oldIrql
);
165 CacheSegment
->Dirty
= FALSE
;
166 RemoveEntryList(&CacheSegment
->DirtySegmentListEntry
);
167 DirtyPageCount
-= CacheSegment
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
168 CcRosCacheSegmentDecRefCount(CacheSegment
);
170 KeReleaseSpinLock(&CacheSegment
->Bcb
->BcbLock
, oldIrql
);
171 KeReleaseGuardedMutex(&ViewLock
);
179 CcRosFlushDirtyPages (
184 PLIST_ENTRY current_entry
;
185 PCACHE_SEGMENT current
;
186 ULONG PagesPerSegment
;
189 LARGE_INTEGER ZeroTimeout
;
191 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
194 ZeroTimeout
.QuadPart
= 0;
196 KeEnterCriticalRegion();
197 KeAcquireGuardedMutex(&ViewLock
);
199 current_entry
= DirtySegmentListHead
.Flink
;
200 if (current_entry
== &DirtySegmentListHead
)
202 DPRINT("No Dirty pages\n");
205 while ((current_entry
!= &DirtySegmentListHead
) && (Target
> 0))
207 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
208 DirtySegmentListEntry
);
209 current_entry
= current_entry
->Flink
;
211 CcRosCacheSegmentIncRefCount(current
);
213 Locked
= current
->Bcb
->Callbacks
->AcquireForLazyWrite(
214 current
->Bcb
->LazyWriteContext
, Wait
);
217 CcRosCacheSegmentDecRefCount(current
);
221 Status
= KeWaitForSingleObject(¤t
->Mutex
,
225 Wait
? NULL
: &ZeroTimeout
);
226 if (Status
!= STATUS_SUCCESS
)
228 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
229 current
->Bcb
->LazyWriteContext
);
230 CcRosCacheSegmentDecRefCount(current
);
234 ASSERT(current
->Dirty
);
236 /* One reference is added above */
237 if (current
->ReferenceCount
> 2)
239 KeReleaseMutex(¤t
->Mutex
, FALSE
);
240 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
241 current
->Bcb
->LazyWriteContext
);
242 CcRosCacheSegmentDecRefCount(current
);
246 PagesPerSegment
= current
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
248 KeReleaseGuardedMutex(&ViewLock
);
250 Status
= CcRosFlushCacheSegment(current
);
252 KeReleaseMutex(¤t
->Mutex
, FALSE
);
253 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
254 current
->Bcb
->LazyWriteContext
);
256 KeAcquireGuardedMutex(&ViewLock
);
257 CcRosCacheSegmentDecRefCount(current
);
259 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
261 DPRINT1("CC: Failed to flush cache segment.\n");
265 (*Count
) += PagesPerSegment
;
266 Target
-= PagesPerSegment
;
269 current_entry
= DirtySegmentListHead
.Flink
;
272 KeReleaseGuardedMutex(&ViewLock
);
273 KeLeaveCriticalRegion();
275 DPRINT("CcRosFlushDirtyPages() finished\n");
276 return(STATUS_SUCCESS
);
285 * FUNCTION: Try to free some memory from the file cache.
287 * Target - The number of pages to be freed.
288 * Priority - The priority of free (currently unused).
289 * NrFreed - Points to a variable where the number of pages
290 * actually freed is returned.
293 PLIST_ENTRY current_entry
;
294 PCACHE_SEGMENT current
;
295 ULONG PagesPerSegment
;
301 BOOLEAN FlushedPages
= FALSE
;
303 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
305 InitializeListHead(&FreeList
);
310 KeAcquireGuardedMutex(&ViewLock
);
312 current_entry
= CacheSegmentLRUListHead
.Flink
;
313 while (current_entry
!= &CacheSegmentLRUListHead
)
315 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
316 CacheSegmentLRUListEntry
);
317 current_entry
= current_entry
->Flink
;
319 KeAcquireSpinLock(¤t
->Bcb
->BcbLock
, &oldIrql
);
321 /* Reference the cache segment */
322 CcRosCacheSegmentIncRefCount(current
);
324 /* Check if it's mapped and not dirty */
325 if (current
->MappedCount
> 0 && !current
->Dirty
)
327 /* We have to break these locks because Cc sucks */
328 KeReleaseSpinLock(¤t
->Bcb
->BcbLock
, oldIrql
);
329 KeReleaseGuardedMutex(&ViewLock
);
331 /* Page out the segment */
332 for (i
= 0; i
< current
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
; i
++)
334 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
336 MmPageOutPhysicalAddress(Page
);
339 /* Reacquire the locks */
340 KeAcquireGuardedMutex(&ViewLock
);
341 KeAcquireSpinLock(¤t
->Bcb
->BcbLock
, &oldIrql
);
344 /* Dereference the cache segment */
345 CcRosCacheSegmentDecRefCount(current
);
347 /* Check if we can free this entry now */
348 if (current
->ReferenceCount
== 0)
350 ASSERT(!current
->Dirty
);
351 ASSERT(!current
->MappedCount
);
353 RemoveEntryList(¤t
->BcbSegmentListEntry
);
354 RemoveEntryList(¤t
->CacheSegmentListEntry
);
355 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
356 InsertHeadList(&FreeList
, ¤t
->BcbSegmentListEntry
);
358 /* Calculate how many pages we freed for Mm */
359 PagesPerSegment
= current
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
360 PagesFreed
= min(PagesPerSegment
, Target
);
361 Target
-= PagesFreed
;
362 (*NrFreed
) += PagesFreed
;
365 KeReleaseSpinLock(¤t
->Bcb
->BcbLock
, oldIrql
);
368 KeReleaseGuardedMutex(&ViewLock
);
370 /* Try flushing pages if we haven't met our target */
371 if ((Target
> 0) && !FlushedPages
)
373 /* Flush dirty pages to disk */
374 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
);
377 /* We can only swap as many pages as we flushed */
378 if (PagesFreed
< Target
) Target
= PagesFreed
;
380 /* Check if we flushed anything */
383 /* Try again after flushing dirty pages */
384 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
389 while (!IsListEmpty(&FreeList
))
391 current_entry
= RemoveHeadList(&FreeList
);
392 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
393 BcbSegmentListEntry
);
394 CcRosInternalFreeCacheSegment(current
);
397 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
399 return(STATUS_SUCCESS
);
404 CcRosReleaseCacheSegment (
406 PCACHE_SEGMENT CacheSeg
,
416 DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %u)\n",
417 Bcb
, CacheSeg
, Valid
);
419 KeAcquireGuardedMutex(&ViewLock
);
420 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
422 CacheSeg
->Valid
= Valid
;
424 WasDirty
= CacheSeg
->Dirty
;
425 CacheSeg
->Dirty
= CacheSeg
->Dirty
|| Dirty
;
427 if (!WasDirty
&& CacheSeg
->Dirty
)
429 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
430 DirtyPageCount
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
435 CacheSeg
->MappedCount
++;
437 CcRosCacheSegmentDecRefCount(CacheSeg
);
438 if (Mapped
&& (CacheSeg
->MappedCount
== 1))
440 CcRosCacheSegmentIncRefCount(CacheSeg
);
442 if (!WasDirty
&& CacheSeg
->Dirty
)
444 CcRosCacheSegmentIncRefCount(CacheSeg
);
447 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
448 KeReleaseGuardedMutex(&ViewLock
);
449 KeReleaseMutex(&CacheSeg
->Mutex
, FALSE
);
451 return(STATUS_SUCCESS
);
454 /* Returns with Cache Segment Lock Held! */
457 CcRosLookupCacheSegment (
461 PLIST_ENTRY current_entry
;
462 PCACHE_SEGMENT current
;
467 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %lu)\n", Bcb
, FileOffset
);
469 KeAcquireGuardedMutex(&ViewLock
);
470 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
472 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
473 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
475 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
476 BcbSegmentListEntry
);
477 if (IsPointInSegment(current
->FileOffset
, Bcb
->CacheSegmentSize
,
480 CcRosCacheSegmentIncRefCount(current
);
481 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
482 KeReleaseGuardedMutex(&ViewLock
);
483 KeWaitForSingleObject(¤t
->Mutex
,
490 current_entry
= current_entry
->Flink
;
493 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
494 KeReleaseGuardedMutex(&ViewLock
);
501 CcRosMarkDirtyCacheSegment (
505 PCACHE_SEGMENT CacheSeg
;
510 DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %lu)\n", Bcb
, FileOffset
);
512 CacheSeg
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
513 if (CacheSeg
== NULL
)
515 KeBugCheck(CACHE_MANAGER
);
518 KeAcquireGuardedMutex(&ViewLock
);
519 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
521 if (!CacheSeg
->Dirty
)
523 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
524 DirtyPageCount
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
528 CcRosCacheSegmentDecRefCount(CacheSeg
);
531 /* Move to the tail of the LRU list */
532 RemoveEntryList(&CacheSeg
->CacheSegmentLRUListEntry
);
533 InsertTailList(&CacheSegmentLRUListHead
, &CacheSeg
->CacheSegmentLRUListEntry
);
535 CacheSeg
->Dirty
= TRUE
;
537 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
538 KeReleaseGuardedMutex(&ViewLock
);
539 KeReleaseMutex(&CacheSeg
->Mutex
, FALSE
);
541 return(STATUS_SUCCESS
);
546 CcRosUnmapCacheSegment (
551 PCACHE_SEGMENT CacheSeg
;
557 DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %lu, NowDirty %u)\n",
558 Bcb
, FileOffset
, NowDirty
);
560 CacheSeg
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
561 if (CacheSeg
== NULL
)
563 return(STATUS_UNSUCCESSFUL
);
566 KeAcquireGuardedMutex(&ViewLock
);
567 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
569 WasDirty
= CacheSeg
->Dirty
;
570 CacheSeg
->Dirty
= CacheSeg
->Dirty
|| NowDirty
;
572 CacheSeg
->MappedCount
--;
574 if (!WasDirty
&& NowDirty
)
576 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
577 DirtyPageCount
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
580 CcRosCacheSegmentDecRefCount(CacheSeg
);
581 if (!WasDirty
&& NowDirty
)
583 CcRosCacheSegmentIncRefCount(CacheSeg
);
585 if (CacheSeg
->MappedCount
== 0)
587 CcRosCacheSegmentDecRefCount(CacheSeg
);
590 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
591 KeReleaseGuardedMutex(&ViewLock
);
592 KeReleaseMutex(&CacheSeg
->Mutex
, FALSE
);
594 return(STATUS_SUCCESS
);
599 CcRosCreateCacheSegment (
602 PCACHE_SEGMENT
* CacheSeg
)
604 PCACHE_SEGMENT current
;
605 PCACHE_SEGMENT previous
;
606 PLIST_ENTRY current_entry
;
610 ULONG StartingOffset
;
612 PHYSICAL_ADDRESS BoundaryAddressMultiple
;
616 DPRINT("CcRosCreateCacheSegment()\n");
618 BoundaryAddressMultiple
.QuadPart
= 0;
619 if (FileOffset
>= Bcb
->FileSize
.u
.LowPart
)
622 return STATUS_INVALID_PARAMETER
;
625 current
= ExAllocateFromNPagedLookasideList(&CacheSegLookasideList
);
626 current
->Valid
= FALSE
;
627 current
->Dirty
= FALSE
;
628 current
->PageOut
= FALSE
;
629 current
->FileOffset
= ROUND_DOWN(FileOffset
, Bcb
->CacheSegmentSize
);
634 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb
, current
);
637 current
->MappedCount
= 0;
638 current
->DirtySegmentListEntry
.Flink
= NULL
;
639 current
->DirtySegmentListEntry
.Blink
= NULL
;
640 current
->ReferenceCount
= 1;
641 KeInitializeMutex(¤t
->Mutex
, 0);
642 KeWaitForSingleObject(¤t
->Mutex
,
647 KeAcquireGuardedMutex(&ViewLock
);
650 /* There is window between the call to CcRosLookupCacheSegment
651 * and CcRosCreateCacheSegment. We must check if a segment on
652 * the fileoffset exist. If there exist a segment, we release
653 * our new created segment and return the existing one.
655 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
656 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
658 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
660 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
661 BcbSegmentListEntry
);
662 if (IsPointInSegment(current
->FileOffset
, Bcb
->CacheSegmentSize
,
665 CcRosCacheSegmentIncRefCount(current
);
666 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
670 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
676 KeReleaseMutex(&(*CacheSeg
)->Mutex
, FALSE
);
677 KeReleaseGuardedMutex(&ViewLock
);
678 ExFreeToNPagedLookasideList(&CacheSegLookasideList
, *CacheSeg
);
680 KeWaitForSingleObject(¤t
->Mutex
,
685 return STATUS_SUCCESS
;
687 if (current
->FileOffset
< FileOffset
)
689 if (previous
== NULL
)
695 if (previous
->FileOffset
< current
->FileOffset
)
701 current_entry
= current_entry
->Flink
;
703 /* There was no existing segment. */
707 InsertHeadList(&previous
->BcbSegmentListEntry
, ¤t
->BcbSegmentListEntry
);
711 InsertHeadList(&Bcb
->BcbSegmentListHead
, ¤t
->BcbSegmentListEntry
);
713 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
714 InsertTailList(&CacheSegmentListHead
, ¤t
->CacheSegmentListEntry
);
715 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
716 KeReleaseGuardedMutex(&ViewLock
);
718 KeAcquireSpinLock(&CiCacheSegMappingRegionLock
, &oldIrql
);
720 StartingOffset
= RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap
, Bcb
->CacheSegmentSize
/ PAGE_SIZE
, CiCacheSegMappingRegionHint
);
722 if (StartingOffset
== 0xffffffff)
724 DPRINT1("Out of CacheSeg mapping space\n");
725 KeBugCheck(CACHE_MANAGER
);
728 current
->BaseAddress
= CiCacheSegMappingRegionBase
+ StartingOffset
* PAGE_SIZE
;
730 if (CiCacheSegMappingRegionHint
== StartingOffset
)
732 CiCacheSegMappingRegionHint
+= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
735 KeReleaseSpinLock(&CiCacheSegMappingRegionLock
, oldIrql
);
737 MmLockAddressSpace(MmGetKernelAddressSpace());
738 current
->BaseAddress
= NULL
;
739 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
740 0, // nothing checks for cache_segment mareas, so set to 0
741 ¤t
->BaseAddress
,
742 Bcb
->CacheSegmentSize
,
744 (PMEMORY_AREA
*)¤t
->MemoryArea
,
747 BoundaryAddressMultiple
);
748 MmUnlockAddressSpace(MmGetKernelAddressSpace());
749 if (!NT_SUCCESS(Status
))
751 KeBugCheck(CACHE_MANAGER
);
755 /* Create a virtual mapping for this memory area */
756 MI_SET_USAGE(MI_USAGE_CACHE
);
760 if ((Bcb
->FileObject
) && (Bcb
->FileObject
->FileName
.Buffer
))
762 pos
= wcsrchr(Bcb
->FileObject
->FileName
.Buffer
, '\\');
763 len
= wcslen(pos
) * sizeof(WCHAR
);
764 if (pos
) snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
768 MmMapMemoryArea(current
->BaseAddress
, Bcb
->CacheSegmentSize
,
769 MC_CACHE
, PAGE_READWRITE
);
771 return(STATUS_SUCCESS
);
776 CcRosGetCacheSegmentChain (
780 PCACHE_SEGMENT
* CacheSeg
)
782 PCACHE_SEGMENT current
;
784 PCACHE_SEGMENT
* CacheSegList
;
785 PCACHE_SEGMENT Previous
= NULL
;
789 DPRINT("CcRosGetCacheSegmentChain()\n");
791 Length
= ROUND_UP(Length
, Bcb
->CacheSegmentSize
);
793 CacheSegList
= _alloca(sizeof(PCACHE_SEGMENT
) *
794 (Length
/ Bcb
->CacheSegmentSize
));
797 * Look for a cache segment already mapping the same data.
799 for (i
= 0; i
< (Length
/ Bcb
->CacheSegmentSize
); i
++)
801 ULONG CurrentOffset
= FileOffset
+ (i
* Bcb
->CacheSegmentSize
);
802 current
= CcRosLookupCacheSegment(Bcb
, CurrentOffset
);
805 KeAcquireGuardedMutex(&ViewLock
);
807 /* Move to tail of LRU list */
808 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
809 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
811 KeReleaseGuardedMutex(&ViewLock
);
813 CacheSegList
[i
] = current
;
817 CcRosCreateCacheSegment(Bcb
, CurrentOffset
, ¤t
);
818 CacheSegList
[i
] = current
;
822 for (i
= 0; i
< (Length
/ Bcb
->CacheSegmentSize
); i
++)
826 *CacheSeg
= CacheSegList
[i
];
827 Previous
= CacheSegList
[i
];
831 Previous
->NextInChain
= CacheSegList
[i
];
832 Previous
= CacheSegList
[i
];
836 Previous
->NextInChain
= NULL
;
838 return(STATUS_SUCCESS
);
843 CcRosGetCacheSegment (
849 PCACHE_SEGMENT
* CacheSeg
)
851 PCACHE_SEGMENT current
;
856 DPRINT("CcRosGetCacheSegment()\n");
859 * Look for a cache segment already mapping the same data.
861 current
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
865 * Otherwise create a new segment.
867 Status
= CcRosCreateCacheSegment(Bcb
, FileOffset
, ¤t
);
868 if (!NT_SUCCESS(Status
))
874 KeAcquireGuardedMutex(&ViewLock
);
876 /* Move to the tail of the LRU list */
877 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
878 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
880 KeReleaseGuardedMutex(&ViewLock
);
883 * Return information about the segment to the caller.
885 *UptoDate
= current
->Valid
;
886 *BaseAddress
= current
->BaseAddress
;
887 DPRINT("*BaseAddress %p\n", *BaseAddress
);
889 *BaseOffset
= current
->FileOffset
;
890 return(STATUS_SUCCESS
);
895 CcRosRequestCacheSegment (
900 PCACHE_SEGMENT
* CacheSeg
)
902 * FUNCTION: Request a page mapping for a BCB
909 if ((FileOffset
% Bcb
->CacheSegmentSize
) != 0)
911 DPRINT1("Bad fileoffset %x should be multiple of %x",
912 FileOffset
, Bcb
->CacheSegmentSize
);
913 KeBugCheck(CACHE_MANAGER
);
916 return(CcRosGetCacheSegment(Bcb
,
929 MEMORY_AREA
* MemoryArea
,
935 ASSERT(SwapEntry
== 0);
938 ASSERT(MmGetReferenceCountPage(Page
) == 1);
939 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
944 CcRosInternalFreeCacheSegment (
945 PCACHE_SEGMENT CacheSeg
)
947 * FUNCTION: Releases a cache segment associated with a BCB
957 DPRINT("Freeing cache segment 0x%p\n", CacheSeg
);
959 if ( CacheSeg
->Bcb
->Trace
)
961 DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg
->Bcb
, CacheSeg
);
965 RegionSize
= CacheSeg
->Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
967 /* Unmap all the pages. */
968 for (i
= 0; i
< RegionSize
; i
++)
970 MmDeleteVirtualMapping(NULL
,
971 CacheSeg
->BaseAddress
+ (i
* PAGE_SIZE
),
975 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
978 KeAcquireSpinLock(&CiCacheSegMappingRegionLock
, &oldIrql
);
979 /* Deallocate all the pages used. */
980 Base
= (ULONG
)(CacheSeg
->BaseAddress
- CiCacheSegMappingRegionBase
) / PAGE_SIZE
;
982 RtlClearBits(&CiCacheSegMappingRegionAllocMap
, Base
, RegionSize
);
984 CiCacheSegMappingRegionHint
= min (CiCacheSegMappingRegionHint
, Base
);
986 KeReleaseSpinLock(&CiCacheSegMappingRegionLock
, oldIrql
);
988 MmLockAddressSpace(MmGetKernelAddressSpace());
989 MmFreeMemoryArea(MmGetKernelAddressSpace(),
990 CacheSeg
->MemoryArea
,
993 MmUnlockAddressSpace(MmGetKernelAddressSpace());
995 ExFreeToNPagedLookasideList(&CacheSegLookasideList
, CacheSeg
);
996 return(STATUS_SUCCESS
);
1005 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
1006 IN PLARGE_INTEGER FileOffset OPTIONAL
,
1008 OUT PIO_STATUS_BLOCK IoStatus
)
1011 LARGE_INTEGER Offset
;
1012 PCACHE_SEGMENT current
;
1016 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1017 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
1019 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1021 Bcb
= (PBCB
)SectionObjectPointers
->SharedCacheMap
;
1025 Offset
= *FileOffset
;
1029 Offset
.QuadPart
= (LONGLONG
)0;
1030 Length
= Bcb
->FileSize
.u
.LowPart
;
1035 IoStatus
->Status
= STATUS_SUCCESS
;
1036 IoStatus
->Information
= 0;
1041 current
= CcRosLookupCacheSegment (Bcb
, Offset
.u
.LowPart
);
1042 if (current
!= NULL
)
1046 Status
= CcRosFlushCacheSegment(current
);
1047 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
1049 IoStatus
->Status
= Status
;
1052 KeReleaseMutex(¤t
->Mutex
, FALSE
);
1054 KeAcquireGuardedMutex(&ViewLock
);
1055 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
1056 CcRosCacheSegmentDecRefCount(current
);
1057 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
1058 KeReleaseGuardedMutex(&ViewLock
);
1061 Offset
.QuadPart
+= Bcb
->CacheSegmentSize
;
1062 if (Length
> Bcb
->CacheSegmentSize
)
1064 Length
-= Bcb
->CacheSegmentSize
;
1076 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1083 CcRosDeleteFileCache (
1084 PFILE_OBJECT FileObject
,
1087 * FUNCTION: Releases the BCB associated with a file object
1090 PLIST_ENTRY current_entry
;
1091 PCACHE_SEGMENT current
;
1092 LIST_ENTRY FreeList
;
1098 KeReleaseGuardedMutex(&ViewLock
);
1100 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1102 KeAcquireGuardedMutex(&ViewLock
);
1104 if (Bcb
->RefCount
== 0)
1106 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1108 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1109 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1112 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1115 * Release all cache segments.
1117 InitializeListHead(&FreeList
);
1118 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
1119 while (!IsListEmpty(&Bcb
->BcbSegmentListHead
))
1121 current_entry
= RemoveTailList(&Bcb
->BcbSegmentListHead
);
1122 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
, BcbSegmentListEntry
);
1123 RemoveEntryList(¤t
->CacheSegmentListEntry
);
1124 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
1127 RemoveEntryList(¤t
->DirtySegmentListEntry
);
1128 DirtyPageCount
-= Bcb
->CacheSegmentSize
/ PAGE_SIZE
;
1129 DPRINT1("Freeing dirty segment\n");
1131 InsertHeadList(&FreeList
, ¤t
->BcbSegmentListEntry
);
1136 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
1138 KeReleaseGuardedMutex(&ViewLock
);
1139 ObDereferenceObject (Bcb
->FileObject
);
1141 while (!IsListEmpty(&FreeList
))
1143 current_entry
= RemoveTailList(&FreeList
);
1144 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
, BcbSegmentListEntry
);
1145 CcRosInternalFreeCacheSegment(current
);
1147 ExFreeToNPagedLookasideList(&BcbLookasideList
, Bcb
);
1148 KeAcquireGuardedMutex(&ViewLock
);
1150 return(STATUS_SUCCESS
);
1155 CcRosReferenceCache (
1156 PFILE_OBJECT FileObject
)
1159 KeAcquireGuardedMutex(&ViewLock
);
1160 Bcb
= (PBCB
)FileObject
->SectionObjectPointer
->SharedCacheMap
;
1162 if (Bcb
->RefCount
== 0)
1164 ASSERT(Bcb
->BcbRemoveListEntry
.Flink
!= NULL
);
1165 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1166 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1171 ASSERT(Bcb
->BcbRemoveListEntry
.Flink
== NULL
);
1174 KeReleaseGuardedMutex(&ViewLock
);
1179 CcRosSetRemoveOnClose (
1180 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1183 DPRINT("CcRosSetRemoveOnClose()\n");
1184 KeAcquireGuardedMutex(&ViewLock
);
1185 Bcb
= (PBCB
)SectionObjectPointer
->SharedCacheMap
;
1188 Bcb
->RemoveOnClose
= TRUE
;
1189 if (Bcb
->RefCount
== 0)
1191 CcRosDeleteFileCache(Bcb
->FileObject
, Bcb
);
1194 KeReleaseGuardedMutex(&ViewLock
);
1200 CcRosDereferenceCache (
1201 PFILE_OBJECT FileObject
)
1204 KeAcquireGuardedMutex(&ViewLock
);
1205 Bcb
= (PBCB
)FileObject
->SectionObjectPointer
->SharedCacheMap
;
1207 if (Bcb
->RefCount
> 0)
1210 if (Bcb
->RefCount
== 0)
1212 MmFreeSectionSegments(Bcb
->FileObject
);
1213 CcRosDeleteFileCache(FileObject
, Bcb
);
1216 KeReleaseGuardedMutex(&ViewLock
);
1221 CcRosReleaseFileCache (
1222 PFILE_OBJECT FileObject
)
1224 * FUNCTION: Called by the file system when a handle to a file object
1230 KeAcquireGuardedMutex(&ViewLock
);
1232 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1234 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1235 if (FileObject
->PrivateCacheMap
!= NULL
)
1237 FileObject
->PrivateCacheMap
= NULL
;
1238 if (Bcb
->RefCount
> 0)
1241 if (Bcb
->RefCount
== 0)
1243 MmFreeSectionSegments(Bcb
->FileObject
);
1244 CcRosDeleteFileCache(FileObject
, Bcb
);
1249 KeReleaseGuardedMutex(&ViewLock
);
1250 return(STATUS_SUCCESS
);
1255 CcTryToInitializeFileCache (
1256 PFILE_OBJECT FileObject
)
1261 KeAcquireGuardedMutex(&ViewLock
);
1263 ASSERT(FileObject
->SectionObjectPointer
);
1264 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1267 Status
= STATUS_UNSUCCESSFUL
;
1271 if (FileObject
->PrivateCacheMap
== NULL
)
1273 FileObject
->PrivateCacheMap
= Bcb
;
1276 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1278 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1279 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1281 Status
= STATUS_SUCCESS
;
1283 KeReleaseGuardedMutex(&ViewLock
);
1291 CcRosInitializeFileCache (
1292 PFILE_OBJECT FileObject
,
1293 ULONG CacheSegmentSize
,
1294 PCACHE_MANAGER_CALLBACKS CallBacks
,
1295 PVOID LazyWriterContext
)
1297 * FUNCTION: Initializes a BCB for a file object
1302 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1303 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %lu)\n",
1304 FileObject
, Bcb
, CacheSegmentSize
);
1306 KeAcquireGuardedMutex(&ViewLock
);
1309 Bcb
= ExAllocateFromNPagedLookasideList(&BcbLookasideList
);
1312 KeReleaseGuardedMutex(&ViewLock
);
1313 return(STATUS_UNSUCCESSFUL
);
1315 memset(Bcb
, 0, sizeof(BCB
));
1316 ObReferenceObjectByPointer(FileObject
,
1320 Bcb
->FileObject
= FileObject
;
1321 Bcb
->CacheSegmentSize
= CacheSegmentSize
;
1322 Bcb
->Callbacks
= CallBacks
;
1323 Bcb
->LazyWriteContext
= LazyWriterContext
;
1324 if (FileObject
->FsContext
)
1326 Bcb
->AllocationSize
=
1327 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->AllocationSize
;
1329 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->FileSize
;
1331 KeInitializeSpinLock(&Bcb
->BcbLock
);
1332 InitializeListHead(&Bcb
->BcbSegmentListHead
);
1333 FileObject
->SectionObjectPointer
->SharedCacheMap
= Bcb
;
1335 if (FileObject
->PrivateCacheMap
== NULL
)
1337 FileObject
->PrivateCacheMap
= Bcb
;
1340 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1342 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1343 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1345 KeReleaseGuardedMutex(&ViewLock
);
1347 return(STATUS_SUCCESS
);
1355 CcGetFileObjectFromSectionPtrs (
1356 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1359 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1361 Bcb
= (PBCB
)SectionObjectPointers
->SharedCacheMap
;
1363 return Bcb
->FileObject
;
1377 PHYSICAL_ADDRESS BoundaryAddressMultiple
;
1380 DPRINT("CcInitView()\n");
1382 BoundaryAddressMultiple
.QuadPart
= 0;
1383 CiCacheSegMappingRegionHint
= 0;
1384 CiCacheSegMappingRegionBase
= NULL
;
1386 MmLockAddressSpace(MmGetKernelAddressSpace());
1388 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
1389 MEMORY_AREA_CACHE_SEGMENT
,
1390 &CiCacheSegMappingRegionBase
,
1391 CI_CACHESEG_MAPPING_REGION_SIZE
,
1396 BoundaryAddressMultiple
);
1397 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1398 if (!NT_SUCCESS(Status
))
1400 KeBugCheck(CACHE_MANAGER
);
1403 Buffer
= ExAllocatePool(NonPagedPool
, CI_CACHESEG_MAPPING_REGION_SIZE
/ (PAGE_SIZE
* 8));
1406 KeBugCheck(CACHE_MANAGER
);
1409 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap
, Buffer
, CI_CACHESEG_MAPPING_REGION_SIZE
/ PAGE_SIZE
);
1410 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap
);
1412 KeInitializeSpinLock(&CiCacheSegMappingRegionLock
);
1414 InitializeListHead(&CacheSegmentListHead
);
1415 InitializeListHead(&DirtySegmentListHead
);
1416 InitializeListHead(&CacheSegmentLRUListHead
);
1417 InitializeListHead(&ClosedListHead
);
1418 KeInitializeGuardedMutex(&ViewLock
);
1419 ExInitializeNPagedLookasideList (&iBcbLookasideList
,
1423 sizeof(INTERNAL_BCB
),
1426 ExInitializeNPagedLookasideList (&BcbLookasideList
,
1433 ExInitializeNPagedLookasideList (&CacheSegLookasideList
,
1437 sizeof(CACHE_SEGMENT
),
1441 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1443 CcInitCacheZeroPage();