/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 */
/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows
 *
 * (1) If caching for the FCB hasn't been initiated then so do by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page
 */
32 /* INCLUDES ******************************************************************/
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
42 /* GLOBALS *******************************************************************/
44 static LIST_ENTRY DirtySegmentListHead
;
45 static LIST_ENTRY CacheSegmentListHead
;
46 static LIST_ENTRY CacheSegmentLRUListHead
;
47 static LIST_ENTRY ClosedListHead
;
48 ULONG DirtyPageCount
= 0;
50 KGUARDED_MUTEX ViewLock
;
52 NPAGED_LOOKASIDE_LIST iBcbLookasideList
;
53 static NPAGED_LOOKASIDE_LIST BcbLookasideList
;
54 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList
;
57 static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs
, const char* file
, int line
)
62 DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
63 file
, line
, cs
, cs
->ReferenceCount
, cs
->Dirty
, cs
->PageOut
);
66 static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs
, const char* file
, int line
)
71 DbgPrint("(%s:%i) CacheSegment %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
72 file
, line
, cs
, cs
->ReferenceCount
, cs
->Dirty
, cs
->PageOut
);
/* Reference-count helpers: the DBG build routes through the traced helper
 * functions; the release build inlines a bare increment/decrement. As
 * extracted, both pairs were defined unconditionally (a redefinition error);
 * the #if DBG / #else / #endif structure is restored here. */
#if DBG
#define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
#define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
#else
#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
#endif
83 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg
);
/* FUNCTIONS *****************************************************************/
#if DBG
/*
 * Enables or disables reference-count tracing on a cache map. When enabling,
 * dumps every cache segment currently attached to the BCB.
 * NOTE(review): signature and the !Bcb guard are reconstructed from the
 * ReactOS sources — confirm against the repository.
 */
VOID
CcRosTraceCacheMap (
    PBCB Bcb,
    BOOLEAN Trace )
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    KIRQL oldirql;

    if (!Bcb)
        return;

    Bcb->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb);

        /* Lock order: global ViewLock first, then the per-BCB spinlock. */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);

        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (current_entry != &Bcb->BcbSegmentListHead)
        {
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  CacheSegment 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut);
        }
        KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb);
    }
}
#endif
136 CcRosFlushCacheSegment (
137 PCACHE_SEGMENT CacheSegment
)
142 Status
= WriteCacheSegment(CacheSegment
);
143 if (NT_SUCCESS(Status
))
145 KeAcquireGuardedMutex(&ViewLock
);
146 KeAcquireSpinLock(&CacheSegment
->Bcb
->BcbLock
, &oldIrql
);
148 CacheSegment
->Dirty
= FALSE
;
149 RemoveEntryList(&CacheSegment
->DirtySegmentListEntry
);
150 DirtyPageCount
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
151 CcRosCacheSegmentDecRefCount(CacheSegment
);
153 KeReleaseSpinLock(&CacheSegment
->Bcb
->BcbLock
, oldIrql
);
154 KeReleaseGuardedMutex(&ViewLock
);
162 CcRosFlushDirtyPages (
167 PLIST_ENTRY current_entry
;
168 PCACHE_SEGMENT current
;
169 ULONG PagesPerSegment
;
172 LARGE_INTEGER ZeroTimeout
;
174 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target
);
177 ZeroTimeout
.QuadPart
= 0;
179 KeEnterCriticalRegion();
180 KeAcquireGuardedMutex(&ViewLock
);
182 current_entry
= DirtySegmentListHead
.Flink
;
183 if (current_entry
== &DirtySegmentListHead
)
185 DPRINT("No Dirty pages\n");
188 while ((current_entry
!= &DirtySegmentListHead
) && (Target
> 0))
190 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
191 DirtySegmentListEntry
);
192 current_entry
= current_entry
->Flink
;
194 CcRosCacheSegmentIncRefCount(current
);
196 Locked
= current
->Bcb
->Callbacks
->AcquireForLazyWrite(
197 current
->Bcb
->LazyWriteContext
, Wait
);
200 CcRosCacheSegmentDecRefCount(current
);
204 Status
= KeWaitForSingleObject(¤t
->Mutex
,
208 Wait
? NULL
: &ZeroTimeout
);
209 if (Status
!= STATUS_SUCCESS
)
211 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
212 current
->Bcb
->LazyWriteContext
);
213 CcRosCacheSegmentDecRefCount(current
);
217 ASSERT(current
->Dirty
);
219 /* One reference is added above */
220 if (current
->ReferenceCount
> 2)
222 KeReleaseMutex(¤t
->Mutex
, FALSE
);
223 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
224 current
->Bcb
->LazyWriteContext
);
225 CcRosCacheSegmentDecRefCount(current
);
229 PagesPerSegment
= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
231 KeReleaseGuardedMutex(&ViewLock
);
233 Status
= CcRosFlushCacheSegment(current
);
235 KeReleaseMutex(¤t
->Mutex
, FALSE
);
236 current
->Bcb
->Callbacks
->ReleaseFromLazyWrite(
237 current
->Bcb
->LazyWriteContext
);
239 KeAcquireGuardedMutex(&ViewLock
);
240 CcRosCacheSegmentDecRefCount(current
);
242 if (!NT_SUCCESS(Status
) && (Status
!= STATUS_END_OF_FILE
))
244 DPRINT1("CC: Failed to flush cache segment.\n");
248 (*Count
) += PagesPerSegment
;
249 Target
-= PagesPerSegment
;
252 current_entry
= DirtySegmentListHead
.Flink
;
255 KeReleaseGuardedMutex(&ViewLock
);
256 KeLeaveCriticalRegion();
258 DPRINT("CcRosFlushDirtyPages() finished\n");
259 return STATUS_SUCCESS
;
268 * FUNCTION: Try to free some memory from the file cache.
270 * Target - The number of pages to be freed.
271 * Priority - The priority of free (currently unused).
272 * NrFreed - Points to a variable where the number of pages
273 * actually freed is returned.
276 PLIST_ENTRY current_entry
;
277 PCACHE_SEGMENT current
;
278 ULONG PagesPerSegment
;
284 BOOLEAN FlushedPages
= FALSE
;
286 DPRINT("CcRosTrimCache(Target %lu)\n", Target
);
288 InitializeListHead(&FreeList
);
293 KeAcquireGuardedMutex(&ViewLock
);
295 current_entry
= CacheSegmentLRUListHead
.Flink
;
296 while (current_entry
!= &CacheSegmentLRUListHead
)
298 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
299 CacheSegmentLRUListEntry
);
300 current_entry
= current_entry
->Flink
;
302 KeAcquireSpinLock(¤t
->Bcb
->BcbLock
, &oldIrql
);
304 /* Reference the cache segment */
305 CcRosCacheSegmentIncRefCount(current
);
307 /* Check if it's mapped and not dirty */
308 if (current
->MappedCount
> 0 && !current
->Dirty
)
310 /* We have to break these locks because Cc sucks */
311 KeReleaseSpinLock(¤t
->Bcb
->BcbLock
, oldIrql
);
312 KeReleaseGuardedMutex(&ViewLock
);
314 /* Page out the segment */
315 for (i
= 0; i
< VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
; i
++)
317 Page
= (PFN_NUMBER
)(MmGetPhysicalAddress((PUCHAR
)current
->BaseAddress
+ (i
* PAGE_SIZE
)).QuadPart
>> PAGE_SHIFT
);
319 MmPageOutPhysicalAddress(Page
);
322 /* Reacquire the locks */
323 KeAcquireGuardedMutex(&ViewLock
);
324 KeAcquireSpinLock(¤t
->Bcb
->BcbLock
, &oldIrql
);
327 /* Dereference the cache segment */
328 CcRosCacheSegmentDecRefCount(current
);
330 /* Check if we can free this entry now */
331 if (current
->ReferenceCount
== 0)
333 ASSERT(!current
->Dirty
);
334 ASSERT(!current
->MappedCount
);
336 RemoveEntryList(¤t
->BcbSegmentListEntry
);
337 RemoveEntryList(¤t
->CacheSegmentListEntry
);
338 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
339 InsertHeadList(&FreeList
, ¤t
->BcbSegmentListEntry
);
341 /* Calculate how many pages we freed for Mm */
342 PagesPerSegment
= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
343 PagesFreed
= min(PagesPerSegment
, Target
);
344 Target
-= PagesFreed
;
345 (*NrFreed
) += PagesFreed
;
348 KeReleaseSpinLock(¤t
->Bcb
->BcbLock
, oldIrql
);
351 KeReleaseGuardedMutex(&ViewLock
);
353 /* Try flushing pages if we haven't met our target */
354 if ((Target
> 0) && !FlushedPages
)
356 /* Flush dirty pages to disk */
357 CcRosFlushDirtyPages(Target
, &PagesFreed
, FALSE
);
360 /* We can only swap as many pages as we flushed */
361 if (PagesFreed
< Target
) Target
= PagesFreed
;
363 /* Check if we flushed anything */
366 /* Try again after flushing dirty pages */
367 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed
);
372 while (!IsListEmpty(&FreeList
))
374 current_entry
= RemoveHeadList(&FreeList
);
375 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
376 BcbSegmentListEntry
);
377 CcRosInternalFreeCacheSegment(current
);
380 DPRINT("Evicted %lu cache pages\n", (*NrFreed
));
382 return STATUS_SUCCESS
;
387 CcRosReleaseCacheSegment (
389 PCACHE_SEGMENT CacheSeg
,
399 DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %u)\n",
400 Bcb
, CacheSeg
, Valid
);
402 KeAcquireGuardedMutex(&ViewLock
);
403 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
405 CacheSeg
->Valid
= Valid
;
407 WasDirty
= CacheSeg
->Dirty
;
408 CacheSeg
->Dirty
= CacheSeg
->Dirty
|| Dirty
;
410 if (!WasDirty
&& CacheSeg
->Dirty
)
412 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
413 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
418 CacheSeg
->MappedCount
++;
420 CcRosCacheSegmentDecRefCount(CacheSeg
);
421 if (Mapped
&& (CacheSeg
->MappedCount
== 1))
423 CcRosCacheSegmentIncRefCount(CacheSeg
);
425 if (!WasDirty
&& CacheSeg
->Dirty
)
427 CcRosCacheSegmentIncRefCount(CacheSeg
);
430 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
431 KeReleaseGuardedMutex(&ViewLock
);
432 KeReleaseMutex(&CacheSeg
->Mutex
, FALSE
);
434 return STATUS_SUCCESS
;
437 /* Returns with Cache Segment Lock Held! */
440 CcRosLookupCacheSegment (
444 PLIST_ENTRY current_entry
;
445 PCACHE_SEGMENT current
;
450 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %lu)\n", Bcb
, FileOffset
);
452 KeAcquireGuardedMutex(&ViewLock
);
453 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
455 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
456 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
458 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
459 BcbSegmentListEntry
);
460 if (IsPointInSegment(current
->FileOffset
, VACB_MAPPING_GRANULARITY
,
463 CcRosCacheSegmentIncRefCount(current
);
464 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
465 KeReleaseGuardedMutex(&ViewLock
);
466 KeWaitForSingleObject(¤t
->Mutex
,
473 if (current
->FileOffset
> FileOffset
)
475 current_entry
= current_entry
->Flink
;
478 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
479 KeReleaseGuardedMutex(&ViewLock
);
486 CcRosMarkDirtyCacheSegment (
490 PCACHE_SEGMENT CacheSeg
;
495 DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %lu)\n", Bcb
, FileOffset
);
497 CacheSeg
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
498 if (CacheSeg
== NULL
)
500 KeBugCheck(CACHE_MANAGER
);
503 KeAcquireGuardedMutex(&ViewLock
);
504 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
506 if (!CacheSeg
->Dirty
)
508 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
509 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
513 CcRosCacheSegmentDecRefCount(CacheSeg
);
516 /* Move to the tail of the LRU list */
517 RemoveEntryList(&CacheSeg
->CacheSegmentLRUListEntry
);
518 InsertTailList(&CacheSegmentLRUListHead
, &CacheSeg
->CacheSegmentLRUListEntry
);
520 CacheSeg
->Dirty
= TRUE
;
522 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
523 KeReleaseGuardedMutex(&ViewLock
);
524 KeReleaseMutex(&CacheSeg
->Mutex
, FALSE
);
526 return STATUS_SUCCESS
;
531 CcRosUnmapCacheSegment (
536 PCACHE_SEGMENT CacheSeg
;
542 DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %lu, NowDirty %u)\n",
543 Bcb
, FileOffset
, NowDirty
);
545 CacheSeg
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
546 if (CacheSeg
== NULL
)
548 return STATUS_UNSUCCESSFUL
;
551 KeAcquireGuardedMutex(&ViewLock
);
552 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
554 WasDirty
= CacheSeg
->Dirty
;
555 CacheSeg
->Dirty
= CacheSeg
->Dirty
|| NowDirty
;
557 CacheSeg
->MappedCount
--;
559 if (!WasDirty
&& NowDirty
)
561 InsertTailList(&DirtySegmentListHead
, &CacheSeg
->DirtySegmentListEntry
);
562 DirtyPageCount
+= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
565 CcRosCacheSegmentDecRefCount(CacheSeg
);
566 if (!WasDirty
&& NowDirty
)
568 CcRosCacheSegmentIncRefCount(CacheSeg
);
570 if (CacheSeg
->MappedCount
== 0)
572 CcRosCacheSegmentDecRefCount(CacheSeg
);
575 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
576 KeReleaseGuardedMutex(&ViewLock
);
577 KeReleaseMutex(&CacheSeg
->Mutex
, FALSE
);
579 return STATUS_SUCCESS
;
584 CcRosCreateCacheSegment (
587 PCACHE_SEGMENT
* CacheSeg
)
589 PCACHE_SEGMENT current
;
590 PCACHE_SEGMENT previous
;
591 PLIST_ENTRY current_entry
;
597 DPRINT("CcRosCreateCacheSegment()\n");
599 if (FileOffset
>= Bcb
->FileSize
.u
.LowPart
)
602 return STATUS_INVALID_PARAMETER
;
605 current
= ExAllocateFromNPagedLookasideList(&CacheSegLookasideList
);
606 current
->Valid
= FALSE
;
607 current
->Dirty
= FALSE
;
608 current
->PageOut
= FALSE
;
609 current
->FileOffset
= ROUND_DOWN(FileOffset
, VACB_MAPPING_GRANULARITY
);
614 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb
, current
);
617 current
->MappedCount
= 0;
618 current
->DirtySegmentListEntry
.Flink
= NULL
;
619 current
->DirtySegmentListEntry
.Blink
= NULL
;
620 current
->ReferenceCount
= 1;
621 KeInitializeMutex(¤t
->Mutex
, 0);
622 KeWaitForSingleObject(¤t
->Mutex
,
627 KeAcquireGuardedMutex(&ViewLock
);
630 /* There is window between the call to CcRosLookupCacheSegment
631 * and CcRosCreateCacheSegment. We must check if a segment on
632 * the fileoffset exist. If there exist a segment, we release
633 * our new created segment and return the existing one.
635 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
636 current_entry
= Bcb
->BcbSegmentListHead
.Flink
;
638 while (current_entry
!= &Bcb
->BcbSegmentListHead
)
640 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
,
641 BcbSegmentListEntry
);
642 if (IsPointInSegment(current
->FileOffset
, VACB_MAPPING_GRANULARITY
,
645 CcRosCacheSegmentIncRefCount(current
);
646 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
650 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
656 KeReleaseMutex(&(*CacheSeg
)->Mutex
, FALSE
);
657 KeReleaseGuardedMutex(&ViewLock
);
658 ExFreeToNPagedLookasideList(&CacheSegLookasideList
, *CacheSeg
);
660 KeWaitForSingleObject(¤t
->Mutex
,
665 return STATUS_SUCCESS
;
667 if (current
->FileOffset
< FileOffset
)
669 ASSERT(previous
== NULL
||
670 previous
->FileOffset
< current
->FileOffset
);
673 if (current
->FileOffset
> FileOffset
)
675 current_entry
= current_entry
->Flink
;
677 /* There was no existing segment. */
681 InsertHeadList(&previous
->BcbSegmentListEntry
, ¤t
->BcbSegmentListEntry
);
685 InsertHeadList(&Bcb
->BcbSegmentListHead
, ¤t
->BcbSegmentListEntry
);
687 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
688 InsertTailList(&CacheSegmentListHead
, ¤t
->CacheSegmentListEntry
);
689 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
690 KeReleaseGuardedMutex(&ViewLock
);
692 MmLockAddressSpace(MmGetKernelAddressSpace());
693 current
->BaseAddress
= NULL
;
694 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
695 0, // nothing checks for cache_segment mareas, so set to 0
696 ¤t
->BaseAddress
,
697 VACB_MAPPING_GRANULARITY
,
699 (PMEMORY_AREA
*)¤t
->MemoryArea
,
703 MmUnlockAddressSpace(MmGetKernelAddressSpace());
704 if (!NT_SUCCESS(Status
))
706 KeBugCheck(CACHE_MANAGER
);
709 /* Create a virtual mapping for this memory area */
710 MI_SET_USAGE(MI_USAGE_CACHE
);
714 if ((Bcb
->FileObject
) && (Bcb
->FileObject
->FileName
.Buffer
))
716 pos
= wcsrchr(Bcb
->FileObject
->FileName
.Buffer
, '\\');
717 len
= wcslen(pos
) * sizeof(WCHAR
);
718 if (pos
) snprintf(MI_PFN_CURRENT_PROCESS_NAME
, min(16, len
), "%S", pos
);
722 MmMapMemoryArea(current
->BaseAddress
, VACB_MAPPING_GRANULARITY
,
723 MC_CACHE
, PAGE_READWRITE
);
725 return STATUS_SUCCESS
;
730 CcRosGetCacheSegmentChain (
734 PCACHE_SEGMENT
* CacheSeg
)
736 PCACHE_SEGMENT current
;
738 PCACHE_SEGMENT
* CacheSegList
;
739 PCACHE_SEGMENT Previous
= NULL
;
743 DPRINT("CcRosGetCacheSegmentChain()\n");
745 Length
= ROUND_UP(Length
, VACB_MAPPING_GRANULARITY
);
747 CacheSegList
= _alloca(sizeof(PCACHE_SEGMENT
) *
748 (Length
/ VACB_MAPPING_GRANULARITY
));
751 * Look for a cache segment already mapping the same data.
753 for (i
= 0; i
< (Length
/ VACB_MAPPING_GRANULARITY
); i
++)
755 ULONG CurrentOffset
= FileOffset
+ (i
* VACB_MAPPING_GRANULARITY
);
756 current
= CcRosLookupCacheSegment(Bcb
, CurrentOffset
);
759 KeAcquireGuardedMutex(&ViewLock
);
761 /* Move to tail of LRU list */
762 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
763 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
765 KeReleaseGuardedMutex(&ViewLock
);
767 CacheSegList
[i
] = current
;
771 CcRosCreateCacheSegment(Bcb
, CurrentOffset
, ¤t
);
772 CacheSegList
[i
] = current
;
776 for (i
= 0; i
< Length
/ VACB_MAPPING_GRANULARITY
; i
++)
780 *CacheSeg
= CacheSegList
[i
];
781 Previous
= CacheSegList
[i
];
785 Previous
->NextInChain
= CacheSegList
[i
];
786 Previous
= CacheSegList
[i
];
790 Previous
->NextInChain
= NULL
;
792 return STATUS_SUCCESS
;
797 CcRosGetCacheSegment (
803 PCACHE_SEGMENT
* CacheSeg
)
805 PCACHE_SEGMENT current
;
810 DPRINT("CcRosGetCacheSegment()\n");
813 * Look for a cache segment already mapping the same data.
815 current
= CcRosLookupCacheSegment(Bcb
, FileOffset
);
819 * Otherwise create a new segment.
821 Status
= CcRosCreateCacheSegment(Bcb
, FileOffset
, ¤t
);
822 if (!NT_SUCCESS(Status
))
828 KeAcquireGuardedMutex(&ViewLock
);
830 /* Move to the tail of the LRU list */
831 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
832 InsertTailList(&CacheSegmentLRUListHead
, ¤t
->CacheSegmentLRUListEntry
);
834 KeReleaseGuardedMutex(&ViewLock
);
837 * Return information about the segment to the caller.
839 *UptoDate
= current
->Valid
;
840 *BaseAddress
= current
->BaseAddress
;
841 DPRINT("*BaseAddress %p\n", *BaseAddress
);
843 *BaseOffset
= current
->FileOffset
;
844 return STATUS_SUCCESS
;
849 CcRosRequestCacheSegment (
854 PCACHE_SEGMENT
* CacheSeg
)
856 * FUNCTION: Request a page mapping for a BCB
863 if (FileOffset
% VACB_MAPPING_GRANULARITY
!= 0)
865 DPRINT1("Bad fileoffset %x should be multiple of %x",
866 FileOffset
, VACB_MAPPING_GRANULARITY
);
867 KeBugCheck(CACHE_MANAGER
);
870 return CcRosGetCacheSegment(Bcb
,
882 MEMORY_AREA
* MemoryArea
,
888 ASSERT(SwapEntry
== 0);
891 ASSERT(MmGetReferenceCountPage(Page
) == 1);
892 MmReleasePageMemoryConsumer(MC_CACHE
, Page
);
897 CcRosInternalFreeCacheSegment (
898 PCACHE_SEGMENT CacheSeg
)
900 * FUNCTION: Releases a cache segment associated with a BCB
903 DPRINT("Freeing cache segment 0x%p\n", CacheSeg
);
905 if ( CacheSeg
->Bcb
->Trace
)
907 DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg
->Bcb
, CacheSeg
);
911 MmLockAddressSpace(MmGetKernelAddressSpace());
912 MmFreeMemoryArea(MmGetKernelAddressSpace(),
913 CacheSeg
->MemoryArea
,
916 MmUnlockAddressSpace(MmGetKernelAddressSpace());
918 ExFreeToNPagedLookasideList(&CacheSegLookasideList
, CacheSeg
);
919 return STATUS_SUCCESS
;
928 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
,
929 IN PLARGE_INTEGER FileOffset OPTIONAL
,
931 OUT PIO_STATUS_BLOCK IoStatus
)
934 LARGE_INTEGER Offset
;
935 PCACHE_SEGMENT current
;
939 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
940 SectionObjectPointers
, FileOffset
, Length
, IoStatus
);
942 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
944 Bcb
= (PBCB
)SectionObjectPointers
->SharedCacheMap
;
948 Offset
= *FileOffset
;
952 Offset
.QuadPart
= (LONGLONG
)0;
953 Length
= Bcb
->FileSize
.u
.LowPart
;
958 IoStatus
->Status
= STATUS_SUCCESS
;
959 IoStatus
->Information
= 0;
964 current
= CcRosLookupCacheSegment (Bcb
, Offset
.u
.LowPart
);
969 Status
= CcRosFlushCacheSegment(current
);
970 if (!NT_SUCCESS(Status
) && IoStatus
!= NULL
)
972 IoStatus
->Status
= Status
;
975 KeReleaseMutex(¤t
->Mutex
, FALSE
);
977 KeAcquireGuardedMutex(&ViewLock
);
978 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
979 CcRosCacheSegmentDecRefCount(current
);
980 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
981 KeReleaseGuardedMutex(&ViewLock
);
984 Offset
.QuadPart
+= VACB_MAPPING_GRANULARITY
;
985 if (Length
> VACB_MAPPING_GRANULARITY
)
987 Length
-= VACB_MAPPING_GRANULARITY
;
999 IoStatus
->Status
= STATUS_INVALID_PARAMETER
;
1006 CcRosDeleteFileCache (
1007 PFILE_OBJECT FileObject
,
1010 * FUNCTION: Releases the BCB associated with a file object
1013 PLIST_ENTRY current_entry
;
1014 PCACHE_SEGMENT current
;
1015 LIST_ENTRY FreeList
;
1021 KeReleaseGuardedMutex(&ViewLock
);
1023 CcFlushCache(FileObject
->SectionObjectPointer
, NULL
, 0, NULL
);
1025 KeAcquireGuardedMutex(&ViewLock
);
1027 if (Bcb
->RefCount
== 0)
1029 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1031 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1032 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1035 FileObject
->SectionObjectPointer
->SharedCacheMap
= NULL
;
1038 * Release all cache segments.
1040 InitializeListHead(&FreeList
);
1041 KeAcquireSpinLock(&Bcb
->BcbLock
, &oldIrql
);
1042 while (!IsListEmpty(&Bcb
->BcbSegmentListHead
))
1044 current_entry
= RemoveTailList(&Bcb
->BcbSegmentListHead
);
1045 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
, BcbSegmentListEntry
);
1046 RemoveEntryList(¤t
->CacheSegmentListEntry
);
1047 RemoveEntryList(¤t
->CacheSegmentLRUListEntry
);
1050 RemoveEntryList(¤t
->DirtySegmentListEntry
);
1051 DirtyPageCount
-= VACB_MAPPING_GRANULARITY
/ PAGE_SIZE
;
1052 DPRINT1("Freeing dirty segment\n");
1054 InsertHeadList(&FreeList
, ¤t
->BcbSegmentListEntry
);
1059 KeReleaseSpinLock(&Bcb
->BcbLock
, oldIrql
);
1061 KeReleaseGuardedMutex(&ViewLock
);
1062 ObDereferenceObject (Bcb
->FileObject
);
1064 while (!IsListEmpty(&FreeList
))
1066 current_entry
= RemoveTailList(&FreeList
);
1067 current
= CONTAINING_RECORD(current_entry
, CACHE_SEGMENT
, BcbSegmentListEntry
);
1068 CcRosInternalFreeCacheSegment(current
);
1070 ExFreeToNPagedLookasideList(&BcbLookasideList
, Bcb
);
1071 KeAcquireGuardedMutex(&ViewLock
);
1073 return STATUS_SUCCESS
;
1078 CcRosReferenceCache (
1079 PFILE_OBJECT FileObject
)
1082 KeAcquireGuardedMutex(&ViewLock
);
1083 Bcb
= (PBCB
)FileObject
->SectionObjectPointer
->SharedCacheMap
;
1085 if (Bcb
->RefCount
== 0)
1087 ASSERT(Bcb
->BcbRemoveListEntry
.Flink
!= NULL
);
1088 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1089 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1094 ASSERT(Bcb
->BcbRemoveListEntry
.Flink
== NULL
);
1097 KeReleaseGuardedMutex(&ViewLock
);
1102 CcRosSetRemoveOnClose (
1103 PSECTION_OBJECT_POINTERS SectionObjectPointer
)
1106 DPRINT("CcRosSetRemoveOnClose()\n");
1107 KeAcquireGuardedMutex(&ViewLock
);
1108 Bcb
= (PBCB
)SectionObjectPointer
->SharedCacheMap
;
1111 Bcb
->RemoveOnClose
= TRUE
;
1112 if (Bcb
->RefCount
== 0)
1114 CcRosDeleteFileCache(Bcb
->FileObject
, Bcb
);
1117 KeReleaseGuardedMutex(&ViewLock
);
1123 CcRosDereferenceCache (
1124 PFILE_OBJECT FileObject
)
1127 KeAcquireGuardedMutex(&ViewLock
);
1128 Bcb
= (PBCB
)FileObject
->SectionObjectPointer
->SharedCacheMap
;
1130 if (Bcb
->RefCount
> 0)
1133 if (Bcb
->RefCount
== 0)
1135 MmFreeSectionSegments(Bcb
->FileObject
);
1136 CcRosDeleteFileCache(FileObject
, Bcb
);
1139 KeReleaseGuardedMutex(&ViewLock
);
1144 CcRosReleaseFileCache (
1145 PFILE_OBJECT FileObject
)
1147 * FUNCTION: Called by the file system when a handle to a file object
1153 KeAcquireGuardedMutex(&ViewLock
);
1155 if (FileObject
->SectionObjectPointer
->SharedCacheMap
!= NULL
)
1157 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1158 if (FileObject
->PrivateCacheMap
!= NULL
)
1160 FileObject
->PrivateCacheMap
= NULL
;
1161 if (Bcb
->RefCount
> 0)
1164 if (Bcb
->RefCount
== 0)
1166 MmFreeSectionSegments(Bcb
->FileObject
);
1167 CcRosDeleteFileCache(FileObject
, Bcb
);
1172 KeReleaseGuardedMutex(&ViewLock
);
1173 return STATUS_SUCCESS
;
1178 CcTryToInitializeFileCache (
1179 PFILE_OBJECT FileObject
)
1184 KeAcquireGuardedMutex(&ViewLock
);
1186 ASSERT(FileObject
->SectionObjectPointer
);
1187 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1190 Status
= STATUS_UNSUCCESSFUL
;
1194 if (FileObject
->PrivateCacheMap
== NULL
)
1196 FileObject
->PrivateCacheMap
= Bcb
;
1199 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1201 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1202 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1204 Status
= STATUS_SUCCESS
;
1206 KeReleaseGuardedMutex(&ViewLock
);
1214 CcRosInitializeFileCache (
1215 PFILE_OBJECT FileObject
,
1216 PCACHE_MANAGER_CALLBACKS CallBacks
,
1217 PVOID LazyWriterContext
)
1219 * FUNCTION: Initializes a BCB for a file object
1224 Bcb
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
1225 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p)\n",
1228 KeAcquireGuardedMutex(&ViewLock
);
1231 Bcb
= ExAllocateFromNPagedLookasideList(&BcbLookasideList
);
1234 KeReleaseGuardedMutex(&ViewLock
);
1235 return STATUS_UNSUCCESSFUL
;
1237 RtlZeroMemory(Bcb
, sizeof(*Bcb
));
1238 ObReferenceObjectByPointer(FileObject
,
1242 Bcb
->FileObject
= FileObject
;
1243 Bcb
->Callbacks
= CallBacks
;
1244 Bcb
->LazyWriteContext
= LazyWriterContext
;
1245 if (FileObject
->FsContext
)
1247 Bcb
->AllocationSize
=
1248 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->AllocationSize
;
1250 ((PFSRTL_COMMON_FCB_HEADER
)FileObject
->FsContext
)->FileSize
;
1252 KeInitializeSpinLock(&Bcb
->BcbLock
);
1253 InitializeListHead(&Bcb
->BcbSegmentListHead
);
1254 FileObject
->SectionObjectPointer
->SharedCacheMap
= Bcb
;
1256 if (FileObject
->PrivateCacheMap
== NULL
)
1258 FileObject
->PrivateCacheMap
= Bcb
;
1261 if (Bcb
->BcbRemoveListEntry
.Flink
!= NULL
)
1263 RemoveEntryList(&Bcb
->BcbRemoveListEntry
);
1264 Bcb
->BcbRemoveListEntry
.Flink
= NULL
;
1266 KeReleaseGuardedMutex(&ViewLock
);
1268 return STATUS_SUCCESS
;
1276 CcGetFileObjectFromSectionPtrs (
1277 IN PSECTION_OBJECT_POINTERS SectionObjectPointers
)
1280 if (SectionObjectPointers
&& SectionObjectPointers
->SharedCacheMap
)
1282 Bcb
= (PBCB
)SectionObjectPointers
->SharedCacheMap
;
1284 return Bcb
->FileObject
;
1295 DPRINT("CcInitView()\n");
1297 InitializeListHead(&CacheSegmentListHead
);
1298 InitializeListHead(&DirtySegmentListHead
);
1299 InitializeListHead(&CacheSegmentLRUListHead
);
1300 InitializeListHead(&ClosedListHead
);
1301 KeInitializeGuardedMutex(&ViewLock
);
1302 ExInitializeNPagedLookasideList (&iBcbLookasideList
,
1306 sizeof(INTERNAL_BCB
),
1309 ExInitializeNPagedLookasideList (&BcbLookasideList
,
1316 ExInitializeNPagedLookasideList (&CacheSegLookasideList
,
1320 sizeof(CACHE_SEGMENT
),
1324 MmInitializeMemoryConsumer(MC_CACHE
, CcRosTrimCache
);
1326 CcInitCacheZeroPage();