/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/cacheman.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@cwcom.net)
 *                  Pierre Schweitzer (pierre@reactos.org)
 */
11 /* INCLUDES *****************************************************************/
17 BOOLEAN CcPfEnablePrefetcher
;
18 PFSN_PREFETCHER_GLOBALS CcPfGlobals
;
19 MM_SYSTEMSIZE CcCapturedSystemSize
;
21 static ULONG BugCheckFileId
= 0x4 << 16;
23 /* FUNCTIONS *****************************************************************/
28 CcPfInitializePrefetcher(VOID
)
31 DbgPrintEx(DPFLTR_PREFETCHER_ID
,
33 "CCPF: InitializePrefetecher()\n");
35 /* Setup the Prefetcher Data */
36 InitializeListHead(&CcPfGlobals
.ActiveTraces
);
37 InitializeListHead(&CcPfGlobals
.CompletedTraces
);
38 ExInitializeFastMutex(&CcPfGlobals
.CompletedTracesLock
);
40 /* FIXME: Setup the rest of the prefetecher */
46 CcInitializeCacheManager(VOID
)
52 /* Initialize lazy-writer lists */
53 InitializeListHead(&CcIdleWorkerThreadList
);
54 InitializeListHead(&CcExpressWorkQueue
);
55 InitializeListHead(&CcRegularWorkQueue
);
56 InitializeListHead(&CcPostTickWorkQueue
);
58 /* Define lazy writer threshold and the amount of workers,
59 * depending on the system type
61 CcCapturedSystemSize
= MmQuerySystemSize();
62 switch (CcCapturedSystemSize
)
65 CcNumberWorkerThreads
= ExCriticalWorkerThreads
- 1;
66 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 8;
70 CcNumberWorkerThreads
= ExCriticalWorkerThreads
- 1;
71 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 4;
75 CcNumberWorkerThreads
= ExCriticalWorkerThreads
- 2;
76 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 8 + MmNumberOfPhysicalPages
/ 4;
80 CcNumberWorkerThreads
= 1;
81 CcDirtyPageThreshold
= MmNumberOfPhysicalPages
/ 8;
85 /* Allocate a work item for all our threads */
86 for (Thread
= 0; Thread
< CcNumberWorkerThreads
; ++Thread
)
88 PWORK_QUEUE_ITEM Item
;
90 Item
= ExAllocatePoolWithTag(NonPagedPool
, sizeof(WORK_QUEUE_ITEM
), 'qWcC');
96 /* By default, it's obviously idle */
97 ExInitializeWorkItem(Item
, CcWorkerThread
, Item
);
98 InsertTailList(&CcIdleWorkerThreadList
, &Item
->List
);
101 /* Initialize our lazy writer */
102 RtlZeroMemory(&LazyWriter
, sizeof(LazyWriter
));
103 InitializeListHead(&LazyWriter
.WorkQueue
);
104 /* Delay activation of the lazy writer */
105 KeInitializeDpc(&LazyWriter
.ScanDpc
, CcScanDpc
, NULL
);
106 KeInitializeTimer(&LazyWriter
.ScanTimer
);
108 /* Lookaside list for our work items */
109 ExInitializeNPagedLookasideList(&CcTwilightLookasideList
, NULL
, NULL
, 0, sizeof(WORK_QUEUE_ENTRY
), 'KWcC', 0);
/* NOTE(review): the return type, calling-convention decorator and the entire
 * function body of CcShutdownSystem were lost when this chunk was extracted —
 * only the name and (VOID) parameter list survive here. Restore the function
 * from the original tree before building; do not compile this fragment as-is. */
116 CcShutdownSystem(VOID
)
/* NOTE(review): the return type (LARGE_INTEGER in the NT API), NTAPI
 * decorator, closing parenthesis and the whole body of CcGetFlushedValidData
 * are missing from this extraction — only part of the signature remains.
 * Restore from the original tree before building; do not compile as-is. */
126 CcGetFlushedValidData (
127 IN PSECTION_OBJECT_POINTERS SectionObjectPointer
,
128 IN BOOLEAN BcbListHeld
158 CcScheduleReadAhead (
159 IN PFILE_OBJECT FileObject
,
160 IN PLARGE_INTEGER FileOffset
,
165 LARGE_INTEGER NewOffset
;
166 PROS_SHARED_CACHE_MAP SharedCacheMap
;
167 PPRIVATE_CACHE_MAP PrivateCacheMap
;
169 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
170 PrivateCacheMap
= FileObject
->PrivateCacheMap
;
172 /* If file isn't cached, or if read ahead is disabled, this is no op */
173 if (SharedCacheMap
== NULL
|| PrivateCacheMap
== NULL
||
174 BooleanFlagOn(SharedCacheMap
->Flags
, READAHEAD_DISABLED
))
179 /* Round read length with read ahead mask */
180 Length
= ROUND_UP(Length
, PrivateCacheMap
->ReadAheadMask
+ 1);
181 /* Compute the offset we'll reach */
182 NewOffset
.QuadPart
= FileOffset
->QuadPart
+ Length
;
184 /* Lock read ahead spin lock */
185 KeAcquireSpinLock(&PrivateCacheMap
->ReadAheadSpinLock
, &OldIrql
);
186 /* Easy case: the file is sequentially read */
187 if (BooleanFlagOn(FileObject
->Flags
, FO_SEQUENTIAL_ONLY
))
189 /* If we went backward, this is no go! */
190 if (NewOffset
.QuadPart
< PrivateCacheMap
->ReadAheadOffset
[1].QuadPart
)
192 KeReleaseSpinLock(&PrivateCacheMap
->ReadAheadSpinLock
, OldIrql
);
196 /* FIXME: hackish, but will do the job for now */
197 PrivateCacheMap
->ReadAheadOffset
[1].QuadPart
= NewOffset
.QuadPart
;
198 PrivateCacheMap
->ReadAheadLength
[1] = Length
;
200 /* Other cases: try to find some logic in that mess... */
203 /* Let's check if we always read the same way (like going down in the file)
204 * and pretend it's enough for now
206 if (PrivateCacheMap
->FileOffset2
.QuadPart
>= PrivateCacheMap
->FileOffset1
.QuadPart
&&
207 FileOffset
->QuadPart
>= PrivateCacheMap
->FileOffset2
.QuadPart
)
209 /* FIXME: hackish, but will do the job for now */
210 PrivateCacheMap
->ReadAheadOffset
[1].QuadPart
= NewOffset
.QuadPart
;
211 PrivateCacheMap
->ReadAheadLength
[1] = Length
;
215 /* FIXME: handle the other cases */
216 KeReleaseSpinLock(&PrivateCacheMap
->ReadAheadSpinLock
, OldIrql
);
222 /* If read ahead isn't active yet */
223 if (!PrivateCacheMap
->Flags
.ReadAheadActive
)
225 PWORK_QUEUE_ENTRY WorkItem
;
228 * Be careful with the mask, you don't want to mess with node code
230 InterlockedOr((volatile long *)&PrivateCacheMap
->UlongFlags
, 0x10000);
231 KeReleaseSpinLock(&PrivateCacheMap
->ReadAheadSpinLock
, OldIrql
);
233 /* Get a work item */
234 WorkItem
= ExAllocateFromNPagedLookasideList(&CcTwilightLookasideList
);
235 if (WorkItem
!= NULL
)
237 /* Reference our FO so that it doesn't go in between */
238 ObReferenceObject(FileObject
);
240 /* We want to do read ahead! */
241 WorkItem
->Function
= ReadAhead
;
242 WorkItem
->Parameters
.Read
.FileObject
= FileObject
;
244 /* Queue in the read ahead dedicated queue */
245 CcPostWorkQueue(WorkItem
, &CcExpressWorkQueue
);
250 /* Fail path: lock again, and revert read ahead active */
251 KeAcquireSpinLock(&PrivateCacheMap
->ReadAheadSpinLock
, &OldIrql
);
252 InterlockedAnd((volatile long *)&PrivateCacheMap
->UlongFlags
, 0xFFFEFFFF);
256 KeReleaseSpinLock(&PrivateCacheMap
->ReadAheadSpinLock
, OldIrql
);
264 CcSetAdditionalCacheAttributes (
265 IN PFILE_OBJECT FileObject
,
266 IN BOOLEAN DisableReadAhead
,
267 IN BOOLEAN DisableWriteBehind
271 PROS_SHARED_CACHE_MAP SharedCacheMap
;
273 CCTRACE(CC_API_DEBUG
, "FileObject=%p DisableReadAhead=%d DisableWriteBehind=%d\n",
274 FileObject
, DisableReadAhead
, DisableWriteBehind
);
276 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
278 OldIrql
= KeAcquireQueuedSpinLock(LockQueueMasterLock
);
280 if (DisableReadAhead
)
282 SetFlag(SharedCacheMap
->Flags
, READAHEAD_DISABLED
);
286 ClearFlag(SharedCacheMap
->Flags
, READAHEAD_DISABLED
);
289 if (DisableWriteBehind
)
291 /* FIXME: also set flag 0x200 */
292 SetFlag(SharedCacheMap
->Flags
, WRITEBEHIND_DISABLED
);
296 ClearFlag(SharedCacheMap
->Flags
, WRITEBEHIND_DISABLED
);
298 KeReleaseQueuedSpinLock(LockQueueMasterLock
, OldIrql
);
306 CcSetBcbOwnerPointer (
311 PINTERNAL_BCB iBcb
= Bcb
;
313 CCTRACE(CC_API_DEBUG
, "Bcb=%p Owner=%p\n",
316 if (!ExIsResourceAcquiredExclusiveLite(&iBcb
->Lock
) && !ExIsResourceAcquiredSharedLite(&iBcb
->Lock
))
318 DPRINT1("Current thread doesn't own resource!\n");
322 ExSetResourceOwnerPointer(&iBcb
->Lock
, Owner
);
330 CcSetDirtyPageThreshold (
331 IN PFILE_OBJECT FileObject
,
332 IN ULONG DirtyPageThreshold
335 PFSRTL_COMMON_FCB_HEADER Fcb
;
336 PROS_SHARED_CACHE_MAP SharedCacheMap
;
338 CCTRACE(CC_API_DEBUG
, "FileObject=%p DirtyPageThreshold=%lu\n",
339 FileObject
, DirtyPageThreshold
);
341 SharedCacheMap
= FileObject
->SectionObjectPointer
->SharedCacheMap
;
342 if (SharedCacheMap
!= NULL
)
344 SharedCacheMap
->DirtyPageThreshold
= DirtyPageThreshold
;
347 Fcb
= FileObject
->FsContext
;
348 if (!BooleanFlagOn(Fcb
->Flags
, FSRTL_FLAG_LIMIT_MODIFIED_PAGES
))
350 SetFlag(Fcb
->Flags
, FSRTL_FLAG_LIMIT_MODIFIED_PAGES
);
359 CcSetReadAheadGranularity (
360 IN PFILE_OBJECT FileObject
,
364 PPRIVATE_CACHE_MAP PrivateMap
;
366 CCTRACE(CC_API_DEBUG
, "FileObject=%p Granularity=%lu\n",
367 FileObject
, Granularity
);
369 PrivateMap
= FileObject
->PrivateCacheMap
;
370 PrivateMap
->ReadAheadMask
= Granularity
- 1;