[NTOS:CC] Avoid some magic numbers.
/*
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/cc/cacheman.c
 * PURPOSE:     Cache manager
 *
 * PROGRAMMERS: David Welch (welch@cwcom.net)
 *              Pierre Schweitzer (pierre@reactos.org)
 */

/* INCLUDES *****************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

BOOLEAN CcPfEnablePrefetcher;
PFSN_PREFETCHER_GLOBALS CcPfGlobals;
MM_SYSTEMSIZE CcCapturedSystemSize;

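/* File identifier for this module's CcBugCheck calls: kept in the upper
 * 16 bits so the faulting source line can be OR'ed into the lower 16 bits,
 * yielding a single "file | line" bugcheck parameter (assumption: this
 * matches how the CcBugCheck macro consumes it) */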
static ULONG BugCheckFileId = 0x4 << 16;

/* FUNCTIONS *****************************************************************/

VOID
NTAPI
INIT_FUNCTION
CcPfInitializePrefetcher(VOID)
{
    /* Notify debugger */
    DbgPrintEx(DPFLTR_PREFETCHER_ID,
               DPFLTR_TRACE_LEVEL,
               "CCPF: InitializePrefetcher()\n");

    /* Setup the Prefetcher Data */
    InitializeListHead(&CcPfGlobals.ActiveTraces);
    InitializeListHead(&CcPfGlobals.CompletedTraces);
    ExInitializeFastMutex(&CcPfGlobals.CompletedTracesLock);

    /* FIXME: Setup the rest of the prefetcher */
}

BOOLEAN
NTAPI
INIT_FUNCTION
CcInitializeCacheManager(VOID)
{
    ULONG Thread;

    CcInitView();

    /* Initialize lazy-writer lists */
    InitializeListHead(&CcIdleWorkerThreadList);
    InitializeListHead(&CcExpressWorkQueue);
    InitializeListHead(&CcRegularWorkQueue);
    InitializeListHead(&CcPostTickWorkQueue);

    /* Define the lazy writer threshold and the number of worker threads,
     * depending on the system size
     */
    CcCapturedSystemSize = MmQuerySystemSize();
    switch (CcCapturedSystemSize)
    {
        case MmSmallSystem:
            CcNumberWorkerThreads = ExCriticalWorkerThreads - 1;
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
            break;

        case MmMediumSystem:
            CcNumberWorkerThreads = ExCriticalWorkerThreads - 1;
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
            break;

        case MmLargeSystem:
            CcNumberWorkerThreads = ExCriticalWorkerThreads - 2;
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
            break;

        default:
            CcNumberWorkerThreads = 1;
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
            break;
    }
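    /* In short: small and unknown systems start flushing once 1/8 of the
     * physical pages are dirty, medium systems at 1/4, and large systems
     * at 3/8 (1/8 + 1/4) */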

    /* Allocate a work item for each of our worker threads */
    for (Thread = 0; Thread < CcNumberWorkerThreads; ++Thread)
    {
        PWORK_QUEUE_ITEM Item;

        Item = ExAllocatePoolWithTag(NonPagedPool, sizeof(WORK_QUEUE_ITEM), 'qWcC');
        if (Item == NULL)
        {
            CcBugCheck(0, 0, 0);
        }

        /* By default, the thread is idle */
        ExInitializeWorkItem(Item, CcWorkerThread, Item);
        InsertTailList(&CcIdleWorkerThreadList, &Item->List);
    }

    /* Initialize our lazy writer */
    RtlZeroMemory(&LazyWriter, sizeof(LazyWriter));
    InitializeListHead(&LazyWriter.WorkQueue);
    /* Delay activation of the lazy writer */
    KeInitializeDpc(&LazyWriter.ScanDpc, CcScanDpc, NULL);
    KeInitializeTimer(&LazyWriter.ScanTimer);

    /* Lookaside list for our work items */
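    /* Design note: the lookaside list caches freed entries, so the
     * frequent allocate/free churn of work queue entries can usually be
     * served without going back to the pool allocator */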
    ExInitializeNPagedLookasideList(&CcTwilightLookasideList, NULL, NULL, 0, sizeof(WORK_QUEUE_ENTRY), 'KWcC', 0);

    return TRUE;
}

VOID
NTAPI
CcShutdownSystem(VOID)
{
    /* NOTHING TO DO */
}

/*
 * @unimplemented
 */
LARGE_INTEGER
NTAPI
CcGetFlushedValidData (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointer,
    IN BOOLEAN BcbListHeld
)
{
    LARGE_INTEGER i;

    UNIMPLEMENTED;

    i.QuadPart = 0;
    return i;
}

/*
 * @unimplemented
 */
PVOID
NTAPI
CcRemapBcb (
    IN PVOID Bcb
)
{
    UNIMPLEMENTED;

    return NULL;
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcScheduleReadAhead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length
)
{
    KIRQL OldIrql;
    LARGE_INTEGER NewOffset;
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    static ULONG Warn;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    PrivateCacheMap = FileObject->PrivateCacheMap;

    /* If the file isn't cached, or if read ahead is disabled, this is a no-op */
    if (SharedCacheMap == NULL || PrivateCacheMap == NULL ||
        BooleanFlagOn(SharedCacheMap->Flags, READAHEAD_DISABLED))
    {
        return;
    }

    /* Round the read length up to the read ahead granularity (mask + 1) */
    Length = ROUND_UP(Length, PrivateCacheMap->ReadAheadMask + 1);
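    /* For instance, with a 4096-byte granularity (mask 0xFFF), a 5000-byte
     * read is extended to 8192 bytes of read ahead */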
    /* Compute the offset we'll reach */
    NewOffset.QuadPart = FileOffset->QuadPart + Length;

    /* Lock the read ahead spin lock */
    KeAcquireSpinLock(&PrivateCacheMap->ReadAheadSpinLock, &OldIrql);
    /* Easy case: the file is read sequentially */
    if (BooleanFlagOn(FileObject->Flags, FO_SEQUENTIAL_ONLY))
    {
        /* If we went backwards, there is nothing to do */
        if (NewOffset.QuadPart < PrivateCacheMap->ReadAheadOffset[1].QuadPart)
        {
            KeReleaseSpinLock(&PrivateCacheMap->ReadAheadSpinLock, OldIrql);
            return;
        }

        /* FIXME: hackish, but will do the job for now */
        PrivateCacheMap->ReadAheadOffset[1].QuadPart = NewOffset.QuadPart;
        PrivateCacheMap->ReadAheadLength[1] = Length;
    }
    /* Other cases: try to find some logic in that mess... */
    else
    {
        /* Check whether we have been reading consistently forward through
         * the file, and pretend that's enough for now
         */
        if (PrivateCacheMap->FileOffset2.QuadPart >= PrivateCacheMap->FileOffset1.QuadPart &&
            FileOffset->QuadPart >= PrivateCacheMap->FileOffset2.QuadPart)
        {
            /* FIXME: hackish, but will do the job for now */
            PrivateCacheMap->ReadAheadOffset[1].QuadPart = NewOffset.QuadPart;
            PrivateCacheMap->ReadAheadLength[1] = Length;
        }
        else
        {
            /* FIXME: handle the other cases */
            KeReleaseSpinLock(&PrivateCacheMap->ReadAheadSpinLock, OldIrql);
            if (!Warn++) UNIMPLEMENTED;
            return;
        }
    }

    /* If read ahead isn't active yet */
    if (!PrivateCacheMap->Flags.ReadAheadActive)
    {
        PWORK_QUEUE_ENTRY WorkItem;

        /* It's active now!
         * Be careful to only set our bit: the flags share their ULONG with
         * the node type code, which must not be clobbered
         */
        InterlockedOr((volatile long *)&PrivateCacheMap->UlongFlags, PRIVATE_CACHE_MAP_READ_AHEAD_ACTIVE);
        KeReleaseSpinLock(&PrivateCacheMap->ReadAheadSpinLock, OldIrql);

        /* Get a work item */
        WorkItem = ExAllocateFromNPagedLookasideList(&CcTwilightLookasideList);
        if (WorkItem != NULL)
        {
            /* Reference our file object so that it doesn't go away in the meantime */
            ObReferenceObject(FileObject);

            /* We want to do read ahead! */
            WorkItem->Function = ReadAhead;
            WorkItem->Parameters.Read.FileObject = FileObject;

            /* Queue it in the dedicated read ahead queue */
            CcPostWorkQueue(WorkItem, &CcExpressWorkQueue);

            return;
        }

        /* Failure path: lock again, and clear read ahead active */
        KeAcquireSpinLock(&PrivateCacheMap->ReadAheadSpinLock, &OldIrql);
        InterlockedAnd((volatile long *)&PrivateCacheMap->UlongFlags, ~PRIVATE_CACHE_MAP_READ_AHEAD_ACTIVE);
    }

    /* Done (failure path) */
    KeReleaseSpinLock(&PrivateCacheMap->ReadAheadSpinLock, OldIrql);
}

/*
 * @implemented
 */
VOID
NTAPI
CcSetAdditionalCacheAttributes (
    IN PFILE_OBJECT FileObject,
    IN BOOLEAN DisableReadAhead,
    IN BOOLEAN DisableWriteBehind
)
{
    KIRQL OldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p DisableReadAhead=%d DisableWriteBehind=%d\n",
        FileObject, DisableReadAhead, DisableWriteBehind);

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    if (DisableReadAhead)
    {
        SetFlag(SharedCacheMap->Flags, READAHEAD_DISABLED);
    }
    else
    {
        ClearFlag(SharedCacheMap->Flags, READAHEAD_DISABLED);
    }

    if (DisableWriteBehind)
    {
        /* FIXME: also set flag 0x200 */
        SetFlag(SharedCacheMap->Flags, WRITEBEHIND_DISABLED);
    }
    else
    {
        ClearFlag(SharedCacheMap->Flags, WRITEBEHIND_DISABLED);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}

/*
 * @unimplemented
 */
VOID
NTAPI
CcSetBcbOwnerPointer (
    IN PVOID Bcb,
    IN PVOID Owner
)
{
    PINTERNAL_BCB iBcb = Bcb;

    CCTRACE(CC_API_DEBUG, "Bcb=%p Owner=%p\n",
        Bcb, Owner);

    if (!ExIsResourceAcquiredExclusiveLite(&iBcb->Lock) && !ExIsResourceAcquiredSharedLite(&iBcb->Lock))
    {
        DPRINT1("Current thread doesn't own resource!\n");
        return;
    }

    ExSetResourceOwnerPointer(&iBcb->Lock, Owner);
}

/*
 * @implemented
 */
VOID
NTAPI
CcSetDirtyPageThreshold (
    IN PFILE_OBJECT FileObject,
    IN ULONG DirtyPageThreshold
)
{
    PFSRTL_COMMON_FCB_HEADER Fcb;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p DirtyPageThreshold=%lu\n",
        FileObject, DirtyPageThreshold);

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap != NULL)
    {
        SharedCacheMap->DirtyPageThreshold = DirtyPageThreshold;
    }

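    /* Mark the FCB so writers know this file enforces a per-file dirty
     * page limit; presumably the threshold set above is then used instead
     * of the global CcDirtyPageThreshold when throttling writes */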
    Fcb = FileObject->FsContext;
    if (!BooleanFlagOn(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES))
    {
        SetFlag(Fcb->Flags, FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
    }
}

/*
 * @implemented
 */
VOID
NTAPI
CcSetReadAheadGranularity (
    IN PFILE_OBJECT FileObject,
    IN ULONG Granularity
)
{
    PPRIVATE_CACHE_MAP PrivateMap;

    CCTRACE(CC_API_DEBUG, "FileObject=%p Granularity=%lu\n",
        FileObject, Granularity);

    PrivateMap = FileObject->PrivateCacheMap;
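    /* This assumes the granularity is a power of two (e.g. 0x1000 gives
     * mask 0xFFF), which CcScheduleReadAhead relies on when rounding the
     * read length up to the granularity */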
    PrivateMap->ReadAheadMask = Granularity - 1;
}