[NTOSKRNL] Bug fix: lazy write more often.
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows:
17 *
18 * (1) If caching for the FCB hasn't been initiated yet, then do so by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written, obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If the page is being read, or is not being completely written, and it is
25 * not up to date, then read its data from the underlying medium. If the read
26 * fails, then call CcReleaseCachePage with VALID set to FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page.
31 */
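/* A minimal sketch of how the steps above map onto the VACB helpers
 * implemented in this file. The MiniReadDispatch routine and the
 * MyReadFromMedium helper are hypothetical and only illustrate the call
 * sequence; CcRosRequestVacb/CcRosReleaseVacb play the role of the
 * CcRequestCachePage/CcReleaseCachePage calls named in the note, and the
 * request is assumed to stay within a single view.
 *
 *   NTSTATUS MiniReadDispatch(PROS_SHARED_CACHE_MAP SharedCacheMap,
 *                             LONGLONG FileOffset, PVOID Buffer, ULONG Length)
 *   {
 *       PROS_VACB Vacb;
 *       PVOID BaseAddress;
 *       BOOLEAN UptoDate;
 *       NTSTATUS Status;
 *
 *       // (2) Obtain the view covering this VACB-aligned offset
 *       Status = CcRosRequestVacb(SharedCacheMap,
 *                                 ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY),
 *                                 &BaseAddress, &UptoDate, &Vacb);
 *       if (!NT_SUCCESS(Status)) return Status;
 *
 *       // (3) If the view isn't up to date yet, read it from the medium first
 *       if (!UptoDate)
 *       {
 *           Status = MyReadFromMedium(BaseAddress, Vacb->FileOffset);  // hypothetical
 *           if (!NT_SUCCESS(Status))
 *           {
 *               // Release with Valid == FALSE and fail the request
 *               CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
 *               return Status;
 *           }
 *       }
 *
 *       // (4) Copy the data out of the view
 *       RtlCopyMemory(Buffer,
 *                     (PUCHAR)BaseAddress + (FileOffset % VACB_MAPPING_GRANULARITY),
 *                     Length);
 *
 *       // (5) Release the view: now valid, not dirtied by a read
 *       return CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
 *   }
 */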
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 LIST_ENTRY DirtyVacbListHead;
45 static LIST_ENTRY VacbLruListHead;
46
47 KGUARDED_MUTEX ViewLock;
48
49 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
50 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
51 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
52
53 /* Counters:
54 * - Number of pages flushed by the lazy writer
55 * - Number of times the lazy writer has run
56 */
57 ULONG CcLazyWritePages = 0;
58 ULONG CcLazyWriteIos = 0;
59
60 /* Internal vars (MS):
61 * - Threshold above which the lazy writer will take action
62 * - Number of dirty pages
63 */
64 ULONG CcDirtyPageThreshold = 0;
65 ULONG CcTotalDirtyPages = 0;
66
67 /* Internal vars (ROS):
68 * - Event to notify the lazy writer to shut down
69 * - Event to inform watchers that the lazy writer is done with this iteration
70 */
71 KEVENT iLazyWriterShutdown;
72 KEVENT iLazyWriterNotify;
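/* A minimal sketch of how these events are meant to be used: a watcher that
 * needs the current lazy writer pass to complete (for instance an
 * implementation of CcWaitForCurrentLazyWriterActivity) could simply block on
 * the notify event, which CciLazyWriter clears while it works and sets again
 * once a pass is done:
 *
 *   KeWaitForSingleObject(&iLazyWriterNotify, Executive, KernelMode, FALSE, NULL);
 */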
73
74 #if DBG
75 static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
76 {
77 ++vacb->ReferenceCount;
78 if (vacb->SharedCacheMap->Trace)
79 {
80 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
81 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
82 }
83 }
84 static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
85 {
86 --vacb->ReferenceCount;
87 if (vacb->SharedCacheMap->Trace)
88 {
89 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
90 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
91 }
92 }
93 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
94 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
95 #else
96 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
97 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
98 #endif
99
100 NTSTATUS
101 CcRosInternalFreeVacb(PROS_VACB Vacb);
102
103
104 /* FUNCTIONS *****************************************************************/
105
106 VOID
107 NTAPI
108 CcRosTraceCacheMap (
109 PROS_SHARED_CACHE_MAP SharedCacheMap,
110 BOOLEAN Trace )
111 {
112 #if DBG
113 KIRQL oldirql;
114 PLIST_ENTRY current_entry;
115 PROS_VACB current;
116
117 if (!SharedCacheMap)
118 return;
119
120 SharedCacheMap->Trace = Trace;
121
122 if (Trace)
123 {
124 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
125
126 KeAcquireGuardedMutex(&ViewLock);
127 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
128
129 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
130 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
131 {
132 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
133 current_entry = current_entry->Flink;
134
135 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
136 current, current->ReferenceCount, current->Dirty, current->PageOut );
137 }
138 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
139 KeReleaseGuardedMutex(&ViewLock);
140 }
141 else
142 {
143 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
144 }
145
146 #else
147 UNREFERENCED_PARAMETER(SharedCacheMap);
148 UNREFERENCED_PARAMETER(Trace);
149 #endif
150 }
151
152 NTSTATUS
153 NTAPI
154 CcRosFlushVacb (
155 PROS_VACB Vacb)
156 {
157 NTSTATUS Status;
158 KIRQL oldIrql;
159
160 Status = CcWriteVirtualAddress(Vacb);
161 if (NT_SUCCESS(Status))
162 {
163 KeAcquireGuardedMutex(&ViewLock);
164 KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
165
166 Vacb->Dirty = FALSE;
167 RemoveEntryList(&Vacb->DirtyVacbListEntry);
168 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
169 CcRosVacbDecRefCount(Vacb);
170
171 KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
172 KeReleaseGuardedMutex(&ViewLock);
173 }
174
175 return Status;
176 }
177
178 NTSTATUS
179 NTAPI
180 CcRosFlushDirtyPages (
181 ULONG Target,
182 PULONG Count,
183 BOOLEAN Wait,
184 BOOLEAN CalledFromLazy)
185 {
186 PLIST_ENTRY current_entry;
187 PROS_VACB current;
188 BOOLEAN Locked;
189 NTSTATUS Status;
190 LARGE_INTEGER ZeroTimeout;
191
192 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
193
194 (*Count) = 0;
195 ZeroTimeout.QuadPart = 0;
196
197 KeEnterCriticalRegion();
198 KeAcquireGuardedMutex(&ViewLock);
199
200 current_entry = DirtyVacbListHead.Flink;
201 if (current_entry == &DirtyVacbListHead)
202 {
203 DPRINT("No Dirty pages\n");
204 }
205
206 while ((current_entry != &DirtyVacbListHead) && (Target > 0))
207 {
208 current = CONTAINING_RECORD(current_entry,
209 ROS_VACB,
210 DirtyVacbListEntry);
211 current_entry = current_entry->Flink;
212
213 CcRosVacbIncRefCount(current);
214
215 /* When performing a lazy write, don't handle temporary files */
216 if (CalledFromLazy &&
217 BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
218 {
219 CcRosVacbDecRefCount(current);
220 continue;
221 }
222
223 Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
224 current->SharedCacheMap->LazyWriteContext, Wait);
225 if (!Locked)
226 {
227 CcRosVacbDecRefCount(current);
228 continue;
229 }
230
231 Status = CcRosAcquireVacbLock(current,
232 Wait ? NULL : &ZeroTimeout);
233 if (Status != STATUS_SUCCESS)
234 {
235 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
236 current->SharedCacheMap->LazyWriteContext);
237 CcRosVacbDecRefCount(current);
238 continue;
239 }
240
241 ASSERT(current->Dirty);
242
243 /* One reference was added above and a dirty VACB holds one itself; more than two means it is still in use elsewhere */
244 if (current->ReferenceCount > 2)
245 {
246 CcRosReleaseVacbLock(current);
247 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
248 current->SharedCacheMap->LazyWriteContext);
249 CcRosVacbDecRefCount(current);
250 continue;
251 }
252
253 KeReleaseGuardedMutex(&ViewLock);
254
255 Status = CcRosFlushVacb(current);
256
257 CcRosReleaseVacbLock(current);
258 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
259 current->SharedCacheMap->LazyWriteContext);
260
261 KeAcquireGuardedMutex(&ViewLock);
262 CcRosVacbDecRefCount(current);
263
264 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
265 (Status != STATUS_MEDIA_WRITE_PROTECTED))
266 {
267 DPRINT1("CC: Failed to flush VACB.\n");
268 }
269 else
270 {
271 ULONG PagesFreed;
272
273 /* How many pages did we flush? */
274 PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
275 (*Count) += PagesFreed;
276
277 /* Make sure Target doesn't underflow! */
278 if (Target < PagesFreed)
279 {
280 /* If we would have, jump to zero directly */
281 Target = 0;
282 }
283 else
284 {
285 Target -= PagesFreed;
286 }
287 }
288
289 current_entry = DirtyVacbListHead.Flink;
290 }
291
292 KeReleaseGuardedMutex(&ViewLock);
293 KeLeaveCriticalRegion();
294
295 DPRINT("CcRosFlushDirtyPages() finished\n");
296 return STATUS_SUCCESS;
297 }
298
299 /* FIXME: Someday this could somewhat implement write-behind/read-ahead */
300 VOID
301 NTAPI
302 CciLazyWriter(PVOID Unused)
303 {
304 LARGE_INTEGER OneSecond;
305
306 OneSecond.QuadPart = (LONGLONG)-1*1000*1000*10;
307
308 while (TRUE)
309 {
310 NTSTATUS Status;
311 ULONG Target, Count = 0;
312
313 /* Wake up once per second, or earlier if we are asked to stop */
314 Status = KeWaitForSingleObject(&iLazyWriterShutdown,
315 Executive,
316 KernelMode,
317 FALSE,
318 &OneSecond);
319
320 /* If the wait succeeded, the shutdown event was set, so stop running */
321 if (Status == STATUS_SUCCESS)
322 {
323 break;
324 }
325
326 /* We're not sleeping anymore */
327 KeClearEvent(&iLazyWriterNotify);
328
329 /* Our target is one-eighth of the dirty pages */
330 Target = CcTotalDirtyPages / 8;
331 if (Target != 0)
332 {
333 /* Flush! */
334 DPRINT("Lazy writer starting (%lu)\n", Target);
335 CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);
336
337 /* And update stats */
338 CcLazyWritePages += Count;
339 ++CcLazyWriteIos;
340 DPRINT("Lazy writer done (%lu)\n", Count);
341 }
342
343 /* Inform people waiting on us that we're done */
344 KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
345 }
346 }
347
348 NTSTATUS
349 CcRosTrimCache (
350 ULONG Target,
351 ULONG Priority,
352 PULONG NrFreed)
353 /*
354 * FUNCTION: Try to free some memory from the file cache.
355 * ARGUMENTS:
356 * Target - The number of pages to be freed.
357 * Priority - The priority of the free operation (currently unused).
358 * NrFreed - Points to a variable where the number of pages
359 * actually freed is returned.
360 */
361 {
362 PLIST_ENTRY current_entry;
363 PROS_VACB current;
364 ULONG PagesFreed;
365 KIRQL oldIrql;
366 LIST_ENTRY FreeList;
367 PFN_NUMBER Page;
368 ULONG i;
369 BOOLEAN FlushedPages = FALSE;
370
371 DPRINT("CcRosTrimCache(Target %lu)\n", Target);
372
373 InitializeListHead(&FreeList);
374
375 *NrFreed = 0;
376
377 retry:
378 KeAcquireGuardedMutex(&ViewLock);
379
380 current_entry = VacbLruListHead.Flink;
381 while (current_entry != &VacbLruListHead)
382 {
383 current = CONTAINING_RECORD(current_entry,
384 ROS_VACB,
385 VacbLruListEntry);
386 current_entry = current_entry->Flink;
387
388 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
389
390 /* Reference the VACB */
391 CcRosVacbIncRefCount(current);
392
393 /* Check if it's mapped and not dirty */
394 if (current->MappedCount > 0 && !current->Dirty)
395 {
396 /* We have to break these locks because Cc sucks */
397 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
398 KeReleaseGuardedMutex(&ViewLock);
399
400 /* Page out the VACB */
401 for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
402 {
403 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
404
405 MmPageOutPhysicalAddress(Page);
406 }
407
408 /* Reacquire the locks */
409 KeAcquireGuardedMutex(&ViewLock);
410 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
411 }
412
413 /* Dereference the VACB */
414 CcRosVacbDecRefCount(current);
415
416 /* Check if we can free this entry now */
417 if (current->ReferenceCount == 0)
418 {
419 ASSERT(!current->Dirty);
420 ASSERT(!current->MappedCount);
421
422 RemoveEntryList(&current->CacheMapVacbListEntry);
423 RemoveEntryList(&current->VacbLruListEntry);
424 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
425
426 /* Calculate how many pages we freed for Mm */
427 PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
428 Target -= PagesFreed;
429 (*NrFreed) += PagesFreed;
430 }
431
432 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
433 }
434
435 KeReleaseGuardedMutex(&ViewLock);
436
437 /* Try flushing pages if we haven't met our target */
438 if ((Target > 0) && !FlushedPages)
439 {
440 /* Flush dirty pages to disk */
441 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
442 FlushedPages = TRUE;
443
444 /* We can only swap as many pages as we flushed */
445 if (PagesFreed < Target) Target = PagesFreed;
446
447 /* Check if we flushed anything */
448 if (PagesFreed != 0)
449 {
450 /* Try again after flushing dirty pages */
451 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
452 goto retry;
453 }
454 }
455
456 while (!IsListEmpty(&FreeList))
457 {
458 current_entry = RemoveHeadList(&FreeList);
459 current = CONTAINING_RECORD(current_entry,
460 ROS_VACB,
461 CacheMapVacbListEntry);
462 CcRosInternalFreeVacb(current);
463 }
464
465 DPRINT("Evicted %lu cache pages\n", (*NrFreed));
466
467 return STATUS_SUCCESS;
468 }
469
470 NTSTATUS
471 NTAPI
472 CcRosReleaseVacb (
473 PROS_SHARED_CACHE_MAP SharedCacheMap,
474 PROS_VACB Vacb,
475 BOOLEAN Valid,
476 BOOLEAN Dirty,
477 BOOLEAN Mapped)
478 {
479 BOOLEAN WasDirty;
480 KIRQL oldIrql;
481
482 ASSERT(SharedCacheMap);
483
484 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
485 SharedCacheMap, Vacb, Valid);
486
487 KeAcquireGuardedMutex(&ViewLock);
488 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
489
490 Vacb->Valid = Valid;
491
492 WasDirty = Vacb->Dirty;
493 Vacb->Dirty = Vacb->Dirty || Dirty;
494
495 if (!WasDirty && Vacb->Dirty)
496 {
497 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
498 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
499 }
500
501 if (Mapped)
502 {
503 Vacb->MappedCount++;
504 }
505 CcRosVacbDecRefCount(Vacb);
506 if (Mapped && (Vacb->MappedCount == 1))
507 {
508 CcRosVacbIncRefCount(Vacb);
509 }
510 if (!WasDirty && Vacb->Dirty)
511 {
512 CcRosVacbIncRefCount(Vacb);
513 }
514
515 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
516 KeReleaseGuardedMutex(&ViewLock);
517 CcRosReleaseVacbLock(Vacb);
518
519 return STATUS_SUCCESS;
520 }
521
522 /* Returns with VACB Lock Held! */
523 PROS_VACB
524 NTAPI
525 CcRosLookupVacb (
526 PROS_SHARED_CACHE_MAP SharedCacheMap,
527 LONGLONG FileOffset)
528 {
529 PLIST_ENTRY current_entry;
530 PROS_VACB current;
531 KIRQL oldIrql;
532
533 ASSERT(SharedCacheMap);
534
535 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
536 SharedCacheMap, FileOffset);
537
538 KeAcquireGuardedMutex(&ViewLock);
539 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
540
541 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
542 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
543 {
544 current = CONTAINING_RECORD(current_entry,
545 ROS_VACB,
546 CacheMapVacbListEntry);
547 if (IsPointInRange(current->FileOffset.QuadPart,
548 VACB_MAPPING_GRANULARITY,
549 FileOffset))
550 {
551 CcRosVacbIncRefCount(current);
552 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
553 KeReleaseGuardedMutex(&ViewLock);
554 CcRosAcquireVacbLock(current, NULL);
555 return current;
556 }
557 if (current->FileOffset.QuadPart > FileOffset)
558 break;
559 current_entry = current_entry->Flink;
560 }
561
562 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
563 KeReleaseGuardedMutex(&ViewLock);
564
565 return NULL;
566 }
567
568 NTSTATUS
569 NTAPI
570 CcRosMarkDirtyVacb (
571 PROS_SHARED_CACHE_MAP SharedCacheMap,
572 LONGLONG FileOffset)
573 {
574 PROS_VACB Vacb;
575 KIRQL oldIrql;
576
577 ASSERT(SharedCacheMap);
578
579 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
580 SharedCacheMap, FileOffset);
581
582 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
583 if (Vacb == NULL)
584 {
585 KeBugCheck(CACHE_MANAGER);
586 }
587
588 KeAcquireGuardedMutex(&ViewLock);
589 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
590
591 if (!Vacb->Dirty)
592 {
593 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
594 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
595 }
596 else
597 {
598 CcRosVacbDecRefCount(Vacb);
599 }
600
601 /* Move to the tail of the LRU list */
602 RemoveEntryList(&Vacb->VacbLruListEntry);
603 InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
604
605 Vacb->Dirty = TRUE;
606
607 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
608 KeReleaseGuardedMutex(&ViewLock);
609 CcRosReleaseVacbLock(Vacb);
610
611 return STATUS_SUCCESS;
612 }
613
614 NTSTATUS
615 NTAPI
616 CcRosUnmapVacb (
617 PROS_SHARED_CACHE_MAP SharedCacheMap,
618 LONGLONG FileOffset,
619 BOOLEAN NowDirty)
620 {
621 PROS_VACB Vacb;
622 BOOLEAN WasDirty;
623 KIRQL oldIrql;
624
625 ASSERT(SharedCacheMap);
626
627 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
628 SharedCacheMap, FileOffset, NowDirty);
629
630 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
631 if (Vacb == NULL)
632 {
633 return STATUS_UNSUCCESSFUL;
634 }
635
636 KeAcquireGuardedMutex(&ViewLock);
637 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
638
639 WasDirty = Vacb->Dirty;
640 Vacb->Dirty = Vacb->Dirty || NowDirty;
641
642 Vacb->MappedCount--;
643
644 if (!WasDirty && NowDirty)
645 {
646 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
647 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
648 }
649
650 CcRosVacbDecRefCount(Vacb);
651 if (!WasDirty && NowDirty)
652 {
653 CcRosVacbIncRefCount(Vacb);
654 }
655 if (Vacb->MappedCount == 0)
656 {
657 CcRosVacbDecRefCount(Vacb);
658 }
659
660 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
661 KeReleaseGuardedMutex(&ViewLock);
662 CcRosReleaseVacbLock(Vacb);
663
664 return STATUS_SUCCESS;
665 }
666
667 static
668 NTSTATUS
669 CcRosMapVacb(
670 PROS_VACB Vacb)
671 {
672 ULONG i;
673 NTSTATUS Status;
674 ULONG_PTR NumberOfPages;
675
676 /* Create a memory area. */
677 MmLockAddressSpace(MmGetKernelAddressSpace());
678 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
679 0, // nothing checks for VACB mareas, so set to 0
680 &Vacb->BaseAddress,
681 VACB_MAPPING_GRANULARITY,
682 PAGE_READWRITE,
683 (PMEMORY_AREA*)&Vacb->MemoryArea,
684 0,
685 PAGE_SIZE);
686 MmUnlockAddressSpace(MmGetKernelAddressSpace());
687 if (!NT_SUCCESS(Status))
688 {
689 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
690 return Status;
691 }
692
693 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
694 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
695
696 /* Create a virtual mapping for this memory area */
697 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
698 for (i = 0; i < NumberOfPages; i++)
699 {
700 PFN_NUMBER PageFrameNumber;
701
702 MI_SET_USAGE(MI_USAGE_CACHE);
703 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
704 if (PageFrameNumber == 0)
705 {
706 DPRINT1("Unable to allocate page\n");
707 KeBugCheck(MEMORY_MANAGEMENT);
708 }
709
710 Status = MmCreateVirtualMapping(NULL,
711 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
712 PAGE_READWRITE,
713 &PageFrameNumber,
714 1);
715 if (!NT_SUCCESS(Status))
716 {
717 DPRINT1("Unable to create virtual mapping\n");
718 KeBugCheck(MEMORY_MANAGEMENT);
719 }
720 }
721
722 return STATUS_SUCCESS;
723 }
724
725 static
726 NTSTATUS
727 CcRosCreateVacb (
728 PROS_SHARED_CACHE_MAP SharedCacheMap,
729 LONGLONG FileOffset,
730 PROS_VACB *Vacb)
731 {
732 PROS_VACB current;
733 PROS_VACB previous;
734 PLIST_ENTRY current_entry;
735 NTSTATUS Status;
736 KIRQL oldIrql;
737
738 ASSERT(SharedCacheMap);
739
740 DPRINT("CcRosCreateVacb()\n");
741
742 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
743 {
744 *Vacb = NULL;
745 return STATUS_INVALID_PARAMETER;
746 }
747
748 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
749 current->BaseAddress = NULL;
750 current->Valid = FALSE;
751 current->Dirty = FALSE;
752 current->PageOut = FALSE;
753 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
754 current->SharedCacheMap = SharedCacheMap;
755 #if DBG
756 if (SharedCacheMap->Trace)
757 {
758 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
759 }
760 #endif
761 current->MappedCount = 0;
762 current->DirtyVacbListEntry.Flink = NULL;
763 current->DirtyVacbListEntry.Blink = NULL;
764 current->ReferenceCount = 1;
765 current->PinCount = 0;
766 KeInitializeMutex(&current->Mutex, 0);
767 CcRosAcquireVacbLock(current, NULL);
768 KeAcquireGuardedMutex(&ViewLock);
769
770 *Vacb = current;
771 /* There is a window between the call to CcRosLookupVacb
772 * and CcRosCreateVacb. We must check whether a VACB for the
773 * file offset already exists. If there is one, we release
774 * our newly created VACB and return the existing one.
775 */
776 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
777 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
778 previous = NULL;
779 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
780 {
781 current = CONTAINING_RECORD(current_entry,
782 ROS_VACB,
783 CacheMapVacbListEntry);
784 if (IsPointInRange(current->FileOffset.QuadPart,
785 VACB_MAPPING_GRANULARITY,
786 FileOffset))
787 {
788 CcRosVacbIncRefCount(current);
789 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
790 #if DBG
791 if (SharedCacheMap->Trace)
792 {
793 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
794 SharedCacheMap,
795 (*Vacb),
796 current);
797 }
798 #endif
799 CcRosReleaseVacbLock(*Vacb);
800 KeReleaseGuardedMutex(&ViewLock);
801 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
802 *Vacb = current;
803 CcRosAcquireVacbLock(current, NULL);
804 return STATUS_SUCCESS;
805 }
806 if (current->FileOffset.QuadPart < FileOffset)
807 {
808 ASSERT(previous == NULL ||
809 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
810 previous = current;
811 }
812 if (current->FileOffset.QuadPart > FileOffset)
813 break;
814 current_entry = current_entry->Flink;
815 }
816 /* There was no existing VACB. */
817 current = *Vacb;
818 if (previous)
819 {
820 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
821 }
822 else
823 {
824 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
825 }
826 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
827 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
828 KeReleaseGuardedMutex(&ViewLock);
829
830 MI_SET_USAGE(MI_USAGE_CACHE);
831 #if MI_TRACE_PFNS
832 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
833 {
834 PWCHAR pos = NULL;
835 ULONG len = 0;
836 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
837 if (pos)
838 {
839 len = wcslen(pos) * sizeof(WCHAR);
840 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
841 }
842 else
843 {
844 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
845 }
846 }
847 #endif
848
849 Status = CcRosMapVacb(current);
850 if (!NT_SUCCESS(Status))
851 {
852 RemoveEntryList(&current->CacheMapVacbListEntry);
853 RemoveEntryList(&current->VacbLruListEntry);
854 CcRosReleaseVacbLock(current);
855 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
856 }
857
858 return Status;
859 }
860
861 NTSTATUS
862 NTAPI
863 CcRosGetVacb (
864 PROS_SHARED_CACHE_MAP SharedCacheMap,
865 LONGLONG FileOffset,
866 PLONGLONG BaseOffset,
867 PVOID* BaseAddress,
868 PBOOLEAN UptoDate,
869 PROS_VACB *Vacb)
870 {
871 PROS_VACB current;
872 NTSTATUS Status;
873
874 ASSERT(SharedCacheMap);
875
876 DPRINT("CcRosGetVacb()\n");
877
878 /*
879 * Look for a VACB already mapping the same data.
880 */
881 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
882 if (current == NULL)
883 {
884 /*
885 * Otherwise create a new VACB.
886 */
887 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
888 if (!NT_SUCCESS(Status))
889 {
890 return Status;
891 }
892 }
893
894 KeAcquireGuardedMutex(&ViewLock);
895
896 /* Move to the tail of the LRU list */
897 RemoveEntryList(&current->VacbLruListEntry);
898 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
899
900 KeReleaseGuardedMutex(&ViewLock);
901
902 /*
903 * Return information about the VACB to the caller.
904 */
905 *UptoDate = current->Valid;
906 *BaseAddress = current->BaseAddress;
907 DPRINT("*BaseAddress %p\n", *BaseAddress);
908 *Vacb = current;
909 *BaseOffset = current->FileOffset.QuadPart;
910 return STATUS_SUCCESS;
911 }
912
913 NTSTATUS
914 NTAPI
915 CcRosRequestVacb (
916 PROS_SHARED_CACHE_MAP SharedCacheMap,
917 LONGLONG FileOffset,
918 PVOID* BaseAddress,
919 PBOOLEAN UptoDate,
920 PROS_VACB *Vacb)
921 /*
922 * FUNCTION: Request a page mapping for a shared cache map
923 */
924 {
925 LONGLONG BaseOffset;
926
927 ASSERT(SharedCacheMap);
928
929 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
930 {
931 DPRINT1("Bad fileoffset %I64x, should be a multiple of %x\n",
932 FileOffset, VACB_MAPPING_GRANULARITY);
933 KeBugCheck(CACHE_MANAGER);
934 }
935
936 return CcRosGetVacb(SharedCacheMap,
937 FileOffset,
938 &BaseOffset,
939 BaseAddress,
940 UptoDate,
941 Vacb);
942 }
943
944 static
945 VOID
946 CcFreeCachePage (
947 PVOID Context,
948 MEMORY_AREA* MemoryArea,
949 PVOID Address,
950 PFN_NUMBER Page,
951 SWAPENTRY SwapEntry,
952 BOOLEAN Dirty)
953 {
954 ASSERT(SwapEntry == 0);
955 if (Page != 0)
956 {
957 ASSERT(MmGetReferenceCountPage(Page) == 1);
958 MmReleasePageMemoryConsumer(MC_CACHE, Page);
959 }
960 }
961
962 NTSTATUS
963 CcRosInternalFreeVacb (
964 PROS_VACB Vacb)
965 /*
966 * FUNCTION: Releases a VACB associated with a shared cache map
967 */
968 {
969 DPRINT("Freeing VACB 0x%p\n", Vacb);
970 #if DBG
971 if (Vacb->SharedCacheMap->Trace)
972 {
973 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
974 }
975 #endif
976
977 MmLockAddressSpace(MmGetKernelAddressSpace());
978 MmFreeMemoryArea(MmGetKernelAddressSpace(),
979 Vacb->MemoryArea,
980 CcFreeCachePage,
981 NULL);
982 MmUnlockAddressSpace(MmGetKernelAddressSpace());
983
984 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
985 return STATUS_SUCCESS;
986 }
987
988 /*
989 * @implemented
990 */
991 VOID
992 NTAPI
993 CcFlushCache (
994 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
995 IN PLARGE_INTEGER FileOffset OPTIONAL,
996 IN ULONG Length,
997 OUT PIO_STATUS_BLOCK IoStatus)
998 {
999 PROS_SHARED_CACHE_MAP SharedCacheMap;
1000 LARGE_INTEGER Offset;
1001 LONGLONG RemainingLength;
1002 PROS_VACB current;
1003 NTSTATUS Status;
1004 KIRQL oldIrql;
1005
1006 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1007 SectionObjectPointers, FileOffset, Length);
1008
1009 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1010 SectionObjectPointers, FileOffset, Length, IoStatus);
1011
1012 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1013 {
1014 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1015 ASSERT(SharedCacheMap);
1016 if (FileOffset)
1017 {
1018 Offset = *FileOffset;
1019 RemainingLength = Length;
1020 }
1021 else
1022 {
1023 Offset.QuadPart = 0;
1024 RemainingLength = SharedCacheMap->FileSize.QuadPart;
1025 }
1026
1027 if (IoStatus)
1028 {
1029 IoStatus->Status = STATUS_SUCCESS;
1030 IoStatus->Information = 0;
1031 }
1032
1033 while (RemainingLength > 0)
1034 {
1035 current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
1036 if (current != NULL)
1037 {
1038 if (current->Dirty)
1039 {
1040 Status = CcRosFlushVacb(current);
1041 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1042 {
1043 IoStatus->Status = Status;
1044 }
1045 }
1046
1047 CcRosReleaseVacbLock(current);
1048
1049 KeAcquireGuardedMutex(&ViewLock);
1050 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1051 CcRosVacbDecRefCount(current);
1052 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1053 KeReleaseGuardedMutex(&ViewLock);
1054 }
1055
1056 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
1057 RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
1058 }
1059 }
1060 else
1061 {
1062 if (IoStatus)
1063 {
1064 IoStatus->Status = STATUS_INVALID_PARAMETER;
1065 }
1066 }
1067 }
1068
1069 NTSTATUS
1070 NTAPI
1071 CcRosDeleteFileCache (
1072 PFILE_OBJECT FileObject,
1073 PROS_SHARED_CACHE_MAP SharedCacheMap)
1074 /*
1075 * FUNCTION: Releases the shared cache map associated with a file object
1076 */
1077 {
1078 PLIST_ENTRY current_entry;
1079 PROS_VACB current;
1080 LIST_ENTRY FreeList;
1081 KIRQL oldIrql;
1082
1083 ASSERT(SharedCacheMap);
1084
1085 SharedCacheMap->OpenCount++;
1086 KeReleaseGuardedMutex(&ViewLock);
1087
1088 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1089
1090 KeAcquireGuardedMutex(&ViewLock);
1091 SharedCacheMap->OpenCount--;
1092 if (SharedCacheMap->OpenCount == 0)
1093 {
1094 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1095
1096 /*
1097 * Release all VACBs
1098 */
1099 InitializeListHead(&FreeList);
1100 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1101 while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
1102 {
1103 current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
1104 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1105 RemoveEntryList(&current->VacbLruListEntry);
1106 if (current->Dirty)
1107 {
1108 RemoveEntryList(&current->DirtyVacbListEntry);
1109 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
1110 DPRINT1("Freeing dirty VACB\n");
1111 }
1112 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
1113 }
1114 #if DBG
1115 SharedCacheMap->Trace = FALSE;
1116 #endif
1117 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1118
1119 KeReleaseGuardedMutex(&ViewLock);
1120 ObDereferenceObject(SharedCacheMap->FileObject);
1121
1122 while (!IsListEmpty(&FreeList))
1123 {
1124 current_entry = RemoveTailList(&FreeList);
1125 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1126 CcRosInternalFreeVacb(current);
1127 }
1128 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1129 KeAcquireGuardedMutex(&ViewLock);
1130 }
1131 return STATUS_SUCCESS;
1132 }
1133
1134 VOID
1135 NTAPI
1136 CcRosReferenceCache (
1137 PFILE_OBJECT FileObject)
1138 {
1139 PROS_SHARED_CACHE_MAP SharedCacheMap;
1140 KeAcquireGuardedMutex(&ViewLock);
1141 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1142 ASSERT(SharedCacheMap);
1143 ASSERT(SharedCacheMap->OpenCount != 0);
1144 SharedCacheMap->OpenCount++;
1145 KeReleaseGuardedMutex(&ViewLock);
1146 }
1147
1148 VOID
1149 NTAPI
1150 CcRosRemoveIfClosed (
1151 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1152 {
1153 PROS_SHARED_CACHE_MAP SharedCacheMap;
1154 DPRINT("CcRosRemoveIfClosed()\n");
1155 KeAcquireGuardedMutex(&ViewLock);
1156 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1157 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1158 {
1159 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1160 }
1161 KeReleaseGuardedMutex(&ViewLock);
1162 }
1163
1164
1165 VOID
1166 NTAPI
1167 CcRosDereferenceCache (
1168 PFILE_OBJECT FileObject)
1169 {
1170 PROS_SHARED_CACHE_MAP SharedCacheMap;
1171 KeAcquireGuardedMutex(&ViewLock);
1172 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1173 ASSERT(SharedCacheMap);
1174 if (SharedCacheMap->OpenCount > 0)
1175 {
1176 SharedCacheMap->OpenCount--;
1177 if (SharedCacheMap->OpenCount == 0)
1178 {
1179 MmFreeSectionSegments(SharedCacheMap->FileObject);
1180 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1181 }
1182 }
1183 KeReleaseGuardedMutex(&ViewLock);
1184 }
1185
1186 NTSTATUS
1187 NTAPI
1188 CcRosReleaseFileCache (
1189 PFILE_OBJECT FileObject)
1190 /*
1191 * FUNCTION: Called by the file system when a handle to a file object
1192 * has been closed.
1193 */
1194 {
1195 PROS_SHARED_CACHE_MAP SharedCacheMap;
1196
1197 KeAcquireGuardedMutex(&ViewLock);
1198
1199 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1200 {
1201 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1202 if (FileObject->PrivateCacheMap != NULL)
1203 {
1204 FileObject->PrivateCacheMap = NULL;
1205 if (SharedCacheMap->OpenCount > 0)
1206 {
1207 SharedCacheMap->OpenCount--;
1208 if (SharedCacheMap->OpenCount == 0)
1209 {
1210 MmFreeSectionSegments(SharedCacheMap->FileObject);
1211 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1212 }
1213 }
1214 }
1215 }
1216 KeReleaseGuardedMutex(&ViewLock);
1217 return STATUS_SUCCESS;
1218 }
1219
1220 NTSTATUS
1221 NTAPI
1222 CcTryToInitializeFileCache (
1223 PFILE_OBJECT FileObject)
1224 {
1225 PROS_SHARED_CACHE_MAP SharedCacheMap;
1226 NTSTATUS Status;
1227
1228 KeAcquireGuardedMutex(&ViewLock);
1229
1230 ASSERT(FileObject->SectionObjectPointer);
1231 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1232 if (SharedCacheMap == NULL)
1233 {
1234 Status = STATUS_UNSUCCESSFUL;
1235 }
1236 else
1237 {
1238 if (FileObject->PrivateCacheMap == NULL)
1239 {
1240 FileObject->PrivateCacheMap = SharedCacheMap;
1241 SharedCacheMap->OpenCount++;
1242 }
1243 Status = STATUS_SUCCESS;
1244 }
1245 KeReleaseGuardedMutex(&ViewLock);
1246
1247 return Status;
1248 }
1249
1250
1251 NTSTATUS
1252 NTAPI
1253 CcRosInitializeFileCache (
1254 PFILE_OBJECT FileObject,
1255 PCC_FILE_SIZES FileSizes,
1256 BOOLEAN PinAccess,
1257 PCACHE_MANAGER_CALLBACKS CallBacks,
1258 PVOID LazyWriterContext)
1259 /*
1260 * FUNCTION: Initializes a shared cache map for a file object
1261 */
1262 {
1263 PROS_SHARED_CACHE_MAP SharedCacheMap;
1264
1265 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1266 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1267 FileObject, SharedCacheMap);
1268
1269 KeAcquireGuardedMutex(&ViewLock);
1270 if (SharedCacheMap == NULL)
1271 {
1272 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1273 if (SharedCacheMap == NULL)
1274 {
1275 KeReleaseGuardedMutex(&ViewLock);
1276 return STATUS_INSUFFICIENT_RESOURCES;
1277 }
1278 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1279 ObReferenceObjectByPointer(FileObject,
1280 FILE_ALL_ACCESS,
1281 NULL,
1282 KernelMode);
1283 SharedCacheMap->FileObject = FileObject;
1284 SharedCacheMap->Callbacks = CallBacks;
1285 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1286 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1287 SharedCacheMap->FileSize = FileSizes->FileSize;
1288 SharedCacheMap->PinAccess = PinAccess;
1289 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1290 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1291 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1292 }
1293 if (FileObject->PrivateCacheMap == NULL)
1294 {
1295 FileObject->PrivateCacheMap = SharedCacheMap;
1296 SharedCacheMap->OpenCount++;
1297 }
1298 KeReleaseGuardedMutex(&ViewLock);
1299
1300 return STATUS_SUCCESS;
1301 }
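/* A minimal sketch of the initialization step described in the note at the
 * top of this file, as seen from an FSD caller: the Fcb fields and the My*
 * callback routines are hypothetical, only the CC_FILE_SIZES and
 * CACHE_MANAGER_CALLBACKS plumbing and the CcRosInitializeFileCache call
 * itself come from the code above.
 *
 *   static CACHE_MANAGER_CALLBACKS MyCallbacks =
 *   {
 *       MyAcquireForLazyWrite, MyReleaseFromLazyWrite,   // hypothetical
 *       MyAcquireForReadAhead, MyReleaseFromReadAhead    // hypothetical
 *   };
 *
 *   CC_FILE_SIZES FileSizes;
 *   FileSizes.AllocationSize = Fcb->AllocationSize;
 *   FileSizes.FileSize = Fcb->FileSize;
 *   FileSizes.ValidDataLength = Fcb->ValidDataLength;
 *
 *   Status = CcRosInitializeFileCache(FileObject,
 *                                     &FileSizes,
 *                                     FALSE,         // no pin access
 *                                     &MyCallbacks,
 *                                     Fcb);          // lazy write context
 */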
1302
1303 /*
1304 * @implemented
1305 */
1306 PFILE_OBJECT
1307 NTAPI
1308 CcGetFileObjectFromSectionPtrs (
1309 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1310 {
1311 PROS_SHARED_CACHE_MAP SharedCacheMap;
1312
1313 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1314
1315 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1316 {
1317 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1318 ASSERT(SharedCacheMap);
1319 return SharedCacheMap->FileObject;
1320 }
1321 return NULL;
1322 }
1323
1324 VOID
1325 NTAPI
1326 CcShutdownLazyWriter (
1327 VOID)
1328 {
1329 /* Simply set the event; the lazy writer will stop when it's done */
1330 KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
1331 }
1332
1333 BOOLEAN
1334 INIT_FUNCTION
1335 NTAPI
1336 CcInitView (
1337 VOID)
1338 {
1339 HANDLE LazyWriter;
1340 NTSTATUS Status;
1341 OBJECT_ATTRIBUTES ObjectAttributes;
1342
1343 DPRINT("CcInitView()\n");
1344
1345 InitializeListHead(&DirtyVacbListHead);
1346 InitializeListHead(&VacbLruListHead);
1347 KeInitializeGuardedMutex(&ViewLock);
1348 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1349 NULL,
1350 NULL,
1351 0,
1352 sizeof(INTERNAL_BCB),
1353 TAG_BCB,
1354 20);
1355 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1356 NULL,
1357 NULL,
1358 0,
1359 sizeof(ROS_SHARED_CACHE_MAP),
1360 TAG_SHARED_CACHE_MAP,
1361 20);
1362 ExInitializeNPagedLookasideList(&VacbLookasideList,
1363 NULL,
1364 NULL,
1365 0,
1366 sizeof(ROS_VACB),
1367 TAG_VACB,
1368 20);
1369
1370 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1371
1372 /* Initialize lazy writer events */
1373 KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
1374 KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);
1375
1376 /* Define lazy writer threshold, depending on system type */
1377 switch (MmQuerySystemSize())
1378 {
1379 case MmSmallSystem:
1380 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
1381 break;
1382
1383 case MmMediumSystem:
1384 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
1385 break;
1386
1387 case MmLargeSystem:
1388 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
1389 break;
1390 }
1391
1392 /* Start the lazy writer thread */
1393 InitializeObjectAttributes(&ObjectAttributes,
1394 NULL,
1395 OBJ_KERNEL_HANDLE,
1396 NULL,
1397 NULL);
1398 Status = PsCreateSystemThread(&LazyWriter,
1399 THREAD_ALL_ACCESS,
1400 &ObjectAttributes,
1401 NULL,
1402 NULL,
1403 CciLazyWriter,
1404 NULL);
1405 if (!NT_SUCCESS(Status))
1406 {
1407 return FALSE;
1408 }
1409
1410 /* Handle is not needed */
1411 ObCloseHandle(LazyWriter, KernelMode);
1412
1413 CcInitCacheZeroPage();
1414
1415 return TRUE;
1416 }
1417
1418 /* EOF */