[NTOSKRNL] When marking a BCB dirty, also mark the underlying VACB dirty.
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows:
17 *
18 * (1) If caching for the FCB hasn't been initiated then do so by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region that is being read or written, obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If the page is either being read or not completely written, and it is
25 * not up to date, then read its data from the underlying medium. If the read
26 * fails, call CcReleaseCachePage with VALID set to FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
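/* Illustrative sketch of the procedure above. This is an example built on
 * assumptions, not code that is compiled as part of this file: MyFsdCachedRead,
 * MyFsdReadRaw, PMY_FCB and the CacheInitialized flag are hypothetical names,
 * and the exact CcInitializeFileCache/CcRequestCachePage/CcReleaseCachePage
 * prototypes are assumed from the description above, not taken from a header.
 *
 *   NTSTATUS MyFsdCachedRead(PMY_FCB Fcb, LONGLONG Offset,
 *                            ULONG Length, PVOID Buffer)
 *   {
 *       while (Length > 0)
 *       {
 *           PVOID CachePage;
 *           BOOLEAN Valid;
 *           ULONG Chunk = min(Length, 4096 - (ULONG)(Offset % 4096));
 *
 *           // (1) Set up caching for this FCB on first use
 *           if (!Fcb->CacheInitialized)
 *               CcInitializeFileCache(Fcb->FileObject, ...);
 *
 *           // (2) Obtain the cache page covering this 4k region
 *           CcRequestCachePage(Fcb, Offset, &CachePage, &Valid);
 *
 *           // (3) The page is being read and is not up to date: fetch it from
 *           //     the underlying medium, releasing with VALID == FALSE on failure
 *           if (!Valid && !NT_SUCCESS(MyFsdReadRaw(Fcb, Offset, CachePage)))
 *           {
 *               CcReleaseCachePage(Fcb, CachePage, FALSE);
 *               return STATUS_UNSUCCESSFUL;
 *           }
 *
 *           // (4) Copy the data out of the page, then (5) release it
 *           RtlCopyMemory(Buffer, (PUCHAR)CachePage + (Offset % 4096), Chunk);
 *           CcReleaseCachePage(Fcb, CachePage, TRUE);
 *
 *           Offset += Chunk;
 *           Buffer = (PUCHAR)Buffer + Chunk;
 *           Length -= Chunk;
 *       }
 *       return STATUS_SUCCESS;
 *   }
 */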
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 LIST_ENTRY DirtyVacbListHead;
45 static LIST_ENTRY VacbLruListHead;
46
47 KGUARDED_MUTEX ViewLock;
48
49 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
50 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
51 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
52
53 /* Counters:
54 * - Number of pages flushed by the lazy writer
55 * - Number of times the lazy writer ran
56 */
57 ULONG CcLazyWritePages = 0;
58 ULONG CcLazyWriteIos = 0;
59
60 /* Internal vars (MS):
61 * - Threshold above which the lazy writer starts to act
62 * - Number of dirty pages
63 */
64 ULONG CcDirtyPageThreshold = 0;
65 ULONG CcTotalDirtyPages = 0;
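/* Illustrative sketch, an assumption rather than the actual throttling code:
 * a write throttle would compare these two counters along the lines of
 *
 *   if (CcTotalDirtyPages + PagesToWrite >= CcDirtyPageThreshold)
 *       DeferTheWrite();
 *
 * where PagesToWrite and DeferTheWrite are hypothetical placeholders.
 */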
66
67 /* Internal vars (ROS):
68 * - Event used to notify the lazy writer to shut down
69 * - Event used to inform watchers that the lazy writer is done with this iteration
70 */
71 KEVENT iLazyWriterShutdown;
72 KEVENT iLazyWriterNotify;
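/* A watcher would typically wait for the end of a lazy writer pass like this
 * (illustrative sketch; the real waiter lives outside this file and may differ):
 *
 *   KeWaitForSingleObject(&iLazyWriterNotify, Executive, KernelMode,
 *                         FALSE, NULL);
 */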
73
74 #if DBG
75 static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
76 {
77 ++vacb->ReferenceCount;
78 if (vacb->SharedCacheMap->Trace)
79 {
80 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
81 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
82 }
83 }
84 static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
85 {
86 --vacb->ReferenceCount;
87 if (vacb->SharedCacheMap->Trace)
88 {
89 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
90 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
91 }
92 }
93 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
94 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
95 #else
96 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
97 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
98 #endif
99
100 NTSTATUS
101 CcRosInternalFreeVacb(PROS_VACB Vacb);
102
103
104 /* FUNCTIONS *****************************************************************/
105
106 VOID
107 NTAPI
108 CcRosTraceCacheMap (
109 PROS_SHARED_CACHE_MAP SharedCacheMap,
110 BOOLEAN Trace )
111 {
112 #if DBG
113 KIRQL oldirql;
114 PLIST_ENTRY current_entry;
115 PROS_VACB current;
116
117 if (!SharedCacheMap)
118 return;
119
120 SharedCacheMap->Trace = Trace;
121
122 if (Trace)
123 {
124 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
125
126 KeAcquireGuardedMutex(&ViewLock);
127 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
128
129 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
130 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
131 {
132 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
133 current_entry = current_entry->Flink;
134
135 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
136 current, current->ReferenceCount, current->Dirty, current->PageOut );
137 }
138 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
139 KeReleaseGuardedMutex(&ViewLock);
140 }
141 else
142 {
143 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
144 }
145
146 #else
147 UNREFERENCED_PARAMETER(SharedCacheMap);
148 UNREFERENCED_PARAMETER(Trace);
149 #endif
150 }
151
152 NTSTATUS
153 NTAPI
154 CcRosFlushVacb (
155 PROS_VACB Vacb)
156 {
157 NTSTATUS Status;
158 KIRQL oldIrql;
159
160 Status = CcWriteVirtualAddress(Vacb);
161 if (NT_SUCCESS(Status))
162 {
163 KeAcquireGuardedMutex(&ViewLock);
164 KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
165
166 Vacb->Dirty = FALSE;
167 RemoveEntryList(&Vacb->DirtyVacbListEntry);
168 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
169 CcRosVacbDecRefCount(Vacb);
170
171 KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
172 KeReleaseGuardedMutex(&ViewLock);
173 }
174
175 return Status;
176 }
177
178 NTSTATUS
179 NTAPI
180 CcRosFlushDirtyPages (
181 ULONG Target,
182 PULONG Count,
183 BOOLEAN Wait,
184 BOOLEAN CalledFromLazy)
185 {
186 PLIST_ENTRY current_entry;
187 PROS_VACB current;
188 BOOLEAN Locked;
189 NTSTATUS Status;
190 LARGE_INTEGER ZeroTimeout;
191
192 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
193
194 (*Count) = 0;
195 ZeroTimeout.QuadPart = 0;
196
197 KeEnterCriticalRegion();
198 KeAcquireGuardedMutex(&ViewLock);
199
200 current_entry = DirtyVacbListHead.Flink;
201 if (current_entry == &DirtyVacbListHead)
202 {
203 DPRINT("No Dirty pages\n");
204 }
205
206 while ((current_entry != &DirtyVacbListHead) && (Target > 0))
207 {
208 current = CONTAINING_RECORD(current_entry,
209 ROS_VACB,
210 DirtyVacbListEntry);
211 current_entry = current_entry->Flink;
212
213 CcRosVacbIncRefCount(current);
214
215 /* When performing lazy write, don't handle temporary files */
216 if (CalledFromLazy &&
217 BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
218 {
219 CcRosVacbDecRefCount(current);
220 continue;
221 }
222
223 Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
224 current->SharedCacheMap->LazyWriteContext, Wait);
225 if (!Locked)
226 {
227 CcRosVacbDecRefCount(current);
228 continue;
229 }
230
231 Status = CcRosAcquireVacbLock(current,
232 Wait ? NULL : &ZeroTimeout);
233 if (Status != STATUS_SUCCESS)
234 {
235 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
236 current->SharedCacheMap->LazyWriteContext);
237 CcRosVacbDecRefCount(current);
238 continue;
239 }
240
241 ASSERT(current->Dirty);
242
243 /* One reference is added above */
244 if (current->ReferenceCount > 2)
245 {
246 CcRosReleaseVacbLock(current);
247 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
248 current->SharedCacheMap->LazyWriteContext);
249 CcRosVacbDecRefCount(current);
250 continue;
251 }
252
253 KeReleaseGuardedMutex(&ViewLock);
254
255 Status = CcRosFlushVacb(current);
256
257 CcRosReleaseVacbLock(current);
258 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
259 current->SharedCacheMap->LazyWriteContext);
260
261 KeAcquireGuardedMutex(&ViewLock);
262 CcRosVacbDecRefCount(current);
263
264 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
265 (Status != STATUS_MEDIA_WRITE_PROTECTED))
266 {
267 DPRINT1("CC: Failed to flush VACB.\n");
268 }
269 else
270 {
271 ULONG PagesFreed;
272
273 /* How many pages did we free? */
274 PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
275 (*Count) += PagesFreed;
276
277 /* Make sure we don't overflow target! */
278 if (Target < PagesFreed)
279 {
280 /* If we would have, jump to zero directly */
281 Target = 0;
282 }
283 else
284 {
285 Target -= PagesFreed;
286 }
287 }
288
289 current_entry = DirtyVacbListHead.Flink;
290 }
291
292 KeReleaseGuardedMutex(&ViewLock);
293 KeLeaveCriticalRegion();
294
295 DPRINT("CcRosFlushDirtyPages() finished\n");
296 return STATUS_SUCCESS;
297 }
298
299 /* FIXME: Someday this could somewhat implement write-behind/read-ahead */
300 VOID
301 NTAPI
302 CciLazyWriter(PVOID Unused)
303 {
304 LARGE_INTEGER OneSecond;
305
306 OneSecond.QuadPart = (LONGLONG)-1*1000*1000*10;
307
308 while (TRUE)
309 {
310 NTSTATUS Status;
311 ULONG Target, Count = 0;
312
313 /* Once per second, or until we have to stop */
314 Status = KeWaitForSingleObject(&iLazyWriterShutdown,
315 Executive,
316 KernelMode,
317 FALSE,
318 &OneSecond);
319
320 /* If the wait succeeded, we have to stop running! */
321 if (Status == STATUS_SUCCESS)
322 {
323 break;
324 }
325
326 /* We're not sleeping anymore */
327 KeClearEvent(&iLazyWriterNotify);
328
329 /* Our target is one-eighth of the dirty pages */
330 Target = CcTotalDirtyPages / 8;
331 if (Target != 0)
332 {
333 /* Flush! */
334 DPRINT("Lazy writer starting (%d)\n", Target);
335 CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);
336
337 /* And update stats */
338 CcLazyWritePages += Count;
339 ++CcLazyWriteIos;
340 DPRINT("Lazy writer done (%d)\n", Count);
341 }
342
343 /* Inform people waiting on us that we're done */
344 KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
345 }
346 }
347
348 NTSTATUS
349 CcRosTrimCache (
350 ULONG Target,
351 ULONG Priority,
352 PULONG NrFreed)
353 /*
354 * FUNCTION: Try to free some memory from the file cache.
355 * ARGUMENTS:
356 * Target - The number of pages to be freed.
357 * Priority - The priority of the free operation (currently unused).
358 * NrFreed - Points to a variable where the number of pages
359 * actually freed is returned.
360 */
361 {
362 PLIST_ENTRY current_entry;
363 PROS_VACB current;
364 ULONG PagesFreed;
365 KIRQL oldIrql;
366 LIST_ENTRY FreeList;
367 PFN_NUMBER Page;
368 ULONG i;
369 BOOLEAN FlushedPages = FALSE;
370
371 DPRINT("CcRosTrimCache(Target %lu)\n", Target);
372
373 InitializeListHead(&FreeList);
374
375 *NrFreed = 0;
376
377 retry:
378 KeAcquireGuardedMutex(&ViewLock);
379
380 current_entry = VacbLruListHead.Flink;
381 while (current_entry != &VacbLruListHead)
382 {
383 current = CONTAINING_RECORD(current_entry,
384 ROS_VACB,
385 VacbLruListEntry);
386 current_entry = current_entry->Flink;
387
388 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
389
390 /* Reference the VACB */
391 CcRosVacbIncRefCount(current);
392
393 /* Check if it's mapped and not dirty */
394 if (current->MappedCount > 0 && !current->Dirty)
395 {
396 /* We have to break these locks because Cc sucks */
397 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
398 KeReleaseGuardedMutex(&ViewLock);
399
400 /* Page out the VACB */
401 for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
402 {
403 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
404
405 MmPageOutPhysicalAddress(Page);
406 }
407
408 /* Reacquire the locks */
409 KeAcquireGuardedMutex(&ViewLock);
410 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
411 }
412
413 /* Dereference the VACB */
414 CcRosVacbDecRefCount(current);
415
416 /* Check if we can free this entry now */
417 if (current->ReferenceCount == 0)
418 {
419 ASSERT(!current->Dirty);
420 ASSERT(!current->MappedCount);
421
422 RemoveEntryList(&current->CacheMapVacbListEntry);
423 RemoveEntryList(&current->VacbLruListEntry);
424 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
425
426 /* Calculate how many pages we freed for Mm */
427 PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
428 Target -= PagesFreed;
429 (*NrFreed) += PagesFreed;
430 }
431
432 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
433 }
434
435 KeReleaseGuardedMutex(&ViewLock);
436
437 /* Try flushing pages if we haven't met our target */
438 if ((Target > 0) && !FlushedPages)
439 {
440 /* Flush dirty pages to disk */
441 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
442 FlushedPages = TRUE;
443
444 /* We can only swap as many pages as we flushed */
445 if (PagesFreed < Target) Target = PagesFreed;
446
447 /* Check if we flushed anything */
448 if (PagesFreed != 0)
449 {
450 /* Try again after flushing dirty pages */
451 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
452 goto retry;
453 }
454 }
455
456 while (!IsListEmpty(&FreeList))
457 {
458 current_entry = RemoveHeadList(&FreeList);
459 current = CONTAINING_RECORD(current_entry,
460 ROS_VACB,
461 CacheMapVacbListEntry);
462 CcRosInternalFreeVacb(current);
463 }
464
465 DPRINT("Evicted %lu cache pages\n", (*NrFreed));
466
467 return STATUS_SUCCESS;
468 }
469
470 NTSTATUS
471 NTAPI
472 CcRosReleaseVacb (
473 PROS_SHARED_CACHE_MAP SharedCacheMap,
474 PROS_VACB Vacb,
475 BOOLEAN Valid,
476 BOOLEAN Dirty,
477 BOOLEAN Mapped)
478 {
479 BOOLEAN WasDirty;
480 KIRQL oldIrql;
481
482 ASSERT(SharedCacheMap);
483
484 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
485 SharedCacheMap, Vacb, Valid);
486
487 KeAcquireGuardedMutex(&ViewLock);
488 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
489
490 Vacb->Valid = Valid;
491
492 WasDirty = Vacb->Dirty;
493 Vacb->Dirty = Vacb->Dirty || Dirty;
494
495 if (!WasDirty && Vacb->Dirty)
496 {
497 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
498 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
499 }
500
501 if (Mapped)
502 {
503 Vacb->MappedCount++;
504 }
505 CcRosVacbDecRefCount(Vacb);
506 if (Mapped && (Vacb->MappedCount == 1))
507 {
508 CcRosVacbIncRefCount(Vacb);
509 }
510 if (!WasDirty && Vacb->Dirty)
511 {
512 CcRosVacbIncRefCount(Vacb);
513 }
514
515 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
516 KeReleaseGuardedMutex(&ViewLock);
517 CcRosReleaseVacbLock(Vacb);
518
519 return STATUS_SUCCESS;
520 }
521
522 /* Returns with VACB Lock Held! */
523 PROS_VACB
524 NTAPI
525 CcRosLookupVacb (
526 PROS_SHARED_CACHE_MAP SharedCacheMap,
527 LONGLONG FileOffset)
528 {
529 PLIST_ENTRY current_entry;
530 PROS_VACB current;
531 KIRQL oldIrql;
532
533 ASSERT(SharedCacheMap);
534
535 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
536 SharedCacheMap, FileOffset);
537
538 KeAcquireGuardedMutex(&ViewLock);
539 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
540
541 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
542 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
543 {
544 current = CONTAINING_RECORD(current_entry,
545 ROS_VACB,
546 CacheMapVacbListEntry);
547 if (IsPointInRange(current->FileOffset.QuadPart,
548 VACB_MAPPING_GRANULARITY,
549 FileOffset))
550 {
551 CcRosVacbIncRefCount(current);
552 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
553 KeReleaseGuardedMutex(&ViewLock);
554 CcRosAcquireVacbLock(current, NULL);
555 return current;
556 }
557 if (current->FileOffset.QuadPart > FileOffset)
558 break;
559 current_entry = current_entry->Flink;
560 }
561
562 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
563 KeReleaseGuardedMutex(&ViewLock);
564
565 return NULL;
566 }
567
568 VOID
569 NTAPI
570 CcRosMarkDirtyVacb (
571 PROS_VACB Vacb)
572 {
573 KIRQL oldIrql;
574 PROS_SHARED_CACHE_MAP SharedCacheMap;
575
576 SharedCacheMap = Vacb->SharedCacheMap;
577
578 KeAcquireGuardedMutex(&ViewLock);
579 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
580
581 if (!Vacb->Dirty)
582 {
583 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
584 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
585 }
586 else
587 {
588 CcRosVacbDecRefCount(Vacb);
589 }
590
591 /* Move to the tail of the LRU list */
592 RemoveEntryList(&Vacb->VacbLruListEntry);
593 InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
594
595 Vacb->Dirty = TRUE;
596
597 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
598 KeReleaseGuardedMutex(&ViewLock);
599 }
600
601 NTSTATUS
602 NTAPI
603 CcRosMarkDirtyFile (
604 PROS_SHARED_CACHE_MAP SharedCacheMap,
605 LONGLONG FileOffset)
606 {
607 PROS_VACB Vacb;
608
609 ASSERT(SharedCacheMap);
610
611 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
612 SharedCacheMap, FileOffset);
613
614 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
615 if (Vacb == NULL)
616 {
617 KeBugCheck(CACHE_MANAGER);
618 }
619
620 CcRosMarkDirtyVacb(Vacb);
621
622
623 CcRosReleaseVacbLock(Vacb);
624
625 return STATUS_SUCCESS;
626 }
627
628 NTSTATUS
629 NTAPI
630 CcRosUnmapVacb (
631 PROS_SHARED_CACHE_MAP SharedCacheMap,
632 LONGLONG FileOffset,
633 BOOLEAN NowDirty)
634 {
635 PROS_VACB Vacb;
636 BOOLEAN WasDirty;
637 KIRQL oldIrql;
638
639 ASSERT(SharedCacheMap);
640
641 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
642 SharedCacheMap, FileOffset, NowDirty);
643
644 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
645 if (Vacb == NULL)
646 {
647 return STATUS_UNSUCCESSFUL;
648 }
649
650 KeAcquireGuardedMutex(&ViewLock);
651 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
652
653 WasDirty = Vacb->Dirty;
654 Vacb->Dirty = Vacb->Dirty || NowDirty;
655
656 Vacb->MappedCount--;
657
658 if (!WasDirty && NowDirty)
659 {
660 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
661 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
662 }
663
664 CcRosVacbDecRefCount(Vacb);
665 if (!WasDirty && NowDirty)
666 {
667 CcRosVacbIncRefCount(Vacb);
668 }
669 if (Vacb->MappedCount == 0)
670 {
671 CcRosVacbDecRefCount(Vacb);
672 }
673
674 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
675 KeReleaseGuardedMutex(&ViewLock);
676 CcRosReleaseVacbLock(Vacb);
677
678 return STATUS_SUCCESS;
679 }
680
681 static
682 NTSTATUS
683 CcRosMapVacb(
684 PROS_VACB Vacb)
685 {
686 ULONG i;
687 NTSTATUS Status;
688 ULONG_PTR NumberOfPages;
689
690 /* Create a memory area. */
691 MmLockAddressSpace(MmGetKernelAddressSpace());
692 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
693 0, // nothing checks for VACB mareas, so set to 0
694 &Vacb->BaseAddress,
695 VACB_MAPPING_GRANULARITY,
696 PAGE_READWRITE,
697 (PMEMORY_AREA*)&Vacb->MemoryArea,
698 0,
699 PAGE_SIZE);
700 MmUnlockAddressSpace(MmGetKernelAddressSpace());
701 if (!NT_SUCCESS(Status))
702 {
703 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
704 return Status;
705 }
706
707 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
708 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
709
710 /* Create a virtual mapping for this memory area */
711 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
712 for (i = 0; i < NumberOfPages; i++)
713 {
714 PFN_NUMBER PageFrameNumber;
715
716 MI_SET_USAGE(MI_USAGE_CACHE);
717 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
718 if (PageFrameNumber == 0)
719 {
720 DPRINT1("Unable to allocate page\n");
721 KeBugCheck(MEMORY_MANAGEMENT);
722 }
723
724 Status = MmCreateVirtualMapping(NULL,
725 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
726 PAGE_READWRITE,
727 &PageFrameNumber,
728 1);
729 if (!NT_SUCCESS(Status))
730 {
731 DPRINT1("Unable to create virtual mapping\n");
732 KeBugCheck(MEMORY_MANAGEMENT);
733 }
734 }
735
736 return STATUS_SUCCESS;
737 }
738
739 static
740 NTSTATUS
741 CcRosCreateVacb (
742 PROS_SHARED_CACHE_MAP SharedCacheMap,
743 LONGLONG FileOffset,
744 PROS_VACB *Vacb)
745 {
746 PROS_VACB current;
747 PROS_VACB previous;
748 PLIST_ENTRY current_entry;
749 NTSTATUS Status;
750 KIRQL oldIrql;
751
752 ASSERT(SharedCacheMap);
753
754 DPRINT("CcRosCreateVacb()\n");
755
756 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
757 {
758 *Vacb = NULL;
759 return STATUS_INVALID_PARAMETER;
760 }
761
762 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
763 current->BaseAddress = NULL;
764 current->Valid = FALSE;
765 current->Dirty = FALSE;
766 current->PageOut = FALSE;
767 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
768 current->SharedCacheMap = SharedCacheMap;
769 #if DBG
770 if (SharedCacheMap->Trace)
771 {
772 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
773 }
774 #endif
775 current->MappedCount = 0;
776 current->DirtyVacbListEntry.Flink = NULL;
777 current->DirtyVacbListEntry.Blink = NULL;
778 current->ReferenceCount = 1;
779 current->PinCount = 0;
780 KeInitializeMutex(&current->Mutex, 0);
781 CcRosAcquireVacbLock(current, NULL);
782 KeAcquireGuardedMutex(&ViewLock);
783
784 *Vacb = current;
785 /* There is a window between the call to CcRosLookupVacb
786 * and CcRosCreateVacb. We must check whether a VACB for this
787 * file offset already exists. If one does, we release
788 * our newly created VACB and return the existing one.
789 */
790 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
791 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
792 previous = NULL;
793 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
794 {
795 current = CONTAINING_RECORD(current_entry,
796 ROS_VACB,
797 CacheMapVacbListEntry);
798 if (IsPointInRange(current->FileOffset.QuadPart,
799 VACB_MAPPING_GRANULARITY,
800 FileOffset))
801 {
802 CcRosVacbIncRefCount(current);
803 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
804 #if DBG
805 if (SharedCacheMap->Trace)
806 {
807 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
808 SharedCacheMap,
809 (*Vacb),
810 current);
811 }
812 #endif
813 CcRosReleaseVacbLock(*Vacb);
814 KeReleaseGuardedMutex(&ViewLock);
815 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
816 *Vacb = current;
817 CcRosAcquireVacbLock(current, NULL);
818 return STATUS_SUCCESS;
819 }
820 if (current->FileOffset.QuadPart < FileOffset)
821 {
822 ASSERT(previous == NULL ||
823 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
824 previous = current;
825 }
826 if (current->FileOffset.QuadPart > FileOffset)
827 break;
828 current_entry = current_entry->Flink;
829 }
830 /* There was no existing VACB. */
831 current = *Vacb;
832 if (previous)
833 {
834 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
835 }
836 else
837 {
838 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
839 }
840 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
841 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
842 KeReleaseGuardedMutex(&ViewLock);
843
844 MI_SET_USAGE(MI_USAGE_CACHE);
845 #if MI_TRACE_PFNS
846 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
847 {
848 PWCHAR pos = NULL;
849 ULONG len = 0;
850 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
851 if (pos)
852 {
853 len = wcslen(pos) * sizeof(WCHAR);
854 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
855 }
856 else
857 {
858 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
859 }
860 }
861 #endif
862
863 Status = CcRosMapVacb(current);
864 if (!NT_SUCCESS(Status))
865 {
866 RemoveEntryList(&current->CacheMapVacbListEntry);
867 RemoveEntryList(&current->VacbLruListEntry);
868 CcRosReleaseVacbLock(current);
869 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
870 }
871
872 return Status;
873 }
874
875 NTSTATUS
876 NTAPI
877 CcRosGetVacb (
878 PROS_SHARED_CACHE_MAP SharedCacheMap,
879 LONGLONG FileOffset,
880 PLONGLONG BaseOffset,
881 PVOID* BaseAddress,
882 PBOOLEAN UptoDate,
883 PROS_VACB *Vacb)
884 {
885 PROS_VACB current;
886 NTSTATUS Status;
887
888 ASSERT(SharedCacheMap);
889
890 DPRINT("CcRosGetVacb()\n");
891
892 /*
893 * Look for a VACB already mapping the same data.
894 */
895 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
896 if (current == NULL)
897 {
898 /*
899 * Otherwise create a new VACB.
900 */
901 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
902 if (!NT_SUCCESS(Status))
903 {
904 return Status;
905 }
906 }
907
908 KeAcquireGuardedMutex(&ViewLock);
909
910 /* Move to the tail of the LRU list */
911 RemoveEntryList(&current->VacbLruListEntry);
912 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
913
914 KeReleaseGuardedMutex(&ViewLock);
915
916 /*
917 * Return information about the VACB to the caller.
918 */
919 *UptoDate = current->Valid;
920 *BaseAddress = current->BaseAddress;
921 DPRINT("*BaseAddress %p\n", *BaseAddress);
922 *Vacb = current;
923 *BaseOffset = current->FileOffset.QuadPart;
924 return STATUS_SUCCESS;
925 }
926
927 NTSTATUS
928 NTAPI
929 CcRosRequestVacb (
930 PROS_SHARED_CACHE_MAP SharedCacheMap,
931 LONGLONG FileOffset,
932 PVOID* BaseAddress,
933 PBOOLEAN UptoDate,
934 PROS_VACB *Vacb)
935 /*
936 * FUNCTION: Request a page mapping for a shared cache map
937 */
938 {
939 LONGLONG BaseOffset;
940
941 ASSERT(SharedCacheMap);
942
943 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
944 {
945 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
946 FileOffset, VACB_MAPPING_GRANULARITY);
947 KeBugCheck(CACHE_MANAGER);
948 }
949
950 return CcRosGetVacb(SharedCacheMap,
951 FileOffset,
952 &BaseOffset,
953 BaseAddress,
954 UptoDate,
955 Vacb);
956 }
957
958 static
959 VOID
960 CcFreeCachePage (
961 PVOID Context,
962 MEMORY_AREA* MemoryArea,
963 PVOID Address,
964 PFN_NUMBER Page,
965 SWAPENTRY SwapEntry,
966 BOOLEAN Dirty)
967 {
968 ASSERT(SwapEntry == 0);
969 if (Page != 0)
970 {
971 ASSERT(MmGetReferenceCountPage(Page) == 1);
972 MmReleasePageMemoryConsumer(MC_CACHE, Page);
973 }
974 }
975
976 NTSTATUS
977 CcRosInternalFreeVacb (
978 PROS_VACB Vacb)
979 /*
980 * FUNCTION: Releases a VACB associated with a shared cache map
981 */
982 {
983 DPRINT("Freeing VACB 0x%p\n", Vacb);
984 #if DBG
985 if (Vacb->SharedCacheMap->Trace)
986 {
987 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
988 }
989 #endif
990
991 MmLockAddressSpace(MmGetKernelAddressSpace());
992 MmFreeMemoryArea(MmGetKernelAddressSpace(),
993 Vacb->MemoryArea,
994 CcFreeCachePage,
995 NULL);
996 MmUnlockAddressSpace(MmGetKernelAddressSpace());
997
998 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
999 return STATUS_SUCCESS;
1000 }
1001
1002 /*
1003 * @implemented
1004 */
1005 VOID
1006 NTAPI
1007 CcFlushCache (
1008 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
1009 IN PLARGE_INTEGER FileOffset OPTIONAL,
1010 IN ULONG Length,
1011 OUT PIO_STATUS_BLOCK IoStatus)
1012 {
1013 PROS_SHARED_CACHE_MAP SharedCacheMap;
1014 LARGE_INTEGER Offset;
1015 LONGLONG RemainingLength;
1016 PROS_VACB current;
1017 NTSTATUS Status;
1018 KIRQL oldIrql;
1019
1020 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1021 SectionObjectPointers, FileOffset, Length);
1022
1023 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1024 SectionObjectPointers, FileOffset, Length, IoStatus);
1025
1026 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1027 {
1028 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1029 ASSERT(SharedCacheMap);
1030 if (FileOffset)
1031 {
1032 Offset = *FileOffset;
1033 RemainingLength = Length;
1034 }
1035 else
1036 {
1037 Offset.QuadPart = 0;
1038 RemainingLength = SharedCacheMap->FileSize.QuadPart;
1039 }
1040
1041 if (IoStatus)
1042 {
1043 IoStatus->Status = STATUS_SUCCESS;
1044 IoStatus->Information = 0;
1045 }
1046
1047 while (RemainingLength > 0)
1048 {
1049 current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
1050 if (current != NULL)
1051 {
1052 if (current->Dirty)
1053 {
1054 Status = CcRosFlushVacb(current);
1055 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1056 {
1057 IoStatus->Status = Status;
1058 }
1059 }
1060
1061 CcRosReleaseVacbLock(current);
1062
1063 KeAcquireGuardedMutex(&ViewLock);
1064 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1065 CcRosVacbDecRefCount(current);
1066 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1067 KeReleaseGuardedMutex(&ViewLock);
1068 }
1069
1070 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
1071 RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
1072 }
1073 }
1074 else
1075 {
1076 if (IoStatus)
1077 {
1078 IoStatus->Status = STATUS_INVALID_PARAMETER;
1079 }
1080 }
1081 }
1082
1083 NTSTATUS
1084 NTAPI
1085 CcRosDeleteFileCache (
1086 PFILE_OBJECT FileObject,
1087 PROS_SHARED_CACHE_MAP SharedCacheMap)
1088 /*
1089 * FUNCTION: Releases the shared cache map associated with a file object
1090 */
1091 {
1092 PLIST_ENTRY current_entry;
1093 PROS_VACB current;
1094 LIST_ENTRY FreeList;
1095 KIRQL oldIrql;
1096
1097 ASSERT(SharedCacheMap);
1098
1099 SharedCacheMap->OpenCount++;
1100 KeReleaseGuardedMutex(&ViewLock);
1101
1102 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1103
1104 KeAcquireGuardedMutex(&ViewLock);
1105 SharedCacheMap->OpenCount--;
1106 if (SharedCacheMap->OpenCount == 0)
1107 {
1108 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1109
1110 /*
1111 * Release all VACBs
1112 */
1113 InitializeListHead(&FreeList);
1114 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1115 while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
1116 {
1117 current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
1118 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1119 RemoveEntryList(&current->VacbLruListEntry);
1120 if (current->Dirty)
1121 {
1122 RemoveEntryList(&current->DirtyVacbListEntry);
1123 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
1124 DPRINT1("Freeing dirty VACB\n");
1125 }
1126 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
1127 }
1128 #if DBG
1129 SharedCacheMap->Trace = FALSE;
1130 #endif
1131 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1132
1133 KeReleaseGuardedMutex(&ViewLock);
1134 ObDereferenceObject(SharedCacheMap->FileObject);
1135
1136 while (!IsListEmpty(&FreeList))
1137 {
1138 current_entry = RemoveTailList(&FreeList);
1139 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1140 CcRosInternalFreeVacb(current);
1141 }
1142 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1143 KeAcquireGuardedMutex(&ViewLock);
1144 }
1145 return STATUS_SUCCESS;
1146 }
1147
1148 VOID
1149 NTAPI
1150 CcRosReferenceCache (
1151 PFILE_OBJECT FileObject)
1152 {
1153 PROS_SHARED_CACHE_MAP SharedCacheMap;
1154 KeAcquireGuardedMutex(&ViewLock);
1155 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1156 ASSERT(SharedCacheMap);
1157 ASSERT(SharedCacheMap->OpenCount != 0);
1158 SharedCacheMap->OpenCount++;
1159 KeReleaseGuardedMutex(&ViewLock);
1160 }
1161
1162 VOID
1163 NTAPI
1164 CcRosRemoveIfClosed (
1165 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1166 {
1167 PROS_SHARED_CACHE_MAP SharedCacheMap;
1168 DPRINT("CcRosRemoveIfClosed()\n");
1169 KeAcquireGuardedMutex(&ViewLock);
1170 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1171 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1172 {
1173 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1174 }
1175 KeReleaseGuardedMutex(&ViewLock);
1176 }
1177
1178
1179 VOID
1180 NTAPI
1181 CcRosDereferenceCache (
1182 PFILE_OBJECT FileObject)
1183 {
1184 PROS_SHARED_CACHE_MAP SharedCacheMap;
1185 KeAcquireGuardedMutex(&ViewLock);
1186 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1187 ASSERT(SharedCacheMap);
1188 if (SharedCacheMap->OpenCount > 0)
1189 {
1190 SharedCacheMap->OpenCount--;
1191 if (SharedCacheMap->OpenCount == 0)
1192 {
1193 MmFreeSectionSegments(SharedCacheMap->FileObject);
1194 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1195 }
1196 }
1197 KeReleaseGuardedMutex(&ViewLock);
1198 }
1199
1200 NTSTATUS
1201 NTAPI
1202 CcRosReleaseFileCache (
1203 PFILE_OBJECT FileObject)
1204 /*
1205 * FUNCTION: Called by the file system when a handle to a file object
1206 * has been closed.
1207 */
1208 {
1209 PROS_SHARED_CACHE_MAP SharedCacheMap;
1210
1211 KeAcquireGuardedMutex(&ViewLock);
1212
1213 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1214 {
1215 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1216 if (FileObject->PrivateCacheMap != NULL)
1217 {
1218 FileObject->PrivateCacheMap = NULL;
1219 if (SharedCacheMap->OpenCount > 0)
1220 {
1221 SharedCacheMap->OpenCount--;
1222 if (SharedCacheMap->OpenCount == 0)
1223 {
1224 MmFreeSectionSegments(SharedCacheMap->FileObject);
1225 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1226 }
1227 }
1228 }
1229 }
1230 KeReleaseGuardedMutex(&ViewLock);
1231 return STATUS_SUCCESS;
1232 }
1233
1234 NTSTATUS
1235 NTAPI
1236 CcTryToInitializeFileCache (
1237 PFILE_OBJECT FileObject)
1238 {
1239 PROS_SHARED_CACHE_MAP SharedCacheMap;
1240 NTSTATUS Status;
1241
1242 KeAcquireGuardedMutex(&ViewLock);
1243
1244 ASSERT(FileObject->SectionObjectPointer);
1245 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1246 if (SharedCacheMap == NULL)
1247 {
1248 Status = STATUS_UNSUCCESSFUL;
1249 }
1250 else
1251 {
1252 if (FileObject->PrivateCacheMap == NULL)
1253 {
1254 FileObject->PrivateCacheMap = SharedCacheMap;
1255 SharedCacheMap->OpenCount++;
1256 }
1257 Status = STATUS_SUCCESS;
1258 }
1259 KeReleaseGuardedMutex(&ViewLock);
1260
1261 return Status;
1262 }
1263
1264
1265 NTSTATUS
1266 NTAPI
1267 CcRosInitializeFileCache (
1268 PFILE_OBJECT FileObject,
1269 PCC_FILE_SIZES FileSizes,
1270 BOOLEAN PinAccess,
1271 PCACHE_MANAGER_CALLBACKS CallBacks,
1272 PVOID LazyWriterContext)
1273 /*
1274 * FUNCTION: Initializes a shared cache map for a file object
1275 */
1276 {
1277 PROS_SHARED_CACHE_MAP SharedCacheMap;
1278
1279 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1280 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1281 FileObject, SharedCacheMap);
1282
1283 KeAcquireGuardedMutex(&ViewLock);
1284 if (SharedCacheMap == NULL)
1285 {
1286 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1287 if (SharedCacheMap == NULL)
1288 {
1289 KeReleaseGuardedMutex(&ViewLock);
1290 return STATUS_INSUFFICIENT_RESOURCES;
1291 }
1292 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1293 ObReferenceObjectByPointer(FileObject,
1294 FILE_ALL_ACCESS,
1295 NULL,
1296 KernelMode);
1297 SharedCacheMap->FileObject = FileObject;
1298 SharedCacheMap->Callbacks = CallBacks;
1299 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1300 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1301 SharedCacheMap->FileSize = FileSizes->FileSize;
1302 SharedCacheMap->PinAccess = PinAccess;
1303 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1304 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1305 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1306 }
1307 if (FileObject->PrivateCacheMap == NULL)
1308 {
1309 FileObject->PrivateCacheMap = SharedCacheMap;
1310 SharedCacheMap->OpenCount++;
1311 }
1312 KeReleaseGuardedMutex(&ViewLock);
1313
1314 return STATUS_SUCCESS;
1315 }
1316
1317 /*
1318 * @implemented
1319 */
1320 PFILE_OBJECT
1321 NTAPI
1322 CcGetFileObjectFromSectionPtrs (
1323 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1324 {
1325 PROS_SHARED_CACHE_MAP SharedCacheMap;
1326
1327 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1328
1329 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1330 {
1331 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1332 ASSERT(SharedCacheMap);
1333 return SharedCacheMap->FileObject;
1334 }
1335 return NULL;
1336 }
1337
1338 VOID
1339 NTAPI
1340 CcShutdownLazyWriter (
1341 VOID)
1342 {
1343 /* Simply set the event, lazy writer will stop when it's done */
1344 KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
1345 }
1346
1347 BOOLEAN
1348 INIT_FUNCTION
1349 NTAPI
1350 CcInitView (
1351 VOID)
1352 {
1353 HANDLE LazyWriter;
1354 NTSTATUS Status;
1355 OBJECT_ATTRIBUTES ObjectAttributes;
1356
1357 DPRINT("CcInitView()\n");
1358
1359 InitializeListHead(&DirtyVacbListHead);
1360 InitializeListHead(&VacbLruListHead);
1361 KeInitializeGuardedMutex(&ViewLock);
1362 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1363 NULL,
1364 NULL,
1365 0,
1366 sizeof(INTERNAL_BCB),
1367 TAG_BCB,
1368 20);
1369 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1370 NULL,
1371 NULL,
1372 0,
1373 sizeof(ROS_SHARED_CACHE_MAP),
1374 TAG_SHARED_CACHE_MAP,
1375 20);
1376 ExInitializeNPagedLookasideList(&VacbLookasideList,
1377 NULL,
1378 NULL,
1379 0,
1380 sizeof(ROS_VACB),
1381 TAG_VACB,
1382 20);
1383
1384 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1385
1386 /* Initialize lazy writer events */
1387 KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
1388 KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);
1389
1390 /* Define lazy writer threshold, depending on system type */
1391 switch (MmQuerySystemSize())
1392 {
1393 case MmSmallSystem:
1394 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
1395 break;
1396
1397 case MmMediumSystem:
1398 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
1399 break;
1400
1401 case MmLargeSystem:
1402 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
1403 break;
1404 }
1405
1406 /* Start the lazy writer thread */
1407 InitializeObjectAttributes(&ObjectAttributes,
1408 NULL,
1409 OBJ_KERNEL_HANDLE,
1410 NULL,
1411 NULL);
1412 Status = PsCreateSystemThread(&LazyWriter,
1413 THREAD_ALL_ACCESS,
1414 &ObjectAttributes,
1415 NULL,
1416 NULL,
1417 CciLazyWriter,
1418 NULL);
1419 if (!NT_SUCCESS(Status))
1420 {
1421 return FALSE;
1422 }
1423
1424 /* Handle is not needed */
1425 ObCloseHandle(LazyWriter, KernelMode);
1426
1427 CcInitCacheZeroPage();
1428
1429 return TRUE;
1430 }
1431
1432 /* EOF */