1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
19 * (1) If caching for the FCB hasn't been initiated then do so by calling
20 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If the page is being read, or is not being completely written, and it
26 * is not up to date, then read its data from the underlying medium. If the read
27 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
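/* ILLUSTRATIVE SKETCH (comment only, not part of the build):
 *
 * A minimal sketch of steps (1)-(5) above for a hypothetical read path, using
 * the per-view helpers this file actually provides (CcRosInitializeFileCache,
 * CcRosRequestVacb and CcRosReleaseVacb roughly correspond to the
 * CcInitializeFileCache/CcRequestCachePage/CcReleaseCachePage names used in the
 * note). The Fcb, ReadFromDisk helper, Buffer and the offset/length variables
 * are made up for illustration only; error handling is reduced to the minimum.
 *
 *   SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
 *
 *   // (1) Set up caching for this file on first use
 *   if (SharedCacheMap == NULL)
 *   {
 *       Status = CcRosInitializeFileCache(FileObject, &FileSizes, FALSE,
 *                                         &Callbacks, Fcb);
 *       if (!NT_SUCCESS(Status)) return Status;
 *       SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
 *   }
 *
 *   // (2) Obtain the view for a VACB_MAPPING_GRANULARITY-aligned offset
 *   Status = CcRosRequestVacb(SharedCacheMap, ViewOffset,
 *                             &BaseAddress, &UpToDate, &Vacb);
 *   if (!NT_SUCCESS(Status)) return Status;
 *
 *   // (3) If the view is not up to date, fill it from the underlying medium
 *   if (!UpToDate)
 *   {
 *       Status = ReadFromDisk(Fcb, ViewOffset, BaseAddress);
 *       if (!NT_SUCCESS(Status))
 *       {
 *           CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
 *           return Status;
 *       }
 *   }
 *
 *   // (4) Copy the requested bytes out of the view
 *   RtlCopyMemory(Buffer, (PUCHAR)BaseAddress + OffsetInView, BytesToCopy);
 *
 *   // (5) Release the view, now marked valid
 *   CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
 */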
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
54 /* Counters:
55 * - Number of pages flushed by the lazy writer
56 * - Number of times the lazy writer ran
57 */
58 ULONG CcLazyWritePages = 0;
59 ULONG CcLazyWriteIos = 0;
60
61 /* Internal vars (MS):
62 * - Threshold above which the lazy writer will take action
63 * - Number of dirty pages
64 * - List for deferred writes
65 * - Spinlock protecting the deferred write list
66 * - List for "clean" shared cache maps
67 * - One-second delay for the lazy writer
68 */
69 ULONG CcDirtyPageThreshold = 0;
70 ULONG CcTotalDirtyPages = 0;
71 LIST_ENTRY CcDeferredWrites;
72 KSPIN_LOCK CcDeferredWriteSpinLock;
73 LIST_ENTRY CcCleanSharedCacheMapList;
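/* Negative value = relative time, in 100-ns units: -10,000,000 is a one-second wait */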
74 LARGE_INTEGER CcIdleDelay = {.QuadPart = (LONGLONG)-1*1000*1000*10};
75
76 /* Internal vars (ROS):
77 * - Event to notify the lazy writer to shut down
78 * - Event to inform watchers that the lazy writer is done for this loop
79 * - Lock for the CcCleanSharedCacheMapList list
80 */
81 KEVENT iLazyWriterShutdown;
82 KEVENT iLazyWriterNotify;
83 KSPIN_LOCK iSharedCacheMapLock;
84
85 #if DBG
86 static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
87 {
88 ++vacb->ReferenceCount;
89 if (vacb->SharedCacheMap->Trace)
90 {
91 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
92 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
93 }
94 }
95 static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
96 {
97 --vacb->ReferenceCount;
98 if (vacb->SharedCacheMap->Trace)
99 {
100 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
101 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
102 }
103 }
104 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
105 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
106 #else
107 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
108 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
109 #endif
110
111 NTSTATUS
112 CcRosInternalFreeVacb(PROS_VACB Vacb);
113
114
115 /* FUNCTIONS *****************************************************************/
116
117 VOID
118 NTAPI
119 CcRosTraceCacheMap (
120 PROS_SHARED_CACHE_MAP SharedCacheMap,
121 BOOLEAN Trace )
122 {
123 #if DBG
124 KIRQL oldirql;
125 PLIST_ENTRY current_entry;
126 PROS_VACB current;
127
128 if (!SharedCacheMap)
129 return;
130
131 SharedCacheMap->Trace = Trace;
132
133 if (Trace)
134 {
135 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
136
137 KeAcquireGuardedMutex(&ViewLock);
138 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
139
140 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
141 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
142 {
143 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
144 current_entry = current_entry->Flink;
145
146 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
147 current, current->ReferenceCount, current->Dirty, current->PageOut );
148 }
149 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
150 KeReleaseGuardedMutex(&ViewLock);
151 }
152 else
153 {
154 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
155 }
156
157 #else
158 UNREFERENCED_PARAMETER(SharedCacheMap);
159 UNREFERENCED_PARAMETER(Trace);
160 #endif
161 }
162
163 NTSTATUS
164 NTAPI
165 CcRosFlushVacb (
166 PROS_VACB Vacb)
167 {
168 NTSTATUS Status;
169 KIRQL oldIrql;
170
171 Status = CcWriteVirtualAddress(Vacb);
172 if (NT_SUCCESS(Status))
173 {
174 KeAcquireGuardedMutex(&ViewLock);
175 KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
176
177 Vacb->Dirty = FALSE;
178 RemoveEntryList(&Vacb->DirtyVacbListEntry);
179 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
180 Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
181 CcRosVacbDecRefCount(Vacb);
182
183 KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
184 KeReleaseGuardedMutex(&ViewLock);
185 }
186
187 return Status;
188 }
189
190 NTSTATUS
191 NTAPI
192 CcRosFlushDirtyPages (
193 ULONG Target,
194 PULONG Count,
195 BOOLEAN Wait,
196 BOOLEAN CalledFromLazy)
197 {
198 PLIST_ENTRY current_entry;
199 PROS_VACB current;
200 BOOLEAN Locked;
201 NTSTATUS Status;
202 LARGE_INTEGER ZeroTimeout;
203
204 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
205
206 (*Count) = 0;
207 ZeroTimeout.QuadPart = 0;
208
209 KeEnterCriticalRegion();
210 KeAcquireGuardedMutex(&ViewLock);
211
212 current_entry = DirtyVacbListHead.Flink;
213 if (current_entry == &DirtyVacbListHead)
214 {
215 DPRINT("No Dirty pages\n");
216 }
217
218 while ((current_entry != &DirtyVacbListHead) && (Target > 0))
219 {
220 current = CONTAINING_RECORD(current_entry,
221 ROS_VACB,
222 DirtyVacbListEntry);
223 current_entry = current_entry->Flink;
224
225 CcRosVacbIncRefCount(current);
226
227 /* When performing lazy write, don't handle temporary files */
228 if (CalledFromLazy &&
229 BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
230 {
231 CcRosVacbDecRefCount(current);
232 continue;
233 }
234
235 Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
236 current->SharedCacheMap->LazyWriteContext, Wait);
237 if (!Locked)
238 {
239 CcRosVacbDecRefCount(current);
240 continue;
241 }
242
243 Status = CcRosAcquireVacbLock(current,
244 Wait ? NULL : &ZeroTimeout);
245 if (Status != STATUS_SUCCESS)
246 {
247 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
248 current->SharedCacheMap->LazyWriteContext);
249 CcRosVacbDecRefCount(current);
250 continue;
251 }
252
253 ASSERT(current->Dirty);
254
255 /* One reference is added above; skip the VACB if it is still in use elsewhere */
256 if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
257 (current->ReferenceCount > 3 && current->PinCount > 1))
258 {
259 CcRosReleaseVacbLock(current);
260 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
261 current->SharedCacheMap->LazyWriteContext);
262 CcRosVacbDecRefCount(current);
263 continue;
264 }
265
266 KeReleaseGuardedMutex(&ViewLock);
267
268 Status = CcRosFlushVacb(current);
269
270 CcRosReleaseVacbLock(current);
271 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
272 current->SharedCacheMap->LazyWriteContext);
273
274 KeAcquireGuardedMutex(&ViewLock);
275 CcRosVacbDecRefCount(current);
276
277 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
278 (Status != STATUS_MEDIA_WRITE_PROTECTED))
279 {
280 DPRINT1("CC: Failed to flush VACB.\n");
281 }
282 else
283 {
284 ULONG PagesFreed;
285
286 /* How many pages did we free? */
287 PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
288 (*Count) += PagesFreed;
289
290 /* Make sure we don't overflow target! */
291 if (Target < PagesFreed)
292 {
293 /* If we would have, jump to zero directly */
294 Target = 0;
295 }
296 else
297 {
298 Target -= PagesFreed;
299 }
300 }
301
302 current_entry = DirtyVacbListHead.Flink;
303 }
304
305 KeReleaseGuardedMutex(&ViewLock);
306 KeLeaveCriticalRegion();
307
308 DPRINT("CcRosFlushDirtyPages() finished\n");
309 return STATUS_SUCCESS;
310 }
311
312 /* FIXME: Someday this could somewhat implement write-behind/read-ahead */
313 VOID
314 NTAPI
315 CciLazyWriter(PVOID Unused)
316 {
317 while (TRUE)
318 {
319 NTSTATUS Status;
320 PLIST_ENTRY ListEntry;
321 ULONG Target, Count = 0;
322
323 /* Once per second, or until we have to stop */
324 Status = KeWaitForSingleObject(&iLazyWriterShutdown,
325 Executive,
326 KernelMode,
327 FALSE,
328 &CcIdleDelay);
329
330 /* If we succeed, we have to stop running! */
331 if (Status == STATUS_SUCCESS)
332 {
333 break;
334 }
335
336 /* We're not sleeping anymore */
337 KeClearEvent(&iLazyWriterNotify);
338
339 /* Our target is one-eighth of the dirty pages */
340 Target = CcTotalDirtyPages / 8;
341 if (Target != 0)
342 {
343 /* Flush! */
344 DPRINT("Lazy writer starting (%d)\n", Target);
345 CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);
346
347 /* And update stats */
348 CcLazyWritePages += Count;
349 ++CcLazyWriteIos;
350 DPRINT("Lazy writer done (%d)\n", Count);
351 }
352
353 /* Inform people waiting on us that we're done */
354 KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
355
356 /* Likely not optimal, but let's handle one deferred write now! */
357 ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
358 if (ListEntry != NULL)
359 {
360 PROS_DEFERRED_WRITE_CONTEXT Context;
361
362 /* Extract the context */
363 Context = CONTAINING_RECORD(ListEntry, ROS_DEFERRED_WRITE_CONTEXT, CcDeferredWritesEntry);
364
365 /* Can we write now? */
366 if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, Context->Retrying))
367 {
368 /* Yes! Do it, and destroy the associated context */
369 Context->PostRoutine(Context->Context1, Context->Context2);
370 ExFreePoolWithTag(Context, 'CcDw');
371 }
372 else
373 {
374 /* Otherwise, requeue it at the tail so that it doesn't block the others.
375 * This could clearly be improved, but given the poor algorithm used for now,
376 * it's better than nothing!
377 */
378 ExInterlockedInsertTailList(&CcDeferredWrites,
379 &Context->CcDeferredWritesEntry,
380 &CcDeferredWriteSpinLock);
381 }
382 }
383 }
384 }
385
386 NTSTATUS
387 CcRosTrimCache (
388 ULONG Target,
389 ULONG Priority,
390 PULONG NrFreed)
391 /*
392 * FUNCTION: Try to free some memory from the file cache.
393 * ARGUMENTS:
394 * Target - The number of pages to be freed.
395 * Priority - The priority of the free operation (currently unused).
396 * NrFreed - Points to a variable where the number of pages
397 * actually freed is returned.
398 */
399 {
400 PLIST_ENTRY current_entry;
401 PROS_VACB current;
402 ULONG PagesFreed;
403 KIRQL oldIrql;
404 LIST_ENTRY FreeList;
405 PFN_NUMBER Page;
406 ULONG i;
407 BOOLEAN FlushedPages = FALSE;
408
409 DPRINT("CcRosTrimCache(Target %lu)\n", Target);
410
411 InitializeListHead(&FreeList);
412
413 *NrFreed = 0;
414
415 retry:
416 KeAcquireGuardedMutex(&ViewLock);
417
418 current_entry = VacbLruListHead.Flink;
419 while (current_entry != &VacbLruListHead)
420 {
421 current = CONTAINING_RECORD(current_entry,
422 ROS_VACB,
423 VacbLruListEntry);
424 current_entry = current_entry->Flink;
425
426 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
427
428 /* Reference the VACB */
429 CcRosVacbIncRefCount(current);
430
431 /* Check if it's mapped and not dirty */
432 if (current->MappedCount > 0 && !current->Dirty)
433 {
434 /* We have to break these locks because Cc sucks */
435 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
436 KeReleaseGuardedMutex(&ViewLock);
437
438 /* Page out the VACB */
439 for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
440 {
441 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
442
443 MmPageOutPhysicalAddress(Page);
444 }
445
446 /* Reacquire the locks */
447 KeAcquireGuardedMutex(&ViewLock);
448 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
449 }
450
451 /* Dereference the VACB */
452 CcRosVacbDecRefCount(current);
453
454 /* Check if we can free this entry now */
455 if (current->ReferenceCount == 0)
456 {
457 ASSERT(!current->Dirty);
458 ASSERT(!current->MappedCount);
459
460 RemoveEntryList(&current->CacheMapVacbListEntry);
461 RemoveEntryList(&current->VacbLruListEntry);
462 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
463
464 /* Calculate how many pages we freed for Mm */
465 PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
466 Target -= PagesFreed;
467 (*NrFreed) += PagesFreed;
468 }
469
470 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
471 }
472
473 KeReleaseGuardedMutex(&ViewLock);
474
475 /* Try flushing pages if we haven't met our target */
476 if ((Target > 0) && !FlushedPages)
477 {
478 /* Flush dirty pages to disk */
479 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
480 FlushedPages = TRUE;
481
482 /* We can only swap as many pages as we flushed */
483 if (PagesFreed < Target) Target = PagesFreed;
484
485 /* Check if we flushed anything */
486 if (PagesFreed != 0)
487 {
488 /* Try again after flushing dirty pages */
489 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
490 goto retry;
491 }
492 }
493
494 while (!IsListEmpty(&FreeList))
495 {
496 current_entry = RemoveHeadList(&FreeList);
497 current = CONTAINING_RECORD(current_entry,
498 ROS_VACB,
499 CacheMapVacbListEntry);
500 CcRosInternalFreeVacb(current);
501 }
502
503 DPRINT("Evicted %lu cache pages\n", (*NrFreed));
504
505 return STATUS_SUCCESS;
506 }
507
508 NTSTATUS
509 NTAPI
510 CcRosReleaseVacb (
511 PROS_SHARED_CACHE_MAP SharedCacheMap,
512 PROS_VACB Vacb,
513 BOOLEAN Valid,
514 BOOLEAN Dirty,
515 BOOLEAN Mapped)
516 {
517 BOOLEAN WasDirty;
518
519 ASSERT(SharedCacheMap);
520
521 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
522 SharedCacheMap, Vacb, Valid);
523
524 Vacb->Valid = Valid;
525
526 WasDirty = FALSE;
527 if (Dirty)
528 {
529 if (!Vacb->Dirty)
530 {
531 CcRosMarkDirtyVacb(Vacb);
532 }
533 else
534 {
535 WasDirty = TRUE;
536 }
537 }
538
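/* Drop the caller's reference, but keep an extra one for the first mapping and
 * for a view that just became dirty, so it stays alive while mapped or dirty */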
539 if (Mapped)
540 {
541 Vacb->MappedCount++;
542 }
543 CcRosVacbDecRefCount(Vacb);
544 if (Mapped && (Vacb->MappedCount == 1))
545 {
546 CcRosVacbIncRefCount(Vacb);
547 }
548 if (!WasDirty && Vacb->Dirty)
549 {
550 CcRosVacbIncRefCount(Vacb);
551 }
552
553 CcRosReleaseVacbLock(Vacb);
554
555 return STATUS_SUCCESS;
556 }
557
558 /* Returns with VACB Lock Held! */
559 PROS_VACB
560 NTAPI
561 CcRosLookupVacb (
562 PROS_SHARED_CACHE_MAP SharedCacheMap,
563 LONGLONG FileOffset)
564 {
565 PLIST_ENTRY current_entry;
566 PROS_VACB current;
567 KIRQL oldIrql;
568
569 ASSERT(SharedCacheMap);
570
571 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
572 SharedCacheMap, FileOffset);
573
574 KeAcquireGuardedMutex(&ViewLock);
575 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
576
577 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
578 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
579 {
580 current = CONTAINING_RECORD(current_entry,
581 ROS_VACB,
582 CacheMapVacbListEntry);
583 if (IsPointInRange(current->FileOffset.QuadPart,
584 VACB_MAPPING_GRANULARITY,
585 FileOffset))
586 {
587 CcRosVacbIncRefCount(current);
588 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
589 KeReleaseGuardedMutex(&ViewLock);
590 CcRosAcquireVacbLock(current, NULL);
591 return current;
592 }
593 if (current->FileOffset.QuadPart > FileOffset)
594 break;
595 current_entry = current_entry->Flink;
596 }
597
598 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
599 KeReleaseGuardedMutex(&ViewLock);
600
601 return NULL;
602 }
603
604 VOID
605 NTAPI
606 CcRosMarkDirtyVacb (
607 PROS_VACB Vacb)
608 {
609 KIRQL oldIrql;
610 PROS_SHARED_CACHE_MAP SharedCacheMap;
611
612 SharedCacheMap = Vacb->SharedCacheMap;
613
614 KeAcquireGuardedMutex(&ViewLock);
615 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
616
617 if (!Vacb->Dirty)
618 {
619 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
620 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
621 Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
622 }
623 else
624 {
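/* Already dirty: drop the caller's extra reference, the dirty reference already exists */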
625 CcRosVacbDecRefCount(Vacb);
626 }
627
628 /* Move to the tail of the LRU list */
629 RemoveEntryList(&Vacb->VacbLruListEntry);
630 InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
631
632 Vacb->Dirty = TRUE;
633
634 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
635 KeReleaseGuardedMutex(&ViewLock);
636 }
637
638 NTSTATUS
639 NTAPI
640 CcRosMarkDirtyFile (
641 PROS_SHARED_CACHE_MAP SharedCacheMap,
642 LONGLONG FileOffset)
643 {
644 PROS_VACB Vacb;
645
646 ASSERT(SharedCacheMap);
647
648 DPRINT("CcRosMarkDirtyFile(SharedCacheMap 0x%p, FileOffset %I64u)\n",
649 SharedCacheMap, FileOffset);
650
651 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
652 if (Vacb == NULL)
653 {
654 KeBugCheck(CACHE_MANAGER);
655 }
656
657 CcRosMarkDirtyVacb(Vacb);
658
659 CcRosReleaseVacbLock(Vacb);
660
661 return STATUS_SUCCESS;
662 }
663
664 NTSTATUS
665 NTAPI
666 CcRosUnmapVacb (
667 PROS_SHARED_CACHE_MAP SharedCacheMap,
668 LONGLONG FileOffset,
669 BOOLEAN NowDirty)
670 {
671 PROS_VACB Vacb;
672 BOOLEAN WasDirty;
673
674 ASSERT(SharedCacheMap);
675
676 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
677 SharedCacheMap, FileOffset, NowDirty);
678
679 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
680 if (Vacb == NULL)
681 {
682 return STATUS_UNSUCCESSFUL;
683 }
684
685 WasDirty = FALSE;
686 if (NowDirty)
687 {
688 if (!Vacb->Dirty)
689 {
690 CcRosMarkDirtyVacb(Vacb);
691 }
692 else
693 {
694 WasDirty = TRUE;
695 }
696 }
697
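/* Drop the lookup reference; add one if the view just became dirty, and release
 * the mapping reference once the last mapping is gone */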
698 Vacb->MappedCount--;
699
700 CcRosVacbDecRefCount(Vacb);
701 if (!WasDirty && NowDirty)
702 {
703 CcRosVacbIncRefCount(Vacb);
704 }
705 if (Vacb->MappedCount == 0)
706 {
707 CcRosVacbDecRefCount(Vacb);
708 }
709
710 CcRosReleaseVacbLock(Vacb);
711
712 return STATUS_SUCCESS;
713 }
714
715 static
716 NTSTATUS
717 CcRosMapVacb(
718 PROS_VACB Vacb)
719 {
720 ULONG i;
721 NTSTATUS Status;
722 ULONG_PTR NumberOfPages;
723
724 /* Create a memory area. */
725 MmLockAddressSpace(MmGetKernelAddressSpace());
726 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
727 0, // nothing checks for VACB memory areas, so set to 0
728 &Vacb->BaseAddress,
729 VACB_MAPPING_GRANULARITY,
730 PAGE_READWRITE,
731 (PMEMORY_AREA*)&Vacb->MemoryArea,
732 0,
733 PAGE_SIZE);
734 MmUnlockAddressSpace(MmGetKernelAddressSpace());
735 if (!NT_SUCCESS(Status))
736 {
737 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
738 return Status;
739 }
740
741 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
742 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
743
744 /* Create a virtual mapping for this memory area */
745 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
746 for (i = 0; i < NumberOfPages; i++)
747 {
748 PFN_NUMBER PageFrameNumber;
749
750 MI_SET_USAGE(MI_USAGE_CACHE);
751 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
752 if (PageFrameNumber == 0)
753 {
754 DPRINT1("Unable to allocate page\n");
755 KeBugCheck(MEMORY_MANAGEMENT);
756 }
757
758 Status = MmCreateVirtualMapping(NULL,
759 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
760 PAGE_READWRITE,
761 &PageFrameNumber,
762 1);
763 if (!NT_SUCCESS(Status))
764 {
765 DPRINT1("Unable to create virtual mapping\n");
766 KeBugCheck(MEMORY_MANAGEMENT);
767 }
768 }
769
770 return STATUS_SUCCESS;
771 }
772
773 static
774 NTSTATUS
775 CcRosCreateVacb (
776 PROS_SHARED_CACHE_MAP SharedCacheMap,
777 LONGLONG FileOffset,
778 PROS_VACB *Vacb)
779 {
780 PROS_VACB current;
781 PROS_VACB previous;
782 PLIST_ENTRY current_entry;
783 NTSTATUS Status;
784 KIRQL oldIrql;
785
786 ASSERT(SharedCacheMap);
787
788 DPRINT("CcRosCreateVacb()\n");
789
790 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
791 {
792 *Vacb = NULL;
793 return STATUS_INVALID_PARAMETER;
794 }
795
796 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
797 current->BaseAddress = NULL;
798 current->Valid = FALSE;
799 current->Dirty = FALSE;
800 current->PageOut = FALSE;
801 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
802 current->SharedCacheMap = SharedCacheMap;
803 #if DBG
804 if (SharedCacheMap->Trace)
805 {
806 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
807 }
808 #endif
809 current->MappedCount = 0;
810 current->DirtyVacbListEntry.Flink = NULL;
811 current->DirtyVacbListEntry.Blink = NULL;
812 current->ReferenceCount = 1;
813 current->PinCount = 0;
814 KeInitializeMutex(&current->Mutex, 0);
815 CcRosAcquireVacbLock(current, NULL);
816 KeAcquireGuardedMutex(&ViewLock);
817
818 *Vacb = current;
819 /* There is a window between the call to CcRosLookupVacb
820 * and CcRosCreateVacb. We must check if a VACB for the
821 * file offset already exists. If there is one, we release
822 * our newly created VACB and return the existing one.
823 */
824 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
825 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
826 previous = NULL;
827 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
828 {
829 current = CONTAINING_RECORD(current_entry,
830 ROS_VACB,
831 CacheMapVacbListEntry);
832 if (IsPointInRange(current->FileOffset.QuadPart,
833 VACB_MAPPING_GRANULARITY,
834 FileOffset))
835 {
836 CcRosVacbIncRefCount(current);
837 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
838 #if DBG
839 if (SharedCacheMap->Trace)
840 {
841 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
842 SharedCacheMap,
843 (*Vacb),
844 current);
845 }
846 #endif
847 CcRosReleaseVacbLock(*Vacb);
848 KeReleaseGuardedMutex(&ViewLock);
849 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
850 *Vacb = current;
851 CcRosAcquireVacbLock(current, NULL);
852 return STATUS_SUCCESS;
853 }
854 if (current->FileOffset.QuadPart < FileOffset)
855 {
856 ASSERT(previous == NULL ||
857 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
858 previous = current;
859 }
860 if (current->FileOffset.QuadPart > FileOffset)
861 break;
862 current_entry = current_entry->Flink;
863 }
864 /* There was no existing VACB: insert the new one at its place in the offset-sorted list */
865 current = *Vacb;
866 if (previous)
867 {
868 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
869 }
870 else
871 {
872 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
873 }
874 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
875 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
876 KeReleaseGuardedMutex(&ViewLock);
877
878 MI_SET_USAGE(MI_USAGE_CACHE);
879 #if MI_TRACE_PFNS
880 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
881 {
882 PWCHAR pos;
883 ULONG len = 0;
884 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
885 if (pos)
886 {
887 len = wcslen(pos) * sizeof(WCHAR);
888 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
889 }
890 else
891 {
892 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
893 }
894 }
895 #endif
896
897 Status = CcRosMapVacb(current);
898 if (!NT_SUCCESS(Status))
899 {
900 RemoveEntryList(&current->CacheMapVacbListEntry);
901 RemoveEntryList(&current->VacbLruListEntry);
902 CcRosReleaseVacbLock(current);
903 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
904 }
905
906 return Status;
907 }
908
909 NTSTATUS
910 NTAPI
911 CcRosGetVacb (
912 PROS_SHARED_CACHE_MAP SharedCacheMap,
913 LONGLONG FileOffset,
914 PLONGLONG BaseOffset,
915 PVOID* BaseAddress,
916 PBOOLEAN UptoDate,
917 PROS_VACB *Vacb)
918 {
919 PROS_VACB current;
920 NTSTATUS Status;
921
922 ASSERT(SharedCacheMap);
923
924 DPRINT("CcRosGetVacb()\n");
925
926 /*
927 * Look for a VACB already mapping the same data.
928 */
929 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
930 if (current == NULL)
931 {
932 /*
933 * Otherwise create a new VACB.
934 */
935 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
936 if (!NT_SUCCESS(Status))
937 {
938 return Status;
939 }
940 }
941
942 KeAcquireGuardedMutex(&ViewLock);
943
944 /* Move to the tail of the LRU list */
945 RemoveEntryList(&current->VacbLruListEntry);
946 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
947
948 KeReleaseGuardedMutex(&ViewLock);
949
950 /*
951 * Return information about the VACB to the caller.
952 */
953 *UptoDate = current->Valid;
954 *BaseAddress = current->BaseAddress;
955 DPRINT("*BaseAddress %p\n", *BaseAddress);
956 *Vacb = current;
957 *BaseOffset = current->FileOffset.QuadPart;
958 return STATUS_SUCCESS;
959 }
960
961 NTSTATUS
962 NTAPI
963 CcRosRequestVacb (
964 PROS_SHARED_CACHE_MAP SharedCacheMap,
965 LONGLONG FileOffset,
966 PVOID* BaseAddress,
967 PBOOLEAN UptoDate,
968 PROS_VACB *Vacb)
969 /*
970 * FUNCTION: Request a page mapping for a shared cache map
971 */
972 {
973 LONGLONG BaseOffset;
974
975 ASSERT(SharedCacheMap);
976
977 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
978 {
979 DPRINT1("Bad fileoffset %I64x should be multiple of %x\n",
980 FileOffset, VACB_MAPPING_GRANULARITY);
981 KeBugCheck(CACHE_MANAGER);
982 }
983
984 return CcRosGetVacb(SharedCacheMap,
985 FileOffset,
986 &BaseOffset,
987 BaseAddress,
988 UptoDate,
989 Vacb);
990 }
991
992 static
993 VOID
994 CcFreeCachePage (
995 PVOID Context,
996 MEMORY_AREA* MemoryArea,
997 PVOID Address,
998 PFN_NUMBER Page,
999 SWAPENTRY SwapEntry,
1000 BOOLEAN Dirty)
1001 {
1002 ASSERT(SwapEntry == 0);
1003 if (Page != 0)
1004 {
1005 ASSERT(MmGetReferenceCountPage(Page) == 1);
1006 MmReleasePageMemoryConsumer(MC_CACHE, Page);
1007 }
1008 }
1009
1010 NTSTATUS
1011 CcRosInternalFreeVacb (
1012 PROS_VACB Vacb)
1013 /*
1014 * FUNCTION: Releases a VACB associated with a shared cache map
1015 */
1016 {
1017 DPRINT("Freeing VACB 0x%p\n", Vacb);
1018 #if DBG
1019 if (Vacb->SharedCacheMap->Trace)
1020 {
1021 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
1022 }
1023 #endif
1024
1025 MmLockAddressSpace(MmGetKernelAddressSpace());
1026 MmFreeMemoryArea(MmGetKernelAddressSpace(),
1027 Vacb->MemoryArea,
1028 CcFreeCachePage,
1029 NULL);
1030 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1031
1032 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
1033 return STATUS_SUCCESS;
1034 }
1035
1036 /*
1037 * @implemented
1038 */
1039 VOID
1040 NTAPI
1041 CcFlushCache (
1042 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
1043 IN PLARGE_INTEGER FileOffset OPTIONAL,
1044 IN ULONG Length,
1045 OUT PIO_STATUS_BLOCK IoStatus)
1046 {
1047 PROS_SHARED_CACHE_MAP SharedCacheMap;
1048 LARGE_INTEGER Offset;
1049 LONGLONG RemainingLength;
1050 PROS_VACB current;
1051 NTSTATUS Status;
1052 KIRQL oldIrql;
1053
1054 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1055 SectionObjectPointers, FileOffset, Length);
1056
1057 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1058 SectionObjectPointers, FileOffset, Length, IoStatus);
1059
1060 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1061 {
1062 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1063 ASSERT(SharedCacheMap);
1064 if (FileOffset)
1065 {
1066 Offset = *FileOffset;
1067 RemainingLength = Length;
1068 }
1069 else
1070 {
1071 Offset.QuadPart = 0;
1072 RemainingLength = SharedCacheMap->FileSize.QuadPart;
1073 }
1074
1075 if (IoStatus)
1076 {
1077 IoStatus->Status = STATUS_SUCCESS;
1078 IoStatus->Information = 0;
1079 }
1080
1081 while (RemainingLength > 0)
1082 {
1083 current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
1084 if (current != NULL)
1085 {
1086 if (current->Dirty)
1087 {
1088 Status = CcRosFlushVacb(current);
1089 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1090 {
1091 IoStatus->Status = Status;
1092 }
1093 }
1094
1095 CcRosReleaseVacbLock(current);
1096
1097 KeAcquireGuardedMutex(&ViewLock);
1098 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1099 CcRosVacbDecRefCount(current);
1100 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1101 KeReleaseGuardedMutex(&ViewLock);
1102 }
1103
1104 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
1105 RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
1106 }
1107 }
1108 else
1109 {
1110 if (IoStatus)
1111 {
1112 IoStatus->Status = STATUS_INVALID_PARAMETER;
1113 }
1114 }
1115 }
1116
1117 NTSTATUS
1118 NTAPI
1119 CcRosDeleteFileCache (
1120 PFILE_OBJECT FileObject,
1121 PROS_SHARED_CACHE_MAP SharedCacheMap)
1122 /*
1123 * FUNCTION: Releases the shared cache map associated with a file object
1124 */
1125 {
1126 PLIST_ENTRY current_entry;
1127 PROS_VACB current;
1128 LIST_ENTRY FreeList;
1129 KIRQL oldIrql;
1130
1131 ASSERT(SharedCacheMap);
1132
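/* Take an extra open reference so the shared cache map survives the flush below,
 * which runs with the view lock released */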
1133 SharedCacheMap->OpenCount++;
1134 KeReleaseGuardedMutex(&ViewLock);
1135
1136 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1137
1138 KeAcquireGuardedMutex(&ViewLock);
1139 SharedCacheMap->OpenCount--;
1140 if (SharedCacheMap->OpenCount == 0)
1141 {
1142 KIRQL OldIrql;
1143
1144 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1145
1146 /*
1147 * Release all VACBs
1148 */
1149 InitializeListHead(&FreeList);
1150 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1151 while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
1152 {
1153 current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
1154 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1155
1156 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1157 CcRosAcquireVacbLock(current, NULL);
1158 RemoveEntryList(&current->VacbLruListEntry);
1159 if (current->Dirty)
1160 {
1161 RemoveEntryList(&current->DirtyVacbListEntry);
1162 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
1163 current->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
1164 DPRINT1("Freeing dirty VACB\n");
1165 }
1166 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
1167 CcRosReleaseVacbLock(current);
1168
1169 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1170 }
1171 #if DBG
1172 SharedCacheMap->Trace = FALSE;
1173 #endif
1174 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1175
1176 KeReleaseGuardedMutex(&ViewLock);
1177 ObDereferenceObject(SharedCacheMap->FileObject);
1178
1179 while (!IsListEmpty(&FreeList))
1180 {
1181 current_entry = RemoveTailList(&FreeList);
1182 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1183 CcRosInternalFreeVacb(current);
1184 }
1185
1186 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1187 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
1188 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1189
1190 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1191 KeAcquireGuardedMutex(&ViewLock);
1192 }
1193 return STATUS_SUCCESS;
1194 }
1195
1196 VOID
1197 NTAPI
1198 CcRosReferenceCache (
1199 PFILE_OBJECT FileObject)
1200 {
1201 PROS_SHARED_CACHE_MAP SharedCacheMap;
1202 KeAcquireGuardedMutex(&ViewLock);
1203 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1204 ASSERT(SharedCacheMap);
1205 ASSERT(SharedCacheMap->OpenCount != 0);
1206 SharedCacheMap->OpenCount++;
1207 KeReleaseGuardedMutex(&ViewLock);
1208 }
1209
1210 VOID
1211 NTAPI
1212 CcRosRemoveIfClosed (
1213 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1214 {
1215 PROS_SHARED_CACHE_MAP SharedCacheMap;
1216 DPRINT("CcRosRemoveIfClosed()\n");
1217 KeAcquireGuardedMutex(&ViewLock);
1218 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1219 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1220 {
1221 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1222 }
1223 KeReleaseGuardedMutex(&ViewLock);
1224 }
1225
1226
1227 VOID
1228 NTAPI
1229 CcRosDereferenceCache (
1230 PFILE_OBJECT FileObject)
1231 {
1232 PROS_SHARED_CACHE_MAP SharedCacheMap;
1233 KeAcquireGuardedMutex(&ViewLock);
1234 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1235 ASSERT(SharedCacheMap);
1236 if (SharedCacheMap->OpenCount > 0)
1237 {
1238 SharedCacheMap->OpenCount--;
1239 if (SharedCacheMap->OpenCount == 0)
1240 {
1241 MmFreeSectionSegments(SharedCacheMap->FileObject);
1242 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1243 }
1244 }
1245 KeReleaseGuardedMutex(&ViewLock);
1246 }
1247
1248 NTSTATUS
1249 NTAPI
1250 CcRosReleaseFileCache (
1251 PFILE_OBJECT FileObject)
1252 /*
1253 * FUNCTION: Called by the file system when a handle to a file object
1254 * has been closed.
1255 */
1256 {
1257 PROS_SHARED_CACHE_MAP SharedCacheMap;
1258
1259 KeAcquireGuardedMutex(&ViewLock);
1260
1261 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1262 {
1263 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1264 if (FileObject->PrivateCacheMap != NULL)
1265 {
1266 FileObject->PrivateCacheMap = NULL;
1267 if (SharedCacheMap->OpenCount > 0)
1268 {
1269 SharedCacheMap->OpenCount--;
1270 if (SharedCacheMap->OpenCount == 0)
1271 {
1272 MmFreeSectionSegments(SharedCacheMap->FileObject);
1273 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1274 }
1275 }
1276 }
1277 }
1278 KeReleaseGuardedMutex(&ViewLock);
1279 return STATUS_SUCCESS;
1280 }
1281
1282 NTSTATUS
1283 NTAPI
1284 CcTryToInitializeFileCache (
1285 PFILE_OBJECT FileObject)
1286 {
1287 PROS_SHARED_CACHE_MAP SharedCacheMap;
1288 NTSTATUS Status;
1289
1290 KeAcquireGuardedMutex(&ViewLock);
1291
1292 ASSERT(FileObject->SectionObjectPointer);
1293 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1294 if (SharedCacheMap == NULL)
1295 {
1296 Status = STATUS_UNSUCCESSFUL;
1297 }
1298 else
1299 {
1300 if (FileObject->PrivateCacheMap == NULL)
1301 {
1302 FileObject->PrivateCacheMap = SharedCacheMap;
1303 SharedCacheMap->OpenCount++;
1304 }
1305 Status = STATUS_SUCCESS;
1306 }
1307 KeReleaseGuardedMutex(&ViewLock);
1308
1309 return Status;
1310 }
1311
1312
1313 NTSTATUS
1314 NTAPI
1315 CcRosInitializeFileCache (
1316 PFILE_OBJECT FileObject,
1317 PCC_FILE_SIZES FileSizes,
1318 BOOLEAN PinAccess,
1319 PCACHE_MANAGER_CALLBACKS CallBacks,
1320 PVOID LazyWriterContext)
1321 /*
1322 * FUNCTION: Initializes a shared cache map for a file object
1323 */
1324 {
1325 PROS_SHARED_CACHE_MAP SharedCacheMap;
1326
1327 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1328 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1329 FileObject, SharedCacheMap);
1330
1331 KeAcquireGuardedMutex(&ViewLock);
1332 if (SharedCacheMap == NULL)
1333 {
1334 KIRQL OldIrql;
1335
1336 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1337 if (SharedCacheMap == NULL)
1338 {
1339 KeReleaseGuardedMutex(&ViewLock);
1340 return STATUS_INSUFFICIENT_RESOURCES;
1341 }
1342 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1343 ObReferenceObjectByPointer(FileObject,
1344 FILE_ALL_ACCESS,
1345 NULL,
1346 KernelMode);
1347 SharedCacheMap->FileObject = FileObject;
1348 SharedCacheMap->Callbacks = CallBacks;
1349 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1350 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1351 SharedCacheMap->FileSize = FileSizes->FileSize;
1352 SharedCacheMap->PinAccess = PinAccess;
1353 SharedCacheMap->DirtyPageThreshold = 0;
1354 SharedCacheMap->DirtyPages = 0;
1355 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1356 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1357 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1358
1359 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1360 InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
1361 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1362 }
1363 if (FileObject->PrivateCacheMap == NULL)
1364 {
1365 FileObject->PrivateCacheMap = SharedCacheMap;
1366 SharedCacheMap->OpenCount++;
1367 }
1368 KeReleaseGuardedMutex(&ViewLock);
1369
1370 return STATUS_SUCCESS;
1371 }
1372
1373 /*
1374 * @implemented
1375 */
1376 PFILE_OBJECT
1377 NTAPI
1378 CcGetFileObjectFromSectionPtrs (
1379 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1380 {
1381 PROS_SHARED_CACHE_MAP SharedCacheMap;
1382
1383 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1384
1385 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1386 {
1387 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1388 ASSERT(SharedCacheMap);
1389 return SharedCacheMap->FileObject;
1390 }
1391 return NULL;
1392 }
1393
1394 VOID
1395 NTAPI
1396 CcShutdownLazyWriter (
1397 VOID)
1398 {
1399 /* Simply set the event, lazy writer will stop when it's done */
1400 KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
1401 }
1402
1403 BOOLEAN
1404 INIT_FUNCTION
1405 NTAPI
1406 CcInitView (
1407 VOID)
1408 {
1409 HANDLE LazyWriter;
1410 NTSTATUS Status;
1411 KPRIORITY Priority;
1412 OBJECT_ATTRIBUTES ObjectAttributes;
1413
1414 DPRINT("CcInitView()\n");
1415
1416 InitializeListHead(&DirtyVacbListHead);
1417 InitializeListHead(&VacbLruListHead);
1418 InitializeListHead(&CcDeferredWrites);
1419 InitializeListHead(&CcCleanSharedCacheMapList);
1420 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1421 KeInitializeSpinLock(&iSharedCacheMapLock);
1422 KeInitializeGuardedMutex(&ViewLock);
1423 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1424 NULL,
1425 NULL,
1426 0,
1427 sizeof(INTERNAL_BCB),
1428 TAG_BCB,
1429 20);
1430 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1431 NULL,
1432 NULL,
1433 0,
1434 sizeof(ROS_SHARED_CACHE_MAP),
1435 TAG_SHARED_CACHE_MAP,
1436 20);
1437 ExInitializeNPagedLookasideList(&VacbLookasideList,
1438 NULL,
1439 NULL,
1440 0,
1441 sizeof(ROS_VACB),
1442 TAG_VACB,
1443 20);
1444
1445 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1446
1447 /* Initialize lazy writer events */
1448 KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
1449 KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);
1450
1451 /* Define lazy writer threshold, depending on system type */
1452 switch (MmQuerySystemSize())
1453 {
1454 case MmSmallSystem:
1455 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
1456 break;
1457
1458 case MmMediumSystem:
1459 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
1460 break;
1461
1462 case MmLargeSystem:
1463 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
1464 break;
1465 }
1466
1467 /* Start the lazy writer thread */
1468 InitializeObjectAttributes(&ObjectAttributes,
1469 NULL,
1470 OBJ_KERNEL_HANDLE,
1471 NULL,
1472 NULL);
1473 Status = PsCreateSystemThread(&LazyWriter,
1474 THREAD_ALL_ACCESS,
1475 &ObjectAttributes,
1476 NULL,
1477 NULL,
1478 CciLazyWriter,
1479 NULL);
1480 if (!NT_SUCCESS(Status))
1481 {
1482 return FALSE;
1483 }
1484
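/* Run the lazy writer in the realtime priority range (16-31) */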
1485 Priority = 27;
1486 Status = NtSetInformationThread(LazyWriter,
1487 ThreadPriority,
1488 &Priority,
1489 sizeof(Priority));
1490 ASSERT(NT_SUCCESS(Status));
1491
1492 /* Handle is not needed */
1493 ObCloseHandle(LazyWriter, KernelMode);
1494
1495 CcInitCacheZeroPage();
1496
1497 return TRUE;
1498 }
1499
1500 #if DBG && defined(KDBG)
1501 BOOLEAN
1502 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1503 {
1504 PLIST_ENTRY ListEntry;
1505 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1506
1507 KdbpPrint(" Usage Summary (in kb)\n");
1508 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1509 /* No need to lock the spin lock here, we're in DBG */
1510 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1511 ListEntry != &CcCleanSharedCacheMapList;
1512 ListEntry = ListEntry->Flink)
1513 {
1514 PLIST_ENTRY Vacbs;
1515 ULONG Valid = 0, Dirty = 0;
1516 PROS_SHARED_CACHE_MAP SharedCacheMap;
1517 PUNICODE_STRING FileName;
1518
1519 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1520
1521 /* Dirty size */
1522 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1523
1524 /* First, sum up the valid size of all the associated VACBs */
1525 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1526 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1527 Vacbs = Vacbs->Flink)
1528 {
1529 PROS_VACB Vacb;
1530
1531 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1532 if (Vacb->Valid)
1533 {
1534 Valid += VACB_MAPPING_GRANULARITY / 1024;
1535 }
1536 }
1537
1538 /* Setup name */
1539 if (SharedCacheMap->FileObject != NULL &&
1540 SharedCacheMap->FileObject->FileName.Length != 0)
1541 {
1542 FileName = &SharedCacheMap->FileObject->FileName;
1543 }
1544 else
1545 {
1546 FileName = &NoName;
1547 }
1548
1549 /* And print */
1550 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
1551 }
1552
1553 return TRUE;
1554 }
1555 #endif
1556
1557 /* EOF */