[NTOSKRNL] Store the total dirty pages per shared cache map in it.
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
54 /* Counters:
55 * - Amount of pages flushed by lazy writer
56 * - Number of times lazy writer ran
57 */
58 ULONG CcLazyWritePages = 0;
59 ULONG CcLazyWriteIos = 0;
60
61 /* Internal vars (MS):
62 * - Threshold above which lazy writer will start action
63 * - Amount of dirty pages
64 * - List for deferred writes
65 * - Spinlock when dealing with the deferred list
66 * - List for "clean" shared cache maps
67 */
68 ULONG CcDirtyPageThreshold = 0;
69 ULONG CcTotalDirtyPages = 0;
70 LIST_ENTRY CcDeferredWrites;
71 KSPIN_LOCK CcDeferredWriteSpinLock;
72 LIST_ENTRY CcCleanSharedCacheMapList;
73
74 /* Internal vars (ROS):
75 * - Event to notify lazy writer to shutdown
76 * - Event to inform watchers lazy writer is done for this loop
77 * - Lock for the CcCleanSharedCacheMapList list
78 */
79 KEVENT iLazyWriterShutdown;
80 KEVENT iLazyWriterNotify;
81 KSPIN_LOCK iSharedCacheMapLock;
82
83 #if DBG
84 static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
85 {
86 ++vacb->ReferenceCount;
87 if (vacb->SharedCacheMap->Trace)
88 {
89 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
90 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
91 }
92 }
93 static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
94 {
95 --vacb->ReferenceCount;
96 if (vacb->SharedCacheMap->Trace)
97 {
98 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
99 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
100 }
101 }
102 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
103 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
104 #else
105 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
106 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
107 #endif
108
109 NTSTATUS
110 CcRosInternalFreeVacb(PROS_VACB Vacb);
111
112
113 /* FUNCTIONS *****************************************************************/
114
115 VOID
116 NTAPI
117 CcRosTraceCacheMap (
118 PROS_SHARED_CACHE_MAP SharedCacheMap,
119 BOOLEAN Trace )
120 {
121 #if DBG
122 KIRQL oldirql;
123 PLIST_ENTRY current_entry;
124 PROS_VACB current;
125
126 if (!SharedCacheMap)
127 return;
128
129 SharedCacheMap->Trace = Trace;
130
131 if (Trace)
132 {
133 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
134
135 KeAcquireGuardedMutex(&ViewLock);
136 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
137
138 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
139 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
140 {
141 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
142 current_entry = current_entry->Flink;
143
144 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
145 current, current->ReferenceCount, current->Dirty, current->PageOut );
146 }
147 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
148 KeReleaseGuardedMutex(&ViewLock);
149 }
150 else
151 {
152 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
153 }
154
155 #else
156 UNREFERENCED_PARAMETER(SharedCacheMap);
157 UNREFERENCED_PARAMETER(Trace);
158 #endif
159 }
160
161 NTSTATUS
162 NTAPI
163 CcRosFlushVacb (
164 PROS_VACB Vacb)
165 {
166 NTSTATUS Status;
167 KIRQL oldIrql;
168
169 Status = CcWriteVirtualAddress(Vacb);
170 if (NT_SUCCESS(Status))
171 {
172 KeAcquireGuardedMutex(&ViewLock);
173 KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
174
175 Vacb->Dirty = FALSE;
176 RemoveEntryList(&Vacb->DirtyVacbListEntry);
177 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
178 Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
179 CcRosVacbDecRefCount(Vacb);
180
181 KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
182 KeReleaseGuardedMutex(&ViewLock);
183 }
184
185 return Status;
186 }
187
188 NTSTATUS
189 NTAPI
190 CcRosFlushDirtyPages (
191 ULONG Target,
192 PULONG Count,
193 BOOLEAN Wait,
194 BOOLEAN CalledFromLazy)
195 {
196 PLIST_ENTRY current_entry;
197 PROS_VACB current;
198 BOOLEAN Locked;
199 NTSTATUS Status;
200 LARGE_INTEGER ZeroTimeout;
201
202 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
203
204 (*Count) = 0;
205 ZeroTimeout.QuadPart = 0;
206
207 KeEnterCriticalRegion();
208 KeAcquireGuardedMutex(&ViewLock);
209
210 current_entry = DirtyVacbListHead.Flink;
211 if (current_entry == &DirtyVacbListHead)
212 {
213 DPRINT("No Dirty pages\n");
214 }
215
216 while ((current_entry != &DirtyVacbListHead) && (Target > 0))
217 {
218 current = CONTAINING_RECORD(current_entry,
219 ROS_VACB,
220 DirtyVacbListEntry);
221 current_entry = current_entry->Flink;
222
223 CcRosVacbIncRefCount(current);
224
225 /* When performing lazy write, don't handle temporary files */
226 if (CalledFromLazy &&
227 BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
228 {
229 CcRosVacbDecRefCount(current);
230 continue;
231 }
232
233 Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
234 current->SharedCacheMap->LazyWriteContext, Wait);
235 if (!Locked)
236 {
237 CcRosVacbDecRefCount(current);
238 continue;
239 }
240
241 Status = CcRosAcquireVacbLock(current,
242 Wait ? NULL : &ZeroTimeout);
243 if (Status != STATUS_SUCCESS)
244 {
245 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
246 current->SharedCacheMap->LazyWriteContext);
247 CcRosVacbDecRefCount(current);
248 continue;
249 }
250
251 ASSERT(current->Dirty);
252
253 /* One reference is added above */
254 if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
255 (current->ReferenceCount > 3 && current->PinCount > 1))
256 {
257 CcRosReleaseVacbLock(current);
258 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
259 current->SharedCacheMap->LazyWriteContext);
260 CcRosVacbDecRefCount(current);
261 continue;
262 }
263
264 KeReleaseGuardedMutex(&ViewLock);
265
266 Status = CcRosFlushVacb(current);
267
268 CcRosReleaseVacbLock(current);
269 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
270 current->SharedCacheMap->LazyWriteContext);
271
272 KeAcquireGuardedMutex(&ViewLock);
273 CcRosVacbDecRefCount(current);
274
275 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
276 (Status != STATUS_MEDIA_WRITE_PROTECTED))
277 {
278 DPRINT1("CC: Failed to flush VACB.\n");
279 }
280 else
281 {
282 ULONG PagesFreed;
283
284 /* How many pages did we free? */
285 PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
286 (*Count) += PagesFreed;
287
288 /* Make sure we don't overflow target! */
289 if (Target < PagesFreed)
290 {
291 /* If we would have, jump to zero directly */
292 Target = 0;
293 }
294 else
295 {
296 Target -= PagesFreed;
297 }
298 }
299
300 current_entry = DirtyVacbListHead.Flink;
301 }
302
303 KeReleaseGuardedMutex(&ViewLock);
304 KeLeaveCriticalRegion();
305
306 DPRINT("CcRosFlushDirtyPages() finished\n");
307 return STATUS_SUCCESS;
308 }
309
310 /* FIXME: Someday this could somewhat implement write-behind/read-ahead */
311 VOID
312 NTAPI
313 CciLazyWriter(PVOID Unused)
314 {
315 LARGE_INTEGER OneSecond;
316
317 OneSecond.QuadPart = (LONGLONG)-1*1000*1000*10;
318
319 while (TRUE)
320 {
321 NTSTATUS Status;
322 PLIST_ENTRY ListEntry;
323 ULONG Target, Count = 0;
324
325 /* One per second or until we have to stop */
326 Status = KeWaitForSingleObject(&iLazyWriterShutdown,
327 Executive,
328 KernelMode,
329 FALSE,
330 &OneSecond);
331
332 /* If we succeeed, we've to stop running! */
333 if (Status == STATUS_SUCCESS)
334 {
335 break;
336 }
337
338 /* We're not sleeping anymore */
339 KeClearEvent(&iLazyWriterNotify);
340
341 /* Our target is one-eighth of the dirty pages */
342 Target = CcTotalDirtyPages / 8;
343 if (Target != 0)
344 {
345 /* Flush! */
346 DPRINT("Lazy writer starting (%d)\n", Target);
347 CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);
348
349 /* And update stats */
350 CcLazyWritePages += Count;
351 ++CcLazyWriteIos;
352 DPRINT("Lazy writer done (%d)\n", Count);
353 }
354
355 /* Inform people waiting on us that we're done */
356 KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
357
358 /* Likely not optimal, but let's handle one deferred write now! */
359 ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
360 if (ListEntry != NULL)
361 {
362 PROS_DEFERRED_WRITE_CONTEXT Context;
363
364 /* Extract the context */
365 Context = CONTAINING_RECORD(ListEntry, ROS_DEFERRED_WRITE_CONTEXT, CcDeferredWritesEntry);
366
367 /* Can we write now? */
368 if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, Context->Retrying))
369 {
370 /* Yes! Do it, and destroy the associated context */
371 Context->PostRoutine(Context->Context1, Context->Context2);
372 ExFreePoolWithTag(Context, 'CcDw');
373 }
374 else
375 {
376 /* Otherwise, requeue it, but in tail, so that it doesn't block others
377 * This is clearly to improve, but given the poor algorithm used now
378 * It's better than nothing!
379 */
380 ExInterlockedInsertTailList(&CcDeferredWrites,
381 &Context->CcDeferredWritesEntry,
382 &CcDeferredWriteSpinLock);
383 }
384 }
385 }
386 }
387
388 NTSTATUS
389 CcRosTrimCache (
390 ULONG Target,
391 ULONG Priority,
392 PULONG NrFreed)
393 /*
394 * FUNCTION: Try to free some memory from the file cache.
395 * ARGUMENTS:
396 * Target - The number of pages to be freed.
397 * Priority - The priority of free (currently unused).
398 * NrFreed - Points to a variable where the number of pages
399 * actually freed is returned.
400 */
401 {
402 PLIST_ENTRY current_entry;
403 PROS_VACB current;
404 ULONG PagesFreed;
405 KIRQL oldIrql;
406 LIST_ENTRY FreeList;
407 PFN_NUMBER Page;
408 ULONG i;
409 BOOLEAN FlushedPages = FALSE;
410
411 DPRINT("CcRosTrimCache(Target %lu)\n", Target);
412
413 InitializeListHead(&FreeList);
414
415 *NrFreed = 0;
416
417 retry:
418 KeAcquireGuardedMutex(&ViewLock);
419
420 current_entry = VacbLruListHead.Flink;
421 while (current_entry != &VacbLruListHead)
422 {
423 current = CONTAINING_RECORD(current_entry,
424 ROS_VACB,
425 VacbLruListEntry);
426 current_entry = current_entry->Flink;
427
428 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
429
430 /* Reference the VACB */
431 CcRosVacbIncRefCount(current);
432
433 /* Check if it's mapped and not dirty */
434 if (current->MappedCount > 0 && !current->Dirty)
435 {
436 /* We have to break these locks because Cc sucks */
437 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
438 KeReleaseGuardedMutex(&ViewLock);
439
440 /* Page out the VACB */
441 for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
442 {
443 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
444
445 MmPageOutPhysicalAddress(Page);
446 }
447
448 /* Reacquire the locks */
449 KeAcquireGuardedMutex(&ViewLock);
450 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
451 }
452
453 /* Dereference the VACB */
454 CcRosVacbDecRefCount(current);
455
456 /* Check if we can free this entry now */
457 if (current->ReferenceCount == 0)
458 {
459 ASSERT(!current->Dirty);
460 ASSERT(!current->MappedCount);
461
462 RemoveEntryList(&current->CacheMapVacbListEntry);
463 RemoveEntryList(&current->VacbLruListEntry);
464 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
465
466 /* Calculate how many pages we freed for Mm */
467 PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
468 Target -= PagesFreed;
469 (*NrFreed) += PagesFreed;
470 }
471
472 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
473 }
474
475 KeReleaseGuardedMutex(&ViewLock);
476
477 /* Try flushing pages if we haven't met our target */
478 if ((Target > 0) && !FlushedPages)
479 {
480 /* Flush dirty pages to disk */
481 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
482 FlushedPages = TRUE;
483
484 /* We can only swap as many pages as we flushed */
485 if (PagesFreed < Target) Target = PagesFreed;
486
487 /* Check if we flushed anything */
488 if (PagesFreed != 0)
489 {
490 /* Try again after flushing dirty pages */
491 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
492 goto retry;
493 }
494 }
495
496 while (!IsListEmpty(&FreeList))
497 {
498 current_entry = RemoveHeadList(&FreeList);
499 current = CONTAINING_RECORD(current_entry,
500 ROS_VACB,
501 CacheMapVacbListEntry);
502 CcRosInternalFreeVacb(current);
503 }
504
505 DPRINT("Evicted %lu cache pages\n", (*NrFreed));
506
507 return STATUS_SUCCESS;
508 }
509
510 NTSTATUS
511 NTAPI
512 CcRosReleaseVacb (
513 PROS_SHARED_CACHE_MAP SharedCacheMap,
514 PROS_VACB Vacb,
515 BOOLEAN Valid,
516 BOOLEAN Dirty,
517 BOOLEAN Mapped)
518 {
519 BOOLEAN WasDirty;
520 KIRQL oldIrql;
521
522 ASSERT(SharedCacheMap);
523
524 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
525 SharedCacheMap, Vacb, Valid);
526
527 KeAcquireGuardedMutex(&ViewLock);
528 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
529
530 Vacb->Valid = Valid;
531
532 WasDirty = Vacb->Dirty;
533 Vacb->Dirty = Vacb->Dirty || Dirty;
534
535 if (!WasDirty && Vacb->Dirty)
536 {
537 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
538 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
539 Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
540 }
541
542 if (Mapped)
543 {
544 Vacb->MappedCount++;
545 }
546 CcRosVacbDecRefCount(Vacb);
547 if (Mapped && (Vacb->MappedCount == 1))
548 {
549 CcRosVacbIncRefCount(Vacb);
550 }
551 if (!WasDirty && Vacb->Dirty)
552 {
553 CcRosVacbIncRefCount(Vacb);
554 }
555
556 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
557 KeReleaseGuardedMutex(&ViewLock);
558 CcRosReleaseVacbLock(Vacb);
559
560 return STATUS_SUCCESS;
561 }
562
563 /* Returns with VACB Lock Held! */
564 PROS_VACB
565 NTAPI
566 CcRosLookupVacb (
567 PROS_SHARED_CACHE_MAP SharedCacheMap,
568 LONGLONG FileOffset)
569 {
570 PLIST_ENTRY current_entry;
571 PROS_VACB current;
572 KIRQL oldIrql;
573
574 ASSERT(SharedCacheMap);
575
576 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
577 SharedCacheMap, FileOffset);
578
579 KeAcquireGuardedMutex(&ViewLock);
580 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
581
582 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
583 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
584 {
585 current = CONTAINING_RECORD(current_entry,
586 ROS_VACB,
587 CacheMapVacbListEntry);
588 if (IsPointInRange(current->FileOffset.QuadPart,
589 VACB_MAPPING_GRANULARITY,
590 FileOffset))
591 {
592 CcRosVacbIncRefCount(current);
593 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
594 KeReleaseGuardedMutex(&ViewLock);
595 CcRosAcquireVacbLock(current, NULL);
596 return current;
597 }
598 if (current->FileOffset.QuadPart > FileOffset)
599 break;
600 current_entry = current_entry->Flink;
601 }
602
603 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
604 KeReleaseGuardedMutex(&ViewLock);
605
606 return NULL;
607 }
608
609 VOID
610 NTAPI
611 CcRosMarkDirtyVacb (
612 PROS_VACB Vacb)
613 {
614 KIRQL oldIrql;
615 PROS_SHARED_CACHE_MAP SharedCacheMap;
616
617 SharedCacheMap = Vacb->SharedCacheMap;
618
619 KeAcquireGuardedMutex(&ViewLock);
620 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
621
622 if (!Vacb->Dirty)
623 {
624 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
625 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
626 Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
627 }
628 else
629 {
630 CcRosVacbDecRefCount(Vacb);
631 }
632
633 /* Move to the tail of the LRU list */
634 RemoveEntryList(&Vacb->VacbLruListEntry);
635 InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
636
637 Vacb->Dirty = TRUE;
638
639 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
640 KeReleaseGuardedMutex(&ViewLock);
641 }
642
643 NTSTATUS
644 NTAPI
645 CcRosMarkDirtyFile (
646 PROS_SHARED_CACHE_MAP SharedCacheMap,
647 LONGLONG FileOffset)
648 {
649 PROS_VACB Vacb;
650
651 ASSERT(SharedCacheMap);
652
653 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
654 SharedCacheMap, FileOffset);
655
656 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
657 if (Vacb == NULL)
658 {
659 KeBugCheck(CACHE_MANAGER);
660 }
661
662 CcRosMarkDirtyVacb(Vacb);
663
664
665 CcRosReleaseVacbLock(Vacb);
666
667 return STATUS_SUCCESS;
668 }
669
670 NTSTATUS
671 NTAPI
672 CcRosUnmapVacb (
673 PROS_SHARED_CACHE_MAP SharedCacheMap,
674 LONGLONG FileOffset,
675 BOOLEAN NowDirty)
676 {
677 PROS_VACB Vacb;
678 BOOLEAN WasDirty;
679 KIRQL oldIrql;
680
681 ASSERT(SharedCacheMap);
682
683 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
684 SharedCacheMap, FileOffset, NowDirty);
685
686 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
687 if (Vacb == NULL)
688 {
689 return STATUS_UNSUCCESSFUL;
690 }
691
692 KeAcquireGuardedMutex(&ViewLock);
693 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
694
695 WasDirty = Vacb->Dirty;
696 Vacb->Dirty = Vacb->Dirty || NowDirty;
697
698 Vacb->MappedCount--;
699
700 if (!WasDirty && NowDirty)
701 {
702 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
703 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
704 Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
705 }
706
707 CcRosVacbDecRefCount(Vacb);
708 if (!WasDirty && NowDirty)
709 {
710 CcRosVacbIncRefCount(Vacb);
711 }
712 if (Vacb->MappedCount == 0)
713 {
714 CcRosVacbDecRefCount(Vacb);
715 }
716
717 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
718 KeReleaseGuardedMutex(&ViewLock);
719 CcRosReleaseVacbLock(Vacb);
720
721 return STATUS_SUCCESS;
722 }
723
724 static
725 NTSTATUS
726 CcRosMapVacb(
727 PROS_VACB Vacb)
728 {
729 ULONG i;
730 NTSTATUS Status;
731 ULONG_PTR NumberOfPages;
732
733 /* Create a memory area. */
734 MmLockAddressSpace(MmGetKernelAddressSpace());
735 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
736 0, // nothing checks for VACB mareas, so set to 0
737 &Vacb->BaseAddress,
738 VACB_MAPPING_GRANULARITY,
739 PAGE_READWRITE,
740 (PMEMORY_AREA*)&Vacb->MemoryArea,
741 0,
742 PAGE_SIZE);
743 MmUnlockAddressSpace(MmGetKernelAddressSpace());
744 if (!NT_SUCCESS(Status))
745 {
746 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
747 return Status;
748 }
749
750 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
751 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
752
753 /* Create a virtual mapping for this memory area */
754 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
755 for (i = 0; i < NumberOfPages; i++)
756 {
757 PFN_NUMBER PageFrameNumber;
758
759 MI_SET_USAGE(MI_USAGE_CACHE);
760 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
761 if (PageFrameNumber == 0)
762 {
763 DPRINT1("Unable to allocate page\n");
764 KeBugCheck(MEMORY_MANAGEMENT);
765 }
766
767 Status = MmCreateVirtualMapping(NULL,
768 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
769 PAGE_READWRITE,
770 &PageFrameNumber,
771 1);
772 if (!NT_SUCCESS(Status))
773 {
774 DPRINT1("Unable to create virtual mapping\n");
775 KeBugCheck(MEMORY_MANAGEMENT);
776 }
777 }
778
779 return STATUS_SUCCESS;
780 }
781
782 static
783 NTSTATUS
784 CcRosCreateVacb (
785 PROS_SHARED_CACHE_MAP SharedCacheMap,
786 LONGLONG FileOffset,
787 PROS_VACB *Vacb)
788 {
789 PROS_VACB current;
790 PROS_VACB previous;
791 PLIST_ENTRY current_entry;
792 NTSTATUS Status;
793 KIRQL oldIrql;
794
795 ASSERT(SharedCacheMap);
796
797 DPRINT("CcRosCreateVacb()\n");
798
799 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
800 {
801 *Vacb = NULL;
802 return STATUS_INVALID_PARAMETER;
803 }
804
805 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
806 current->BaseAddress = NULL;
807 current->Valid = FALSE;
808 current->Dirty = FALSE;
809 current->PageOut = FALSE;
810 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
811 current->SharedCacheMap = SharedCacheMap;
812 #if DBG
813 if (SharedCacheMap->Trace)
814 {
815 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
816 }
817 #endif
818 current->MappedCount = 0;
819 current->DirtyVacbListEntry.Flink = NULL;
820 current->DirtyVacbListEntry.Blink = NULL;
821 current->ReferenceCount = 1;
822 current->PinCount = 0;
823 KeInitializeMutex(&current->Mutex, 0);
824 CcRosAcquireVacbLock(current, NULL);
825 KeAcquireGuardedMutex(&ViewLock);
826
827 *Vacb = current;
828 /* There is window between the call to CcRosLookupVacb
829 * and CcRosCreateVacb. We must check if a VACB for the
830 * file offset exist. If there is a VACB, we release
831 * our newly created VACB and return the existing one.
832 */
833 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
834 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
835 previous = NULL;
836 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
837 {
838 current = CONTAINING_RECORD(current_entry,
839 ROS_VACB,
840 CacheMapVacbListEntry);
841 if (IsPointInRange(current->FileOffset.QuadPart,
842 VACB_MAPPING_GRANULARITY,
843 FileOffset))
844 {
845 CcRosVacbIncRefCount(current);
846 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
847 #if DBG
848 if (SharedCacheMap->Trace)
849 {
850 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
851 SharedCacheMap,
852 (*Vacb),
853 current);
854 }
855 #endif
856 CcRosReleaseVacbLock(*Vacb);
857 KeReleaseGuardedMutex(&ViewLock);
858 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
859 *Vacb = current;
860 CcRosAcquireVacbLock(current, NULL);
861 return STATUS_SUCCESS;
862 }
863 if (current->FileOffset.QuadPart < FileOffset)
864 {
865 ASSERT(previous == NULL ||
866 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
867 previous = current;
868 }
869 if (current->FileOffset.QuadPart > FileOffset)
870 break;
871 current_entry = current_entry->Flink;
872 }
873 /* There was no existing VACB. */
874 current = *Vacb;
875 if (previous)
876 {
877 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
878 }
879 else
880 {
881 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
882 }
883 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
884 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
885 KeReleaseGuardedMutex(&ViewLock);
886
887 MI_SET_USAGE(MI_USAGE_CACHE);
888 #if MI_TRACE_PFNS
889 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
890 {
891 PWCHAR pos;
892 ULONG len = 0;
893 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
894 if (pos)
895 {
896 len = wcslen(pos) * sizeof(WCHAR);
897 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
898 }
899 else
900 {
901 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
902 }
903 }
904 #endif
905
906 Status = CcRosMapVacb(current);
907 if (!NT_SUCCESS(Status))
908 {
909 RemoveEntryList(&current->CacheMapVacbListEntry);
910 RemoveEntryList(&current->VacbLruListEntry);
911 CcRosReleaseVacbLock(current);
912 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
913 }
914
915 return Status;
916 }
917
918 NTSTATUS
919 NTAPI
920 CcRosGetVacb (
921 PROS_SHARED_CACHE_MAP SharedCacheMap,
922 LONGLONG FileOffset,
923 PLONGLONG BaseOffset,
924 PVOID* BaseAddress,
925 PBOOLEAN UptoDate,
926 PROS_VACB *Vacb)
927 {
928 PROS_VACB current;
929 NTSTATUS Status;
930
931 ASSERT(SharedCacheMap);
932
933 DPRINT("CcRosGetVacb()\n");
934
935 /*
936 * Look for a VACB already mapping the same data.
937 */
938 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
939 if (current == NULL)
940 {
941 /*
942 * Otherwise create a new VACB.
943 */
944 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
945 if (!NT_SUCCESS(Status))
946 {
947 return Status;
948 }
949 }
950
951 KeAcquireGuardedMutex(&ViewLock);
952
953 /* Move to the tail of the LRU list */
954 RemoveEntryList(&current->VacbLruListEntry);
955 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
956
957 KeReleaseGuardedMutex(&ViewLock);
958
959 /*
960 * Return information about the VACB to the caller.
961 */
962 *UptoDate = current->Valid;
963 *BaseAddress = current->BaseAddress;
964 DPRINT("*BaseAddress %p\n", *BaseAddress);
965 *Vacb = current;
966 *BaseOffset = current->FileOffset.QuadPart;
967 return STATUS_SUCCESS;
968 }
969
970 NTSTATUS
971 NTAPI
972 CcRosRequestVacb (
973 PROS_SHARED_CACHE_MAP SharedCacheMap,
974 LONGLONG FileOffset,
975 PVOID* BaseAddress,
976 PBOOLEAN UptoDate,
977 PROS_VACB *Vacb)
978 /*
979 * FUNCTION: Request a page mapping for a shared cache map
980 */
981 {
982 LONGLONG BaseOffset;
983
984 ASSERT(SharedCacheMap);
985
986 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
987 {
988 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
989 FileOffset, VACB_MAPPING_GRANULARITY);
990 KeBugCheck(CACHE_MANAGER);
991 }
992
993 return CcRosGetVacb(SharedCacheMap,
994 FileOffset,
995 &BaseOffset,
996 BaseAddress,
997 UptoDate,
998 Vacb);
999 }
1000
1001 static
1002 VOID
1003 CcFreeCachePage (
1004 PVOID Context,
1005 MEMORY_AREA* MemoryArea,
1006 PVOID Address,
1007 PFN_NUMBER Page,
1008 SWAPENTRY SwapEntry,
1009 BOOLEAN Dirty)
1010 {
1011 ASSERT(SwapEntry == 0);
1012 if (Page != 0)
1013 {
1014 ASSERT(MmGetReferenceCountPage(Page) == 1);
1015 MmReleasePageMemoryConsumer(MC_CACHE, Page);
1016 }
1017 }
1018
1019 NTSTATUS
1020 CcRosInternalFreeVacb (
1021 PROS_VACB Vacb)
1022 /*
1023 * FUNCTION: Releases a VACB associated with a shared cache map
1024 */
1025 {
1026 DPRINT("Freeing VACB 0x%p\n", Vacb);
1027 #if DBG
1028 if (Vacb->SharedCacheMap->Trace)
1029 {
1030 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
1031 }
1032 #endif
1033
1034 MmLockAddressSpace(MmGetKernelAddressSpace());
1035 MmFreeMemoryArea(MmGetKernelAddressSpace(),
1036 Vacb->MemoryArea,
1037 CcFreeCachePage,
1038 NULL);
1039 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1040
1041 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
1042 return STATUS_SUCCESS;
1043 }
1044
1045 /*
1046 * @implemented
1047 */
1048 VOID
1049 NTAPI
1050 CcFlushCache (
1051 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
1052 IN PLARGE_INTEGER FileOffset OPTIONAL,
1053 IN ULONG Length,
1054 OUT PIO_STATUS_BLOCK IoStatus)
1055 {
1056 PROS_SHARED_CACHE_MAP SharedCacheMap;
1057 LARGE_INTEGER Offset;
1058 LONGLONG RemainingLength;
1059 PROS_VACB current;
1060 NTSTATUS Status;
1061 KIRQL oldIrql;
1062
1063 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1064 SectionObjectPointers, FileOffset, Length);
1065
1066 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1067 SectionObjectPointers, FileOffset, Length, IoStatus);
1068
1069 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1070 {
1071 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1072 ASSERT(SharedCacheMap);
1073 if (FileOffset)
1074 {
1075 Offset = *FileOffset;
1076 RemainingLength = Length;
1077 }
1078 else
1079 {
1080 Offset.QuadPart = 0;
1081 RemainingLength = SharedCacheMap->FileSize.QuadPart;
1082 }
1083
1084 if (IoStatus)
1085 {
1086 IoStatus->Status = STATUS_SUCCESS;
1087 IoStatus->Information = 0;
1088 }
1089
1090 while (RemainingLength > 0)
1091 {
1092 current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
1093 if (current != NULL)
1094 {
1095 if (current->Dirty)
1096 {
1097 Status = CcRosFlushVacb(current);
1098 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1099 {
1100 IoStatus->Status = Status;
1101 }
1102 }
1103
1104 CcRosReleaseVacbLock(current);
1105
1106 KeAcquireGuardedMutex(&ViewLock);
1107 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1108 CcRosVacbDecRefCount(current);
1109 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1110 KeReleaseGuardedMutex(&ViewLock);
1111 }
1112
1113 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
1114 RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
1115 }
1116 }
1117 else
1118 {
1119 if (IoStatus)
1120 {
1121 IoStatus->Status = STATUS_INVALID_PARAMETER;
1122 }
1123 }
1124 }
1125
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 * NOTE: Called with ViewLock held; ViewLock is temporarily dropped twice
 *       (around the flush and around the final free), and is held again
 *       on return.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    /* Pin the map with a temporary reference so it cannot be torn down
     * by someone else while we drop ViewLock to flush dirty data. */
    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);

    /* Write the whole file's dirty data back before teardown. */
    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->OpenCount--;
    /* Only proceed if ours was the last reference; otherwise someone
     * re-referenced the map while ViewLock was dropped. */
    if (SharedCacheMap->OpenCount == 0)
    {
        KIRQL OldIrql;

        /* Detach from the file object first so no new user can find the map. */
        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* Still dirty after the flush above: unlink from the dirty
                 * list and keep both the global and the per-map dirty page
                 * counters consistent. */
                RemoveEntryList(&current->DirtyVacbListEntry);
                CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
                current->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
                DPRINT1("Freeing dirty VACB\n");
            }
            /* Collect on a private list; the actual free happens after the
             * spin lock has been released. */
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        /* Drop ViewLock for the dereference/free phase. */
        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosInternalFreeVacb(current);
        }

        /* Unlink the map from the global shared cache map list. */
        KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        /* Reacquire ViewLock: callers expect it held on return. */
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1198
1199 VOID
1200 NTAPI
1201 CcRosReferenceCache (
1202 PFILE_OBJECT FileObject)
1203 {
1204 PROS_SHARED_CACHE_MAP SharedCacheMap;
1205 KeAcquireGuardedMutex(&ViewLock);
1206 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1207 ASSERT(SharedCacheMap);
1208 ASSERT(SharedCacheMap->OpenCount != 0);
1209 SharedCacheMap->OpenCount++;
1210 KeReleaseGuardedMutex(&ViewLock);
1211 }
1212
1213 VOID
1214 NTAPI
1215 CcRosRemoveIfClosed (
1216 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1217 {
1218 PROS_SHARED_CACHE_MAP SharedCacheMap;
1219 DPRINT("CcRosRemoveIfClosed()\n");
1220 KeAcquireGuardedMutex(&ViewLock);
1221 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1222 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1223 {
1224 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1225 }
1226 KeReleaseGuardedMutex(&ViewLock);
1227 }
1228
1229
1230 VOID
1231 NTAPI
1232 CcRosDereferenceCache (
1233 PFILE_OBJECT FileObject)
1234 {
1235 PROS_SHARED_CACHE_MAP SharedCacheMap;
1236 KeAcquireGuardedMutex(&ViewLock);
1237 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1238 ASSERT(SharedCacheMap);
1239 if (SharedCacheMap->OpenCount > 0)
1240 {
1241 SharedCacheMap->OpenCount--;
1242 if (SharedCacheMap->OpenCount == 0)
1243 {
1244 MmFreeSectionSegments(SharedCacheMap->FileObject);
1245 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1246 }
1247 }
1248 KeReleaseGuardedMutex(&ViewLock);
1249 }
1250
1251 NTSTATUS
1252 NTAPI
1253 CcRosReleaseFileCache (
1254 PFILE_OBJECT FileObject)
1255 /*
1256 * FUNCTION: Called by the file system when a handle to a file object
1257 * has been closed.
1258 */
1259 {
1260 PROS_SHARED_CACHE_MAP SharedCacheMap;
1261
1262 KeAcquireGuardedMutex(&ViewLock);
1263
1264 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1265 {
1266 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1267 if (FileObject->PrivateCacheMap != NULL)
1268 {
1269 FileObject->PrivateCacheMap = NULL;
1270 if (SharedCacheMap->OpenCount > 0)
1271 {
1272 SharedCacheMap->OpenCount--;
1273 if (SharedCacheMap->OpenCount == 0)
1274 {
1275 MmFreeSectionSegments(SharedCacheMap->FileObject);
1276 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1277 }
1278 }
1279 }
1280 }
1281 KeReleaseGuardedMutex(&ViewLock);
1282 return STATUS_SUCCESS;
1283 }
1284
1285 NTSTATUS
1286 NTAPI
1287 CcTryToInitializeFileCache (
1288 PFILE_OBJECT FileObject)
1289 {
1290 PROS_SHARED_CACHE_MAP SharedCacheMap;
1291 NTSTATUS Status;
1292
1293 KeAcquireGuardedMutex(&ViewLock);
1294
1295 ASSERT(FileObject->SectionObjectPointer);
1296 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1297 if (SharedCacheMap == NULL)
1298 {
1299 Status = STATUS_UNSUCCESSFUL;
1300 }
1301 else
1302 {
1303 if (FileObject->PrivateCacheMap == NULL)
1304 {
1305 FileObject->PrivateCacheMap = SharedCacheMap;
1306 SharedCacheMap->OpenCount++;
1307 }
1308 Status = STATUS_SUCCESS;
1309 }
1310 KeReleaseGuardedMutex(&ViewLock);
1311
1312 return Status;
1313 }
1314
1315
1316 NTSTATUS
1317 NTAPI
1318 CcRosInitializeFileCache (
1319 PFILE_OBJECT FileObject,
1320 PCC_FILE_SIZES FileSizes,
1321 BOOLEAN PinAccess,
1322 PCACHE_MANAGER_CALLBACKS CallBacks,
1323 PVOID LazyWriterContext)
1324 /*
1325 * FUNCTION: Initializes a shared cache map for a file object
1326 */
1327 {
1328 PROS_SHARED_CACHE_MAP SharedCacheMap;
1329
1330 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1331 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1332 FileObject, SharedCacheMap);
1333
1334 KeAcquireGuardedMutex(&ViewLock);
1335 if (SharedCacheMap == NULL)
1336 {
1337 KIRQL OldIrql;
1338
1339 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1340 if (SharedCacheMap == NULL)
1341 {
1342 KeReleaseGuardedMutex(&ViewLock);
1343 return STATUS_INSUFFICIENT_RESOURCES;
1344 }
1345 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1346 ObReferenceObjectByPointer(FileObject,
1347 FILE_ALL_ACCESS,
1348 NULL,
1349 KernelMode);
1350 SharedCacheMap->FileObject = FileObject;
1351 SharedCacheMap->Callbacks = CallBacks;
1352 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1353 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1354 SharedCacheMap->FileSize = FileSizes->FileSize;
1355 SharedCacheMap->PinAccess = PinAccess;
1356 SharedCacheMap->DirtyPageThreshold = 0;
1357 SharedCacheMap->DirtyPages = 0;
1358 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1359 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1360 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1361
1362 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1363 InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
1364 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1365 }
1366 if (FileObject->PrivateCacheMap == NULL)
1367 {
1368 FileObject->PrivateCacheMap = SharedCacheMap;
1369 SharedCacheMap->OpenCount++;
1370 }
1371 KeReleaseGuardedMutex(&ViewLock);
1372
1373 return STATUS_SUCCESS;
1374 }
1375
1376 /*
1377 * @implemented
1378 */
1379 PFILE_OBJECT
1380 NTAPI
1381 CcGetFileObjectFromSectionPtrs (
1382 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1383 {
1384 PROS_SHARED_CACHE_MAP SharedCacheMap;
1385
1386 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1387
1388 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1389 {
1390 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1391 ASSERT(SharedCacheMap);
1392 return SharedCacheMap->FileObject;
1393 }
1394 return NULL;
1395 }
1396
VOID
NTAPI
CcShutdownLazyWriter (
    VOID)
/*
 * FUNCTION: Signals the lazy writer to stop. Only sets the shutdown
 * event; it does not wait for the lazy writer thread to exit.
 */
{
    /* Simply set the event, lazy writer will stop when it's done */
    KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
}
1405
BOOLEAN
INIT_FUNCTION
NTAPI
CcInitView (
    VOID)
/*
 * FUNCTION: One-time cache manager initialization. Sets up the global
 * lists, locks and lookaside lists, computes the dirty page threshold
 * from the system size, and spawns the lazy writer thread.
 * RETURNS: TRUE on success, FALSE if the lazy writer thread cannot be
 *          created.
 */
{
    HANDLE LazyWriter;
    NTSTATUS Status;
    KPRIORITY Priority;
    OBJECT_ATTRIBUTES ObjectAttributes;

    DPRINT("CcInitView()\n");

    /* Global lists and the locks protecting them. */
    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    InitializeListHead(&CcDeferredWrites);
    InitializeListHead(&CcCleanSharedCacheMapList);
    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
    KeInitializeSpinLock(&iSharedCacheMapLock);
    KeInitializeGuardedMutex(&ViewLock);
    /* Non-paged lookaside lists for the three allocation types used by
     * the cache manager: internal BCBs, shared cache maps and VACBs. */
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    /* Let Mm reclaim cache memory through CcRosTrimCache under pressure. */
    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);

    /* Initialize lazy writer events */
    KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
    KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);

    /* Define lazy writer threshold, depending on system type */
    /* NOTE(review): no default case — assumes MmQuerySystemSize only ever
     * returns the three sizes handled below; otherwise the threshold
     * keeps its zero-initialized value. */
    switch (MmQuerySystemSize())
    {
        case MmSmallSystem:
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
            break;

        case MmMediumSystem:
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
            break;

        case MmLargeSystem:
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
            break;
    }

    /* Start the lazy writer thread */
    InitializeObjectAttributes(&ObjectAttributes,
                               NULL,
                               OBJ_KERNEL_HANDLE,
                               NULL,
                               NULL);
    Status = PsCreateSystemThread(&LazyWriter,
                                  THREAD_ALL_ACCESS,
                                  &ObjectAttributes,
                                  NULL,
                                  NULL,
                                  CciLazyWriter,
                                  NULL);
    if (!NT_SUCCESS(Status))
    {
        return FALSE;
    }

    /* 27 is in the real-time priority range (16-31) so the lazy writer
     * preempts normal dynamic-priority threads. */
    Priority = 27;
    Status = NtSetInformationThread(LazyWriter,
                                    ThreadPriority,
                                    &Priority,
                                    sizeof(Priority));
    ASSERT(NT_SUCCESS(Status));

    /* Handle is not needed */
    ObCloseHandle(LazyWriter, KernelMode);

    CcInitCacheZeroPage();

    return TRUE;
}
1502
1503 #if DBG && defined(KDBG)
1504 BOOLEAN
1505 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1506 {
1507 PLIST_ENTRY ListEntry;
1508 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1509
1510 KdbpPrint(" Usage Summary (in kb)\n");
1511 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1512 /* No need to lock the spin lock here, we're in DBG */
1513 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1514 ListEntry != &CcCleanSharedCacheMapList;
1515 ListEntry = ListEntry->Flink)
1516 {
1517 PLIST_ENTRY Vacbs;
1518 ULONG Valid = 0, Dirty = 0;
1519 PROS_SHARED_CACHE_MAP SharedCacheMap;
1520 PUNICODE_STRING FileName;
1521
1522 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1523
1524 /* First, count for all the associated VACB */
1525 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1526 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1527 Vacbs = Vacbs->Flink)
1528 {
1529 PROS_VACB Vacb;
1530
1531 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1532 if (Vacb->Dirty)
1533 {
1534 Dirty += VACB_MAPPING_GRANULARITY / 1024;
1535 }
1536 if (Vacb->Valid)
1537 {
1538 Valid += VACB_MAPPING_GRANULARITY / 1024;
1539 }
1540 }
1541
1542 /* Setup name */
1543 if (SharedCacheMap->FileObject != NULL &&
1544 SharedCacheMap->FileObject->FileName.Length != 0)
1545 {
1546 FileName = &SharedCacheMap->FileObject->FileName;
1547 }
1548 else
1549 {
1550 FileName = &NoName;
1551 }
1552
1553 /* And print */
1554 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
1555 }
1556
1557 return TRUE;
1558 }
1559 #endif
1560
1561 /* EOF */