1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16  * The general procedure for a filesystem to implement a read or write
17  * dispatch routine is as follows (an illustrative sketch follows this comment):
18 *
19  * (1) If caching for the FCB hasn't been initiated, then do so by calling
20 * CcInitializeFileCache.
21 *
22  * (2) For each 4k region that is being read or written, obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25  * (3) If the page is being read, or only partially written, and it is not
26  * up to date, then read its data from the underlying medium. If the read
27  * fails, call CcReleaseCachePage with VALID set to FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
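/*
 * Illustrative only, and guarded out of the build: a minimal sketch of how a
 * read dispatch routine could follow steps (2)-(5) above, expressed with the
 * ROS-specific helpers implemented later in this file (CcRosRequestVacb and
 * CcRosReleaseVacb, which operate on whole views of VACB_MAPPING_GRANULARITY
 * bytes rather than the 4k pages mentioned above). ExampleCachedRead and
 * ExampleReadViewFromDisk are hypothetical names, not part of ReactOS.
 */
#if 0
static
NTSTATUS
ExampleCachedRead(
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    ULONG Length,
    PVOID Buffer)
{
    NTSTATUS Status;

    while (Length > 0)
    {
        PROS_VACB Vacb;
        PVOID BaseAddress;
        BOOLEAN UpToDate;
        LONGLONG ViewOffset = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
        ULONG OffsetInView = (ULONG)(FileOffset - ViewOffset);
        ULONG CopyLength = min(Length, VACB_MAPPING_GRANULARITY - OffsetInView);

        /* (2) Obtain the view covering this offset (returned locked and referenced) */
        Status = CcRosRequestVacb(SharedCacheMap, ViewOffset, &BaseAddress, &UpToDate, &Vacb);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }

        /* (3) If the view is not up to date, read it from the underlying medium
         *     first; ExampleReadViewFromDisk stands in for the FSD-specific
         *     non-cached read. */
        if (!UpToDate)
        {
            Status = ExampleReadViewFromDisk(SharedCacheMap, Vacb);
            if (!NT_SUCCESS(Status))
            {
                /* Release with Valid == FALSE and fail, as described above */
                CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
                return Status;
            }
        }

        /* (4) Copy the data out of the view */
        RtlCopyMemory(Buffer, (PUCHAR)BaseAddress + OffsetInView, CopyLength);

        /* (5) Release the view: valid, not dirtied, not mapped by the caller */
        CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);

        FileOffset += CopyLength;
        Buffer = (PUCHAR)Buffer + CopyLength;
        Length -= CopyLength;
    }

    return STATUS_SUCCESS;
}
#endif
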
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
54 /* Internal vars (MS):
55 * - Threshold above which lazy writer will start action
56 * - Amount of dirty pages
57 * - List for deferred writes
58 * - Spinlock when dealing with the deferred list
59 * - List for "clean" shared cache maps
60 */
61 ULONG CcDirtyPageThreshold = 0;
62 ULONG CcTotalDirtyPages = 0;
63 LIST_ENTRY CcDeferredWrites;
64 KSPIN_LOCK CcDeferredWriteSpinLock;
65 LIST_ENTRY CcCleanSharedCacheMapList;
66
67 /* Internal vars (ROS):
68 * - Lock for the CcCleanSharedCacheMapList list
69 */
70 KSPIN_LOCK iSharedCacheMapLock;
71
72 #if DBG
73 static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
74 {
75 ++vacb->ReferenceCount;
76 if (vacb->SharedCacheMap->Trace)
77 {
78 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
79 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
80 }
81 }
82 static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
83 {
84 ASSERT(vacb->ReferenceCount != 0);
85 --vacb->ReferenceCount;
86 ASSERT(!(vacb->ReferenceCount == 0 && vacb->Dirty));
87 if (vacb->SharedCacheMap->Trace)
88 {
89 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
90 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
91 }
92 }
93 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
94 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
95 #else
96 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
97 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
98 #endif
99
100 NTSTATUS
101 CcRosInternalFreeVacb(PROS_VACB Vacb);
102
103
104 /* FUNCTIONS *****************************************************************/
105
106 VOID
107 NTAPI
108 CcRosTraceCacheMap (
109 PROS_SHARED_CACHE_MAP SharedCacheMap,
110 BOOLEAN Trace )
111 {
112 #if DBG
113 KIRQL oldirql;
114 PLIST_ENTRY current_entry;
115 PROS_VACB current;
116
117 if (!SharedCacheMap)
118 return;
119
120 SharedCacheMap->Trace = Trace;
121
122 if (Trace)
123 {
124 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
125
126 KeAcquireGuardedMutex(&ViewLock);
127 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
128
129 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
130 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
131 {
132 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
133 current_entry = current_entry->Flink;
134
135 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
136 current, current->ReferenceCount, current->Dirty, current->PageOut );
137 }
138 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
139 KeReleaseGuardedMutex(&ViewLock);
140 }
141 else
142 {
143 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
144 }
145
146 #else
147 UNREFERENCED_PARAMETER(SharedCacheMap);
148 UNREFERENCED_PARAMETER(Trace);
149 #endif
150 }
151
152 NTSTATUS
153 NTAPI
154 CcRosFlushVacb (
155 PROS_VACB Vacb)
156 {
157 NTSTATUS Status;
158
159 Status = CcWriteVirtualAddress(Vacb);
160 if (NT_SUCCESS(Status))
161 {
162 CcRosUnmarkDirtyVacb(Vacb, TRUE);
163 }
164
165 return Status;
166 }
167
168 NTSTATUS
169 NTAPI
170 CcRosFlushDirtyPages (
171 ULONG Target,
172 PULONG Count,
173 BOOLEAN Wait,
174 BOOLEAN CalledFromLazy)
175 {
176 PLIST_ENTRY current_entry;
177 PROS_VACB current;
178 BOOLEAN Locked;
179 NTSTATUS Status;
180 LARGE_INTEGER ZeroTimeout;
181
182 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
183
184 (*Count) = 0;
185 ZeroTimeout.QuadPart = 0;
186
187 KeEnterCriticalRegion();
188 KeAcquireGuardedMutex(&ViewLock);
189
190 current_entry = DirtyVacbListHead.Flink;
191 if (current_entry == &DirtyVacbListHead)
192 {
193 DPRINT("No Dirty pages\n");
194 }
195
196 while ((current_entry != &DirtyVacbListHead) && (Target > 0))
197 {
198 current = CONTAINING_RECORD(current_entry,
199 ROS_VACB,
200 DirtyVacbListEntry);
201 current_entry = current_entry->Flink;
202
203 CcRosVacbIncRefCount(current);
204
205 /* When performing lazy write, don't handle temporary files */
206 if (CalledFromLazy &&
207 BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
208 {
209 CcRosVacbDecRefCount(current);
210 continue;
211 }
212
213 Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
214 current->SharedCacheMap->LazyWriteContext, Wait);
215 if (!Locked)
216 {
217 CcRosVacbDecRefCount(current);
218 continue;
219 }
220
221 Status = CcRosAcquireVacbLock(current,
222 Wait ? NULL : &ZeroTimeout);
223 if (Status != STATUS_SUCCESS)
224 {
225 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
226 current->SharedCacheMap->LazyWriteContext);
227 CcRosVacbDecRefCount(current);
228 continue;
229 }
230
231 ASSERT(current->Dirty);
232
233 /* One reference is added above */
234 if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
235 (current->ReferenceCount > 3 && current->PinCount > 1))
236 {
237 CcRosReleaseVacbLock(current);
238 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
239 current->SharedCacheMap->LazyWriteContext);
240 CcRosVacbDecRefCount(current);
241 continue;
242 }
243
244 KeReleaseGuardedMutex(&ViewLock);
245
246 Status = CcRosFlushVacb(current);
247
248 CcRosReleaseVacbLock(current);
249 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
250 current->SharedCacheMap->LazyWriteContext);
251
252 KeAcquireGuardedMutex(&ViewLock);
253 CcRosVacbDecRefCount(current);
254
255 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
256 (Status != STATUS_MEDIA_WRITE_PROTECTED))
257 {
258 DPRINT1("CC: Failed to flush VACB.\n");
259 }
260 else
261 {
262 ULONG PagesFreed;
263
264 /* How many pages did we free? */
265 PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
266 (*Count) += PagesFreed;
267
268 /* Make sure Target doesn't wrap around below zero! */
269 if (Target < PagesFreed)
270 {
271 /* If it would, clamp it to zero directly */
272 Target = 0;
273 }
274 else
275 {
276 Target -= PagesFreed;
277 }
278 }
279
280 current_entry = DirtyVacbListHead.Flink;
281 }
282
283 KeReleaseGuardedMutex(&ViewLock);
284 KeLeaveCriticalRegion();
285
286 DPRINT("CcRosFlushDirtyPages() finished\n");
287 return STATUS_SUCCESS;
288 }
289
290 NTSTATUS
291 CcRosTrimCache (
292 ULONG Target,
293 ULONG Priority,
294 PULONG NrFreed)
295 /*
296 * FUNCTION: Try to free some memory from the file cache.
297 * ARGUMENTS:
298 * Target - The number of pages to be freed.
299 * Priority - The priority of the free operation (currently unused).
300 * NrFreed - Points to a variable where the number of pages
301 * actually freed is returned.
302 */
303 {
304 PLIST_ENTRY current_entry;
305 PROS_VACB current;
306 ULONG PagesFreed;
307 KIRQL oldIrql;
308 LIST_ENTRY FreeList;
309 PFN_NUMBER Page;
310 ULONG i;
311 BOOLEAN FlushedPages = FALSE;
312
313 DPRINT("CcRosTrimCache(Target %lu)\n", Target);
314
315 InitializeListHead(&FreeList);
316
317 *NrFreed = 0;
318
319 retry:
320 KeAcquireGuardedMutex(&ViewLock);
321
322 current_entry = VacbLruListHead.Flink;
323 while (current_entry != &VacbLruListHead)
324 {
325 current = CONTAINING_RECORD(current_entry,
326 ROS_VACB,
327 VacbLruListEntry);
328 current_entry = current_entry->Flink;
329
330 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
331
332 /* Reference the VACB */
333 CcRosVacbIncRefCount(current);
334
335 /* Check if it's mapped and not dirty */
336 if (current->MappedCount > 0 && !current->Dirty)
337 {
338 /* We have to break these locks because Cc sucks */
339 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
340 KeReleaseGuardedMutex(&ViewLock);
341
342 /* Page out the VACB */
343 for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
344 {
345 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
346
347 MmPageOutPhysicalAddress(Page);
348 }
349
350 /* Reacquire the locks */
351 KeAcquireGuardedMutex(&ViewLock);
352 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
353 }
354
355 /* Dereference the VACB */
356 CcRosVacbDecRefCount(current);
357
358 /* Check if we can free this entry now */
359 if (current->ReferenceCount == 0)
360 {
361 ASSERT(!current->Dirty);
362 ASSERT(!current->MappedCount);
363
364 RemoveEntryList(&current->CacheMapVacbListEntry);
365 RemoveEntryList(&current->VacbLruListEntry);
366 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
367
368 /* Calculate how many pages we freed for Mm */
369 PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
370 Target -= PagesFreed;
371 (*NrFreed) += PagesFreed;
372 }
373
374 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
375 }
376
377 KeReleaseGuardedMutex(&ViewLock);
378
379 /* Try flushing pages if we haven't met our target */
380 if ((Target > 0) && !FlushedPages)
381 {
382 /* Flush dirty pages to disk */
383 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
384 FlushedPages = TRUE;
385
386 /* We can only swap as many pages as we flushed */
387 if (PagesFreed < Target) Target = PagesFreed;
388
389 /* Check if we flushed anything */
390 if (PagesFreed != 0)
391 {
392 /* Try again after flushing dirty pages */
393 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
394 goto retry;
395 }
396 }
397
398 while (!IsListEmpty(&FreeList))
399 {
400 current_entry = RemoveHeadList(&FreeList);
401 current = CONTAINING_RECORD(current_entry,
402 ROS_VACB,
403 CacheMapVacbListEntry);
404 CcRosInternalFreeVacb(current);
405 }
406
407 DPRINT("Evicted %lu cache pages\n", (*NrFreed));
408
409 return STATUS_SUCCESS;
410 }
411
412 NTSTATUS
413 NTAPI
414 CcRosReleaseVacb (
415 PROS_SHARED_CACHE_MAP SharedCacheMap,
416 PROS_VACB Vacb,
417 BOOLEAN Valid,
418 BOOLEAN Dirty,
419 BOOLEAN Mapped)
420 {
421 ASSERT(SharedCacheMap);
422
423 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
424 SharedCacheMap, Vacb, Valid);
425
426 Vacb->Valid = Valid;
427
428 if (Dirty && !Vacb->Dirty)
429 {
430 CcRosMarkDirtyVacb(Vacb);
431 }
432
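    /* A VACB holds one extra reference for as long as it is mapped
     * (MappedCount > 0); on the first mapping the caller's reference is
     * effectively retained as that mapping reference, matching the extra
     * dereference in CcRosUnmapVacb. */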
433 if (Mapped)
434 {
435 Vacb->MappedCount++;
436 }
437 CcRosVacbDecRefCount(Vacb);
438 if (Mapped && (Vacb->MappedCount == 1))
439 {
440 CcRosVacbIncRefCount(Vacb);
441 }
442
443 CcRosReleaseVacbLock(Vacb);
444
445 return STATUS_SUCCESS;
446 }
447
448 /* Returns with VACB Lock Held! */
449 PROS_VACB
450 NTAPI
451 CcRosLookupVacb (
452 PROS_SHARED_CACHE_MAP SharedCacheMap,
453 LONGLONG FileOffset)
454 {
455 PLIST_ENTRY current_entry;
456 PROS_VACB current;
457 KIRQL oldIrql;
458
459 ASSERT(SharedCacheMap);
460
461 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
462 SharedCacheMap, FileOffset);
463
464 KeAcquireGuardedMutex(&ViewLock);
465 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
466
467 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
468 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
469 {
470 current = CONTAINING_RECORD(current_entry,
471 ROS_VACB,
472 CacheMapVacbListEntry);
473 if (IsPointInRange(current->FileOffset.QuadPart,
474 VACB_MAPPING_GRANULARITY,
475 FileOffset))
476 {
477 CcRosVacbIncRefCount(current);
478 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
479 KeReleaseGuardedMutex(&ViewLock);
480 CcRosAcquireVacbLock(current, NULL);
481 return current;
482 }
483 if (current->FileOffset.QuadPart > FileOffset)
484 break;
485 current_entry = current_entry->Flink;
486 }
487
488 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
489 KeReleaseGuardedMutex(&ViewLock);
490
491 return NULL;
492 }
493
494 VOID
495 NTAPI
496 CcRosMarkDirtyVacb (
497 PROS_VACB Vacb)
498 {
499 KIRQL oldIrql;
500 PROS_SHARED_CACHE_MAP SharedCacheMap;
501
502 SharedCacheMap = Vacb->SharedCacheMap;
503
504 KeAcquireGuardedMutex(&ViewLock);
505 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
506
507 ASSERT(!Vacb->Dirty);
508
509 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
510 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
511 Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
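    /* A dirty VACB holds an extra reference until CcRosUnmarkDirtyVacb drops it */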
512 CcRosVacbIncRefCount(Vacb);
513
514 /* Move to the tail of the LRU list */
515 RemoveEntryList(&Vacb->VacbLruListEntry);
516 InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
517
518 Vacb->Dirty = TRUE;
519
520 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
521 KeReleaseGuardedMutex(&ViewLock);
522
523 /* Schedule a lazy writer run now that we have a dirty VACB */
524 oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
525 if (!LazyWriter.ScanActive)
526 {
527 CcScheduleLazyWriteScan(FALSE);
528 }
529 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
530 }
531
532 VOID
533 NTAPI
534 CcRosUnmarkDirtyVacb (
535 PROS_VACB Vacb,
536 BOOLEAN LockViews)
537 {
538 KIRQL oldIrql;
539 PROS_SHARED_CACHE_MAP SharedCacheMap;
540
541 SharedCacheMap = Vacb->SharedCacheMap;
542
543 if (LockViews)
544 {
545 KeAcquireGuardedMutex(&ViewLock);
546 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
547 }
548
549 ASSERT(Vacb->Dirty);
550
551 Vacb->Dirty = FALSE;
552
553 RemoveEntryList(&Vacb->DirtyVacbListEntry);
554 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
555 Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
556 CcRosVacbDecRefCount(Vacb);
557
558 if (LockViews)
559 {
560 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
561 KeReleaseGuardedMutex(&ViewLock);
562 }
563 }
564
565 NTSTATUS
566 NTAPI
567 CcRosMarkDirtyFile (
568 PROS_SHARED_CACHE_MAP SharedCacheMap,
569 LONGLONG FileOffset)
570 {
571 PROS_VACB Vacb;
572
573 ASSERT(SharedCacheMap);
574
575 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
576 SharedCacheMap, FileOffset);
577
578 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
579 if (Vacb == NULL)
580 {
581 KeBugCheck(CACHE_MANAGER);
582 }
583
584 if (!Vacb->Dirty)
585 {
586 CcRosMarkDirtyVacb(Vacb);
587 }
588
589 CcRosReleaseVacbLock(Vacb);
590
591 return STATUS_SUCCESS;
592 }
593
594 NTSTATUS
595 NTAPI
596 CcRosUnmapVacb (
597 PROS_SHARED_CACHE_MAP SharedCacheMap,
598 LONGLONG FileOffset,
599 BOOLEAN NowDirty)
600 {
601 PROS_VACB Vacb;
602
603 ASSERT(SharedCacheMap);
604
605 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
606 SharedCacheMap, FileOffset, NowDirty);
607
608 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
609 if (Vacb == NULL)
610 {
611 return STATUS_UNSUCCESSFUL;
612 }
613
614 if (NowDirty && !Vacb->Dirty)
615 {
616 CcRosMarkDirtyVacb(Vacb);
617 }
618
619 ASSERT(Vacb->MappedCount != 0);
620 Vacb->MappedCount--;
621
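    /* Drop the reference taken by CcRosLookupVacb above; if this was the last
     * mapping, also drop the extra reference held while the VACB was mapped */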
622 CcRosVacbDecRefCount(Vacb);
623 if (Vacb->MappedCount == 0)
624 {
625 CcRosVacbDecRefCount(Vacb);
626 }
627
628 CcRosReleaseVacbLock(Vacb);
629
630 return STATUS_SUCCESS;
631 }
632
633 static
634 NTSTATUS
635 CcRosMapVacb(
636 PROS_VACB Vacb)
637 {
638 ULONG i;
639 NTSTATUS Status;
640 ULONG_PTR NumberOfPages;
641
642 /* Create a memory area. */
643 MmLockAddressSpace(MmGetKernelAddressSpace());
644 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
645 0, // nothing checks the memory area type for VACB mappings, so set it to 0
646 &Vacb->BaseAddress,
647 VACB_MAPPING_GRANULARITY,
648 PAGE_READWRITE,
649 (PMEMORY_AREA*)&Vacb->MemoryArea,
650 0,
651 PAGE_SIZE);
652 MmUnlockAddressSpace(MmGetKernelAddressSpace());
653 if (!NT_SUCCESS(Status))
654 {
655 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
656 return Status;
657 }
658
659 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
660 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
661
662 /* Create a virtual mapping for this memory area */
663 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
664 for (i = 0; i < NumberOfPages; i++)
665 {
666 PFN_NUMBER PageFrameNumber;
667
668 MI_SET_USAGE(MI_USAGE_CACHE);
669 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
670 if (PageFrameNumber == 0)
671 {
672 DPRINT1("Unable to allocate page\n");
673 KeBugCheck(MEMORY_MANAGEMENT);
674 }
675
676 Status = MmCreateVirtualMapping(NULL,
677 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
678 PAGE_READWRITE,
679 &PageFrameNumber,
680 1);
681 if (!NT_SUCCESS(Status))
682 {
683 DPRINT1("Unable to create virtual mapping\n");
684 KeBugCheck(MEMORY_MANAGEMENT);
685 }
686 }
687
688 return STATUS_SUCCESS;
689 }
690
691 static
692 NTSTATUS
693 CcRosCreateVacb (
694 PROS_SHARED_CACHE_MAP SharedCacheMap,
695 LONGLONG FileOffset,
696 PROS_VACB *Vacb)
697 {
698 PROS_VACB current;
699 PROS_VACB previous;
700 PLIST_ENTRY current_entry;
701 NTSTATUS Status;
702 KIRQL oldIrql;
703
704 ASSERT(SharedCacheMap);
705
706 DPRINT("CcRosCreateVacb()\n");
707
708 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
709 {
710 *Vacb = NULL;
711 return STATUS_INVALID_PARAMETER;
712 }
713
714 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
715 current->BaseAddress = NULL;
716 current->Valid = FALSE;
717 current->Dirty = FALSE;
718 current->PageOut = FALSE;
719 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
720 current->SharedCacheMap = SharedCacheMap;
721 #if DBG
722 if (SharedCacheMap->Trace)
723 {
724 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
725 }
726 #endif
727 current->MappedCount = 0;
728 current->DirtyVacbListEntry.Flink = NULL;
729 current->DirtyVacbListEntry.Blink = NULL;
730 current->ReferenceCount = 1;
731 current->PinCount = 0;
732 KeInitializeMutex(&current->Mutex, 0);
733 CcRosAcquireVacbLock(current, NULL);
734 KeAcquireGuardedMutex(&ViewLock);
735
736 *Vacb = current;
737 /* There is a window between the call to CcRosLookupVacb
738 * and this call to CcRosCreateVacb, so we must check whether a VACB
739 * for this file offset already exists. If one does, we release
740 * our newly created VACB and return the existing one.
741 */
742 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
743 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
744 previous = NULL;
745 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
746 {
747 current = CONTAINING_RECORD(current_entry,
748 ROS_VACB,
749 CacheMapVacbListEntry);
750 if (IsPointInRange(current->FileOffset.QuadPart,
751 VACB_MAPPING_GRANULARITY,
752 FileOffset))
753 {
754 CcRosVacbIncRefCount(current);
755 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
756 #if DBG
757 if (SharedCacheMap->Trace)
758 {
759 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
760 SharedCacheMap,
761 (*Vacb),
762 current);
763 }
764 #endif
765 CcRosReleaseVacbLock(*Vacb);
766 KeReleaseGuardedMutex(&ViewLock);
767 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
768 *Vacb = current;
769 CcRosAcquireVacbLock(current, NULL);
770 return STATUS_SUCCESS;
771 }
772 if (current->FileOffset.QuadPart < FileOffset)
773 {
774 ASSERT(previous == NULL ||
775 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
776 previous = current;
777 }
778 if (current->FileOffset.QuadPart > FileOffset)
779 break;
780 current_entry = current_entry->Flink;
781 }
782 /* There was no existing VACB. */
783 current = *Vacb;
784 if (previous)
785 {
786 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
787 }
788 else
789 {
790 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
791 }
792 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
793 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
794 KeReleaseGuardedMutex(&ViewLock);
795
796 MI_SET_USAGE(MI_USAGE_CACHE);
797 #if MI_TRACE_PFNS
798 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
799 {
800 PWCHAR pos;
801 ULONG len = 0;
802 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
803 if (pos)
804 {
805 len = wcslen(pos) * sizeof(WCHAR);
806 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
807 }
808 else
809 {
810 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
811 }
812 }
813 #endif
814
815 Status = CcRosMapVacb(current);
816 if (!NT_SUCCESS(Status))
817 {
818 RemoveEntryList(&current->CacheMapVacbListEntry);
819 RemoveEntryList(&current->VacbLruListEntry);
820 CcRosReleaseVacbLock(current);
821 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
822 }
823
824 return Status;
825 }
826
827 NTSTATUS
828 NTAPI
829 CcRosGetVacb (
830 PROS_SHARED_CACHE_MAP SharedCacheMap,
831 LONGLONG FileOffset,
832 PLONGLONG BaseOffset,
833 PVOID* BaseAddress,
834 PBOOLEAN UptoDate,
835 PROS_VACB *Vacb)
836 {
837 PROS_VACB current;
838 NTSTATUS Status;
839
840 ASSERT(SharedCacheMap);
841
842 DPRINT("CcRosGetVacb()\n");
843
844 /*
845 * Look for a VACB already mapping the same data.
846 */
847 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
848 if (current == NULL)
849 {
850 /*
851 * Otherwise create a new VACB.
852 */
853 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
854 if (!NT_SUCCESS(Status))
855 {
856 return Status;
857 }
858 }
859
860 KeAcquireGuardedMutex(&ViewLock);
861
862 /* Move to the tail of the LRU list */
863 RemoveEntryList(&current->VacbLruListEntry);
864 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
865
866 KeReleaseGuardedMutex(&ViewLock);
867
868 /*
869 * Return information about the VACB to the caller.
870 */
871 *UptoDate = current->Valid;
872 *BaseAddress = current->BaseAddress;
873 DPRINT("*BaseAddress %p\n", *BaseAddress);
874 *Vacb = current;
875 *BaseOffset = current->FileOffset.QuadPart;
876 return STATUS_SUCCESS;
877 }
878
879 NTSTATUS
880 NTAPI
881 CcRosRequestVacb (
882 PROS_SHARED_CACHE_MAP SharedCacheMap,
883 LONGLONG FileOffset,
884 PVOID* BaseAddress,
885 PBOOLEAN UptoDate,
886 PROS_VACB *Vacb)
887 /*
888 * FUNCTION: Request a page mapping for a shared cache map
889 */
890 {
891 LONGLONG BaseOffset;
892
893 ASSERT(SharedCacheMap);
894
895 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
896 {
897 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
898 FileOffset, VACB_MAPPING_GRANULARITY);
899 KeBugCheck(CACHE_MANAGER);
900 }
901
902 return CcRosGetVacb(SharedCacheMap,
903 FileOffset,
904 &BaseOffset,
905 BaseAddress,
906 UptoDate,
907 Vacb);
908 }
909
910 static
911 VOID
912 CcFreeCachePage (
913 PVOID Context,
914 MEMORY_AREA* MemoryArea,
915 PVOID Address,
916 PFN_NUMBER Page,
917 SWAPENTRY SwapEntry,
918 BOOLEAN Dirty)
919 {
920 ASSERT(SwapEntry == 0);
921 if (Page != 0)
922 {
923 ASSERT(MmGetReferenceCountPage(Page) == 1);
924 MmReleasePageMemoryConsumer(MC_CACHE, Page);
925 }
926 }
927
928 NTSTATUS
929 CcRosInternalFreeVacb (
930 PROS_VACB Vacb)
931 /*
932 * FUNCTION: Releases a VACB associated with a shared cache map
933 */
934 {
935 DPRINT("Freeing VACB 0x%p\n", Vacb);
936 #if DBG
937 if (Vacb->SharedCacheMap->Trace)
938 {
939 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
940 }
941 #endif
942
943 MmLockAddressSpace(MmGetKernelAddressSpace());
944 MmFreeMemoryArea(MmGetKernelAddressSpace(),
945 Vacb->MemoryArea,
946 CcFreeCachePage,
947 NULL);
948 MmUnlockAddressSpace(MmGetKernelAddressSpace());
949
950 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
951 return STATUS_SUCCESS;
952 }
953
954 /*
955 * @implemented
956 */
957 VOID
958 NTAPI
959 CcFlushCache (
960 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
961 IN PLARGE_INTEGER FileOffset OPTIONAL,
962 IN ULONG Length,
963 OUT PIO_STATUS_BLOCK IoStatus)
964 {
965 PROS_SHARED_CACHE_MAP SharedCacheMap;
966 LARGE_INTEGER Offset;
967 LONGLONG RemainingLength;
968 PROS_VACB current;
969 NTSTATUS Status;
970 KIRQL oldIrql;
971
972 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
973 SectionObjectPointers, FileOffset, Length);
974
975 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
976 SectionObjectPointers, FileOffset, Length, IoStatus);
977
978 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
979 {
980 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
981 ASSERT(SharedCacheMap);
982 if (FileOffset)
983 {
984 Offset = *FileOffset;
985 RemainingLength = Length;
986 }
987 else
988 {
989 Offset.QuadPart = 0;
990 RemainingLength = SharedCacheMap->FileSize.QuadPart;
991 }
992
993 if (IoStatus)
994 {
995 IoStatus->Status = STATUS_SUCCESS;
996 IoStatus->Information = 0;
997 }
998
999 while (RemainingLength > 0)
1000 {
1001 current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
1002 if (current != NULL)
1003 {
1004 if (current->Dirty)
1005 {
1006 Status = CcRosFlushVacb(current);
1007 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1008 {
1009 IoStatus->Status = Status;
1010 }
1011 }
1012
1013 CcRosReleaseVacbLock(current);
1014
1015 KeAcquireGuardedMutex(&ViewLock);
1016 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1017 CcRosVacbDecRefCount(current);
1018 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1019 KeReleaseGuardedMutex(&ViewLock);
1020 }
1021
1022 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
1023 RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
1024 }
1025 }
1026 else
1027 {
1028 if (IoStatus)
1029 {
1030 IoStatus->Status = STATUS_INVALID_PARAMETER;
1031 }
1032 }
1033 }
1034
1035 NTSTATUS
1036 NTAPI
1037 CcRosDeleteFileCache (
1038 PFILE_OBJECT FileObject,
1039 PROS_SHARED_CACHE_MAP SharedCacheMap)
1040 /*
1041 * FUNCTION: Releases the shared cache map associated with a file object
1042 */
1043 {
1044 PLIST_ENTRY current_entry;
1045 PROS_VACB current;
1046 LIST_ENTRY FreeList;
1047 KIRQL oldIrql;
1048
1049 ASSERT(SharedCacheMap);
1050
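    /* Take a temporary OpenCount reference so the shared cache map is not
     * torn down while we drop ViewLock to flush its dirty data */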
1051 SharedCacheMap->OpenCount++;
1052 KeReleaseGuardedMutex(&ViewLock);
1053
1054 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1055
1056 KeAcquireGuardedMutex(&ViewLock);
1057 SharedCacheMap->OpenCount--;
1058 if (SharedCacheMap->OpenCount == 0)
1059 {
1060 KIRQL OldIrql;
1061
1062 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1063
1064 /*
1065 * Release all VACBs
1066 */
1067 InitializeListHead(&FreeList);
1068 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1069 while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
1070 {
1071 current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
1072 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1073
1074 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1075 CcRosAcquireVacbLock(current, NULL);
1076 RemoveEntryList(&current->VacbLruListEntry);
1077 if (current->Dirty)
1078 {
1079 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1080 CcRosUnmarkDirtyVacb(current, FALSE);
1081 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1082 DPRINT1("Freeing dirty VACB\n");
1083 }
1084 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
1085 CcRosReleaseVacbLock(current);
1086
1087 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1088 }
1089 #if DBG
1090 SharedCacheMap->Trace = FALSE;
1091 #endif
1092 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1093
1094 KeReleaseGuardedMutex(&ViewLock);
1095 ObDereferenceObject(SharedCacheMap->FileObject);
1096
1097 while (!IsListEmpty(&FreeList))
1098 {
1099 current_entry = RemoveTailList(&FreeList);
1100 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1101 CcRosInternalFreeVacb(current);
1102 }
1103
1104 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1105 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
1106 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1107
1108 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1109 KeAcquireGuardedMutex(&ViewLock);
1110 }
1111 return STATUS_SUCCESS;
1112 }
1113
1114 VOID
1115 NTAPI
1116 CcRosReferenceCache (
1117 PFILE_OBJECT FileObject)
1118 {
1119 PROS_SHARED_CACHE_MAP SharedCacheMap;
1120 KeAcquireGuardedMutex(&ViewLock);
1121 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1122 ASSERT(SharedCacheMap);
1123 ASSERT(SharedCacheMap->OpenCount != 0);
1124 SharedCacheMap->OpenCount++;
1125 KeReleaseGuardedMutex(&ViewLock);
1126 }
1127
1128 VOID
1129 NTAPI
1130 CcRosRemoveIfClosed (
1131 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1132 {
1133 PROS_SHARED_CACHE_MAP SharedCacheMap;
1134 DPRINT("CcRosRemoveIfClosed()\n");
1135 KeAcquireGuardedMutex(&ViewLock);
1136 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1137 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1138 {
1139 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1140 }
1141 KeReleaseGuardedMutex(&ViewLock);
1142 }
1143
1144
1145 VOID
1146 NTAPI
1147 CcRosDereferenceCache (
1148 PFILE_OBJECT FileObject)
1149 {
1150 PROS_SHARED_CACHE_MAP SharedCacheMap;
1151 KeAcquireGuardedMutex(&ViewLock);
1152 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1153 ASSERT(SharedCacheMap);
1154 if (SharedCacheMap->OpenCount > 0)
1155 {
1156 SharedCacheMap->OpenCount--;
1157 if (SharedCacheMap->OpenCount == 0)
1158 {
1159 MmFreeSectionSegments(SharedCacheMap->FileObject);
1160 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1161 }
1162 }
1163 KeReleaseGuardedMutex(&ViewLock);
1164 }
1165
1166 NTSTATUS
1167 NTAPI
1168 CcRosReleaseFileCache (
1169 PFILE_OBJECT FileObject)
1170 /*
1171 * FUNCTION: Called by the file system when a handle to a file object
1172 * has been closed.
1173 */
1174 {
1175 PROS_SHARED_CACHE_MAP SharedCacheMap;
1176
1177 KeAcquireGuardedMutex(&ViewLock);
1178
1179 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1180 {
1181 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1182 if (FileObject->PrivateCacheMap != NULL)
1183 {
1184 FileObject->PrivateCacheMap = NULL;
1185 if (SharedCacheMap->OpenCount > 0)
1186 {
1187 SharedCacheMap->OpenCount--;
1188 if (SharedCacheMap->OpenCount == 0)
1189 {
1190 MmFreeSectionSegments(SharedCacheMap->FileObject);
1191 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1192 }
1193 }
1194 }
1195 }
1196 KeReleaseGuardedMutex(&ViewLock);
1197 return STATUS_SUCCESS;
1198 }
1199
1200 NTSTATUS
1201 NTAPI
1202 CcRosInitializeFileCache (
1203 PFILE_OBJECT FileObject,
1204 PCC_FILE_SIZES FileSizes,
1205 BOOLEAN PinAccess,
1206 PCACHE_MANAGER_CALLBACKS CallBacks,
1207 PVOID LazyWriterContext)
1208 /*
1209 * FUNCTION: Initializes a shared cache map for a file object
1210 */
1211 {
1212 KIRQL OldIrql;
1213 BOOLEAN Allocated;
1214 PROS_SHARED_CACHE_MAP SharedCacheMap;
1215
1216 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1217 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1218 FileObject, SharedCacheMap);
1219
1220 Allocated = FALSE;
1221 KeAcquireGuardedMutex(&ViewLock);
1222 if (SharedCacheMap == NULL)
1223 {
1224 Allocated = TRUE;
1225 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1226 if (SharedCacheMap == NULL)
1227 {
1228 KeReleaseGuardedMutex(&ViewLock);
1229 return STATUS_INSUFFICIENT_RESOURCES;
1230 }
1231 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1232 ObReferenceObjectByPointer(FileObject,
1233 FILE_ALL_ACCESS,
1234 NULL,
1235 KernelMode);
1236 SharedCacheMap->FileObject = FileObject;
1237 SharedCacheMap->Callbacks = CallBacks;
1238 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1239 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1240 SharedCacheMap->FileSize = FileSizes->FileSize;
1241 SharedCacheMap->PinAccess = PinAccess;
1242 SharedCacheMap->DirtyPageThreshold = 0;
1243 SharedCacheMap->DirtyPages = 0;
1244 InitializeListHead(&SharedCacheMap->PrivateList);
1245 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1246 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1247 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1248
1249 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1250 InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
1251 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1252 }
1253 if (FileObject->PrivateCacheMap == NULL)
1254 {
1255 PPRIVATE_CACHE_MAP PrivateMap;
1256
1257 /* Allocate the private cache map for this handle */
1258 PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), 'cPcC');
1259 if (PrivateMap == NULL)
1260 {
1261 /* If we also allocated the shared cache map for this file, kill it */
1262 if (Allocated)
1263 {
1264 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1265 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
1266 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1267
1268 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1269 ObDereferenceObject(FileObject);
1270 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1271 }
1272
1273 KeReleaseGuardedMutex(&ViewLock);
1274 return STATUS_INSUFFICIENT_RESOURCES;
1275 }
1276
1277 /* Initialize it */
1278 RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
1279 PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
1280 PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
1281 PrivateMap->FileObject = FileObject;
1282
1283 /* Link it to the file */
1284 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
1285 InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
1286 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
1287
1288 FileObject->PrivateCacheMap = PrivateMap;
1289 SharedCacheMap->OpenCount++;
1290 }
1291 KeReleaseGuardedMutex(&ViewLock);
1292
1293 return STATUS_SUCCESS;
1294 }
1295
1296 /*
1297 * @implemented
1298 */
1299 PFILE_OBJECT
1300 NTAPI
1301 CcGetFileObjectFromSectionPtrs (
1302 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1303 {
1304 PROS_SHARED_CACHE_MAP SharedCacheMap;
1305
1306 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1307
1308 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1309 {
1310 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1311 ASSERT(SharedCacheMap);
1312 return SharedCacheMap->FileObject;
1313 }
1314 return NULL;
1315 }
1316
1317 VOID
1318 INIT_FUNCTION
1319 NTAPI
1320 CcInitView (
1321 VOID)
1322 {
1323 DPRINT("CcInitView()\n");
1324
1325 InitializeListHead(&DirtyVacbListHead);
1326 InitializeListHead(&VacbLruListHead);
1327 InitializeListHead(&CcDeferredWrites);
1328 InitializeListHead(&CcCleanSharedCacheMapList);
1329 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1330 KeInitializeSpinLock(&iSharedCacheMapLock);
1331 KeInitializeGuardedMutex(&ViewLock);
1332 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1333 NULL,
1334 NULL,
1335 0,
1336 sizeof(INTERNAL_BCB),
1337 TAG_BCB,
1338 20);
1339 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1340 NULL,
1341 NULL,
1342 0,
1343 sizeof(ROS_SHARED_CACHE_MAP),
1344 TAG_SHARED_CACHE_MAP,
1345 20);
1346 ExInitializeNPagedLookasideList(&VacbLookasideList,
1347 NULL,
1348 NULL,
1349 0,
1350 sizeof(ROS_VACB),
1351 TAG_VACB,
1352 20);
1353
1354 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1355
1356 CcInitCacheZeroPage();
1357 }
1358
1359 #if DBG && defined(KDBG)
1360 BOOLEAN
1361 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1362 {
1363 PLIST_ENTRY ListEntry;
1364 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1365
1366 KdbpPrint(" Usage Summary (in kb)\n");
1367 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1368 /* No need to lock the spin lock here, we're in DBG */
1369 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1370 ListEntry != &CcCleanSharedCacheMapList;
1371 ListEntry = ListEntry->Flink)
1372 {
1373 PLIST_ENTRY Vacbs;
1374 ULONG Valid = 0, Dirty = 0;
1375 PROS_SHARED_CACHE_MAP SharedCacheMap;
1376 PUNICODE_STRING FileName;
1377
1378 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1379
1380 /* Dirty size */
1381 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1382
1383 /* First, count for all the associated VACB */
1384 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1385 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1386 Vacbs = Vacbs->Flink)
1387 {
1388 PROS_VACB Vacb;
1389
1390 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1391 if (Vacb->Valid)
1392 {
1393 Valid += VACB_MAPPING_GRANULARITY / 1024;
1394 }
1395 }
1396
1397 /* Setup name */
1398 if (SharedCacheMap->FileObject != NULL &&
1399 SharedCacheMap->FileObject->FileName.Length != 0)
1400 {
1401 FileName = &SharedCacheMap->FileObject->FileName;
1402 }
1403 else
1404 {
1405 FileName = &NoName;
1406 }
1407
1408 /* And print */
1409 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
1410 }
1411
1412 return TRUE;
1413 }
1414 #endif
1415
1416 /* EOF */