[NTOSKRNL] In case we leak a VACB, debug as much information as possible.
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
19 * (1) If caching for the FCB hasn't been initiated then do so by calling
20 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
27 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
/* Global list of dirty VACBs, written back by CcRosFlushDirtyPages */
LIST_ENTRY DirtyVacbListHead;
/* All VACBs in recency order; CcRosTrimCache evicts from the front */
static LIST_ENTRY VacbLruListHead;

/* Serializes access to the global and per-cache-map VACB lists */
KGUARDED_MUTEX ViewLock;

/* Lookaside lists for the cache manager's fixed-size allocations */
NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Internal vars (MS):
 * - Threshold above which lazy writer will start action
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock when dealing with the deferred list
 * - List for "clean" shared cache maps
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;
66
#if DBG
/* Debug wrapper: increment the VACB's reference count and, when per-map
 * tracing is enabled, log the caller's location and the new state. */
ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
/* Debug wrapper: decrement the VACB's reference count, log when tracing
 * is enabled, and free the VACB once the last reference is gone. */
ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
    /* Dropping the last reference on a dirty VACB would lose its data */
    ASSERT(!(Refs == 0 && vacb->Dirty));
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    if (Refs == 0)
    {
        CcRosInternalFreeVacb(vacb);
    }

    return Refs;
}
/* Debug wrapper: read the current VACB reference count atomically (via a
 * no-op compare-exchange) and log it when tracing is enabled. */
ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
#endif
114
115
116 /* FUNCTIONS *****************************************************************/
117
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    /* Enable or disable reference-count tracing for a shared cache map.
     * When enabling, dump the current state of every attached VACB.
     * No-op on non-debug builds. */
    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        /* Lock order: global view lock first, then the per-map spinlock */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
163
164 NTSTATUS
165 NTAPI
166 CcRosFlushVacb (
167 PROS_VACB Vacb)
168 {
169 NTSTATUS Status;
170
171 Status = CcWriteVirtualAddress(Vacb);
172 if (NT_SUCCESS(Status))
173 {
174 CcRosUnmarkDirtyVacb(Vacb, TRUE);
175 }
176
177 return Status;
178 }
179
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;

    /*
     * Walk the global dirty-VACB list and write views back until roughly
     * Target pages have been flushed.
     *  Target         - number of pages to try to flush
     *  Count          - receives the number of pages actually flushed
     *  Wait           - passed to AcquireForLazyWrite: whether to block
     *                   waiting for the file system's resource
     *  CalledFromLazy - TRUE when called by the lazy writer; temporary
     *                   files are then skipped
     */
    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;

    /* Stay in a critical region so the guarded mutex usage is balanced */
    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Keep the VACB alive while ViewLock is dropped below */
        CcRosVacbIncRefCount(current);

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Let the file system acquire its resources before the write */
        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above */
        if (CcRosVacbGetRefCount(current) > 2)
        {
            /* Someone else is using the view right now; skip it */
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Drop the view lock across the (possibly blocking) write */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            (*Count) += PagesFreed;

            /* Make sure we don't overflow target! */
            if (Target < PagesFreed)
            {
                /* If we would have, jump to zero directly */
                Target = 0;
            }
            else
            {
                Target -= PagesFreed;
            }
        }

        /* The list may have changed while unlocked: restart from the head */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
286
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Walk the LRU list front (least recently used) to back */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        Refs = CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            /* Unlink it from both lists and queue it on FreeList; the
             * final dereference happens below, after all locks are gone */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Drop the last reference on each evicted VACB, freeing it */
    while (!IsListEmpty(&FreeList))
    {
        ULONG Refs;

        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        InitializeListHead(&current->CacheMapVacbListEntry);
        Refs = CcRosVacbDecRefCount(current);
        ASSERT(Refs == 0);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
416
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ULONG Refs;
    ASSERT(SharedCacheMap);

    /*
     * Release a VACB obtained from CcRosGetVacb/CcRosLookupVacb.
     *  Valid  - new validity state recorded on the view
     *  Dirty  - TRUE if the caller modified the view's contents
     *  Mapped - TRUE if the caller added a mapping of the view
     */
    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    Vacb->Valid = Valid;

    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        /* The first mapping holds its own reference on the VACB */
        if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
        {
            CcRosVacbIncRefCount(Vacb);
        }
    }

    /* Drop the caller's reference; something must still hold the VACB */
    Refs = CcRosVacbDecRefCount(Vacb);
    ASSERT(Refs > 0);

    return STATUS_SUCCESS;
}
452
/* Looks up the VACB covering FileOffset in the cache map's (sorted)
 * VACB list. On success the VACB is returned with an extra reference
 * held for the caller; both locks are released again before returning.
 * Returns NULL when the offset is not cached. */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Reference the hit before dropping the locks */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            return current;
        }
        /* The list is sorted by offset: we can stop past the target */
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}
497
VOID
NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    /* Put the VACB on the global dirty list, account its pages, and take
     * a reference that is held for as long as it remains dirty (dropped
     * by CcRosUnmarkDirtyVacb). The VACB must not already be dirty. */
    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}
535
VOID
NTAPI
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    /* Remove the VACB from the dirty list and undo the dirty-page
     * accounting. Pass LockViews == FALSE only when the caller already
     * holds ViewLock and the cache map spinlock. */
    if (LockViews)
    {
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    InitializeListHead(&Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* Drop the reference taken by CcRosMarkDirtyVacb */
    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }
}
569
570 NTSTATUS
571 NTAPI
572 CcRosMarkDirtyFile (
573 PROS_SHARED_CACHE_MAP SharedCacheMap,
574 LONGLONG FileOffset)
575 {
576 PROS_VACB Vacb;
577
578 ASSERT(SharedCacheMap);
579
580 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
581 SharedCacheMap, FileOffset);
582
583 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
584 if (Vacb == NULL)
585 {
586 KeBugCheck(CACHE_MANAGER);
587 }
588
589 CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);
590
591 return STATUS_SUCCESS;
592 }
593
594 /*
595 * Note: this is not the contrary function of
596 * CcRosMapVacbInKernelSpace()
597 */
598 NTSTATUS
599 NTAPI
600 CcRosUnmapVacb (
601 PROS_SHARED_CACHE_MAP SharedCacheMap,
602 LONGLONG FileOffset,
603 BOOLEAN NowDirty)
604 {
605 PROS_VACB Vacb;
606
607 ASSERT(SharedCacheMap);
608
609 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
610 SharedCacheMap, FileOffset, NowDirty);
611
612 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
613 if (Vacb == NULL)
614 {
615 return STATUS_UNSUCCESSFUL;
616 }
617
618 ASSERT(Vacb->MappedCount != 0);
619 if (InterlockedDecrement((PLONG)&Vacb->MappedCount) == 0)
620 {
621 CcRosVacbDecRefCount(Vacb);
622 }
623
624 CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);
625
626 return STATUS_SUCCESS;
627 }
628
629 static
630 NTSTATUS
631 CcRosMapVacbInKernelSpace(
632 PROS_VACB Vacb)
633 {
634 ULONG i;
635 NTSTATUS Status;
636 ULONG_PTR NumberOfPages;
637 PVOID BaseAddress = NULL;
638
639 /* Create a memory area. */
640 MmLockAddressSpace(MmGetKernelAddressSpace());
641 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
642 0, // nothing checks for VACB mareas, so set to 0
643 &BaseAddress,
644 VACB_MAPPING_GRANULARITY,
645 PAGE_READWRITE,
646 (PMEMORY_AREA*)&Vacb->MemoryArea,
647 0,
648 PAGE_SIZE);
649 ASSERT(Vacb->BaseAddress == NULL);
650 Vacb->BaseAddress = BaseAddress;
651 MmUnlockAddressSpace(MmGetKernelAddressSpace());
652 if (!NT_SUCCESS(Status))
653 {
654 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
655 return Status;
656 }
657
658 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
659 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
660 ASSERT((ULONG_PTR)Vacb->BaseAddress + VACB_MAPPING_GRANULARITY - 1 > (ULONG_PTR)MmSystemRangeStart);
661
662 /* Create a virtual mapping for this memory area */
663 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
664 for (i = 0; i < NumberOfPages; i++)
665 {
666 PFN_NUMBER PageFrameNumber;
667
668 MI_SET_USAGE(MI_USAGE_CACHE);
669 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
670 if (PageFrameNumber == 0)
671 {
672 DPRINT1("Unable to allocate page\n");
673 KeBugCheck(MEMORY_MANAGEMENT);
674 }
675
676 ASSERT(BaseAddress == Vacb->BaseAddress);
677 ASSERT(i * PAGE_SIZE < VACB_MAPPING_GRANULARITY);
678 ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) >= (ULONG_PTR)BaseAddress);
679 ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) > (ULONG_PTR)MmSystemRangeStart);
680
681 Status = MmCreateVirtualMapping(NULL,
682 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
683 PAGE_READWRITE,
684 &PageFrameNumber,
685 1);
686 if (!NT_SUCCESS(Status))
687 {
688 DPRINT1("Unable to create virtual mapping\n");
689 KeBugCheck(MEMORY_MANAGEMENT);
690 }
691 }
692
693 return STATUS_SUCCESS;
694 }
695
696 static
697 NTSTATUS
698 CcRosCreateVacb (
699 PROS_SHARED_CACHE_MAP SharedCacheMap,
700 LONGLONG FileOffset,
701 PROS_VACB *Vacb)
702 {
703 PROS_VACB current;
704 PROS_VACB previous;
705 PLIST_ENTRY current_entry;
706 NTSTATUS Status;
707 KIRQL oldIrql;
708 ULONG Refs;
709
710 ASSERT(SharedCacheMap);
711
712 DPRINT("CcRosCreateVacb()\n");
713
714 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
715 {
716 *Vacb = NULL;
717 return STATUS_INVALID_PARAMETER;
718 }
719
720 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
721 current->BaseAddress = NULL;
722 current->Valid = FALSE;
723 current->Dirty = FALSE;
724 current->PageOut = FALSE;
725 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
726 current->SharedCacheMap = SharedCacheMap;
727 #if DBG
728 if (SharedCacheMap->Trace)
729 {
730 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
731 }
732 #endif
733 current->MappedCount = 0;
734 current->ReferenceCount = 0;
735 current->PinCount = 0;
736 InitializeListHead(&current->CacheMapVacbListEntry);
737 InitializeListHead(&current->DirtyVacbListEntry);
738 InitializeListHead(&current->VacbLruListEntry);
739
740 CcRosVacbIncRefCount(current);
741
742 Status = CcRosMapVacbInKernelSpace(current);
743 if (!NT_SUCCESS(Status))
744 {
745 CcRosVacbDecRefCount(current);
746 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
747 return Status;
748 }
749
750 KeAcquireGuardedMutex(&ViewLock);
751
752 *Vacb = current;
753 /* There is window between the call to CcRosLookupVacb
754 * and CcRosCreateVacb. We must check if a VACB for the
755 * file offset exist. If there is a VACB, we release
756 * our newly created VACB and return the existing one.
757 */
758 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
759 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
760 previous = NULL;
761 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
762 {
763 current = CONTAINING_RECORD(current_entry,
764 ROS_VACB,
765 CacheMapVacbListEntry);
766 if (IsPointInRange(current->FileOffset.QuadPart,
767 VACB_MAPPING_GRANULARITY,
768 FileOffset))
769 {
770 CcRosVacbIncRefCount(current);
771 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
772 #if DBG
773 if (SharedCacheMap->Trace)
774 {
775 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
776 SharedCacheMap,
777 (*Vacb),
778 current);
779 }
780 #endif
781 KeReleaseGuardedMutex(&ViewLock);
782
783 Refs = CcRosVacbDecRefCount(*Vacb);
784 ASSERT(Refs == 0);
785
786 *Vacb = current;
787 return STATUS_SUCCESS;
788 }
789 if (current->FileOffset.QuadPart < FileOffset)
790 {
791 ASSERT(previous == NULL ||
792 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
793 previous = current;
794 }
795 if (current->FileOffset.QuadPart > FileOffset)
796 break;
797 current_entry = current_entry->Flink;
798 }
799 /* There was no existing VACB. */
800 current = *Vacb;
801 if (previous)
802 {
803 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
804 }
805 else
806 {
807 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
808 }
809 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
810 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
811 KeReleaseGuardedMutex(&ViewLock);
812
813 MI_SET_USAGE(MI_USAGE_CACHE);
814 #if MI_TRACE_PFNS
815 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
816 {
817 PWCHAR pos;
818 ULONG len = 0;
819 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
820 if (pos)
821 {
822 len = wcslen(pos) * sizeof(WCHAR);
823 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
824 }
825 else
826 {
827 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
828 }
829 }
830 #endif
831
832 /* Reference it to allow release */
833 CcRosVacbIncRefCount(current);
834
835 return Status;
836 }
837
NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PLONGLONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;
    ULONG Refs;

    ASSERT(SharedCacheMap);

    /*
     * Return (creating it if needed) the VACB covering FileOffset.
     *  BaseOffset  - receives the view's file offset
     *  BaseAddress - receives the view's kernel mapping address
     *  UptoDate    - receives whether the view contents are valid
     *  Vacb        - receives the referenced VACB; release it with
     *                CcRosReleaseVacb
     */
    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    Refs = CcRosVacbGetRefCount(current);

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the VACB to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *Vacb = current;
    *BaseOffset = current->FileOffset.QuadPart;

    /* The caller's reference plus at least the cache's own */
    ASSERT(Refs > 1);

    return STATUS_SUCCESS;
}
895
896 NTSTATUS
897 NTAPI
898 CcRosRequestVacb (
899 PROS_SHARED_CACHE_MAP SharedCacheMap,
900 LONGLONG FileOffset,
901 PVOID* BaseAddress,
902 PBOOLEAN UptoDate,
903 PROS_VACB *Vacb)
904 /*
905 * FUNCTION: Request a page mapping for a shared cache map
906 */
907 {
908 LONGLONG BaseOffset;
909
910 ASSERT(SharedCacheMap);
911
912 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
913 {
914 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
915 FileOffset, VACB_MAPPING_GRANULARITY);
916 KeBugCheck(CACHE_MANAGER);
917 }
918
919 return CcRosGetVacb(SharedCacheMap,
920 FileOffset,
921 &BaseOffset,
922 BaseAddress,
923 UptoDate,
924 Vacb);
925 }
926
927 static
928 VOID
929 CcFreeCachePage (
930 PVOID Context,
931 MEMORY_AREA* MemoryArea,
932 PVOID Address,
933 PFN_NUMBER Page,
934 SWAPENTRY SwapEntry,
935 BOOLEAN Dirty)
936 {
937 ASSERT(SwapEntry == 0);
938 if (Page != 0)
939 {
940 ASSERT(MmGetReferenceCountPage(Page) == 1);
941 MmReleasePageMemoryConsumer(MC_CACHE, Page);
942 }
943 }
944
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    /* Tear down the kernel mapping; CcFreeCachePage releases each page */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    /* If we are about to leak/misfree this VACB, dump as much detail as
     * possible before the asserts below fire */
    if (Vacb->PinCount != 0 || Vacb->ReferenceCount != 0)
    {
        DPRINT1("Invalid free: %ld, %ld\n", Vacb->ReferenceCount, Vacb->PinCount);
        if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
        {
            DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
        }
    }

    ASSERT(Vacb->PinCount == 0);
    ASSERT(Vacb->ReferenceCount == 0);
    ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));
    /* Poison the structure to catch use-after-free */
    RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
985
/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
        SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        /* No explicit range given means: flush the whole file */
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one view at a time, flushing each dirty view */
        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    /* Record failures (later failures overwrite earlier
                     * ones) but keep flushing the remaining views */
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                CcRosReleaseVacb(SharedCacheMap, current, current->Valid, current->Dirty, FALSE);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1059
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 * NOTE: Called with ViewLock held; it is temporarily released around
 * the flush and reacquired before returning.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    /* Keep the map alive while ViewLock is dropped for the flush */
    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        KIRQL OldIrql;

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* CcRosUnmarkDirtyVacb(…, FALSE) expects the spinlock held */
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosUnmarkDirtyVacb(current, FALSE);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                DPRINT1("Freeing dirty VACB\n");
            }
            if (current->MappedCount != 0)
            {
                /* Forcefully drop any remaining mapping reference */
                current->MappedCount = 0;
                NT_VERIFY(CcRosVacbDecRefCount(current) > 0);
                DPRINT1("Freeing mapped VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        /* Drop the final reference on each VACB. On debug builds, if a
         * VACB turns out to be leaked, dump everything we know (CORE-14578) */
        while (!IsListEmpty(&FreeList))
        {
            ULONG Refs;

            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            InitializeListHead(&current->CacheMapVacbListEntry);
            Refs = CcRosVacbDecRefCount(current);
#if DBG // CORE-14578
            if (Refs != 0)
            {
                DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", current, FileObject, current->FileOffset.QuadPart);
                DPRINT1("There are: %d references left\n", Refs);
                DPRINT1("Pin: %d, Map: %d\n", current->PinCount, current->MappedCount);
                DPRINT1("Dirty: %d\n", current->Dirty);
                if (FileObject->FileName.Length != 0)
                {
                    DPRINT1("File was: %wZ\n", &FileObject->FileName);
                }
                else
                {
                    DPRINT1("No name for the file\n");
                }
            }
#else
            ASSERT(Refs == 0);
#endif
        }

        /* Unlink the map from the global list and free it */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1165
1166 VOID
1167 NTAPI
1168 CcRosReferenceCache (
1169 PFILE_OBJECT FileObject)
1170 {
1171 PROS_SHARED_CACHE_MAP SharedCacheMap;
1172 KeAcquireGuardedMutex(&ViewLock);
1173 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1174 ASSERT(SharedCacheMap);
1175 ASSERT(SharedCacheMap->OpenCount != 0);
1176 SharedCacheMap->OpenCount++;
1177 KeReleaseGuardedMutex(&ViewLock);
1178 }
1179
1180 VOID
1181 NTAPI
1182 CcRosRemoveIfClosed (
1183 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1184 {
1185 PROS_SHARED_CACHE_MAP SharedCacheMap;
1186 DPRINT("CcRosRemoveIfClosed()\n");
1187 KeAcquireGuardedMutex(&ViewLock);
1188 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1189 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1190 {
1191 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1192 }
1193 KeReleaseGuardedMutex(&ViewLock);
1194 }
1195
1196
1197 VOID
1198 NTAPI
1199 CcRosDereferenceCache (
1200 PFILE_OBJECT FileObject)
1201 {
1202 PROS_SHARED_CACHE_MAP SharedCacheMap;
1203 KeAcquireGuardedMutex(&ViewLock);
1204 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1205 ASSERT(SharedCacheMap);
1206 if (SharedCacheMap->OpenCount > 0)
1207 {
1208 SharedCacheMap->OpenCount--;
1209 if (SharedCacheMap->OpenCount == 0)
1210 {
1211 MmFreeSectionSegments(SharedCacheMap->FileObject);
1212 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1213 }
1214 }
1215 KeReleaseGuardedMutex(&ViewLock);
1216 }
1217
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 * Tears down the handle's private cache map and, if this was the last
 * open reference, the shared cache map as well.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map
         * Before you even try to remove it from FO, always
         * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing!
         */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        if (PrivateMap != NULL)
        {
            /* Remove it from the shared map's list of private maps,
             * under the cache map spinlock */
            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

            /* And free it. The first private map is embedded inside the
             * shared cache map: that one is only marked unused (NodeTypeCode
             * cleared), never returned to pool. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            /* Drop the reference this handle held on the shared map;
             * on the last close, free the section segments and delete
             * the file cache entirely. */
            if (SharedCacheMap->OpenCount > 0)
            {
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}
1278
NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 * Also sets up a per-handle private cache map if this handle does not
 * have one yet. Returns STATUS_INSUFFICIENT_RESOURCES on allocation
 * failure (rolling back the shared map if it was allocated here),
 * STATUS_SUCCESS otherwise.
 */
{
    KIRQL OldIrql;
    BOOLEAN Allocated;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    Allocated = FALSE;
    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        /* First caller for this file: create the shared cache map */
        Allocated = TRUE;
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        /* Keep the file object alive for the lifetime of the cache map
         * (dereferenced when the map is torn down) */
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
        SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->PinAccess = PinAccess;
        /* No per-file dirty page threshold by default */
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        InitializeListHead(&SharedCacheMap->PrivateList);
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        /* Publish the new map on the global clean list, under the master lock */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }
    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle */
        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
        {
            /* The embedded private map is already taken by another handle:
             * allocate a separate one from pool */
            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        }
        else
        {
            /* First handle: use the private map embedded in the shared map */
            PrivateMap = &SharedCacheMap->PrivateCacheMap;
        }

        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
                RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
                KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                /* Drop the reference taken above */
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        /* Default read-ahead granularity: one page */
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

        FileObject->PrivateCacheMap = PrivateMap;
        /* Each private map holds one reference on the shared map */
        SharedCacheMap->OpenCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}
1385
1386 /*
1387 * @implemented
1388 */
1389 PFILE_OBJECT
1390 NTAPI
1391 CcGetFileObjectFromSectionPtrs (
1392 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1393 {
1394 PROS_SHARED_CACHE_MAP SharedCacheMap;
1395
1396 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1397
1398 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1399 {
1400 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1401 ASSERT(SharedCacheMap);
1402 return SharedCacheMap->FileObject;
1403 }
1404 return NULL;
1405 }
1406
1407 VOID
1408 INIT_FUNCTION
1409 NTAPI
1410 CcInitView (
1411 VOID)
1412 {
1413 DPRINT("CcInitView()\n");
1414
1415 InitializeListHead(&DirtyVacbListHead);
1416 InitializeListHead(&VacbLruListHead);
1417 InitializeListHead(&CcDeferredWrites);
1418 InitializeListHead(&CcCleanSharedCacheMapList);
1419 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1420 KeInitializeGuardedMutex(&ViewLock);
1421 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1422 NULL,
1423 NULL,
1424 0,
1425 sizeof(INTERNAL_BCB),
1426 TAG_BCB,
1427 20);
1428 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1429 NULL,
1430 NULL,
1431 0,
1432 sizeof(ROS_SHARED_CACHE_MAP),
1433 TAG_SHARED_CACHE_MAP,
1434 20);
1435 ExInitializeNPagedLookasideList(&VacbLookasideList,
1436 NULL,
1437 NULL,
1438 0,
1439 sizeof(ROS_VACB),
1440 TAG_VACB,
1441 20);
1442
1443 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1444
1445 CcInitCacheZeroPage();
1446 }
1447
1448 #if DBG && defined(KDBG)
1449 BOOLEAN
1450 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1451 {
1452 PLIST_ENTRY ListEntry;
1453 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1454
1455 KdbpPrint(" Usage Summary (in kb)\n");
1456 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1457 /* No need to lock the spin lock here, we're in DBG */
1458 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1459 ListEntry != &CcCleanSharedCacheMapList;
1460 ListEntry = ListEntry->Flink)
1461 {
1462 PLIST_ENTRY Vacbs;
1463 ULONG Valid = 0, Dirty = 0;
1464 PROS_SHARED_CACHE_MAP SharedCacheMap;
1465 PUNICODE_STRING FileName;
1466
1467 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1468
1469 /* Dirty size */
1470 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1471
1472 /* First, count for all the associated VACB */
1473 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1474 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1475 Vacbs = Vacbs->Flink)
1476 {
1477 PROS_VACB Vacb;
1478
1479 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1480 if (Vacb->Valid)
1481 {
1482 Valid += VACB_MAPPING_GRANULARITY / 1024;
1483 }
1484 }
1485
1486 /* Setup name */
1487 if (SharedCacheMap->FileObject != NULL &&
1488 SharedCacheMap->FileObject->FileName.Length != 0)
1489 {
1490 FileName = &SharedCacheMap->FileObject->FileName;
1491 }
1492 else
1493 {
1494 FileName = &NoName;
1495 }
1496
1497 /* And print */
1498 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
1499 }
1500
1501 return TRUE;
1502 }
1503
1504 BOOLEAN
1505 ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
1506 {
1507 KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
1508 (CcTotalDirtyPages * PAGE_SIZE) / 1024);
1509 KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
1510 (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
1511 KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
1512 (MmAvailablePages * PAGE_SIZE) / 1024);
1513 KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
1514 (MmThrottleTop * PAGE_SIZE) / 1024);
1515 KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
1516 (MmThrottleBottom * PAGE_SIZE) / 1024);
1517 KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
1518 (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);
1519
1520 if (CcTotalDirtyPages >= CcDirtyPageThreshold)
1521 {
1522 KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
1523 }
1524 else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
1525 {
1526 KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
1527 }
1528 else
1529 {
1530 KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
1531 }
1532
1533 return TRUE;
1534 }
1535 #endif
1536
1537 /* EOF */