[NTOSKRNL] Noisily dereference mapped VACB on cache release.
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
19 * (1) If caching for the FCB hasn't been initiated then so do by calling
20 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
/* All VACBs currently marked dirty; manipulated under ViewLock */
LIST_ENTRY DirtyVacbListHead;
/* LRU ordering of all VACBs (tail = most recently used); under ViewLock */
static LIST_ENTRY VacbLruListHead;

/* Global lock protecting the view lists above and shared cache map state */
KGUARDED_MUTEX ViewLock;

NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Internal vars (MS):
 * - Threshold above which lazy writer will start action
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock when dealing with the deferred list
 * - List for "clean" shared cache maps
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;
66
67 #if DBG
68 ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
69 {
70 ULONG Refs;
71
72 Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
73 if (vacb->SharedCacheMap->Trace)
74 {
75 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
76 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
77 }
78
79 return Refs;
80 }
81 ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
82 {
83 ULONG Refs;
84
85 Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
86 ASSERT(!(Refs == 0 && vacb->Dirty));
87 if (vacb->SharedCacheMap->Trace)
88 {
89 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
90 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
91 }
92
93 if (Refs == 0)
94 {
95 CcRosInternalFreeVacb(vacb);
96 }
97
98 return Refs;
99 }
100 ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
101 {
102 ULONG Refs;
103
104 Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
105 if (vacb->SharedCacheMap->Trace)
106 {
107 DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
108 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
109 }
110
111 return Refs;
112 }
113 #endif
114
115
116 /* FUNCTIONS *****************************************************************/
117
/* Enable or disable per-cache-map debug tracing. On enable (DBG builds),
 * also dumps the current state of every VACB attached to the map. No-op
 * in release builds. */
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        /* Lock order: ViewLock first, then the map's spin lock */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        /* Dump every VACB currently attached to this cache map */
        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
163
164 NTSTATUS
165 NTAPI
166 CcRosFlushVacb (
167 PROS_VACB Vacb)
168 {
169 NTSTATUS Status;
170
171 Status = CcWriteVirtualAddress(Vacb);
172 if (NT_SUCCESS(Status))
173 {
174 CcRosUnmarkDirtyVacb(Vacb, TRUE);
175 }
176
177 return Status;
178 }
179
/* Walk the global dirty-VACB list and flush views until roughly Target
 * pages have been written (or the list is exhausted). Count receives the
 * number of pages written. Wait is passed through to the filesystem's
 * AcquireForLazyWrite callback; CalledFromLazy makes temporary files be
 * skipped. Always returns STATUS_SUCCESS. */
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Pin the VACB while we may drop ViewLock below */
        CcRosVacbIncRefCount(current);

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Let the filesystem veto or serialize the lazy write */
        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above; more than 2 means someone else is
         * actively using this VACB, so skip it for now */
        if (CcRosVacbGetRefCount(current) > 2)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Drop ViewLock across the actual I/O */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            (*Count) += PagesFreed;

            /* Make sure we don't overflow target! */
            if (Target < PagesFreed)
            {
                /* If we would have, jump to zero directly */
                Target = 0;
            }
            else
            {
                Target -= PagesFreed;
            }
        }

        /* The list may have changed while ViewLock was dropped: restart
         * from the head rather than trusting the saved link */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
286
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Scan in LRU order: least recently used views first */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        Refs = CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now: fewer than 2 remaining
         * references means nobody but the lists is holding it */
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            /* Unlink it and queue it; the final dereference (and actual
             * free) happens below, outside the spin lock */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Drop the last (list) reference on each evicted VACB, freeing it */
    while (!IsListEmpty(&FreeList))
    {
        ULONG Refs;

        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        InitializeListHead(&current->CacheMapVacbListEntry);
        Refs = CcRosVacbDecRefCount(current);
        ASSERT(Refs == 0);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
416
417 NTSTATUS
418 NTAPI
419 CcRosReleaseVacb (
420 PROS_SHARED_CACHE_MAP SharedCacheMap,
421 PROS_VACB Vacb,
422 BOOLEAN Valid,
423 BOOLEAN Dirty,
424 BOOLEAN Mapped)
425 {
426 ULONG Refs;
427 ASSERT(SharedCacheMap);
428
429 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
430 SharedCacheMap, Vacb, Valid);
431
432 Vacb->Valid = Valid;
433
434 if (Dirty && !Vacb->Dirty)
435 {
436 CcRosMarkDirtyVacb(Vacb);
437 }
438
439 if (Mapped)
440 {
441 if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
442 {
443 CcRosVacbIncRefCount(Vacb);
444 }
445 }
446
447 Refs = CcRosVacbDecRefCount(Vacb);
448 ASSERT(Refs > 0);
449
450 return STATUS_SUCCESS;
451 }
452
453 /* Returns with VACB Lock Held! */
454 PROS_VACB
455 NTAPI
456 CcRosLookupVacb (
457 PROS_SHARED_CACHE_MAP SharedCacheMap,
458 LONGLONG FileOffset)
459 {
460 PLIST_ENTRY current_entry;
461 PROS_VACB current;
462 KIRQL oldIrql;
463
464 ASSERT(SharedCacheMap);
465
466 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
467 SharedCacheMap, FileOffset);
468
469 KeAcquireGuardedMutex(&ViewLock);
470 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
471
472 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
473 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
474 {
475 current = CONTAINING_RECORD(current_entry,
476 ROS_VACB,
477 CacheMapVacbListEntry);
478 if (IsPointInRange(current->FileOffset.QuadPart,
479 VACB_MAPPING_GRANULARITY,
480 FileOffset))
481 {
482 CcRosVacbIncRefCount(current);
483 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
484 KeReleaseGuardedMutex(&ViewLock);
485 return current;
486 }
487 if (current->FileOffset.QuadPart > FileOffset)
488 break;
489 current_entry = current_entry->Flink;
490 }
491
492 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
493 KeReleaseGuardedMutex(&ViewLock);
494
495 return NULL;
496 }
497
/* Transition a clean VACB to dirty: put it on the global dirty list,
 * account its pages in the global and per-map dirty counters, pin it with
 * an extra reference, and kick the lazy writer if it is idle. Caller must
 * ensure the VACB is not already dirty. */
VOID
NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    /* Lock order: ViewLock first, then the map's spin lock */
    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* The dirty list holds its own reference on the VACB */
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}
535
/* Transition a dirty VACB back to clean: unlink it from the dirty list,
 * undo the dirty-page accounting and drop the dirty list's reference.
 * LockViews selects whether this routine acquires ViewLock and the map's
 * spin lock itself (TRUE) or the caller already holds them (FALSE). */
VOID
NTAPI
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    InitializeListHead(&Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* Drop the reference the dirty list was holding */
    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }
}
569
570 NTSTATUS
571 NTAPI
572 CcRosMarkDirtyFile (
573 PROS_SHARED_CACHE_MAP SharedCacheMap,
574 LONGLONG FileOffset)
575 {
576 PROS_VACB Vacb;
577
578 ASSERT(SharedCacheMap);
579
580 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
581 SharedCacheMap, FileOffset);
582
583 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
584 if (Vacb == NULL)
585 {
586 KeBugCheck(CACHE_MANAGER);
587 }
588
589 CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);
590
591 return STATUS_SUCCESS;
592 }
593
594 /*
595 * Note: this is not the contrary function of
596 * CcRosMapVacbInKernelSpace()
597 */
598 NTSTATUS
599 NTAPI
600 CcRosUnmapVacb (
601 PROS_SHARED_CACHE_MAP SharedCacheMap,
602 LONGLONG FileOffset,
603 BOOLEAN NowDirty)
604 {
605 PROS_VACB Vacb;
606
607 ASSERT(SharedCacheMap);
608
609 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
610 SharedCacheMap, FileOffset, NowDirty);
611
612 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
613 if (Vacb == NULL)
614 {
615 return STATUS_UNSUCCESSFUL;
616 }
617
618 ASSERT(Vacb->MappedCount != 0);
619 if (InterlockedDecrement((PLONG)&Vacb->MappedCount) == 0)
620 {
621 CcRosVacbDecRefCount(Vacb);
622 }
623
624 CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);
625
626 return STATUS_SUCCESS;
627 }
628
629 static
630 NTSTATUS
631 CcRosMapVacbInKernelSpace(
632 PROS_VACB Vacb)
633 {
634 ULONG i;
635 NTSTATUS Status;
636 ULONG_PTR NumberOfPages;
637 PVOID BaseAddress = NULL;
638
639 /* Create a memory area. */
640 MmLockAddressSpace(MmGetKernelAddressSpace());
641 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
642 0, // nothing checks for VACB mareas, so set to 0
643 &BaseAddress,
644 VACB_MAPPING_GRANULARITY,
645 PAGE_READWRITE,
646 (PMEMORY_AREA*)&Vacb->MemoryArea,
647 0,
648 PAGE_SIZE);
649 ASSERT(Vacb->BaseAddress == NULL);
650 Vacb->BaseAddress = BaseAddress;
651 MmUnlockAddressSpace(MmGetKernelAddressSpace());
652 if (!NT_SUCCESS(Status))
653 {
654 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
655 return Status;
656 }
657
658 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
659 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
660 ASSERT((ULONG_PTR)Vacb->BaseAddress + VACB_MAPPING_GRANULARITY - 1 > (ULONG_PTR)MmSystemRangeStart);
661
662 /* Create a virtual mapping for this memory area */
663 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
664 for (i = 0; i < NumberOfPages; i++)
665 {
666 PFN_NUMBER PageFrameNumber;
667
668 MI_SET_USAGE(MI_USAGE_CACHE);
669 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
670 if (PageFrameNumber == 0)
671 {
672 DPRINT1("Unable to allocate page\n");
673 KeBugCheck(MEMORY_MANAGEMENT);
674 }
675
676 ASSERT(BaseAddress == Vacb->BaseAddress);
677 ASSERT(i * PAGE_SIZE < VACB_MAPPING_GRANULARITY);
678 ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) >= (ULONG_PTR)BaseAddress);
679 ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) > (ULONG_PTR)MmSystemRangeStart);
680
681 Status = MmCreateVirtualMapping(NULL,
682 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
683 PAGE_READWRITE,
684 &PageFrameNumber,
685 1);
686 if (!NT_SUCCESS(Status))
687 {
688 DPRINT1("Unable to create virtual mapping\n");
689 KeBugCheck(MEMORY_MANAGEMENT);
690 }
691 }
692
693 return STATUS_SUCCESS;
694 }
695
/* Allocate, map and insert a new VACB for FileOffset into the shared
 * cache map's (sorted) VACB list. Handles the race where another thread
 * created a view for the same offset between the caller's lookup and this
 * call: in that case the new VACB is discarded and the existing one is
 * returned referenced. On success *Vacb holds a referenced VACB. */
static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;
    ULONG Refs;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    /* Offsets beyond the section cannot be cached */
    if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
    {
        *Vacb = NULL;
        return STATUS_INVALID_PARAMETER;
    }

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    current->BaseAddress = NULL;
    current->Valid = FALSE;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    /* Views are aligned on VACB_MAPPING_GRANULARITY boundaries */
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
    }
#endif
    current->MappedCount = 0;
    current->ReferenceCount = 0;
    current->PinCount = 0;
    InitializeListHead(&current->CacheMapVacbListEntry);
    InitializeListHead(&current->DirtyVacbListEntry);
    InitializeListHead(&current->VacbLruListEntry);

    CcRosVacbIncRefCount(current);

    Status = CcRosMapVacbInKernelSpace(current);
    if (!NT_SUCCESS(Status))
    {
        /* NOTE(review): in DBG builds CcRosVacbDecRefCount frees the VACB
         * back to the lookaside list when the count hits zero, which would
         * make the ExFreeToNPagedLookasideList below a double free - TODO
         * confirm against the release-build DecRefCount definition */
        CcRosVacbDecRefCount(current);
        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
        return Status;
    }

    KeAcquireGuardedMutex(&ViewLock);

    *Vacb = current;
    /* There is window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check if a VACB for the
     * file offset exist. If there is a VACB, we release
     * our newly created VACB and return the existing one.
     */
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Lost the race: reference the existing view instead */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            KeReleaseGuardedMutex(&ViewLock);

            /* Drop our creation reference; this frees the new VACB */
            Refs = CcRosVacbDecRefCount(*Vacb);
            ASSERT(Refs == 0);

            *Vacb = current;
            return STATUS_SUCCESS;
        }
        /* Track the insertion point to keep the list sorted by offset */
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
    KeReleaseGuardedMutex(&ViewLock);

    MI_SET_USAGE(MI_USAGE_CACHE);
#if MI_TRACE_PFNS
    if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
    {
        PWCHAR pos;
        ULONG len = 0;
        pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
        if (pos)
        {
            len = wcslen(pos) * sizeof(WCHAR);
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
        }
        else
        {
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
        }
    }
#endif

    /* Reference it to allow release */
    CcRosVacbIncRefCount(current);

    return Status;
}
837
/* Return (creating if necessary) the VACB covering FileOffset. On success
 * the VACB is referenced, moved to the tail of the LRU list, and its
 * validity flag, base address, and aligned base offset are returned
 * through the out parameters. */
NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PLONGLONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;
    ULONG Refs;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    Refs = CcRosVacbGetRefCount(current);

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the VACB to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *Vacb = current;
    *BaseOffset = current->FileOffset.QuadPart;

    /* The caller's reference plus at least the list reference */
    ASSERT(Refs > 1);

    return STATUS_SUCCESS;
}
895
896 NTSTATUS
897 NTAPI
898 CcRosRequestVacb (
899 PROS_SHARED_CACHE_MAP SharedCacheMap,
900 LONGLONG FileOffset,
901 PVOID* BaseAddress,
902 PBOOLEAN UptoDate,
903 PROS_VACB *Vacb)
904 /*
905 * FUNCTION: Request a page mapping for a shared cache map
906 */
907 {
908 LONGLONG BaseOffset;
909
910 ASSERT(SharedCacheMap);
911
912 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
913 {
914 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
915 FileOffset, VACB_MAPPING_GRANULARITY);
916 KeBugCheck(CACHE_MANAGER);
917 }
918
919 return CcRosGetVacb(SharedCacheMap,
920 FileOffset,
921 &BaseOffset,
922 BaseAddress,
923 UptoDate,
924 Vacb);
925 }
926
927 static
928 VOID
929 CcFreeCachePage (
930 PVOID Context,
931 MEMORY_AREA* MemoryArea,
932 PVOID Address,
933 PFN_NUMBER Page,
934 SWAPENTRY SwapEntry,
935 BOOLEAN Dirty)
936 {
937 ASSERT(SwapEntry == 0);
938 if (Page != 0)
939 {
940 ASSERT(MmGetReferenceCountPage(Page) == 1);
941 MmReleasePageMemoryConsumer(MC_CACHE, Page);
942 }
943 }
944
945 NTSTATUS
946 CcRosInternalFreeVacb (
947 PROS_VACB Vacb)
948 /*
949 * FUNCTION: Releases a VACB associated with a shared cache map
950 */
951 {
952 DPRINT("Freeing VACB 0x%p\n", Vacb);
953 #if DBG
954 if (Vacb->SharedCacheMap->Trace)
955 {
956 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
957 }
958 #endif
959
960 MmLockAddressSpace(MmGetKernelAddressSpace());
961 MmFreeMemoryArea(MmGetKernelAddressSpace(),
962 Vacb->MemoryArea,
963 CcFreeCachePage,
964 NULL);
965 MmUnlockAddressSpace(MmGetKernelAddressSpace());
966
967 if (Vacb->PinCount != 0 || Vacb->ReferenceCount != 0)
968 {
969 DPRINT1("Invalid free: %ld, %ld\n", Vacb->ReferenceCount, Vacb->PinCount);
970 if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
971 {
972 DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
973 }
974 }
975
976 ASSERT(Vacb->PinCount == 0);
977 ASSERT(Vacb->ReferenceCount == 0);
978 ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
979 ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
980 ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));
981 RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
982 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
983 return STATUS_SUCCESS;
984 }
985
986 /*
987 * @implemented
988 */
/* Flush the cached data of a file (the whole file when FileOffset is
 * NULL, otherwise Length bytes from *FileOffset) by writing every dirty
 * VACB in the range back to disk. IoStatus, when given, receives the
 * first flush failure or STATUS_SUCCESS; an absent/uncached section
 * yields STATUS_INVALID_PARAMETER. */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
        SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            /* No offset given: flush the entire file */
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one view at a time */
        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                /* Drop the lookup reference, preserving current state */
                CcRosReleaseVacb(SharedCacheMap, current, current->Valid, current->Dirty, FALSE);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1059
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 * NOTE: Called with ViewLock held; temporarily drops it around the flush.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    /* Keep the map alive (OpenCount) while ViewLock is dropped for I/O */
    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        KIRQL OldIrql;

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            /* The spin lock is dropped per entry; the entry is already
             * unlinked from the map list so it cannot be revisited */
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* Unmark without re-taking ViewLock (already held) */
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosUnmarkDirtyVacb(current, FALSE);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                DPRINT1("Freeing dirty VACB\n");
            }
            if (current->MappedCount != 0)
            {
                /* Force-drop the mapping reference, noisily */
                current->MappedCount = 0;
                NT_VERIFY(CcRosVacbDecRefCount(current) > 0);
                DPRINT1("Freeing mapped VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        /* Final dereference of each queued VACB frees it */
        while (!IsListEmpty(&FreeList))
        {
            ULONG Refs;

            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            InitializeListHead(&current->CacheMapVacbListEntry);
            Refs = CcRosVacbDecRefCount(current);
            ASSERT(Refs == 0);
        }

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        /* Re-acquire ViewLock: callers expect it held on return */
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1147
1148 VOID
1149 NTAPI
1150 CcRosReferenceCache (
1151 PFILE_OBJECT FileObject)
1152 {
1153 PROS_SHARED_CACHE_MAP SharedCacheMap;
1154 KeAcquireGuardedMutex(&ViewLock);
1155 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1156 ASSERT(SharedCacheMap);
1157 ASSERT(SharedCacheMap->OpenCount != 0);
1158 SharedCacheMap->OpenCount++;
1159 KeReleaseGuardedMutex(&ViewLock);
1160 }
1161
1162 VOID
1163 NTAPI
1164 CcRosRemoveIfClosed (
1165 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1166 {
1167 PROS_SHARED_CACHE_MAP SharedCacheMap;
1168 DPRINT("CcRosRemoveIfClosed()\n");
1169 KeAcquireGuardedMutex(&ViewLock);
1170 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1171 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1172 {
1173 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1174 }
1175 KeReleaseGuardedMutex(&ViewLock);
1176 }
1177
1178
1179 VOID
1180 NTAPI
1181 CcRosDereferenceCache (
1182 PFILE_OBJECT FileObject)
1183 {
1184 PROS_SHARED_CACHE_MAP SharedCacheMap;
1185 KeAcquireGuardedMutex(&ViewLock);
1186 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1187 ASSERT(SharedCacheMap);
1188 if (SharedCacheMap->OpenCount > 0)
1189 {
1190 SharedCacheMap->OpenCount--;
1191 if (SharedCacheMap->OpenCount == 0)
1192 {
1193 MmFreeSectionSegments(SharedCacheMap->FileObject);
1194 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1195 }
1196 }
1197 KeReleaseGuardedMutex(&ViewLock);
1198 }
1199
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed. Detaches and frees the per-handle private cache map
 * and drops the handle's reference on the shared cache map, deleting
 * the whole file cache when that was the last reference.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map
         * Before you even try to remove it from FO, always
         * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing!
         */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        if (PrivateMap != NULL)
        {
            /* Remove it from the file's list of private maps,
             * under the per-file cache map lock.
             */
            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

            /* And free it. The first private map is embedded in the
             * shared cache map itself and is only marked free by
             * zeroing its node type; pool-allocated ones are released.
             */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            /* Drop the open count this handle held; when it falls to
             * zero, release the section segments and delete the cache.
             */
            if (SharedCacheMap->OpenCount > 0)
            {
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}
1260
NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object.
 * Creates the shared cache map on first use of the file, and a private
 * cache map for this particular handle if it doesn't have one yet.
 * Returns STATUS_INSUFFICIENT_RESOURCES on allocation failure,
 * STATUS_SUCCESS otherwise.
 */
{
    KIRQL OldIrql;
    BOOLEAN Allocated;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    /* Remember whether we created the shared map here, so the failure
     * path below knows whether to roll it back.
     */
    Allocated = FALSE;
    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        Allocated = TRUE;
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        /* The shared cache map keeps the file object alive */
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
        SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        InitializeListHead(&SharedCacheMap->PrivateList);
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        /* Publish the new map on the global clean list, under the
         * master lock.
         */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }
    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle. The first
         * handle reuses the map embedded in the shared cache map
         * (NodeTypeCode == 0 means the embedded map is free); later
         * handles get a pool allocation.
         */
        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
        {
            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        }
        else
        {
            PrivateMap = &SharedCacheMap->PrivateCacheMap;
        }

        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
                RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
                KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                /* Drop the reference taken above */
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

        /* Each handle with a private map holds an open reference */
        FileObject->PrivateCacheMap = PrivateMap;
        SharedCacheMap->OpenCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}
1367
1368 /*
1369 * @implemented
1370 */
1371 PFILE_OBJECT
1372 NTAPI
1373 CcGetFileObjectFromSectionPtrs (
1374 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1375 {
1376 PROS_SHARED_CACHE_MAP SharedCacheMap;
1377
1378 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1379
1380 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1381 {
1382 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1383 ASSERT(SharedCacheMap);
1384 return SharedCacheMap->FileObject;
1385 }
1386 return NULL;
1387 }
1388
VOID
INIT_FUNCTION
NTAPI
CcInitView (
    VOID)
/*
 * FUNCTION: One-time initialization of the cache manager's view
 * machinery: global lists, locks, lookaside lists, the cache memory
 * consumer and the zero page used for cached zero-fills.
 */
{
    DPRINT("CcInitView()\n");

    /* Global VACB and shared-map bookkeeping lists */
    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    InitializeListHead(&CcDeferredWrites);
    InitializeListHead(&CcCleanSharedCacheMapList);
    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
    KeInitializeGuardedMutex(&ViewLock);

    /* Lookaside lists for the frequently allocated cache structures;
     * depth 20 entries each.
     */
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    /* Register the trim callback so Mm can reclaim cache memory */
    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);

    CcInitCacheZeroPage();
}
1429
1430 #if DBG && defined(KDBG)
1431 BOOLEAN
1432 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1433 {
1434 PLIST_ENTRY ListEntry;
1435 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1436
1437 KdbpPrint(" Usage Summary (in kb)\n");
1438 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1439 /* No need to lock the spin lock here, we're in DBG */
1440 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1441 ListEntry != &CcCleanSharedCacheMapList;
1442 ListEntry = ListEntry->Flink)
1443 {
1444 PLIST_ENTRY Vacbs;
1445 ULONG Valid = 0, Dirty = 0;
1446 PROS_SHARED_CACHE_MAP SharedCacheMap;
1447 PUNICODE_STRING FileName;
1448
1449 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1450
1451 /* Dirty size */
1452 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1453
1454 /* First, count for all the associated VACB */
1455 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1456 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1457 Vacbs = Vacbs->Flink)
1458 {
1459 PROS_VACB Vacb;
1460
1461 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1462 if (Vacb->Valid)
1463 {
1464 Valid += VACB_MAPPING_GRANULARITY / 1024;
1465 }
1466 }
1467
1468 /* Setup name */
1469 if (SharedCacheMap->FileObject != NULL &&
1470 SharedCacheMap->FileObject->FileName.Length != 0)
1471 {
1472 FileName = &SharedCacheMap->FileObject->FileName;
1473 }
1474 else
1475 {
1476 FileName = &NoName;
1477 }
1478
1479 /* And print */
1480 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
1481 }
1482
1483 return TRUE;
1484 }
1485
1486 BOOLEAN
1487 ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
1488 {
1489 KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
1490 (CcTotalDirtyPages * PAGE_SIZE) / 1024);
1491 KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
1492 (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
1493 KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
1494 (MmAvailablePages * PAGE_SIZE) / 1024);
1495 KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
1496 (MmThrottleTop * PAGE_SIZE) / 1024);
1497 KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
1498 (MmThrottleBottom * PAGE_SIZE) / 1024);
1499 KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
1500 (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);
1501
1502 if (CcTotalDirtyPages >= CcDirtyPageThreshold)
1503 {
1504 KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
1505 }
1506 else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
1507 {
1508 KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
1509 }
1510 else
1511 {
1512 KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
1513 }
1514
1515 return TRUE;
1516 }
1517 #endif
1518
1519 /* EOF */