c2b89131323b88669646c4ca47e940f3f1ef38ef
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
20 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
/* Global list of all dirty VACBs awaiting write-back (see CcRosFlushDirtyPages) */
LIST_ENTRY DirtyVacbListHead;
/* LRU-ordered list of every VACB; tail is most recently used (see CcRosTrimCache) */
static LIST_ENTRY VacbLruListHead;

/* Protects the two global VACB lists above and shared cache map state */
KGUARDED_MUTEX ViewLock;

/* Lookaside lists for the cache manager's fixed-size allocations */
NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Internal vars (MS):
 * - Threshold above which lazy writer will start action
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock when dealing with the deferred list
 * - List for "clean" shared cache maps
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;
66
67 #if DBG
68 ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
69 {
70 ULONG Refs;
71
72 Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
73 if (vacb->SharedCacheMap->Trace)
74 {
75 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
76 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
77 }
78
79 return Refs;
80 }
81 ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
82 {
83 ULONG Refs;
84
85 Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
86 ASSERT(!(Refs == 0 && vacb->Dirty));
87 if (vacb->SharedCacheMap->Trace)
88 {
89 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
90 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
91 }
92
93 if (Refs == 0)
94 {
95 CcRosInternalFreeVacb(vacb);
96 }
97
98 return Refs;
99 }
100 ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
101 {
102 ULONG Refs;
103
104 Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
105 if (vacb->SharedCacheMap->Trace)
106 {
107 DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
108 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
109 }
110
111 return Refs;
112 }
113 #endif
114
115
116 /* FUNCTIONS *****************************************************************/
117
/* Enable or disable DBG-only tracing for one shared cache map.
 * When enabling, also dumps every VACB currently attached to the map.
 * No-op in release builds. */
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    /* The DBG ref-count helpers consult this flag on every inc/dec */
    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        /* Lock order: global view lock first, then the per-map spinlock */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        /* Walk and dump every VACB attached to this cache map */
        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
163
164 NTSTATUS
165 NTAPI
166 CcRosFlushVacb (
167 PROS_VACB Vacb)
168 {
169 NTSTATUS Status;
170
171 Status = CcWriteVirtualAddress(Vacb);
172 if (NT_SUCCESS(Status))
173 {
174 CcRosUnmarkDirtyVacb(Vacb, TRUE);
175 }
176
177 return Status;
178 }
179
/* Walk the global dirty-VACB list and write views back to disk until
 * roughly Target pages have been flushed.
 * ARGUMENTS:
 *   Target - number of pages the caller wants written out.
 *   Count - receives the number of pages actually counted as flushed.
 *   Wait - forwarded to the FS AcquireForLazyWrite callback (block or not).
 *   CalledFromLazy - TRUE when invoked by the lazy writer; temporary
 *                    files are then skipped.
 * Always returns STATUS_SUCCESS; individual VACB flush failures are
 * only logged. */
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Keep the VACB alive across the lock drops below */
        CcRosVacbIncRefCount(current);

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Ask the FS for permission to lazy-write; may fail when Wait is FALSE */
        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above; more than 2 means someone else is
         * actively using this VACB, so leave it for a later pass */
        if (CcRosVacbGetRefCount(current) > 2)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Drop the view lock while doing I/O; the FS may re-enter Cc */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            (*Count) += PagesFreed;

            /* Make sure we don't overflow target! */
            if (Target < PagesFreed)
            {
                /* If we would have, jump to zero directly */
                Target = 0;
            }
            else
            {
                Target -= PagesFreed;
            }
        }

        /* The dirty list may have changed while the view lock was dropped;
         * restart from the head rather than trusting the saved link */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
286
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    /* Evicted VACBs are staged here and released after all locks are dropped */
    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Walk the LRU list front-to-back (least recently used first) */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks.
             * NOTE(review): current_entry was captured before the locks were
             * dropped; the LRU list may have changed meanwhile — confirm the
             * saved link is still safe to follow */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        Refs = CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now (only our staging ref remains) */
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Drop the final (staging) reference on each evicted VACB, freeing it */
    while (!IsListEmpty(&FreeList))
    {
        ULONG Refs;

        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        InitializeListHead(&current->CacheMapVacbListEntry);
        Refs = CcRosVacbDecRefCount(current);
        ASSERT(Refs == 0);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
416
/* Release a VACB previously obtained via CcRosGetVacb/CcRosLookupVacb.
 * Updates the valid flag, optionally marks the view dirty, optionally
 * records a new mapping, then drops the caller's reference. */
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ULONG Refs;
    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    Vacb->Valid = Valid;

    /* Only transition to dirty once; CcRosMarkDirtyVacb asserts !Dirty */
    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        /* First mapping takes an extra reference that the last unmap drops */
        if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
        {
            CcRosVacbIncRefCount(Vacb);
        }
    }

    /* Drop the caller's reference; the VACB must stay alive (list refs remain) */
    Refs = CcRosVacbDecRefCount(Vacb);
    ASSERT(Refs > 0);

    return STATUS_SUCCESS;
}
452
/* Returns with VACB Lock Held! */
/* Look up the VACB covering FileOffset in the given shared cache map.
 * On a hit, returns the VACB with an extra reference taken (caller must
 * release via CcRosReleaseVacb or CcRosVacbDecRefCount); returns NULL
 * when no view covers the offset. */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        /* Hit: the view's VACB_MAPPING_GRANULARITY-sized range contains the offset */
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Reference before unlocking so the VACB cannot vanish */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            return current;
        }
        /* List is sorted by offset; past the target means no match exists */
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}
497
/* Mark a clean VACB dirty: put it on the global dirty list, account the
 * dirty pages, take a reference that the corresponding unmark drops, and
 * kick the lazy writer if it is not already scanning. */
VOID
NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    /* Callers must not mark an already-dirty view */
    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    /* Account one view's worth of pages, globally and per cache map */
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* Dirty views hold an extra reference, dropped by CcRosUnmarkDirtyVacb */
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}
535
/* Reverse of CcRosMarkDirtyVacb: remove the VACB from the dirty list,
 * undo the dirty-page accounting and drop the dirty reference.
 * LockViews selects whether this routine takes ViewLock/CacheMapLock
 * itself (TRUE) or the caller already holds them (FALSE). */
VOID
NTAPI
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    /* Re-init the entry so later IsListEmpty checks (e.g. on free) succeed */
    InitializeListHead(&Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* Drop the reference taken when the view was marked dirty */
    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }
}
569
570 NTSTATUS
571 NTAPI
572 CcRosMarkDirtyFile (
573 PROS_SHARED_CACHE_MAP SharedCacheMap,
574 LONGLONG FileOffset)
575 {
576 PROS_VACB Vacb;
577
578 ASSERT(SharedCacheMap);
579
580 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
581 SharedCacheMap, FileOffset);
582
583 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
584 if (Vacb == NULL)
585 {
586 KeBugCheck(CACHE_MANAGER);
587 }
588
589 CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);
590
591 return STATUS_SUCCESS;
592 }
593
594 /*
595 * Note: this is not the contrary function of
596 * CcRosMapVacbInKernelSpace()
597 */
598 NTSTATUS
599 NTAPI
600 CcRosUnmapVacb (
601 PROS_SHARED_CACHE_MAP SharedCacheMap,
602 LONGLONG FileOffset,
603 BOOLEAN NowDirty)
604 {
605 PROS_VACB Vacb;
606
607 ASSERT(SharedCacheMap);
608
609 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
610 SharedCacheMap, FileOffset, NowDirty);
611
612 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
613 if (Vacb == NULL)
614 {
615 return STATUS_UNSUCCESSFUL;
616 }
617
618 ASSERT(Vacb->MappedCount != 0);
619 if (InterlockedDecrement((PLONG)&Vacb->MappedCount) == 0)
620 {
621 CcRosVacbDecRefCount(Vacb);
622 }
623
624 CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);
625
626 return STATUS_SUCCESS;
627 }
628
629 static
630 NTSTATUS
631 CcRosMapVacbInKernelSpace(
632 PROS_VACB Vacb)
633 {
634 ULONG i;
635 NTSTATUS Status;
636 ULONG_PTR NumberOfPages;
637 PVOID BaseAddress = NULL;
638
639 /* Create a memory area. */
640 MmLockAddressSpace(MmGetKernelAddressSpace());
641 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
642 0, // nothing checks for VACB mareas, so set to 0
643 &BaseAddress,
644 VACB_MAPPING_GRANULARITY,
645 PAGE_READWRITE,
646 (PMEMORY_AREA*)&Vacb->MemoryArea,
647 0,
648 PAGE_SIZE);
649 ASSERT(Vacb->BaseAddress == NULL);
650 Vacb->BaseAddress = BaseAddress;
651 MmUnlockAddressSpace(MmGetKernelAddressSpace());
652 if (!NT_SUCCESS(Status))
653 {
654 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
655 return Status;
656 }
657
658 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
659 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
660 ASSERT((ULONG_PTR)Vacb->BaseAddress + VACB_MAPPING_GRANULARITY - 1 > (ULONG_PTR)MmSystemRangeStart);
661
662 /* Create a virtual mapping for this memory area */
663 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
664 for (i = 0; i < NumberOfPages; i++)
665 {
666 PFN_NUMBER PageFrameNumber;
667
668 MI_SET_USAGE(MI_USAGE_CACHE);
669 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
670 if (PageFrameNumber == 0)
671 {
672 DPRINT1("Unable to allocate page\n");
673 KeBugCheck(MEMORY_MANAGEMENT);
674 }
675
676 ASSERT(BaseAddress == Vacb->BaseAddress);
677 ASSERT(i * PAGE_SIZE < VACB_MAPPING_GRANULARITY);
678 ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) >= (ULONG_PTR)BaseAddress);
679 ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) > (ULONG_PTR)MmSystemRangeStart);
680
681 Status = MmCreateVirtualMapping(NULL,
682 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
683 PAGE_READWRITE,
684 &PageFrameNumber,
685 1);
686 if (!NT_SUCCESS(Status))
687 {
688 DPRINT1("Unable to create virtual mapping\n");
689 KeBugCheck(MEMORY_MANAGEMENT);
690 }
691 }
692
693 return STATUS_SUCCESS;
694 }
695
/* Allocate, map and insert a new VACB for FileOffset in the given cache
 * map. Because the caller's CcRosLookupVacb ran without holding any lock
 * continuously, another thread may have created the same view in the
 * meantime; in that case the new VACB is discarded and the existing one
 * is returned instead. On success *Vacb holds a referenced VACB. */
static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;
    ULONG Refs;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    /* No views beyond the backing section's size */
    if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
    {
        *Vacb = NULL;
        return STATUS_INVALID_PARAMETER;
    }

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    current->BaseAddress = NULL;
    current->Valid = FALSE;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    /* Views are aligned to VACB_MAPPING_GRANULARITY boundaries */
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
    }
#endif
    current->MappedCount = 0;
    current->ReferenceCount = 0;
    current->PinCount = 0;
    InitializeListHead(&current->CacheMapVacbListEntry);
    InitializeListHead(&current->DirtyVacbListEntry);
    InitializeListHead(&current->VacbLruListEntry);

    /* Initial reference for the new VACB itself */
    CcRosVacbIncRefCount(current);

    Status = CcRosMapVacbInKernelSpace(current);
    if (!NT_SUCCESS(Status))
    {
        CcRosVacbDecRefCount(current);
        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
        return Status;
    }

    KeAcquireGuardedMutex(&ViewLock);

    *Vacb = current;
    /* There is window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check if a VACB for the
     * file offset exist. If there is a VACB, we release
     * our newly created VACB and return the existing one.
     */
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Race lost: someone created the view; return theirs referenced */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            KeReleaseGuardedMutex(&ViewLock);

            /* Drop our only reference: frees the just-created VACB */
            Refs = CcRosVacbDecRefCount(*Vacb);
            ASSERT(Refs == 0);

            *Vacb = current;
            return STATUS_SUCCESS;
        }
        /* Track the insertion point to keep the list sorted by offset */
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
    KeReleaseGuardedMutex(&ViewLock);

    MI_SET_USAGE(MI_USAGE_CACHE);
#if MI_TRACE_PFNS
    if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
    {
        PWCHAR pos;
        ULONG len = 0;
        pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
        if (pos)
        {
            len = wcslen(pos) * sizeof(WCHAR);
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
        }
        else
        {
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
        }
    }
#endif

    /* Reference it to allow release */
    CcRosVacbIncRefCount(current);

    return Status;
}
837
/* Find (or create) the VACB covering FileOffset, freshen its LRU position
 * and return its mapping details. The returned VACB carries a reference
 * that the caller must hand back through CcRosReleaseVacb. */
NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PLONGLONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;
    ULONG Refs;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    Refs = CcRosVacbGetRefCount(current);

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the VACB to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *Vacb = current;
    *BaseOffset = current->FileOffset.QuadPart;

    /* Besides the caller's reference at least the list reference must exist */
    ASSERT(Refs > 1);

    return STATUS_SUCCESS;
}
895
NTSTATUS
NTAPI
CcRosRequestVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
/*
 * FUNCTION: Request a page mapping for a shared cache map
 */
{
    LONGLONG BaseOffset;

    ASSERT(SharedCacheMap);

    /* Offsets must be aligned to view granularity; anything else indicates
     * a broken caller and is fatal */
    if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
    {
        DPRINT1("Bad fileoffset %I64x should be multiple of %x",
                FileOffset, VACB_MAPPING_GRANULARITY);
        KeBugCheck(CACHE_MANAGER);
    }

    /* Delegate to CcRosGetVacb; BaseOffset is not needed by this caller */
    return CcRosGetVacb(SharedCacheMap,
                        FileOffset,
                        &BaseOffset,
                        BaseAddress,
                        UptoDate,
                        Vacb);
}
926
927 static
928 VOID
929 CcFreeCachePage (
930 PVOID Context,
931 MEMORY_AREA* MemoryArea,
932 PVOID Address,
933 PFN_NUMBER Page,
934 SWAPENTRY SwapEntry,
935 BOOLEAN Dirty)
936 {
937 ASSERT(SwapEntry == 0);
938 if (Page != 0)
939 {
940 ASSERT(MmGetReferenceCountPage(Page) == 1);
941 MmReleasePageMemoryConsumer(MC_CACHE, Page);
942 }
943 }
944
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    /* Tear down the kernel mapping; CcFreeCachePage returns each page to Mm */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    /* Diagnostic only: report which file leaked references before asserting */
    if (Vacb->PinCount != 0 || Vacb->ReferenceCount != 0)
    {
        DPRINT1("Invalid free: %ld, %ld\n", Vacb->ReferenceCount, Vacb->PinCount);
        if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
        {
            DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
        }
    }

    /* The VACB must be fully unreferenced and off every list before freeing */
    ASSERT(Vacb->PinCount == 0);
    ASSERT(Vacb->ReferenceCount == 0);
    ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));
    /* Poison the memory to make use-after-free visible in debugging */
    RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
985
/*
 * @implemented
 */
/* Flush the cached data of a file (or a sub-range of it) to disk.
 * FileOffset == NULL means flush from offset 0 for the whole FileSize.
 * IoStatus (optional) receives the first failure status, or success. */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
        SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            /* No range given: flush the entire file */
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one view (VACB_MAPPING_GRANULARITY) at a time */
        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    /* Record the failure but keep flushing the rest */
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                /* Drop the reference taken by the lookup */
                CcRosReleaseVacb(SharedCacheMap, current, current->Valid, current->Dirty, FALSE);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        /* Not a cached file */
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1059
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 * Called with ViewLock held; temporarily drops and reacquires it.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    /* Pin the map across the flush below, during which ViewLock is dropped */
    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);

    /* Write all dirty data back before tearing the cache down */
    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        KIRQL OldIrql;

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            /* Drop the spinlock: the helpers below take locks themselves */
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* Data was flushed above; a still-dirty VACB here is abnormal */
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosUnmarkDirtyVacb(current, FALSE);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                DPRINT1("Freeing dirty VACB\n");
            }
            if (current->MappedCount != 0)
            {
                /* Force-drop the mapping reference held by mapped views */
                current->MappedCount = 0;
                NT_VERIFY(CcRosVacbDecRefCount(current) > 0);
                DPRINT1("Freeing mapped VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        /* Drop the last reference on each VACB, which frees it */
        while (!IsListEmpty(&FreeList))
        {
            ULONG Refs;

            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            InitializeListHead(&current->CacheMapVacbListEntry);
            Refs = CcRosVacbDecRefCount(current);
#if DBG // CORE-14578
            /* Leaked references: dump as much identifying info as possible */
            if (Refs != 0)
            {
                DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", current, FileObject, current->FileOffset.QuadPart);
                DPRINT1("There are: %d references left\n", Refs);
                DPRINT1("Pin: %d, Map: %d\n", current->PinCount, current->MappedCount);
                DPRINT1("Dirty: %d\n", current->Dirty);
                if (FileObject->FileName.Length != 0)
                {
                    DPRINT1("File was: %wZ\n", &FileObject->FileName);
                }
                /* Heuristic: recognize a FastFAT FCB by node type/size and
                 * read its name field at a hard-coded offset */
                else if (FileObject->FsContext != NULL &&
                         ((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->NodeTypeCode == 0x0502 &&
                         ((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->NodeByteSize == 0x1F8 &&
                         ((PUNICODE_STRING)(((PUCHAR)FileObject->FsContext) + 0x100))->Length != 0)
                {
                    DPRINT1("File was: %wZ (FastFAT)\n", (PUNICODE_STRING)(((PUCHAR)FileObject->FsContext) + 0x100));
                }
                else
                {
                    DPRINT1("No name for the file\n");
                }
            }
#else
            ASSERT(Refs == 0);
#endif
        }

        /* Unlink the map from the global shared-cache-map list */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        /* Caller expects ViewLock held on return */
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1172
1173 VOID
1174 NTAPI
1175 CcRosReferenceCache (
1176 PFILE_OBJECT FileObject)
1177 {
1178 PROS_SHARED_CACHE_MAP SharedCacheMap;
1179 KeAcquireGuardedMutex(&ViewLock);
1180 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1181 ASSERT(SharedCacheMap);
1182 ASSERT(SharedCacheMap->OpenCount != 0);
1183 SharedCacheMap->OpenCount++;
1184 KeReleaseGuardedMutex(&ViewLock);
1185 }
1186
1187 VOID
1188 NTAPI
1189 CcRosRemoveIfClosed (
1190 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1191 {
1192 PROS_SHARED_CACHE_MAP SharedCacheMap;
1193 DPRINT("CcRosRemoveIfClosed()\n");
1194 KeAcquireGuardedMutex(&ViewLock);
1195 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1196 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1197 {
1198 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1199 }
1200 KeReleaseGuardedMutex(&ViewLock);
1201 }
1202
1203
1204 VOID
1205 NTAPI
1206 CcRosDereferenceCache (
1207 PFILE_OBJECT FileObject)
1208 {
1209 PROS_SHARED_CACHE_MAP SharedCacheMap;
1210 KeAcquireGuardedMutex(&ViewLock);
1211 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1212 ASSERT(SharedCacheMap);
1213 if (SharedCacheMap->OpenCount > 0)
1214 {
1215 SharedCacheMap->OpenCount--;
1216 if (SharedCacheMap->OpenCount == 0)
1217 {
1218 MmFreeSectionSegments(SharedCacheMap->FileObject);
1219 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1220 }
1221 }
1222 KeReleaseGuardedMutex(&ViewLock);
1223 }
1224
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed. Detaches and frees the handle's private cache map,
 * and drops the shared cache map's open count; the last close tears
 * down the whole file cache. Always returns STATUS_SUCCESS.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map.
         * Before you even try to remove it from the FO, always
         * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing!
         */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        if (PrivateMap != NULL)
        {
            /* Unlink it from the shared map's per-file list,
             * under the shared map's spin lock */
            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

            /* And free it: pool-allocated maps go back to the pool, while
             * the one embedded in the shared cache map is just marked
             * unused by clearing its node type code. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            /* Drop the open count taken when this private map was set up
             * (see CcRosInitializeFileCache); on the last close release
             * the section segments and delete the file cache. */
            if (SharedCacheMap->OpenCount > 0)
            {
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}
1285
NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object.
 * Creates the shared cache map on first use of the file, and a
 * per-handle private cache map if this handle doesn't have one yet.
 * Returns STATUS_SUCCESS, or STATUS_INSUFFICIENT_RESOURCES if either
 * allocation fails (in which case a freshly created shared map is
 * rolled back).
 */
{
    KIRQL OldIrql;
    BOOLEAN Allocated;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    Allocated = FALSE;
    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        /* First caller for this file: create the shared cache map */
        Allocated = TRUE;
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        /* The cache map holds a reference on the file object; it is
         * released when the map is destroyed (see the error path below). */
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
        SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        InitializeListHead(&SharedCacheMap->PrivateList);
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        /* Publish the map; from now on other callers will find it */
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        /* A new cache map starts on the clean list; the master lock
         * protects the global shared cache map lists. */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }
    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle: use the one
         * embedded in the shared cache map if it is still free
         * (NodeTypeCode == 0), otherwise take one from pool. */
        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
        {
            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        }
        else
        {
            PrivateMap = &SharedCacheMap->PrivateCacheMap;
        }

        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
                RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
                KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                /* Drop the reference taken when the map was created above */
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file, under the shared map's spin lock */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

        FileObject->PrivateCacheMap = PrivateMap;
        /* Each private cache map accounts for one open reference;
         * it is dropped in CcRosReleaseFileCache. */
        SharedCacheMap->OpenCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}
1392
1393 /*
1394 * @implemented
1395 */
1396 PFILE_OBJECT
1397 NTAPI
1398 CcGetFileObjectFromSectionPtrs (
1399 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1400 {
1401 PROS_SHARED_CACHE_MAP SharedCacheMap;
1402
1403 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1404
1405 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1406 {
1407 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1408 ASSERT(SharedCacheMap);
1409 return SharedCacheMap->FileObject;
1410 }
1411 return NULL;
1412 }
1413
1414 VOID
1415 INIT_FUNCTION
1416 NTAPI
1417 CcInitView (
1418 VOID)
1419 {
1420 DPRINT("CcInitView()\n");
1421
1422 InitializeListHead(&DirtyVacbListHead);
1423 InitializeListHead(&VacbLruListHead);
1424 InitializeListHead(&CcDeferredWrites);
1425 InitializeListHead(&CcCleanSharedCacheMapList);
1426 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1427 KeInitializeGuardedMutex(&ViewLock);
1428 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1429 NULL,
1430 NULL,
1431 0,
1432 sizeof(INTERNAL_BCB),
1433 TAG_BCB,
1434 20);
1435 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1436 NULL,
1437 NULL,
1438 0,
1439 sizeof(ROS_SHARED_CACHE_MAP),
1440 TAG_SHARED_CACHE_MAP,
1441 20);
1442 ExInitializeNPagedLookasideList(&VacbLookasideList,
1443 NULL,
1444 NULL,
1445 0,
1446 sizeof(ROS_VACB),
1447 TAG_VACB,
1448 20);
1449
1450 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1451
1452 CcInitCacheZeroPage();
1453 }
1454
1455 #if DBG && defined(KDBG)
1456 BOOLEAN
1457 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1458 {
1459 PLIST_ENTRY ListEntry;
1460 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1461
1462 KdbpPrint(" Usage Summary (in kb)\n");
1463 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1464 /* No need to lock the spin lock here, we're in DBG */
1465 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1466 ListEntry != &CcCleanSharedCacheMapList;
1467 ListEntry = ListEntry->Flink)
1468 {
1469 PLIST_ENTRY Vacbs;
1470 ULONG Valid = 0, Dirty = 0;
1471 PROS_SHARED_CACHE_MAP SharedCacheMap;
1472 PUNICODE_STRING FileName;
1473 PWSTR Extra = L"";
1474
1475 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1476
1477 /* Dirty size */
1478 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1479
1480 /* First, count for all the associated VACB */
1481 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1482 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1483 Vacbs = Vacbs->Flink)
1484 {
1485 PROS_VACB Vacb;
1486
1487 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1488 if (Vacb->Valid)
1489 {
1490 Valid += VACB_MAPPING_GRANULARITY / 1024;
1491 }
1492 }
1493
1494 /* Setup name */
1495 if (SharedCacheMap->FileObject != NULL &&
1496 SharedCacheMap->FileObject->FileName.Length != 0)
1497 {
1498 FileName = &SharedCacheMap->FileObject->FileName;
1499 }
1500 else if (SharedCacheMap->FileObject != NULL &&
1501 SharedCacheMap->FileObject->FsContext != NULL &&
1502 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
1503 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
1504 ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
1505 {
1506 FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
1507 Extra = L" (FastFAT)";
1508 }
1509 else
1510 {
1511 FileName = &NoName;
1512 }
1513
1514 /* And print */
1515 KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap, Valid, Dirty, FileName, Extra);
1516 }
1517
1518 return TRUE;
1519 }
1520
1521 BOOLEAN
1522 ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
1523 {
1524 KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
1525 (CcTotalDirtyPages * PAGE_SIZE) / 1024);
1526 KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
1527 (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
1528 KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
1529 (MmAvailablePages * PAGE_SIZE) / 1024);
1530 KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
1531 (MmThrottleTop * PAGE_SIZE) / 1024);
1532 KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
1533 (MmThrottleBottom * PAGE_SIZE) / 1024);
1534 KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
1535 (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);
1536
1537 if (CcTotalDirtyPages >= CcDirtyPageThreshold)
1538 {
1539 KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
1540 }
1541 else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
1542 {
1543 KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
1544 }
1545 else
1546 {
1547 KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
1548 }
1549
1550 return TRUE;
1551 }
1552 #endif
1553
1554 /* EOF */