[NTOSKRNL] When allocating a new BCB, save it in a list
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
20 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
54 /* Internal vars (MS):
55 * - Threshold above which lazy writer will start action
56 * - Amount of dirty pages
57 * - List for deferred writes
58 * - Spinlock when dealing with the deferred list
59 * - List for "clean" shared cache maps
60 */
61 ULONG CcDirtyPageThreshold = 0;
62 ULONG CcTotalDirtyPages = 0;
63 LIST_ENTRY CcDeferredWrites;
64 KSPIN_LOCK CcDeferredWriteSpinLock;
65 LIST_ENTRY CcCleanSharedCacheMapList;
66
#if DBG
/*
 * Debug-build reference-count helpers. Each one performs the interlocked
 * operation and, when tracing is enabled on the owning shared cache map
 * (see CcRosTraceCacheMap), prints the caller's file/line and the VACB state.
 */

/* Atomically increment the VACB reference count; returns the new count. */
ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}

/* Atomically decrement the VACB reference count; frees the VACB when the
 * count drops to zero. A dirty VACB must never reach zero references. */
ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
    /* Dropping the last reference on a dirty VACB would lose data */
    ASSERT(!(Refs == 0 && vacb->Dirty));
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    if (Refs == 0)
    {
        CcRosInternalFreeVacb(vacb);
    }

    return Refs;
}

/* Read the current VACB reference count (interlocked read via a no-op CAS). */
ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
#endif
114
115
116 /* FUNCTIONS *****************************************************************/
117
/*
 * Enable or disable debug tracing for a shared cache map (debug builds only).
 * When enabling, dumps the current state of every VACB attached to the map.
 * No-op on release builds.
 */
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        /* Lock order: ViewLock first, then the per-map spinlock */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        /* Walk and dump every VACB belonging to this cache map */
        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
163
164 NTSTATUS
165 NTAPI
166 CcRosFlushVacb (
167 PROS_VACB Vacb)
168 {
169 NTSTATUS Status;
170
171 CcRosUnmarkDirtyVacb(Vacb, TRUE);
172
173 Status = CcWriteVirtualAddress(Vacb);
174 if (!NT_SUCCESS(Status))
175 {
176 CcRosMarkDirtyVacb(Vacb);
177 }
178
179 return Status;
180 }
181
/*
 * Flush dirty VACBs until roughly Target pages have been written back.
 * Count receives the number of pages flushed. Wait is forwarded to the
 * filesystem's AcquireForLazyWrite callback; CalledFromLazy makes the
 * routine skip temporary files, as the lazy writer should.
 */
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;

    /* Disable APCs while we hold the view lock across flushes */
    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Keep the VACB alive while we drop the view lock below */
        CcRosVacbIncRefCount(current);

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* The filesystem must grant us the lazy-write resource first */
        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* Drop the view lock for the duration of the actual write */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            (*Count) += PagesFreed;

            /* Make sure we don't overflow target! */
            if (Target < PagesFreed)
            {
                /* If we would have, jump to zero directly */
                Target = 0;
            }
            else
            {
                Target -= PagesFreed;
            }
        }

        /* The dirty list may have changed while unlocked: restart from head */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
279
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 * Walks the LRU list, paging out mapped/clean VACBs and collecting
 * fully unreferenced ones into a local free list which is released
 * after all locks are dropped. If the target is not met, flushes dirty
 * pages once and retries.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        Refs = CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now (only our transient ref left) */
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            /* Unlink from both lists; free later, outside the locks */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Now actually release the collected VACBs (drops the last reference) */
    while (!IsListEmpty(&FreeList))
    {
        ULONG Refs;

        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        InitializeListHead(&current->CacheMapVacbListEntry);
        Refs = CcRosVacbDecRefCount(current);
        ASSERT(Refs == 0);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
409
/*
 * Release a VACB previously obtained via CcRosGetVacb/CcRosLookupVacb.
 * Updates the valid flag, marks the VACB dirty if requested, and tracks
 * the mapped count (the first mapping takes an extra reference). Finally
 * drops the caller's reference.
 */
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ULONG Refs;
    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    Vacb->Valid = Valid;

    /* Only transition to dirty once; re-marking would corrupt the counters */
    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        /* The first mapping holds an extra reference on the VACB */
        if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
        {
            CcRosVacbIncRefCount(Vacb);
        }
    }

    Refs = CcRosVacbDecRefCount(Vacb);
    /* The VACB must remain referenced (it is still on the cache map lists) */
    ASSERT(Refs > 0);

    return STATUS_SUCCESS;
}
445
/* Returns with VACB Lock Held! */
/* NOTE(review): the header above appears stale — the function releases both
 * locks before returning; what the caller actually gets is a referenced VACB.
 * Confirm against callers before relying on it. */
/*
 * Find the VACB covering FileOffset in the given shared cache map.
 * On success, returns the VACB with an extra reference taken (the caller
 * must release it via CcRosReleaseVacb or CcRosVacbDecRefCount).
 * Returns NULL if no VACB maps the offset.
 */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            return current;
        }
        /* The list is sorted by offset: stop once we passed the target */
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}
490
/*
 * Mark a clean VACB dirty: add it to the global dirty list, account for
 * its pages in the global and per-map dirty counters, take a reference
 * (dropped again by CcRosUnmarkDirtyVacb), and kick the lazy writer.
 * The VACB must not already be dirty.
 */
VOID
NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* A dirty VACB holds a reference on itself until it is cleaned */
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}
528
/*
 * Clear the dirty state of a VACB: remove it from the global dirty list,
 * fix up the dirty-page counters, and drop the reference taken by
 * CcRosMarkDirtyVacb. LockViews selects whether this routine acquires
 * ViewLock/CacheMapLock itself or the caller already holds them.
 * The VACB must currently be dirty.
 */
VOID
NTAPI
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    InitializeListHead(&Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* Drop the self-reference the dirty state was holding */
    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }
}
562
563 NTSTATUS
564 NTAPI
565 CcRosMarkDirtyFile (
566 PROS_SHARED_CACHE_MAP SharedCacheMap,
567 LONGLONG FileOffset)
568 {
569 PROS_VACB Vacb;
570
571 ASSERT(SharedCacheMap);
572
573 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
574 SharedCacheMap, FileOffset);
575
576 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
577 if (Vacb == NULL)
578 {
579 KeBugCheck(CACHE_MANAGER);
580 }
581
582 CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);
583
584 return STATUS_SUCCESS;
585 }
586
/*
 * Note: this is not the contrary function of
 * CcRosMapVacbInKernelSpace()
 */
/*
 * Drop one mapping of the VACB covering FileOffset; when the mapped count
 * reaches zero the mapping's reference is released. NowDirty marks the
 * VACB dirty on release. Fails if no VACB maps the offset.
 */
NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    BOOLEAN NowDirty)
{
    PROS_VACB Vacb;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    /* Lookup takes a reference, released again by CcRosReleaseVacb below */
    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    ASSERT(Vacb->MappedCount != 0);
    /* The last unmap drops the reference held by the mapping */
    if (InterlockedDecrement((PLONG)&Vacb->MappedCount) == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);

    return STATUS_SUCCESS;
}
621
/*
 * Create the kernel-space memory area backing a VACB and populate it with
 * freshly requested cache pages. On success Vacb->BaseAddress and
 * Vacb->MemoryArea are set; the area spans VACB_MAPPING_GRANULARITY bytes.
 */
static
NTSTATUS
CcRosMapVacbInKernelSpace(
    PROS_VACB Vacb)
{
    ULONG i;
    NTSTATUS Status;
    ULONG_PTR NumberOfPages;
    PVOID BaseAddress = NULL;

    /* Create a memory area. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
                                0, // nothing checks for VACB mareas, so set to 0
                                &BaseAddress,
                                VACB_MAPPING_GRANULARITY,
                                PAGE_READWRITE,
                                (PMEMORY_AREA*)&Vacb->MemoryArea,
                                0,
                                PAGE_SIZE);
    ASSERT(Vacb->BaseAddress == NULL);
    Vacb->BaseAddress = BaseAddress;
    MmUnlockAddressSpace(MmGetKernelAddressSpace());
    if (!NT_SUCCESS(Status))
    {
        DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
        return Status;
    }

    /* Sanity: the mapping must be page-aligned and in kernel space */
    ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
    ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
    ASSERT((ULONG_PTR)Vacb->BaseAddress + VACB_MAPPING_GRANULARITY - 1 > (ULONG_PTR)MmSystemRangeStart);

    /* Create a virtual mapping for this memory area */
    NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
    for (i = 0; i < NumberOfPages; i++)
    {
        PFN_NUMBER PageFrameNumber;

        MI_SET_USAGE(MI_USAGE_CACHE);
        /* NOTE(review): Status is overwritten here and failure is detected
         * only via PageFrameNumber == 0, not NT_SUCCESS(Status) — confirm
         * MmRequestPageMemoryConsumer's contract before changing this. */
        Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
        if (PageFrameNumber == 0)
        {
            DPRINT1("Unable to allocate page\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        ASSERT(BaseAddress == Vacb->BaseAddress);
        ASSERT(i * PAGE_SIZE < VACB_MAPPING_GRANULARITY);
        ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) >= (ULONG_PTR)BaseAddress);
        ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) > (ULONG_PTR)MmSystemRangeStart);

        Status = MmCreateVirtualMapping(NULL,
                                        (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
                                        PAGE_READWRITE,
                                        &PageFrameNumber,
                                        1);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Unable to create virtual mapping\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }

    return STATUS_SUCCESS;
}
688
/*
 * Scan the LRU list for completely unused VACBs (only the list reference
 * left), unlink them under the locks, then free them outside the locks.
 * Returns TRUE if at least one VACB was freed; *Count (optional) receives
 * the number freed. Used to recover address space when mapping fails.
 */
static
BOOLEAN
CcRosFreeUnusedVacb (
    PULONG Count)
{
    ULONG cFreed;
    BOOLEAN Freed;
    KIRQL oldIrql;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    PLIST_ENTRY current_entry;

    cFreed = 0;
    Freed = FALSE;
    InitializeListHead(&FreeList);

    KeAcquireGuardedMutex(&ViewLock);

    /* Browse all the available VACB */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Only deal with unused VACB, we will free them */
        Refs = CcRosVacbGetRefCount(current);
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            /* Reset and move to free list */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);

    }

    KeReleaseGuardedMutex(&ViewLock);

    /* And now, free any of the found VACB, that'll free memory! */
    while (!IsListEmpty(&FreeList))
    {
        ULONG Refs;

        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        InitializeListHead(&current->CacheMapVacbListEntry);
        /* Dropping the last (list) reference actually frees the VACB */
        Refs = CcRosVacbDecRefCount(current);
        ASSERT(Refs == 0);
        ++cFreed;
    }

    /* If we freed at least one VACB, return success */
    if (cFreed != 0)
    {
        Freed = TRUE;
    }

    /* If caller asked for free count, return it */
    if (Count != NULL)
    {
        *Count = cFreed;
    }

    return Freed;
}
770
771 static
772 NTSTATUS
773 CcRosCreateVacb (
774 PROS_SHARED_CACHE_MAP SharedCacheMap,
775 LONGLONG FileOffset,
776 PROS_VACB *Vacb)
777 {
778 PROS_VACB current;
779 PROS_VACB previous;
780 PLIST_ENTRY current_entry;
781 NTSTATUS Status;
782 KIRQL oldIrql;
783 ULONG Refs;
784 BOOLEAN Retried;
785
786 ASSERT(SharedCacheMap);
787
788 DPRINT("CcRosCreateVacb()\n");
789
790 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
791 {
792 *Vacb = NULL;
793 return STATUS_INVALID_PARAMETER;
794 }
795
796 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
797 current->BaseAddress = NULL;
798 current->Valid = FALSE;
799 current->Dirty = FALSE;
800 current->PageOut = FALSE;
801 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
802 current->SharedCacheMap = SharedCacheMap;
803 #if DBG
804 if (SharedCacheMap->Trace)
805 {
806 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
807 }
808 #endif
809 current->MappedCount = 0;
810 current->ReferenceCount = 0;
811 current->PinCount = 0;
812 InitializeListHead(&current->CacheMapVacbListEntry);
813 InitializeListHead(&current->DirtyVacbListEntry);
814 InitializeListHead(&current->VacbLruListEntry);
815
816 CcRosVacbIncRefCount(current);
817
818 Retried = FALSE;
819 Retry:
820 /* Map VACB in kernel space */
821 Status = CcRosMapVacbInKernelSpace(current);
822 if (!NT_SUCCESS(Status))
823 {
824 ULONG Freed;
825 /* If no space left, try to prune unused VACB
826 * to recover space to map our VACB
827 * If it succeed, retry to map, otherwise
828 * just fail.
829 */
830 if (!Retried && CcRosFreeUnusedVacb(&Freed))
831 {
832 DPRINT("Prunned %d VACB, trying again\n", Freed);
833 Retried = TRUE;
834 goto Retry;
835 }
836
837 CcRosVacbDecRefCount(current);
838 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
839 return Status;
840 }
841
842 KeAcquireGuardedMutex(&ViewLock);
843
844 *Vacb = current;
845 /* There is window between the call to CcRosLookupVacb
846 * and CcRosCreateVacb. We must check if a VACB for the
847 * file offset exist. If there is a VACB, we release
848 * our newly created VACB and return the existing one.
849 */
850 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
851 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
852 previous = NULL;
853 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
854 {
855 current = CONTAINING_RECORD(current_entry,
856 ROS_VACB,
857 CacheMapVacbListEntry);
858 if (IsPointInRange(current->FileOffset.QuadPart,
859 VACB_MAPPING_GRANULARITY,
860 FileOffset))
861 {
862 CcRosVacbIncRefCount(current);
863 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
864 #if DBG
865 if (SharedCacheMap->Trace)
866 {
867 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
868 SharedCacheMap,
869 (*Vacb),
870 current);
871 }
872 #endif
873 KeReleaseGuardedMutex(&ViewLock);
874
875 Refs = CcRosVacbDecRefCount(*Vacb);
876 ASSERT(Refs == 0);
877
878 *Vacb = current;
879 return STATUS_SUCCESS;
880 }
881 if (current->FileOffset.QuadPart < FileOffset)
882 {
883 ASSERT(previous == NULL ||
884 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
885 previous = current;
886 }
887 if (current->FileOffset.QuadPart > FileOffset)
888 break;
889 current_entry = current_entry->Flink;
890 }
891 /* There was no existing VACB. */
892 current = *Vacb;
893 if (previous)
894 {
895 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
896 }
897 else
898 {
899 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
900 }
901 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
902 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
903 KeReleaseGuardedMutex(&ViewLock);
904
905 MI_SET_USAGE(MI_USAGE_CACHE);
906 #if MI_TRACE_PFNS
907 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
908 {
909 PWCHAR pos;
910 ULONG len = 0;
911 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
912 if (pos)
913 {
914 len = wcslen(pos) * sizeof(WCHAR);
915 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
916 }
917 else
918 {
919 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
920 }
921 }
922 #endif
923
924 /* Reference it to allow release */
925 CcRosVacbIncRefCount(current);
926
927 return Status;
928 }
929
/*
 * Return a referenced VACB for FileOffset, creating and mapping one if it
 * does not exist yet. On success the out parameters receive the VACB's
 * base file offset, kernel base address, validity flag and the VACB
 * itself; the VACB is moved to the tail of the LRU list.
 */
NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PLONGLONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;
    ULONG Refs;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    Refs = CcRosVacbGetRefCount(current);

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the VACB to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *Vacb = current;
    *BaseOffset = current->FileOffset.QuadPart;

    /* At least the list reference plus the caller's reference must be held */
    ASSERT(Refs > 1);

    return STATUS_SUCCESS;
}
987
988 NTSTATUS
989 NTAPI
990 CcRosRequestVacb (
991 PROS_SHARED_CACHE_MAP SharedCacheMap,
992 LONGLONG FileOffset,
993 PVOID* BaseAddress,
994 PBOOLEAN UptoDate,
995 PROS_VACB *Vacb)
996 /*
997 * FUNCTION: Request a page mapping for a shared cache map
998 */
999 {
1000 LONGLONG BaseOffset;
1001
1002 ASSERT(SharedCacheMap);
1003
1004 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
1005 {
1006 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
1007 FileOffset, VACB_MAPPING_GRANULARITY);
1008 KeBugCheck(CACHE_MANAGER);
1009 }
1010
1011 return CcRosGetVacb(SharedCacheMap,
1012 FileOffset,
1013 &BaseOffset,
1014 BaseAddress,
1015 UptoDate,
1016 Vacb);
1017 }
1018
1019 static
1020 VOID
1021 CcFreeCachePage (
1022 PVOID Context,
1023 MEMORY_AREA* MemoryArea,
1024 PVOID Address,
1025 PFN_NUMBER Page,
1026 SWAPENTRY SwapEntry,
1027 BOOLEAN Dirty)
1028 {
1029 ASSERT(SwapEntry == 0);
1030 if (Page != 0)
1031 {
1032 ASSERT(MmGetReferenceCountPage(Page) == 1);
1033 MmReleasePageMemoryConsumer(MC_CACHE, Page);
1034 }
1035 }
1036
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 * Tears down the kernel mapping (returning the pages via CcFreeCachePage),
 * verifies the VACB is fully unreferenced and unlinked, poisons the
 * memory and returns it to the lookaside list.
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    /* Tear down the memory area; CcFreeCachePage releases each page */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    /* Diagnostic dump before the asserts below fire on an invalid free */
    if (Vacb->PinCount != 0 || Vacb->ReferenceCount != 0)
    {
        DPRINT1("Invalid free: %ld, %ld\n", Vacb->ReferenceCount, Vacb->PinCount);
        if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
        {
            DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
        }
    }

    ASSERT(Vacb->PinCount == 0);
    ASSERT(Vacb->ReferenceCount == 0);
    ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));
    /* Poison the structure to catch use-after-free */
    RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
1077
/*
 * @implemented
 */
/*
 * Flush the cached data for a file (or a sub-range of it) to disk.
 * With no FileOffset, flushes from offset 0 up to the cached file size.
 * IoStatus (optional) receives STATUS_SUCCESS, the last flush failure,
 * or STATUS_INVALID_PARAMETER when the file has no shared cache map.
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
        SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            /* No offset given: flush the whole cached file */
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one VACB-granularity chunk at a time */
        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    /* Record the failure but keep flushing the rest */
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                CcRosReleaseVacb(SharedCacheMap, current, current->Valid, current->Dirty, FALSE);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1151
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 * NOTE(review): this routine appears to be entered with ViewLock held by
 * the caller (it releases it first and re-acquires before returning) —
 * verify against callers before changing the locking here.
 * Flushes the file, and if no opens remain, unlinks and frees every VACB
 * and the shared cache map itself.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    /* Pin the map open across the flush so it cannot vanish */
    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        KIRQL OldIrql;

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            /* Drop the spinlock: the per-VACB cleanup below may reacquire it */
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* Clear dirty state (and its reference) before freeing */
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosUnmarkDirtyVacb(current, FALSE);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                DPRINT1("Freeing dirty VACB\n");
            }
            if (current->MappedCount != 0)
            {
                /* Force-drop the mapping reference held by MappedCount */
                current->MappedCount = 0;
                NT_VERIFY(CcRosVacbDecRefCount(current) > 0);
                DPRINT1("Freeing mapped VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        /* Free the collected VACBs outside all locks */
        while (!IsListEmpty(&FreeList))
        {
            ULONG Refs;

            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            InitializeListHead(&current->CacheMapVacbListEntry);
            Refs = CcRosVacbDecRefCount(current);
#if DBG // CORE-14578
            /* On debug builds, report (rather than assert on) leaked VACBs */
            if (Refs != 0)
            {
                DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", current, FileObject, current->FileOffset.QuadPart);
                DPRINT1("There are: %d references left\n", Refs);
                DPRINT1("Pin: %d, Map: %d\n", current->PinCount, current->MappedCount);
                DPRINT1("Dirty: %d\n", current->Dirty);
                if (FileObject->FileName.Length != 0)
                {
                    DPRINT1("File was: %wZ\n", &FileObject->FileName);
                }
                else if (FileObject->FsContext != NULL &&
                         ((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->NodeTypeCode == 0x0502 &&
                         ((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->NodeByteSize == 0x1F8 &&
                         ((PUNICODE_STRING)(((PUCHAR)FileObject->FsContext) + 0x100))->Length != 0)
                {
                    DPRINT1("File was: %wZ (FastFAT)\n", (PUNICODE_STRING)(((PUCHAR)FileObject->FsContext) + 0x100));
                }
                else
                {
                    DPRINT1("No name for the file\n");
                }
            }
#else
            ASSERT(Refs == 0);
#endif
        }

        /* Unlink the map from the clean shared cache map list */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        /* Re-acquire ViewLock for the caller, matching the entry state */
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1264
1265 VOID
1266 NTAPI
1267 CcRosReferenceCache (
1268 PFILE_OBJECT FileObject)
1269 {
1270 PROS_SHARED_CACHE_MAP SharedCacheMap;
1271 KeAcquireGuardedMutex(&ViewLock);
1272 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1273 ASSERT(SharedCacheMap);
1274 ASSERT(SharedCacheMap->OpenCount != 0);
1275 SharedCacheMap->OpenCount++;
1276 KeReleaseGuardedMutex(&ViewLock);
1277 }
1278
1279 VOID
1280 NTAPI
1281 CcRosRemoveIfClosed (
1282 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1283 {
1284 PROS_SHARED_CACHE_MAP SharedCacheMap;
1285 DPRINT("CcRosRemoveIfClosed()\n");
1286 KeAcquireGuardedMutex(&ViewLock);
1287 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1288 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1289 {
1290 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1291 }
1292 KeReleaseGuardedMutex(&ViewLock);
1293 }
1294
1295
1296 VOID
1297 NTAPI
1298 CcRosDereferenceCache (
1299 PFILE_OBJECT FileObject)
1300 {
1301 PROS_SHARED_CACHE_MAP SharedCacheMap;
1302 KeAcquireGuardedMutex(&ViewLock);
1303 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1304 ASSERT(SharedCacheMap);
1305 if (SharedCacheMap->OpenCount > 0)
1306 {
1307 SharedCacheMap->OpenCount--;
1308 if (SharedCacheMap->OpenCount == 0)
1309 {
1310 MmFreeSectionSegments(SharedCacheMap->FileObject);
1311 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1312 }
1313 }
1314 KeReleaseGuardedMutex(&ViewLock);
1315 }
1316
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 *
 * Frees this handle's private cache map and drops one open reference on
 * the shared cache map, deleting it when the last reference goes away.
 * Always returns STATUS_SUCCESS.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map
         * Before you event try to remove it from FO, always
         * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing!
         */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        if (PrivateMap != NULL)
        {
            /* Remove it from the file */
            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

            /* And free it. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                /* Pool-allocated private map (not the one embedded in the shared map) */
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                /* The embedded map cannot be freed; marking its node type 0
                 * makes it available for reuse by the next handle */
                PrivateMap->NodeTypeCode = 0;
            }

            if (SharedCacheMap->OpenCount > 0)
            {
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
                    /* Last handle closed: release section data and the map itself */
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}
1377
1378 NTSTATUS
1379 NTAPI
1380 CcRosInitializeFileCache (
1381 PFILE_OBJECT FileObject,
1382 PCC_FILE_SIZES FileSizes,
1383 BOOLEAN PinAccess,
1384 PCACHE_MANAGER_CALLBACKS CallBacks,
1385 PVOID LazyWriterContext)
1386 /*
1387 * FUNCTION: Initializes a shared cache map for a file object
1388 */
1389 {
1390 KIRQL OldIrql;
1391 BOOLEAN Allocated;
1392 PROS_SHARED_CACHE_MAP SharedCacheMap;
1393
1394 DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject);
1395
1396 Allocated = FALSE;
1397 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1398 if (SharedCacheMap == NULL)
1399 {
1400 Allocated = TRUE;
1401 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1402 if (SharedCacheMap == NULL)
1403 {
1404 return STATUS_INSUFFICIENT_RESOURCES;
1405 }
1406 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1407 SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
1408 SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
1409 SharedCacheMap->FileObject = FileObject;
1410 SharedCacheMap->Callbacks = CallBacks;
1411 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1412 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1413 SharedCacheMap->FileSize = FileSizes->FileSize;
1414 SharedCacheMap->PinAccess = PinAccess;
1415 SharedCacheMap->DirtyPageThreshold = 0;
1416 SharedCacheMap->DirtyPages = 0;
1417 InitializeListHead(&SharedCacheMap->PrivateList);
1418 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1419 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1420 InitializeListHead(&SharedCacheMap->BcbList);
1421 }
1422
1423 KeAcquireGuardedMutex(&ViewLock);
1424 if (Allocated)
1425 {
1426 if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
1427 {
1428 ObReferenceObjectByPointer(FileObject,
1429 FILE_ALL_ACCESS,
1430 NULL,
1431 KernelMode);
1432 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1433
1434 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1435 InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
1436 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1437 }
1438 else
1439 {
1440 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1441 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1442 }
1443 }
1444 if (FileObject->PrivateCacheMap == NULL)
1445 {
1446 PPRIVATE_CACHE_MAP PrivateMap;
1447
1448 /* Allocate the private cache map for this handle */
1449 if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
1450 {
1451 PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
1452 }
1453 else
1454 {
1455 PrivateMap = &SharedCacheMap->PrivateCacheMap;
1456 }
1457
1458 if (PrivateMap == NULL)
1459 {
1460 /* If we also allocated the shared cache map for this file, kill it */
1461 if (Allocated)
1462 {
1463 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1464 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
1465 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1466
1467 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1468 ObDereferenceObject(FileObject);
1469 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1470 }
1471
1472 KeReleaseGuardedMutex(&ViewLock);
1473 return STATUS_INSUFFICIENT_RESOURCES;
1474 }
1475
1476 /* Initialize it */
1477 RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
1478 PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
1479 PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
1480 PrivateMap->FileObject = FileObject;
1481 KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);
1482
1483 /* Link it to the file */
1484 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
1485 InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
1486 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);
1487
1488 FileObject->PrivateCacheMap = PrivateMap;
1489 SharedCacheMap->OpenCount++;
1490 }
1491 KeReleaseGuardedMutex(&ViewLock);
1492
1493 return STATUS_SUCCESS;
1494 }
1495
1496 /*
1497 * @implemented
1498 */
1499 PFILE_OBJECT
1500 NTAPI
1501 CcGetFileObjectFromSectionPtrs (
1502 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1503 {
1504 PROS_SHARED_CACHE_MAP SharedCacheMap;
1505
1506 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1507
1508 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1509 {
1510 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1511 ASSERT(SharedCacheMap);
1512 return SharedCacheMap->FileObject;
1513 }
1514 return NULL;
1515 }
1516
1517 VOID
1518 INIT_FUNCTION
1519 NTAPI
1520 CcInitView (
1521 VOID)
1522 {
1523 DPRINT("CcInitView()\n");
1524
1525 InitializeListHead(&DirtyVacbListHead);
1526 InitializeListHead(&VacbLruListHead);
1527 InitializeListHead(&CcDeferredWrites);
1528 InitializeListHead(&CcCleanSharedCacheMapList);
1529 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1530 KeInitializeGuardedMutex(&ViewLock);
1531 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1532 NULL,
1533 NULL,
1534 0,
1535 sizeof(INTERNAL_BCB),
1536 TAG_BCB,
1537 20);
1538 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1539 NULL,
1540 NULL,
1541 0,
1542 sizeof(ROS_SHARED_CACHE_MAP),
1543 TAG_SHARED_CACHE_MAP,
1544 20);
1545 ExInitializeNPagedLookasideList(&VacbLookasideList,
1546 NULL,
1547 NULL,
1548 0,
1549 sizeof(ROS_VACB),
1550 TAG_VACB,
1551 20);
1552
1553 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1554
1555 CcInitCacheZeroPage();
1556 }
1557
1558 #if DBG && defined(KDBG)
1559 BOOLEAN
1560 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1561 {
1562 PLIST_ENTRY ListEntry;
1563 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1564
1565 KdbpPrint(" Usage Summary (in kb)\n");
1566 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1567 /* No need to lock the spin lock here, we're in DBG */
1568 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1569 ListEntry != &CcCleanSharedCacheMapList;
1570 ListEntry = ListEntry->Flink)
1571 {
1572 PLIST_ENTRY Vacbs;
1573 ULONG Valid = 0, Dirty = 0;
1574 PROS_SHARED_CACHE_MAP SharedCacheMap;
1575 PUNICODE_STRING FileName;
1576 PWSTR Extra = L"";
1577
1578 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1579
1580 /* Dirty size */
1581 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1582
1583 /* First, count for all the associated VACB */
1584 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1585 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1586 Vacbs = Vacbs->Flink)
1587 {
1588 PROS_VACB Vacb;
1589
1590 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1591 if (Vacb->Valid)
1592 {
1593 Valid += VACB_MAPPING_GRANULARITY / 1024;
1594 }
1595 }
1596
1597 /* Setup name */
1598 if (SharedCacheMap->FileObject != NULL &&
1599 SharedCacheMap->FileObject->FileName.Length != 0)
1600 {
1601 FileName = &SharedCacheMap->FileObject->FileName;
1602 }
1603 else if (SharedCacheMap->FileObject != NULL &&
1604 SharedCacheMap->FileObject->FsContext != NULL &&
1605 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
1606 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
1607 ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
1608 {
1609 FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
1610 Extra = L" (FastFAT)";
1611 }
1612 else
1613 {
1614 FileName = &NoName;
1615 }
1616
1617 /* And print */
1618 KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap, Valid, Dirty, FileName, Extra);
1619 }
1620
1621 return TRUE;
1622 }
1623
BOOLEAN
ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
/*
 * KDBG extension: dumps the dirty-page accounting and memory-manager
 * throttling counters, then states whether writes would be throttled.
 */
{
    KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
              (CcTotalDirtyPages * PAGE_SIZE) / 1024);
    KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
              (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
    KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
              (MmAvailablePages * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
              (MmThrottleTop * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
              (MmThrottleBottom * PAGE_SIZE) / 1024);
    KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
              (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);

    /* Verdict: 64 is the maximum per-write charge, hence the middle band */
    if (CcTotalDirtyPages >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
    }
    else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
    }
    else
    {
        KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
    }

    return TRUE;
}
1655 #endif
1656
1657 /* EOF */