[NTOSKRNL] Don't set VACB dirty on release if already dirty
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
20 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
49 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
50 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
51
52 /* Internal vars (MS):
53 * - Threshold above which lazy writer will start action
54 * - Amount of dirty pages
55 * - List for deferred writes
56 * - Spinlock when dealing with the deferred list
57 * - List for "clean" shared cache maps
58 */
59 ULONG CcDirtyPageThreshold = 0;
60 ULONG CcTotalDirtyPages = 0;
61 LIST_ENTRY CcDeferredWrites;
62 KSPIN_LOCK CcDeferredWriteSpinLock;
63 LIST_ENTRY CcCleanSharedCacheMapList;
64
#if DBG
/* Debug-only reference counting helpers: each wraps the interlocked
 * operation and emits a trace line when the owning shared cache map has
 * tracing enabled (see CcRosTraceCacheMap). */

ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG NewRefs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);

    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, NewRefs, vacb->Dirty, vacb->PageOut);
    }

    return NewRefs;
}

ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG NewRefs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);

    /* A dirty view must always keep at least one reference alive */
    ASSERT(!(NewRefs == 0 && vacb->Dirty));

    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, NewRefs, vacb->Dirty, vacb->PageOut);
    }

    /* Last reference gone: tear the view down */
    if (NewRefs == 0)
    {
        CcRosInternalFreeVacb(vacb);
    }

    return NewRefs;
}

ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    /* Compare-exchange against equal values is just an atomic read */
    ULONG CurRefs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);

    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, CurRefs, vacb->Dirty, vacb->PageOut);
    }

    return CurRefs;
}
#endif
112
113
114 /* FUNCTIONS *****************************************************************/
115
/* Enable or disable debug tracing for a shared cache map.
 * Only effective on DBG builds; otherwise a no-op. When enabling, the
 * map's current view list is dumped under the cache locks. */
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        /* Lock order: master lock first, then the per-map view list lock */
        oldirql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

        /* Dump every view currently attached to this cache map */
        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }

        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldirql);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
162
163 NTSTATUS
164 NTAPI
165 CcRosFlushVacb (
166 PROS_VACB Vacb)
167 {
168 NTSTATUS Status;
169
170 CcRosUnmarkDirtyVacb(Vacb, TRUE);
171
172 Status = CcWriteVirtualAddress(Vacb);
173 if (!NT_SUCCESS(Status))
174 {
175 CcRosMarkDirtyVacb(Vacb);
176 }
177
178 return Status;
179 }
180
/* Write back up to Target pages worth of dirty views.
 * Target         - page budget for this run.
 * Count          - receives the number of pages actually flushed.
 * Wait           - passed through to the FS AcquireForLazyWrite callback.
 * CalledFromLazy - when TRUE, temporary files and files that disabled
 *                  write-behind are skipped.
 * The master lock is dropped around the filesystem callback and the
 * actual flush, so the dirty list is re-read from the head after every
 * iteration. Always returns STATUS_SUCCESS; per-view flush failures are
 * only logged. */
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;
    KIRQL OldIrql;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;

    KeEnterCriticalRegion();
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Keep the view alive while we drop the master lock below */
        CcRosVacbIncRefCount(current);

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Don't attempt to lazy write the files that asked not to */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->Flags, WRITEBEHIND_DISABLED))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* The FS callback and the flush may block: release the lock */
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            CcRosVacbDecRefCount(current);
            continue;
        }

        Status = CcRosFlushVacb(current);

        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            (*Count) += PagesFreed;

            /* Make sure we don't overflow target! */
            if (Target < PagesFreed)
            {
                /* If we would have, jump to zero directly */
                Target = 0;
            }
            else
            {
                Target -= PagesFreed;
            }
        }

        /* The list may have changed while the lock was dropped:
         * restart the walk from the head */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
288
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 * NOTE: walks the global LRU list; evictable views (clean, unmapped,
 * only referenced by the cache itself) are unlinked under the locks and
 * freed afterwards. Mapped clean views are paged out first, which
 * requires temporarily dropping both locks. If the target is not met,
 * dirty pages are flushed once and the walk is retried.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
        }

        /* Dereference the VACB */
        Refs = CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now: only the cache's own
         * reference remains */
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            /* Unlink now, free after the locks are dropped */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Free the collected views; each still carries the cache's single
     * reference, which this final decrement releases */
    while (!IsListEmpty(&FreeList))
    {
        ULONG Refs;

        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        InitializeListHead(&current->CacheMapVacbListEntry);
        Refs = CcRosVacbDecRefCount(current);
        ASSERT(Refs == 0);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
418
/* Drop the caller's reference on a VACB, recording its new state.
 * Valid  - validity state stored in the view.
 * Dirty  - if TRUE, the view is marked dirty -- but only when it is not
 *          dirty already, so an already-dirty view is not inserted into
 *          the dirty list (and referenced) a second time.
 * Mapped - if TRUE, the mapping count is raised; the first mapping takes
 *          an extra reference which CcRosUnmapVacb later drops. */
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ULONG Refs;
    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    Vacb->Valid = Valid;

    /* Only mark when not already dirty: CcRosMarkDirtyVacb asserts the
     * view is clean and takes a dirty-list reference of its own */
    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        /* First mapping: keep the view referenced while it is mapped */
        if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
        {
            CcRosVacbIncRefCount(Vacb);
        }
    }

    Refs = CcRosVacbDecRefCount(Vacb);
    /* Releasing must never drop the last reference */
    ASSERT(Refs > 0);

    return STATUS_SUCCESS;
}
454
/* Look up the VACB covering FileOffset in SharedCacheMap's view list.
 * On a hit the VACB is returned with an extra reference that the caller
 * must drop (e.g. via CcRosReleaseVacb); on a miss NULL is returned.
 * No lock is held on return -- only the reference. */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    /* Lock order: master lock first, then the per-map view list lock */
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Reference the view before dropping the locks */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
            return current;
        }
        /* The list is kept sorted by offset (see CcRosCreateVacb), so
         * passing the target offset means there is no match */
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    return NULL;
}
499
/* Insert a clean VACB into the global dirty list: updates the global and
 * per-map dirty page counters, takes a dirty-list reference, and moves
 * the view to the tail of the LRU list. The view must not already be
 * dirty (asserted) -- callers such as CcRosReleaseVacb check first.
 * Also kicks the lazy writer if no scan is pending. */
VOID
NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);

    /* A view must never be marked dirty twice */
    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* The dirty list holds a reference, dropped in CcRosUnmarkDirtyVacb */
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}
535
/* Remove a dirty VACB from the global dirty list, update the dirty page
 * counters, and drop the reference taken by CcRosMarkDirtyVacb. The
 * view must be dirty (asserted). When LockViews is FALSE the caller is
 * expected to already hold the master lock and the map's CacheMapLock
 * (as CcRosDeleteFileCache does). */
VOID
NTAPI
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    /* Reinitialize so later IsListEmpty checks on the entry are valid */
    InitializeListHead(&Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* Drop the reference the dirty list was holding */
    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
    }
}
569
570 NTSTATUS
571 NTAPI
572 CcRosMarkDirtyFile (
573 PROS_SHARED_CACHE_MAP SharedCacheMap,
574 LONGLONG FileOffset)
575 {
576 PROS_VACB Vacb;
577
578 ASSERT(SharedCacheMap);
579
580 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
581 SharedCacheMap, FileOffset);
582
583 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
584 if (Vacb == NULL)
585 {
586 KeBugCheck(CACHE_MANAGER);
587 }
588
589 CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);
590
591 return STATUS_SUCCESS;
592 }
593
/*
 * Note: this is not the contrary function of
 * CcRosMapVacbInKernelSpace()
 */
/* Drop one user mapping of the view at FileOffset. When the mapping
 * count reaches zero, the extra reference taken on first mapping (see
 * CcRosReleaseVacb) is released. NowDirty marks the view dirty on the
 * final release. Returns STATUS_UNSUCCESSFUL if no view covers the
 * offset. */
NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    BOOLEAN NowDirty)
{
    PROS_VACB Vacb;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    ASSERT(Vacb->MappedCount != 0);
    /* Last mapping gone: drop the mapping reference */
    if (InterlockedDecrement((PLONG)&Vacb->MappedCount) == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    /* Drops the lookup reference, and marks dirty if requested */
    CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);

    return STATUS_SUCCESS;
}
628
629 static
630 NTSTATUS
631 CcRosMapVacbInKernelSpace(
632 PROS_VACB Vacb)
633 {
634 ULONG i;
635 NTSTATUS Status;
636 ULONG_PTR NumberOfPages;
637 PVOID BaseAddress = NULL;
638
639 /* Create a memory area. */
640 MmLockAddressSpace(MmGetKernelAddressSpace());
641 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
642 0, // nothing checks for VACB mareas, so set to 0
643 &BaseAddress,
644 VACB_MAPPING_GRANULARITY,
645 PAGE_READWRITE,
646 (PMEMORY_AREA*)&Vacb->MemoryArea,
647 0,
648 PAGE_SIZE);
649 ASSERT(Vacb->BaseAddress == NULL);
650 Vacb->BaseAddress = BaseAddress;
651 MmUnlockAddressSpace(MmGetKernelAddressSpace());
652 if (!NT_SUCCESS(Status))
653 {
654 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
655 return Status;
656 }
657
658 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
659 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
660 ASSERT((ULONG_PTR)Vacb->BaseAddress + VACB_MAPPING_GRANULARITY - 1 > (ULONG_PTR)MmSystemRangeStart);
661
662 /* Create a virtual mapping for this memory area */
663 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
664 for (i = 0; i < NumberOfPages; i++)
665 {
666 PFN_NUMBER PageFrameNumber;
667
668 MI_SET_USAGE(MI_USAGE_CACHE);
669 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
670 if (PageFrameNumber == 0)
671 {
672 DPRINT1("Unable to allocate page\n");
673 KeBugCheck(MEMORY_MANAGEMENT);
674 }
675
676 ASSERT(BaseAddress == Vacb->BaseAddress);
677 ASSERT(i * PAGE_SIZE < VACB_MAPPING_GRANULARITY);
678 ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) >= (ULONG_PTR)BaseAddress);
679 ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) > (ULONG_PTR)MmSystemRangeStart);
680
681 Status = MmCreateVirtualMapping(NULL,
682 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
683 PAGE_READWRITE,
684 &PageFrameNumber,
685 1);
686 if (!NT_SUCCESS(Status))
687 {
688 DPRINT1("Unable to create virtual mapping\n");
689 KeBugCheck(MEMORY_MANAGEMENT);
690 }
691 }
692
693 return STATUS_SUCCESS;
694 }
695
/* Free every VACB whose only remaining reference is the cache's own
 * (refcount < 2): such views are clean and unmapped (asserted).
 * Candidates are unlinked from all lists under the locks, collected on a
 * private list, and actually freed after the locks are dropped. Returns
 * TRUE if at least one view was freed; *Count (optional) receives the
 * number freed. */
static
BOOLEAN
CcRosFreeUnusedVacb (
    PULONG Count)
{
    ULONG cFreed;
    BOOLEAN Freed;
    KIRQL oldIrql;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    PLIST_ENTRY current_entry;

    cFreed = 0;
    Freed = FALSE;
    InitializeListHead(&FreeList);

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Browse all the available VACB */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);

        /* Only deal with unused VACB, we will free them */
        Refs = CcRosVacbGetRefCount(current);
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            /* Reset and move to free list */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
        }

        KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);

    }

    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    /* And now, free any of the found VACB, that'll free memory! */
    while (!IsListEmpty(&FreeList))
    {
        ULONG Refs;

        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        InitializeListHead(&current->CacheMapVacbListEntry);
        /* Drop the cache's last reference: this frees the view */
        Refs = CcRosVacbDecRefCount(current);
        ASSERT(Refs == 0);
        ++cFreed;
    }

    /* If we freed at least one VACB, return success */
    if (cFreed != 0)
    {
        Freed = TRUE;
    }

    /* If caller asked for free count, return it */
    if (Count != NULL)
    {
        *Count = cFreed;
    }

    return Freed;
}
777
/* Allocate, map and insert a new VACB covering FileOffset (rounded down
 * to view granularity). On success the view carries references for both
 * the cache and the caller. If another thread raced us and already
 * created a view for the same range, the new view is discarded and the
 * existing one is returned instead. */
static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;
    ULONG Refs;
    BOOLEAN Retried;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    /* Refuse views beyond the end of the section */
    if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
    {
        *Vacb = NULL;
        return STATUS_INVALID_PARAMETER;
    }

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    current->BaseAddress = NULL;
    current->Valid = FALSE;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
    }
#endif
    current->MappedCount = 0;
    current->ReferenceCount = 0;
    InitializeListHead(&current->CacheMapVacbListEntry);
    InitializeListHead(&current->DirtyVacbListEntry);
    InitializeListHead(&current->VacbLruListEntry);

    /* Reference for the caller */
    CcRosVacbIncRefCount(current);

    Retried = FALSE;
Retry:
    /* Map VACB in kernel space */
    Status = CcRosMapVacbInKernelSpace(current);
    if (!NT_SUCCESS(Status))
    {
        ULONG Freed;
        /* If no space left, try to prune unused VACB
         * to recover space to map our VACB.
         * If that succeeds, retry the mapping; otherwise
         * just fail.
         */
        if (!Retried && CcRosFreeUnusedVacb(&Freed))
        {
            DPRINT("Prunned %d VACB, trying again\n", Freed);
            Retried = TRUE;
            goto Retry;
        }

        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
        return Status;
    }

    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    *Vacb = current;
    /* There is a window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check if a VACB for the
     * file offset already exists. If there is one, we release
     * our newly created VACB and return the existing one.
     */
    KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Raced: take a reference on the winner's view */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

            /* Drop our only reference: frees the redundant view */
            Refs = CcRosVacbDecRefCount(*Vacb);
            ASSERT(Refs == 0);

            *Vacb = current;
            return STATUS_SUCCESS;
        }
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        /* List is sorted: past the offset means no existing view */
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    /* Insert after 'previous' to keep the list sorted by file offset */
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);

    MI_SET_USAGE(MI_USAGE_CACHE);
#if MI_TRACE_PFNS
    if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
    {
        PWCHAR pos;
        ULONG len = 0;
        pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
        if (pos)
        {
            len = wcslen(pos) * sizeof(WCHAR);
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
        }
        else
        {
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
        }
    }
#endif

    /* Reference it to allow release */
    CcRosVacbIncRefCount(current);

    return Status;
}
934
/* Return a VACB for the view containing FileOffset, creating and mapping
 * a new one if none exists yet. Outputs the view's base file offset,
 * kernel base address, validity, and the referenced VACB itself; the
 * caller must drop the reference with CcRosReleaseVacb. The view's LRU
 * position is refreshed. */
NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PLONGLONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;
    ULONG Refs;
    KIRQL OldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    Refs = CcRosVacbGetRefCount(current);

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

    /*
     * Return information about the VACB to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *Vacb = current;
    *BaseOffset = current->FileOffset.QuadPart;

    /* Both the caller's reference and the cache's own must be present */
    ASSERT(Refs > 1);

    return STATUS_SUCCESS;
}
993
994 NTSTATUS
995 NTAPI
996 CcRosRequestVacb (
997 PROS_SHARED_CACHE_MAP SharedCacheMap,
998 LONGLONG FileOffset,
999 PVOID* BaseAddress,
1000 PBOOLEAN UptoDate,
1001 PROS_VACB *Vacb)
1002 /*
1003 * FUNCTION: Request a page mapping for a shared cache map
1004 */
1005 {
1006 LONGLONG BaseOffset;
1007
1008 ASSERT(SharedCacheMap);
1009
1010 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
1011 {
1012 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
1013 FileOffset, VACB_MAPPING_GRANULARITY);
1014 KeBugCheck(CACHE_MANAGER);
1015 }
1016
1017 return CcRosGetVacb(SharedCacheMap,
1018 FileOffset,
1019 &BaseOffset,
1020 BaseAddress,
1021 UptoDate,
1022 Vacb);
1023 }
1024
1025 static
1026 VOID
1027 CcFreeCachePage (
1028 PVOID Context,
1029 MEMORY_AREA* MemoryArea,
1030 PVOID Address,
1031 PFN_NUMBER Page,
1032 SWAPENTRY SwapEntry,
1033 BOOLEAN Dirty)
1034 {
1035 ASSERT(SwapEntry == 0);
1036 if (Page != 0)
1037 {
1038 ASSERT(MmGetReferenceCountPage(Page) == 1);
1039 MmReleasePageMemoryConsumer(MC_CACHE, Page);
1040 }
1041 }
1042
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 * Frees the kernel mapping (returning the backing pages via
 * CcFreeCachePage) and hands the VACB structure back to its lookaside
 * list. Must only run once the reference count is zero and the view is
 * off all lists (asserted below).
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    /* Diagnose a free with outstanding references before asserting */
    if (Vacb->ReferenceCount != 0)
    {
        DPRINT1("Invalid free: %ld\n", Vacb->ReferenceCount);
        if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
        {
            DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
        }
    }

    ASSERT(Vacb->ReferenceCount == 0);
    ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));
    /* Poison the structure to catch use-after-free in debug runs */
    RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
1082
/*
 * @implemented
 */
/* Flush the cached range [FileOffset, FileOffset + Length) of a file to
 * disk, one view at a time. When FileOffset is NULL, the whole cached
 * file size is flushed. IoStatus (optional) receives STATUS_SUCCESS, or
 * the last per-view flush failure, or STATUS_INVALID_PARAMETER when the
 * file has no shared cache map; Information is always set to 0. */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
        SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        /* No offset given: flush the whole cached file */
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one view (VACB_MAPPING_GRANULARITY) at a time */
        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                /* Drop the lookup reference; the view stays clean here
                 * (any flush failure already re-marked it dirty) */
                CcRosReleaseVacb(SharedCacheMap, current, current->Valid, FALSE, FALSE);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1156
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PKIRQL OldIrql)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 *
 * ARGUMENTS: FileObject - file whose cache is being torn down
 *            SharedCacheMap - the shared cache map to delete
 *            OldIrql - in/out IRQL for the master lock; the caller holds
 *                      the master lock on entry and still holds it on
 *                      return, but it is dropped and reacquired several
 *                      times inside this function
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;

    ASSERT(SharedCacheMap);

    /* Pin the map with an extra open count so it cannot be deleted by
     * someone else while we drop the master lock to flush */
    SharedCacheMap->OpenCount++;
    KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);

    /* Write remaining dirty data back; flushing must run without the
     * master lock held */
    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        /* Nobody reopened the cache while we were flushing: detach the
         * map from the file object */
        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            /* Drop the cache map lock while handling this VACB; it is
             * reacquired at the bottom of the loop */
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            /* Take the VACB off the LRU list so it cannot be found anymore */
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* Still dirty even after the flush above: unmark it so the
                 * dirty-page accounting stays consistent before freeing */
                KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
                CcRosUnmarkDirtyVacb(current, FALSE);
                KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
                DPRINT1("Freeing dirty VACB\n");
            }
            if (current->MappedCount != 0)
            {
                /* Drop the reference held by the mapping; the VACB must
                 * still have at least one reference left at this point */
                current->MappedCount = 0;
                NT_VERIFY(CcRosVacbDecRefCount(current) > 0);
                DPRINT1("Freeing mapped VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

        /* Drop the master lock for the dereference/free phase below */
        KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);
        ObDereferenceObject(SharedCacheMap->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            ULONG Refs;

            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            InitializeListHead(&current->CacheMapVacbListEntry);
            /* This should drop the last reference; anything left is a leak */
            Refs = CcRosVacbDecRefCount(current);
#if DBG // CORE-14578
            if (Refs != 0)
            {
                /* Diagnose the leaked VACB as precisely as possible */
                DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", current, FileObject, current->FileOffset.QuadPart);
                DPRINT1("There are: %d references left\n", Refs);
                DPRINT1("Map: %d\n", current->MappedCount);
                DPRINT1("Dirty: %d\n", current->Dirty);
                if (FileObject->FileName.Length != 0)
                {
                    DPRINT1("File was: %wZ\n", &FileObject->FileName);
                }
                else if (FileObject->FsContext != NULL &&
                         ((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->NodeTypeCode == 0x0502 &&
                         ((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->NodeByteSize == 0x1F8 &&
                         ((PUNICODE_STRING)(((PUCHAR)FileObject->FsContext) + 0x100))->Length != 0)
                {
                    /* FCB looks like a FastFAT one; the file name is assumed
                     * to live at offset 0x100 -- TODO confirm against FastFAT */
                    DPRINT1("File was: %wZ (FastFAT)\n", (PUNICODE_STRING)(((PUCHAR)FileObject->FsContext) + 0x100));
                }
                else
                {
                    DPRINT1("No name for the file\n");
                }
            }
#else
            ASSERT(Refs == 0);
#endif
        }

        /* Unlink the shared cache map itself and return it to the lookaside */
        *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        /* The caller expects the master lock to be held again on return */
        *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    }
    return STATUS_SUCCESS;
}
1267
1268 VOID
1269 NTAPI
1270 CcRosReferenceCache (
1271 PFILE_OBJECT FileObject)
1272 {
1273 PROS_SHARED_CACHE_MAP SharedCacheMap;
1274 KIRQL OldIrql;
1275
1276 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1277 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1278 ASSERT(SharedCacheMap);
1279 ASSERT(SharedCacheMap->OpenCount != 0);
1280 SharedCacheMap->OpenCount++;
1281 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1282 }
1283
1284 VOID
1285 NTAPI
1286 CcRosRemoveIfClosed (
1287 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1288 {
1289 PROS_SHARED_CACHE_MAP SharedCacheMap;
1290 KIRQL OldIrql;
1291
1292 DPRINT("CcRosRemoveIfClosed()\n");
1293 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1294 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1295 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1296 {
1297 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);
1298 }
1299 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1300 }
1301
1302
1303 VOID
1304 NTAPI
1305 CcRosDereferenceCache (
1306 PFILE_OBJECT FileObject)
1307 {
1308 PROS_SHARED_CACHE_MAP SharedCacheMap;
1309 KIRQL OldIrql;
1310
1311 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1312 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1313 ASSERT(SharedCacheMap);
1314 if (SharedCacheMap->OpenCount > 0)
1315 {
1316 SharedCacheMap->OpenCount--;
1317 if (SharedCacheMap->OpenCount == 0)
1318 {
1319 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1320 MmFreeSectionSegments(SharedCacheMap->FileObject);
1321
1322 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1323 CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
1324 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1325
1326 return;
1327 }
1328 }
1329 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1330 }
1331
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 *
 * Frees the per-handle private cache map and, when the last open count
 * on the shared cache map is dropped, deletes the whole file cache.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map
         * Before you even try to remove it from FO, always
         * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing!
         */
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;

        if (PrivateMap != NULL)
        {
            /* Remove it from the file */
            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

            /* And free it. The first handle's private map is embedded in
             * the shared cache map and must not be freed to pool; it is
             * marked unused by zeroing its node type instead. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            if (SharedCacheMap->OpenCount > 0)
            {
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
                    /* Last handle is gone: free the section segments with
                     * the master lock dropped, then delete the cache.
                     * CcRosDeleteFileCache expects the lock held on entry
                     * and leaves it held on return. */
                    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
                    MmFreeSectionSegments(SharedCacheMap->FileObject);

                    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
                    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

                    return STATUS_SUCCESS;
                }
            }
        }
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    return STATUS_SUCCESS;
}
1396
1397 NTSTATUS
1398 NTAPI
1399 CcRosInitializeFileCache (
1400 PFILE_OBJECT FileObject,
1401 PCC_FILE_SIZES FileSizes,
1402 BOOLEAN PinAccess,
1403 PCACHE_MANAGER_CALLBACKS CallBacks,
1404 PVOID LazyWriterContext)
1405 /*
1406 * FUNCTION: Initializes a shared cache map for a file object
1407 */
1408 {
1409 KIRQL OldIrql;
1410 BOOLEAN Allocated;
1411 PROS_SHARED_CACHE_MAP SharedCacheMap;
1412
1413 DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject);
1414
1415 Allocated = FALSE;
1416 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1417 if (SharedCacheMap == NULL)
1418 {
1419 Allocated = TRUE;
1420 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1421 if (SharedCacheMap == NULL)
1422 {
1423 return STATUS_INSUFFICIENT_RESOURCES;
1424 }
1425 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1426 SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
1427 SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
1428 SharedCacheMap->FileObject = FileObject;
1429 SharedCacheMap->Callbacks = CallBacks;
1430 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1431 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1432 SharedCacheMap->FileSize = FileSizes->FileSize;
1433 SharedCacheMap->PinAccess = PinAccess;
1434 SharedCacheMap->DirtyPageThreshold = 0;
1435 SharedCacheMap->DirtyPages = 0;
1436 InitializeListHead(&SharedCacheMap->PrivateList);
1437 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1438 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1439 InitializeListHead(&SharedCacheMap->BcbList);
1440 }
1441
1442 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1443 if (Allocated)
1444 {
1445 if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
1446 {
1447 ObReferenceObjectByPointer(FileObject,
1448 FILE_ALL_ACCESS,
1449 NULL,
1450 KernelMode);
1451 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1452
1453 InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
1454 }
1455 else
1456 {
1457 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1458 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1459 }
1460 }
1461 if (FileObject->PrivateCacheMap == NULL)
1462 {
1463 PPRIVATE_CACHE_MAP PrivateMap;
1464
1465 /* Allocate the private cache map for this handle */
1466 if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
1467 {
1468 PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
1469 }
1470 else
1471 {
1472 PrivateMap = &SharedCacheMap->PrivateCacheMap;
1473 }
1474
1475 if (PrivateMap == NULL)
1476 {
1477 /* If we also allocated the shared cache map for this file, kill it */
1478 if (Allocated)
1479 {
1480 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
1481
1482 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1483 ObDereferenceObject(FileObject);
1484 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1485 }
1486
1487 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1488 return STATUS_INSUFFICIENT_RESOURCES;
1489 }
1490
1491 /* Initialize it */
1492 RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
1493 PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
1494 PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
1495 PrivateMap->FileObject = FileObject;
1496 KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);
1497
1498 /* Link it to the file */
1499 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
1500 InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
1501 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
1502
1503 FileObject->PrivateCacheMap = PrivateMap;
1504 SharedCacheMap->OpenCount++;
1505 }
1506 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1507
1508 return STATUS_SUCCESS;
1509 }
1510
1511 /*
1512 * @implemented
1513 */
1514 PFILE_OBJECT
1515 NTAPI
1516 CcGetFileObjectFromSectionPtrs (
1517 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1518 {
1519 PROS_SHARED_CACHE_MAP SharedCacheMap;
1520
1521 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1522
1523 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1524 {
1525 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1526 ASSERT(SharedCacheMap);
1527 return SharedCacheMap->FileObject;
1528 }
1529 return NULL;
1530 }
1531
VOID
INIT_FUNCTION
NTAPI
CcInitView (
    VOID)
/*
 * FUNCTION: One-time boot initialization of the cache manager view
 * machinery: global lists, spinlocks, lookaside lists, the cache memory
 * consumer and the zero page.
 */
{
    DPRINT("CcInitView()\n");

    /* Global VACB bookkeeping: dirty list and LRU list */
    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    /* Deferred (throttled) writes and the list of clean shared cache maps */
    InitializeListHead(&CcDeferredWrites);
    InitializeListHead(&CcCleanSharedCacheMapList);
    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
    /* Lookaside lists for the three cache manager structures,
     * each with a depth hint of 20 */
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    /* Register the trim callback for the cache memory consumer */
    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);

    CcInitCacheZeroPage();
}
1571
1572 #if DBG && defined(KDBG)
1573 BOOLEAN
1574 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1575 {
1576 PLIST_ENTRY ListEntry;
1577 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1578
1579 KdbpPrint(" Usage Summary (in kb)\n");
1580 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1581 /* No need to lock the spin lock here, we're in DBG */
1582 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1583 ListEntry != &CcCleanSharedCacheMapList;
1584 ListEntry = ListEntry->Flink)
1585 {
1586 PLIST_ENTRY Vacbs;
1587 ULONG Valid = 0, Dirty = 0;
1588 PROS_SHARED_CACHE_MAP SharedCacheMap;
1589 PUNICODE_STRING FileName;
1590 PWSTR Extra = L"";
1591
1592 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1593
1594 /* Dirty size */
1595 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1596
1597 /* First, count for all the associated VACB */
1598 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1599 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1600 Vacbs = Vacbs->Flink)
1601 {
1602 PROS_VACB Vacb;
1603
1604 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1605 if (Vacb->Valid)
1606 {
1607 Valid += VACB_MAPPING_GRANULARITY / 1024;
1608 }
1609 }
1610
1611 /* Setup name */
1612 if (SharedCacheMap->FileObject != NULL &&
1613 SharedCacheMap->FileObject->FileName.Length != 0)
1614 {
1615 FileName = &SharedCacheMap->FileObject->FileName;
1616 }
1617 else if (SharedCacheMap->FileObject != NULL &&
1618 SharedCacheMap->FileObject->FsContext != NULL &&
1619 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
1620 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
1621 ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
1622 {
1623 FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
1624 Extra = L" (FastFAT)";
1625 }
1626 else
1627 {
1628 FileName = &NoName;
1629 }
1630
1631 /* And print */
1632 KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap, Valid, Dirty, FileName, Extra);
1633 }
1634
1635 return TRUE;
1636 }
1637
1638 BOOLEAN
1639 ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
1640 {
1641 KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
1642 (CcTotalDirtyPages * PAGE_SIZE) / 1024);
1643 KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
1644 (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
1645 KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
1646 (MmAvailablePages * PAGE_SIZE) / 1024);
1647 KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
1648 (MmThrottleTop * PAGE_SIZE) / 1024);
1649 KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
1650 (MmThrottleBottom * PAGE_SIZE) / 1024);
1651 KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
1652 (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);
1653
1654 if (CcTotalDirtyPages >= CcDirtyPageThreshold)
1655 {
1656 KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
1657 }
1658 else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
1659 {
1660 KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
1661 }
1662 else
1663 {
1664 KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
1665 }
1666
1667 return TRUE;
1668 }
1669 #endif
1670
1671 /* EOF */