[NTOSKRNL] Don't call AcquireForLazyWrite with the master lock held
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
20 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
49 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
50 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
51
52 /* Internal vars (MS):
53 * - Threshold above which lazy writer will start action
54 * - Amount of dirty pages
55 * - List for deferred writes
56 * - Spinlock when dealing with the deferred list
57 * - List for "clean" shared cache maps
58 */
59 ULONG CcDirtyPageThreshold = 0;
60 ULONG CcTotalDirtyPages = 0;
61 LIST_ENTRY CcDeferredWrites;
62 KSPIN_LOCK CcDeferredWriteSpinLock;
63 LIST_ENTRY CcCleanSharedCacheMapList;
64
65 #if DBG
66 ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
67 {
68 ULONG Refs;
69
70 Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
71 if (vacb->SharedCacheMap->Trace)
72 {
73 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
74 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
75 }
76
77 return Refs;
78 }
79 ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
80 {
81 ULONG Refs;
82
83 Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
84 ASSERT(!(Refs == 0 && vacb->Dirty));
85 if (vacb->SharedCacheMap->Trace)
86 {
87 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
88 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
89 }
90
91 if (Refs == 0)
92 {
93 CcRosInternalFreeVacb(vacb);
94 }
95
96 return Refs;
97 }
98 ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
99 {
100 ULONG Refs;
101
102 Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
103 if (vacb->SharedCacheMap->Trace)
104 {
105 DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
106 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
107 }
108
109 return Refs;
110 }
111 #endif
112
113
114 /* FUNCTIONS *****************************************************************/
115
116 VOID
117 NTAPI
118 CcRosTraceCacheMap (
119 PROS_SHARED_CACHE_MAP SharedCacheMap,
120 BOOLEAN Trace )
121 {
122 #if DBG
123 KIRQL oldirql;
124 PLIST_ENTRY current_entry;
125 PROS_VACB current;
126
127 if (!SharedCacheMap)
128 return;
129
130 SharedCacheMap->Trace = Trace;
131
132 if (Trace)
133 {
134 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
135
136 oldirql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
137 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
138
139 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
140 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
141 {
142 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
143 current_entry = current_entry->Flink;
144
145 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
146 current, current->ReferenceCount, current->Dirty, current->PageOut );
147 }
148
149 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
150 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldirql);
151 }
152 else
153 {
154 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
155 }
156
157 #else
158 UNREFERENCED_PARAMETER(SharedCacheMap);
159 UNREFERENCED_PARAMETER(Trace);
160 #endif
161 }
162
163 NTSTATUS
164 NTAPI
165 CcRosFlushVacb (
166 PROS_VACB Vacb)
167 {
168 NTSTATUS Status;
169
170 CcRosUnmarkDirtyVacb(Vacb, TRUE);
171
172 Status = CcWriteVirtualAddress(Vacb);
173 if (!NT_SUCCESS(Status))
174 {
175 CcRosMarkDirtyVacb(Vacb);
176 }
177
178 return Status;
179 }
180
181 NTSTATUS
182 NTAPI
183 CcRosFlushDirtyPages (
184 ULONG Target,
185 PULONG Count,
186 BOOLEAN Wait,
187 BOOLEAN CalledFromLazy)
188 {
189 PLIST_ENTRY current_entry;
190 PROS_VACB current;
191 BOOLEAN Locked;
192 NTSTATUS Status;
193 KIRQL OldIrql;
194
195 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
196
197 (*Count) = 0;
198
199 KeEnterCriticalRegion();
200 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
201
202 current_entry = DirtyVacbListHead.Flink;
203 if (current_entry == &DirtyVacbListHead)
204 {
205 DPRINT("No Dirty pages\n");
206 }
207
208 while ((current_entry != &DirtyVacbListHead) && (Target > 0))
209 {
210 current = CONTAINING_RECORD(current_entry,
211 ROS_VACB,
212 DirtyVacbListEntry);
213 current_entry = current_entry->Flink;
214
215 CcRosVacbIncRefCount(current);
216
217 /* When performing lazy write, don't handle temporary files */
218 if (CalledFromLazy &&
219 BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
220 {
221 CcRosVacbDecRefCount(current);
222 continue;
223 }
224
225 ASSERT(current->Dirty);
226
227 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
228
229 Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
230 current->SharedCacheMap->LazyWriteContext, Wait);
231 if (!Locked)
232 {
233 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
234 CcRosVacbDecRefCount(current);
235 continue;
236 }
237
238 Status = CcRosFlushVacb(current);
239
240 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
241 current->SharedCacheMap->LazyWriteContext);
242
243 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
244 CcRosVacbDecRefCount(current);
245
246 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
247 (Status != STATUS_MEDIA_WRITE_PROTECTED))
248 {
249 DPRINT1("CC: Failed to flush VACB.\n");
250 }
251 else
252 {
253 ULONG PagesFreed;
254
255 /* How many pages did we free? */
256 PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
257 (*Count) += PagesFreed;
258
259 /* Make sure we don't overflow target! */
260 if (Target < PagesFreed)
261 {
262 /* If we would have, jump to zero directly */
263 Target = 0;
264 }
265 else
266 {
267 Target -= PagesFreed;
268 }
269 }
270
271 current_entry = DirtyVacbListHead.Flink;
272 }
273
274 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
275 KeLeaveCriticalRegion();
276
277 DPRINT("CcRosFlushDirtyPages() finished\n");
278 return STATUS_SUCCESS;
279 }
280
281 NTSTATUS
282 CcRosTrimCache (
283 ULONG Target,
284 ULONG Priority,
285 PULONG NrFreed)
286 /*
287 * FUNCTION: Try to free some memory from the file cache.
288 * ARGUMENTS:
289 * Target - The number of pages to be freed.
290 * Priority - The priority of free (currently unused).
291 * NrFreed - Points to a variable where the number of pages
292 * actually freed is returned.
293 */
294 {
295 PLIST_ENTRY current_entry;
296 PROS_VACB current;
297 ULONG PagesFreed;
298 KIRQL oldIrql;
299 LIST_ENTRY FreeList;
300 PFN_NUMBER Page;
301 ULONG i;
302 BOOLEAN FlushedPages = FALSE;
303
304 DPRINT("CcRosTrimCache(Target %lu)\n", Target);
305
306 InitializeListHead(&FreeList);
307
308 *NrFreed = 0;
309
310 retry:
311 oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
312
313 current_entry = VacbLruListHead.Flink;
314 while (current_entry != &VacbLruListHead)
315 {
316 ULONG Refs;
317
318 current = CONTAINING_RECORD(current_entry,
319 ROS_VACB,
320 VacbLruListEntry);
321 current_entry = current_entry->Flink;
322
323 KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
324
325 /* Reference the VACB */
326 CcRosVacbIncRefCount(current);
327
328 /* Check if it's mapped and not dirty */
329 if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
330 {
331 /* We have to break these locks because Cc sucks */
332 KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
333 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
334
335 /* Page out the VACB */
336 for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
337 {
338 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
339
340 MmPageOutPhysicalAddress(Page);
341 }
342
343 /* Reacquire the locks */
344 oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
345 KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
346 }
347
348 /* Dereference the VACB */
349 Refs = CcRosVacbDecRefCount(current);
350
351 /* Check if we can free this entry now */
352 if (Refs < 2)
353 {
354 ASSERT(!current->Dirty);
355 ASSERT(!current->MappedCount);
356 ASSERT(Refs == 1);
357
358 RemoveEntryList(&current->CacheMapVacbListEntry);
359 RemoveEntryList(&current->VacbLruListEntry);
360 InitializeListHead(&current->VacbLruListEntry);
361 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
362
363 /* Calculate how many pages we freed for Mm */
364 PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
365 Target -= PagesFreed;
366 (*NrFreed) += PagesFreed;
367 }
368
369 KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
370 }
371
372 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
373
374 /* Try flushing pages if we haven't met our target */
375 if ((Target > 0) && !FlushedPages)
376 {
377 /* Flush dirty pages to disk */
378 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
379 FlushedPages = TRUE;
380
381 /* We can only swap as many pages as we flushed */
382 if (PagesFreed < Target) Target = PagesFreed;
383
384 /* Check if we flushed anything */
385 if (PagesFreed != 0)
386 {
387 /* Try again after flushing dirty pages */
388 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
389 goto retry;
390 }
391 }
392
393 while (!IsListEmpty(&FreeList))
394 {
395 ULONG Refs;
396
397 current_entry = RemoveHeadList(&FreeList);
398 current = CONTAINING_RECORD(current_entry,
399 ROS_VACB,
400 CacheMapVacbListEntry);
401 InitializeListHead(&current->CacheMapVacbListEntry);
402 Refs = CcRosVacbDecRefCount(current);
403 ASSERT(Refs == 0);
404 }
405
406 DPRINT("Evicted %lu cache pages\n", (*NrFreed));
407
408 return STATUS_SUCCESS;
409 }
410
411 NTSTATUS
412 NTAPI
413 CcRosReleaseVacb (
414 PROS_SHARED_CACHE_MAP SharedCacheMap,
415 PROS_VACB Vacb,
416 BOOLEAN Valid,
417 BOOLEAN Dirty,
418 BOOLEAN Mapped)
419 {
420 ULONG Refs;
421 ASSERT(SharedCacheMap);
422
423 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
424 SharedCacheMap, Vacb, Valid);
425
426 Vacb->Valid = Valid;
427
428 if (Dirty && !Vacb->Dirty)
429 {
430 CcRosMarkDirtyVacb(Vacb);
431 }
432
433 if (Mapped)
434 {
435 if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
436 {
437 CcRosVacbIncRefCount(Vacb);
438 }
439 }
440
441 Refs = CcRosVacbDecRefCount(Vacb);
442 ASSERT(Refs > 0);
443
444 return STATUS_SUCCESS;
445 }
446
447 /* Returns with VACB Lock Held! */
448 PROS_VACB
449 NTAPI
450 CcRosLookupVacb (
451 PROS_SHARED_CACHE_MAP SharedCacheMap,
452 LONGLONG FileOffset)
453 {
454 PLIST_ENTRY current_entry;
455 PROS_VACB current;
456 KIRQL oldIrql;
457
458 ASSERT(SharedCacheMap);
459
460 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
461 SharedCacheMap, FileOffset);
462
463 oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
464 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
465
466 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
467 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
468 {
469 current = CONTAINING_RECORD(current_entry,
470 ROS_VACB,
471 CacheMapVacbListEntry);
472 if (IsPointInRange(current->FileOffset.QuadPart,
473 VACB_MAPPING_GRANULARITY,
474 FileOffset))
475 {
476 CcRosVacbIncRefCount(current);
477 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
478 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
479 return current;
480 }
481 if (current->FileOffset.QuadPart > FileOffset)
482 break;
483 current_entry = current_entry->Flink;
484 }
485
486 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
487 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
488
489 return NULL;
490 }
491
492 VOID
493 NTAPI
494 CcRosMarkDirtyVacb (
495 PROS_VACB Vacb)
496 {
497 KIRQL oldIrql;
498 PROS_SHARED_CACHE_MAP SharedCacheMap;
499
500 SharedCacheMap = Vacb->SharedCacheMap;
501
502 oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
503 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
504
505 ASSERT(!Vacb->Dirty);
506
507 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
508 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
509 Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
510 CcRosVacbIncRefCount(Vacb);
511
512 /* Move to the tail of the LRU list */
513 RemoveEntryList(&Vacb->VacbLruListEntry);
514 InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
515
516 Vacb->Dirty = TRUE;
517
518 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
519
520 /* Schedule a lazy writer run to now that we have dirty VACB */
521 if (!LazyWriter.ScanActive)
522 {
523 CcScheduleLazyWriteScan(FALSE);
524 }
525 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
526 }
527
528 VOID
529 NTAPI
530 CcRosUnmarkDirtyVacb (
531 PROS_VACB Vacb,
532 BOOLEAN LockViews)
533 {
534 KIRQL oldIrql;
535 PROS_SHARED_CACHE_MAP SharedCacheMap;
536
537 SharedCacheMap = Vacb->SharedCacheMap;
538
539 if (LockViews)
540 {
541 oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
542 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
543 }
544
545 ASSERT(Vacb->Dirty);
546
547 Vacb->Dirty = FALSE;
548
549 RemoveEntryList(&Vacb->DirtyVacbListEntry);
550 InitializeListHead(&Vacb->DirtyVacbListEntry);
551 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
552 Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
553 CcRosVacbDecRefCount(Vacb);
554
555 if (LockViews)
556 {
557 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
558 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
559 }
560 }
561
562 NTSTATUS
563 NTAPI
564 CcRosMarkDirtyFile (
565 PROS_SHARED_CACHE_MAP SharedCacheMap,
566 LONGLONG FileOffset)
567 {
568 PROS_VACB Vacb;
569
570 ASSERT(SharedCacheMap);
571
572 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
573 SharedCacheMap, FileOffset);
574
575 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
576 if (Vacb == NULL)
577 {
578 KeBugCheck(CACHE_MANAGER);
579 }
580
581 CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);
582
583 return STATUS_SUCCESS;
584 }
585
586 /*
587 * Note: this is not the contrary function of
588 * CcRosMapVacbInKernelSpace()
589 */
590 NTSTATUS
591 NTAPI
592 CcRosUnmapVacb (
593 PROS_SHARED_CACHE_MAP SharedCacheMap,
594 LONGLONG FileOffset,
595 BOOLEAN NowDirty)
596 {
597 PROS_VACB Vacb;
598
599 ASSERT(SharedCacheMap);
600
601 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
602 SharedCacheMap, FileOffset, NowDirty);
603
604 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
605 if (Vacb == NULL)
606 {
607 return STATUS_UNSUCCESSFUL;
608 }
609
610 ASSERT(Vacb->MappedCount != 0);
611 if (InterlockedDecrement((PLONG)&Vacb->MappedCount) == 0)
612 {
613 CcRosVacbDecRefCount(Vacb);
614 }
615
616 CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);
617
618 return STATUS_SUCCESS;
619 }
620
621 static
622 NTSTATUS
623 CcRosMapVacbInKernelSpace(
624 PROS_VACB Vacb)
625 {
626 ULONG i;
627 NTSTATUS Status;
628 ULONG_PTR NumberOfPages;
629 PVOID BaseAddress = NULL;
630
631 /* Create a memory area. */
632 MmLockAddressSpace(MmGetKernelAddressSpace());
633 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
634 0, // nothing checks for VACB mareas, so set to 0
635 &BaseAddress,
636 VACB_MAPPING_GRANULARITY,
637 PAGE_READWRITE,
638 (PMEMORY_AREA*)&Vacb->MemoryArea,
639 0,
640 PAGE_SIZE);
641 ASSERT(Vacb->BaseAddress == NULL);
642 Vacb->BaseAddress = BaseAddress;
643 MmUnlockAddressSpace(MmGetKernelAddressSpace());
644 if (!NT_SUCCESS(Status))
645 {
646 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
647 return Status;
648 }
649
650 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
651 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
652 ASSERT((ULONG_PTR)Vacb->BaseAddress + VACB_MAPPING_GRANULARITY - 1 > (ULONG_PTR)MmSystemRangeStart);
653
654 /* Create a virtual mapping for this memory area */
655 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
656 for (i = 0; i < NumberOfPages; i++)
657 {
658 PFN_NUMBER PageFrameNumber;
659
660 MI_SET_USAGE(MI_USAGE_CACHE);
661 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
662 if (PageFrameNumber == 0)
663 {
664 DPRINT1("Unable to allocate page\n");
665 KeBugCheck(MEMORY_MANAGEMENT);
666 }
667
668 ASSERT(BaseAddress == Vacb->BaseAddress);
669 ASSERT(i * PAGE_SIZE < VACB_MAPPING_GRANULARITY);
670 ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) >= (ULONG_PTR)BaseAddress);
671 ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) > (ULONG_PTR)MmSystemRangeStart);
672
673 Status = MmCreateVirtualMapping(NULL,
674 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
675 PAGE_READWRITE,
676 &PageFrameNumber,
677 1);
678 if (!NT_SUCCESS(Status))
679 {
680 DPRINT1("Unable to create virtual mapping\n");
681 KeBugCheck(MEMORY_MANAGEMENT);
682 }
683 }
684
685 return STATUS_SUCCESS;
686 }
687
688 static
689 BOOLEAN
690 CcRosFreeUnusedVacb (
691 PULONG Count)
692 {
693 ULONG cFreed;
694 BOOLEAN Freed;
695 KIRQL oldIrql;
696 PROS_VACB current;
697 LIST_ENTRY FreeList;
698 PLIST_ENTRY current_entry;
699
700 cFreed = 0;
701 Freed = FALSE;
702 InitializeListHead(&FreeList);
703
704 oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
705
706 /* Browse all the available VACB */
707 current_entry = VacbLruListHead.Flink;
708 while (current_entry != &VacbLruListHead)
709 {
710 ULONG Refs;
711
712 current = CONTAINING_RECORD(current_entry,
713 ROS_VACB,
714 VacbLruListEntry);
715 current_entry = current_entry->Flink;
716
717 KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
718
719 /* Only deal with unused VACB, we will free them */
720 Refs = CcRosVacbGetRefCount(current);
721 if (Refs < 2)
722 {
723 ASSERT(!current->Dirty);
724 ASSERT(!current->MappedCount);
725 ASSERT(Refs == 1);
726
727 /* Reset and move to free list */
728 RemoveEntryList(&current->CacheMapVacbListEntry);
729 RemoveEntryList(&current->VacbLruListEntry);
730 InitializeListHead(&current->VacbLruListEntry);
731 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
732 }
733
734 KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
735
736 }
737
738 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
739
740 /* And now, free any of the found VACB, that'll free memory! */
741 while (!IsListEmpty(&FreeList))
742 {
743 ULONG Refs;
744
745 current_entry = RemoveHeadList(&FreeList);
746 current = CONTAINING_RECORD(current_entry,
747 ROS_VACB,
748 CacheMapVacbListEntry);
749 InitializeListHead(&current->CacheMapVacbListEntry);
750 Refs = CcRosVacbDecRefCount(current);
751 ASSERT(Refs == 0);
752 ++cFreed;
753 }
754
755 /* If we freed at least one VACB, return success */
756 if (cFreed != 0)
757 {
758 Freed = TRUE;
759 }
760
761 /* If caller asked for free count, return it */
762 if (Count != NULL)
763 {
764 *Count = cFreed;
765 }
766
767 return Freed;
768 }
769
770 static
771 NTSTATUS
772 CcRosCreateVacb (
773 PROS_SHARED_CACHE_MAP SharedCacheMap,
774 LONGLONG FileOffset,
775 PROS_VACB *Vacb)
776 {
777 PROS_VACB current;
778 PROS_VACB previous;
779 PLIST_ENTRY current_entry;
780 NTSTATUS Status;
781 KIRQL oldIrql;
782 ULONG Refs;
783 BOOLEAN Retried;
784
785 ASSERT(SharedCacheMap);
786
787 DPRINT("CcRosCreateVacb()\n");
788
789 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
790 {
791 *Vacb = NULL;
792 return STATUS_INVALID_PARAMETER;
793 }
794
795 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
796 current->BaseAddress = NULL;
797 current->Valid = FALSE;
798 current->Dirty = FALSE;
799 current->PageOut = FALSE;
800 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
801 current->SharedCacheMap = SharedCacheMap;
802 #if DBG
803 if (SharedCacheMap->Trace)
804 {
805 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
806 }
807 #endif
808 current->MappedCount = 0;
809 current->ReferenceCount = 0;
810 InitializeListHead(&current->CacheMapVacbListEntry);
811 InitializeListHead(&current->DirtyVacbListEntry);
812 InitializeListHead(&current->VacbLruListEntry);
813
814 CcRosVacbIncRefCount(current);
815
816 Retried = FALSE;
817 Retry:
818 /* Map VACB in kernel space */
819 Status = CcRosMapVacbInKernelSpace(current);
820 if (!NT_SUCCESS(Status))
821 {
822 ULONG Freed;
823 /* If no space left, try to prune unused VACB
824 * to recover space to map our VACB
825 * If it succeed, retry to map, otherwise
826 * just fail.
827 */
828 if (!Retried && CcRosFreeUnusedVacb(&Freed))
829 {
830 DPRINT("Prunned %d VACB, trying again\n", Freed);
831 Retried = TRUE;
832 goto Retry;
833 }
834
835 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
836 return Status;
837 }
838
839 oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
840
841 *Vacb = current;
842 /* There is window between the call to CcRosLookupVacb
843 * and CcRosCreateVacb. We must check if a VACB for the
844 * file offset exist. If there is a VACB, we release
845 * our newly created VACB and return the existing one.
846 */
847 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
848 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
849 previous = NULL;
850 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
851 {
852 current = CONTAINING_RECORD(current_entry,
853 ROS_VACB,
854 CacheMapVacbListEntry);
855 if (IsPointInRange(current->FileOffset.QuadPart,
856 VACB_MAPPING_GRANULARITY,
857 FileOffset))
858 {
859 CcRosVacbIncRefCount(current);
860 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
861 #if DBG
862 if (SharedCacheMap->Trace)
863 {
864 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
865 SharedCacheMap,
866 (*Vacb),
867 current);
868 }
869 #endif
870 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
871
872 Refs = CcRosVacbDecRefCount(*Vacb);
873 ASSERT(Refs == 0);
874
875 *Vacb = current;
876 return STATUS_SUCCESS;
877 }
878 if (current->FileOffset.QuadPart < FileOffset)
879 {
880 ASSERT(previous == NULL ||
881 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
882 previous = current;
883 }
884 if (current->FileOffset.QuadPart > FileOffset)
885 break;
886 current_entry = current_entry->Flink;
887 }
888 /* There was no existing VACB. */
889 current = *Vacb;
890 if (previous)
891 {
892 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
893 }
894 else
895 {
896 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
897 }
898 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
899 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
900 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
901
902 MI_SET_USAGE(MI_USAGE_CACHE);
903 #if MI_TRACE_PFNS
904 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
905 {
906 PWCHAR pos;
907 ULONG len = 0;
908 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
909 if (pos)
910 {
911 len = wcslen(pos) * sizeof(WCHAR);
912 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
913 }
914 else
915 {
916 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
917 }
918 }
919 #endif
920
921 /* Reference it to allow release */
922 CcRosVacbIncRefCount(current);
923
924 return Status;
925 }
926
927 NTSTATUS
928 NTAPI
929 CcRosGetVacb (
930 PROS_SHARED_CACHE_MAP SharedCacheMap,
931 LONGLONG FileOffset,
932 PLONGLONG BaseOffset,
933 PVOID* BaseAddress,
934 PBOOLEAN UptoDate,
935 PROS_VACB *Vacb)
936 {
937 PROS_VACB current;
938 NTSTATUS Status;
939 ULONG Refs;
940 KIRQL OldIrql;
941
942 ASSERT(SharedCacheMap);
943
944 DPRINT("CcRosGetVacb()\n");
945
946 /*
947 * Look for a VACB already mapping the same data.
948 */
949 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
950 if (current == NULL)
951 {
952 /*
953 * Otherwise create a new VACB.
954 */
955 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
956 if (!NT_SUCCESS(Status))
957 {
958 return Status;
959 }
960 }
961
962 Refs = CcRosVacbGetRefCount(current);
963
964 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
965
966 /* Move to the tail of the LRU list */
967 RemoveEntryList(&current->VacbLruListEntry);
968 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
969
970 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
971
972 /*
973 * Return information about the VACB to the caller.
974 */
975 *UptoDate = current->Valid;
976 *BaseAddress = current->BaseAddress;
977 DPRINT("*BaseAddress %p\n", *BaseAddress);
978 *Vacb = current;
979 *BaseOffset = current->FileOffset.QuadPart;
980
981 ASSERT(Refs > 1);
982
983 return STATUS_SUCCESS;
984 }
985
986 NTSTATUS
987 NTAPI
988 CcRosRequestVacb (
989 PROS_SHARED_CACHE_MAP SharedCacheMap,
990 LONGLONG FileOffset,
991 PVOID* BaseAddress,
992 PBOOLEAN UptoDate,
993 PROS_VACB *Vacb)
994 /*
995 * FUNCTION: Request a page mapping for a shared cache map
996 */
997 {
998 LONGLONG BaseOffset;
999
1000 ASSERT(SharedCacheMap);
1001
1002 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
1003 {
1004 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
1005 FileOffset, VACB_MAPPING_GRANULARITY);
1006 KeBugCheck(CACHE_MANAGER);
1007 }
1008
1009 return CcRosGetVacb(SharedCacheMap,
1010 FileOffset,
1011 &BaseOffset,
1012 BaseAddress,
1013 UptoDate,
1014 Vacb);
1015 }
1016
1017 static
1018 VOID
1019 CcFreeCachePage (
1020 PVOID Context,
1021 MEMORY_AREA* MemoryArea,
1022 PVOID Address,
1023 PFN_NUMBER Page,
1024 SWAPENTRY SwapEntry,
1025 BOOLEAN Dirty)
1026 {
1027 ASSERT(SwapEntry == 0);
1028 if (Page != 0)
1029 {
1030 ASSERT(MmGetReferenceCountPage(Page) == 1);
1031 MmReleasePageMemoryConsumer(MC_CACHE, Page);
1032 }
1033 }
1034
1035 NTSTATUS
1036 CcRosInternalFreeVacb (
1037 PROS_VACB Vacb)
1038 /*
1039 * FUNCTION: Releases a VACB associated with a shared cache map
1040 */
1041 {
1042 DPRINT("Freeing VACB 0x%p\n", Vacb);
1043 #if DBG
1044 if (Vacb->SharedCacheMap->Trace)
1045 {
1046 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
1047 }
1048 #endif
1049
1050 MmLockAddressSpace(MmGetKernelAddressSpace());
1051 MmFreeMemoryArea(MmGetKernelAddressSpace(),
1052 Vacb->MemoryArea,
1053 CcFreeCachePage,
1054 NULL);
1055 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1056
1057 if (Vacb->ReferenceCount != 0)
1058 {
1059 DPRINT1("Invalid free: %ld\n", Vacb->ReferenceCount);
1060 if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
1061 {
1062 DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
1063 }
1064 }
1065
1066 ASSERT(Vacb->ReferenceCount == 0);
1067 ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
1068 ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
1069 ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));
1070 RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
1071 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
1072 return STATUS_SUCCESS;
1073 }
1074
1075 /*
1076 * @implemented
1077 */
1078 VOID
1079 NTAPI
1080 CcFlushCache (
1081 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
1082 IN PLARGE_INTEGER FileOffset OPTIONAL,
1083 IN ULONG Length,
1084 OUT PIO_STATUS_BLOCK IoStatus)
1085 {
1086 PROS_SHARED_CACHE_MAP SharedCacheMap;
1087 LARGE_INTEGER Offset;
1088 LONGLONG RemainingLength;
1089 PROS_VACB current;
1090 NTSTATUS Status;
1091
1092 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1093 SectionObjectPointers, FileOffset, Length);
1094
1095 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1096 SectionObjectPointers, FileOffset, Length, IoStatus);
1097
1098 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1099 {
1100 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1101 ASSERT(SharedCacheMap);
1102 if (FileOffset)
1103 {
1104 Offset = *FileOffset;
1105 RemainingLength = Length;
1106 }
1107 else
1108 {
1109 Offset.QuadPart = 0;
1110 RemainingLength = SharedCacheMap->FileSize.QuadPart;
1111 }
1112
1113 if (IoStatus)
1114 {
1115 IoStatus->Status = STATUS_SUCCESS;
1116 IoStatus->Information = 0;
1117 }
1118
1119 while (RemainingLength > 0)
1120 {
1121 current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
1122 if (current != NULL)
1123 {
1124 if (current->Dirty)
1125 {
1126 Status = CcRosFlushVacb(current);
1127 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1128 {
1129 IoStatus->Status = Status;
1130 }
1131 }
1132
1133 CcRosReleaseVacb(SharedCacheMap, current, current->Valid, current->Dirty, FALSE);
1134 }
1135
1136 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
1137 RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
1138 }
1139 }
1140 else
1141 {
1142 if (IoStatus)
1143 {
1144 IoStatus->Status = STATUS_INVALID_PARAMETER;
1145 }
1146 }
1147 }
1148
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PKIRQL OldIrql)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 * ARGUMENTS:
 *     FileObject     - File whose cache is being torn down.
 *     SharedCacheMap - The shared cache map to delete.
 *     OldIrql        - In/out IRQL for the master lock; the caller holds
 *                      the master lock on entry and gets it back held on
 *                      return, but it is dropped and reacquired several
 *                      times inside this function.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;

    ASSERT(SharedCacheMap);

    /* Pin the map with an extra open count so a concurrent deleter cannot
     * free it while we flush with the master lock dropped */
    SharedCacheMap->OpenCount++;
    KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);

    /* Write back any dirty data before the VACBs are torn down */
    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        /* Nobody re-referenced the map while we flushed: really delete it */
        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            /* Drop the cache-map lock around the per-VACB work; it is
             * reacquired at the bottom of the loop before the next pop */
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            /* Take the VACB out of the LRU so the trimmer cannot find it,
             * and re-init the entry so a later RemoveEntryList is harmless */
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* NOTE(review): FALSE presumably means "lock already held",
                 * hence the explicit acquire around the call — confirm
                 * against CcRosUnmarkDirtyVacb */
                KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
                CcRosUnmarkDirtyVacb(current, FALSE);
                KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
                /* Still dirty after the flush above: this data is lost */
                DPRINT1("Freeing dirty VACB\n");
            }
            if (current->MappedCount != 0)
            {
                /* Forcefully drop the reference held for outstanding mappings */
                current->MappedCount = 0;
                NT_VERIFY(CcRosVacbDecRefCount(current) > 0);
                DPRINT1("Freeing mapped VACB\n");
            }
            /* Queue the VACB for release once every lock is dropped */
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

        /* Drop the master lock before dereferencing: the dereference may
         * trigger work that must not run under a spinlock */
        KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);
        ObDereferenceObject(SharedCacheMap->FileObject);

        /* Release every queued VACB; each should now be at its last reference */
        while (!IsListEmpty(&FreeList))
        {
            ULONG Refs;

            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            InitializeListHead(&current->CacheMapVacbListEntry);
            Refs = CcRosVacbDecRefCount(current);
#if DBG // CORE-14578
            /* Diagnostics for leaked VACBs: identify the owning file */
            if (Refs != 0)
            {
                DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", current, FileObject, current->FileOffset.QuadPart);
                DPRINT1("There are: %d references left\n", Refs);
                DPRINT1("Map: %d\n", current->MappedCount);
                DPRINT1("Dirty: %d\n", current->Dirty);
                if (FileObject->FileName.Length != 0)
                {
                    DPRINT1("File was: %wZ\n", &FileObject->FileName);
                }
                else if (FileObject->FsContext != NULL &&
                         ((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->NodeTypeCode == 0x0502 &&
                         ((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->NodeByteSize == 0x1F8 &&
                         ((PUNICODE_STRING)(((PUCHAR)FileObject->FsContext) + 0x100))->Length != 0)
                {
                    /* Heuristic peek into a FastFAT FCB layout to recover the name */
                    DPRINT1("File was: %wZ (FastFAT)\n", (PUNICODE_STRING)(((PUCHAR)FileObject->FsContext) + 0x100));
                }
                else
                {
                    DPRINT1("No name for the file\n");
                }
            }
#else
            ASSERT(Refs == 0);
#endif
        }

        /* Unlink the shared cache map from the global list and free it */
        *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        /* The caller expects the master lock to be held on return */
        *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    }
    return STATUS_SUCCESS;
}
1259
1260 VOID
1261 NTAPI
1262 CcRosReferenceCache (
1263 PFILE_OBJECT FileObject)
1264 {
1265 PROS_SHARED_CACHE_MAP SharedCacheMap;
1266 KIRQL OldIrql;
1267
1268 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1269 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1270 ASSERT(SharedCacheMap);
1271 ASSERT(SharedCacheMap->OpenCount != 0);
1272 SharedCacheMap->OpenCount++;
1273 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1274 }
1275
1276 VOID
1277 NTAPI
1278 CcRosRemoveIfClosed (
1279 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1280 {
1281 PROS_SHARED_CACHE_MAP SharedCacheMap;
1282 KIRQL OldIrql;
1283
1284 DPRINT("CcRosRemoveIfClosed()\n");
1285 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1286 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1287 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1288 {
1289 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);
1290 }
1291 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1292 }
1293
1294
VOID
NTAPI
CcRosDereferenceCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Drops one reference on the shared cache map of a file; when the
 * last reference goes away, frees the backing section segments and deletes
 * the cache map.
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KIRQL OldIrql;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    if (SharedCacheMap->OpenCount > 0)
    {
        SharedCacheMap->OpenCount--;
        if (SharedCacheMap->OpenCount == 0)
        {
            /* Last reference: release the master lock before calling out —
             * MmFreeSectionSegments is presumably not safe to call with a
             * queued spinlock held (TODO confirm) */
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            MmFreeSectionSegments(SharedCacheMap->FileObject);

            /* CcRosDeleteFileCache expects the master lock held on entry
             * and returns with it held again */
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

            return;
        }
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}
1323
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 * Tears down the handle's private cache map and drops the handle's
 * reference on the shared cache map, deleting the latter when it was
 * the last one.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map
         * Before you event try to remove it from FO, always
         * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing!
         */
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;

        if (PrivateMap != NULL)
        {
            /* Remove it from the file */
            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

            /* And free it. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                /* Separately pool-allocated private map (not the first handle) */
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                /* The embedded private map cannot be freed; clearing the node
                 * type marks it available for reuse by the next handle */
                PrivateMap->NodeTypeCode = 0;
            }

            if (SharedCacheMap->OpenCount > 0)
            {
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
                    /* Last handle: drop the master lock before calling out —
                     * see CcRosDereferenceCache for the same pattern */
                    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
                    MmFreeSectionSegments(SharedCacheMap->FileObject);

                    /* CcRosDeleteFileCache expects the master lock held on
                     * entry and returns with it held again */
                    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
                    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

                    return STATUS_SUCCESS;
                }
            }
        }
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    return STATUS_SUCCESS;
}
1388
1389 NTSTATUS
1390 NTAPI
1391 CcRosInitializeFileCache (
1392 PFILE_OBJECT FileObject,
1393 PCC_FILE_SIZES FileSizes,
1394 BOOLEAN PinAccess,
1395 PCACHE_MANAGER_CALLBACKS CallBacks,
1396 PVOID LazyWriterContext)
1397 /*
1398 * FUNCTION: Initializes a shared cache map for a file object
1399 */
1400 {
1401 KIRQL OldIrql;
1402 BOOLEAN Allocated;
1403 PROS_SHARED_CACHE_MAP SharedCacheMap;
1404
1405 DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject);
1406
1407 Allocated = FALSE;
1408 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1409 if (SharedCacheMap == NULL)
1410 {
1411 Allocated = TRUE;
1412 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1413 if (SharedCacheMap == NULL)
1414 {
1415 return STATUS_INSUFFICIENT_RESOURCES;
1416 }
1417 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1418 SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
1419 SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
1420 SharedCacheMap->FileObject = FileObject;
1421 SharedCacheMap->Callbacks = CallBacks;
1422 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1423 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1424 SharedCacheMap->FileSize = FileSizes->FileSize;
1425 SharedCacheMap->PinAccess = PinAccess;
1426 SharedCacheMap->DirtyPageThreshold = 0;
1427 SharedCacheMap->DirtyPages = 0;
1428 InitializeListHead(&SharedCacheMap->PrivateList);
1429 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1430 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1431 InitializeListHead(&SharedCacheMap->BcbList);
1432 }
1433
1434 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1435 if (Allocated)
1436 {
1437 if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
1438 {
1439 ObReferenceObjectByPointer(FileObject,
1440 FILE_ALL_ACCESS,
1441 NULL,
1442 KernelMode);
1443 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1444
1445 InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
1446 }
1447 else
1448 {
1449 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1450 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1451 }
1452 }
1453 if (FileObject->PrivateCacheMap == NULL)
1454 {
1455 PPRIVATE_CACHE_MAP PrivateMap;
1456
1457 /* Allocate the private cache map for this handle */
1458 if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
1459 {
1460 PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
1461 }
1462 else
1463 {
1464 PrivateMap = &SharedCacheMap->PrivateCacheMap;
1465 }
1466
1467 if (PrivateMap == NULL)
1468 {
1469 /* If we also allocated the shared cache map for this file, kill it */
1470 if (Allocated)
1471 {
1472 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
1473
1474 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1475 ObDereferenceObject(FileObject);
1476 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1477 }
1478
1479 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1480 return STATUS_INSUFFICIENT_RESOURCES;
1481 }
1482
1483 /* Initialize it */
1484 RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
1485 PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
1486 PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
1487 PrivateMap->FileObject = FileObject;
1488 KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);
1489
1490 /* Link it to the file */
1491 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
1492 InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
1493 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
1494
1495 FileObject->PrivateCacheMap = PrivateMap;
1496 SharedCacheMap->OpenCount++;
1497 }
1498 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1499
1500 return STATUS_SUCCESS;
1501 }
1502
1503 /*
1504 * @implemented
1505 */
1506 PFILE_OBJECT
1507 NTAPI
1508 CcGetFileObjectFromSectionPtrs (
1509 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1510 {
1511 PROS_SHARED_CACHE_MAP SharedCacheMap;
1512
1513 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1514
1515 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1516 {
1517 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1518 ASSERT(SharedCacheMap);
1519 return SharedCacheMap->FileObject;
1520 }
1521 return NULL;
1522 }
1523
1524 VOID
1525 INIT_FUNCTION
1526 NTAPI
1527 CcInitView (
1528 VOID)
1529 {
1530 DPRINT("CcInitView()\n");
1531
1532 InitializeListHead(&DirtyVacbListHead);
1533 InitializeListHead(&VacbLruListHead);
1534 InitializeListHead(&CcDeferredWrites);
1535 InitializeListHead(&CcCleanSharedCacheMapList);
1536 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1537 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1538 NULL,
1539 NULL,
1540 0,
1541 sizeof(INTERNAL_BCB),
1542 TAG_BCB,
1543 20);
1544 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1545 NULL,
1546 NULL,
1547 0,
1548 sizeof(ROS_SHARED_CACHE_MAP),
1549 TAG_SHARED_CACHE_MAP,
1550 20);
1551 ExInitializeNPagedLookasideList(&VacbLookasideList,
1552 NULL,
1553 NULL,
1554 0,
1555 sizeof(ROS_VACB),
1556 TAG_VACB,
1557 20);
1558
1559 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1560
1561 CcInitCacheZeroPage();
1562 }
1563
1564 #if DBG && defined(KDBG)
1565 BOOLEAN
1566 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1567 {
1568 PLIST_ENTRY ListEntry;
1569 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1570
1571 KdbpPrint(" Usage Summary (in kb)\n");
1572 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1573 /* No need to lock the spin lock here, we're in DBG */
1574 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1575 ListEntry != &CcCleanSharedCacheMapList;
1576 ListEntry = ListEntry->Flink)
1577 {
1578 PLIST_ENTRY Vacbs;
1579 ULONG Valid = 0, Dirty = 0;
1580 PROS_SHARED_CACHE_MAP SharedCacheMap;
1581 PUNICODE_STRING FileName;
1582 PWSTR Extra = L"";
1583
1584 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1585
1586 /* Dirty size */
1587 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1588
1589 /* First, count for all the associated VACB */
1590 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1591 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1592 Vacbs = Vacbs->Flink)
1593 {
1594 PROS_VACB Vacb;
1595
1596 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1597 if (Vacb->Valid)
1598 {
1599 Valid += VACB_MAPPING_GRANULARITY / 1024;
1600 }
1601 }
1602
1603 /* Setup name */
1604 if (SharedCacheMap->FileObject != NULL &&
1605 SharedCacheMap->FileObject->FileName.Length != 0)
1606 {
1607 FileName = &SharedCacheMap->FileObject->FileName;
1608 }
1609 else if (SharedCacheMap->FileObject != NULL &&
1610 SharedCacheMap->FileObject->FsContext != NULL &&
1611 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
1612 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
1613 ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
1614 {
1615 FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
1616 Extra = L" (FastFAT)";
1617 }
1618 else
1619 {
1620 FileName = &NoName;
1621 }
1622
1623 /* And print */
1624 KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap, Valid, Dirty, FileName, Extra);
1625 }
1626
1627 return TRUE;
1628 }
1629
1630 BOOLEAN
1631 ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
1632 {
1633 KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
1634 (CcTotalDirtyPages * PAGE_SIZE) / 1024);
1635 KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
1636 (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
1637 KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
1638 (MmAvailablePages * PAGE_SIZE) / 1024);
1639 KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
1640 (MmThrottleTop * PAGE_SIZE) / 1024);
1641 KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
1642 (MmThrottleBottom * PAGE_SIZE) / 1024);
1643 KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
1644 (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);
1645
1646 if (CcTotalDirtyPages >= CcDirtyPageThreshold)
1647 {
1648 KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
1649 }
1650 else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
1651 {
1652 KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
1653 }
1654 else
1655 {
1656 KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
1657 }
1658
1659 return TRUE;
1660 }
1661 #endif
1662
1663 /* EOF */