81a1dac801cb0d64644c4de03a3e2f060fdc8d3d
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
49 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
50 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
51
52 /* Internal vars (MS):
53 * - Threshold above which lazy writer will start action
54 * - Amount of dirty pages
55 * - List for deferred writes
56 * - Spinlock when dealing with the deferred list
57 * - List for "clean" shared cache maps
58 */
59 ULONG CcDirtyPageThreshold = 0;
60 ULONG CcTotalDirtyPages = 0;
61 LIST_ENTRY CcDeferredWrites;
62 KSPIN_LOCK CcDeferredWriteSpinLock;
63 LIST_ENTRY CcCleanSharedCacheMapList;
64
65 #if DBG
66 ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
67 {
68 ULONG Refs;
69
70 Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
71 if (vacb->SharedCacheMap->Trace)
72 {
73 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
74 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
75 }
76
77 return Refs;
78 }
79 ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
80 {
81 ULONG Refs;
82
83 Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
84 ASSERT(!(Refs == 0 && vacb->Dirty));
85 if (vacb->SharedCacheMap->Trace)
86 {
87 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
88 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
89 }
90
91 if (Refs == 0)
92 {
93 CcRosInternalFreeVacb(vacb);
94 }
95
96 return Refs;
97 }
98 ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
99 {
100 ULONG Refs;
101
102 Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
103 if (vacb->SharedCacheMap->Trace)
104 {
105 DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
106 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
107 }
108
109 return Refs;
110 }
111 #endif
112
113
114 /* FUNCTIONS *****************************************************************/
115
116 VOID
117 NTAPI
118 CcRosTraceCacheMap (
119 PROS_SHARED_CACHE_MAP SharedCacheMap,
120 BOOLEAN Trace )
121 {
122 #if DBG
123 KIRQL oldirql;
124 PLIST_ENTRY current_entry;
125 PROS_VACB current;
126
127 if (!SharedCacheMap)
128 return;
129
130 SharedCacheMap->Trace = Trace;
131
132 if (Trace)
133 {
134 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
135
136 oldirql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
137 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
138
139 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
140 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
141 {
142 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
143 current_entry = current_entry->Flink;
144
145 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
146 current, current->ReferenceCount, current->Dirty, current->PageOut );
147 }
148
149 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
150 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldirql);
151 }
152 else
153 {
154 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
155 }
156
157 #else
158 UNREFERENCED_PARAMETER(SharedCacheMap);
159 UNREFERENCED_PARAMETER(Trace);
160 #endif
161 }
162
163 NTSTATUS
164 NTAPI
165 CcRosFlushVacb (
166 PROS_VACB Vacb)
167 {
168 NTSTATUS Status;
169
170 CcRosUnmarkDirtyVacb(Vacb, TRUE);
171
172 Status = CcWriteVirtualAddress(Vacb);
173 if (!NT_SUCCESS(Status))
174 {
175 CcRosMarkDirtyVacb(Vacb);
176 }
177
178 return Status;
179 }
180
181 NTSTATUS
182 NTAPI
183 CcRosFlushDirtyPages (
184 ULONG Target,
185 PULONG Count,
186 BOOLEAN Wait,
187 BOOLEAN CalledFromLazy)
188 {
189 PLIST_ENTRY current_entry;
190 PROS_VACB current;
191 BOOLEAN Locked;
192 NTSTATUS Status;
193 KIRQL OldIrql;
194
195 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
196
197 (*Count) = 0;
198
199 KeEnterCriticalRegion();
200 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
201
202 current_entry = DirtyVacbListHead.Flink;
203 if (current_entry == &DirtyVacbListHead)
204 {
205 DPRINT("No Dirty pages\n");
206 }
207
208 while ((current_entry != &DirtyVacbListHead) && (Target > 0))
209 {
210 current = CONTAINING_RECORD(current_entry,
211 ROS_VACB,
212 DirtyVacbListEntry);
213 current_entry = current_entry->Flink;
214
215 CcRosVacbIncRefCount(current);
216
217 /* When performing lazy write, don't handle temporary files */
218 if (CalledFromLazy &&
219 BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
220 {
221 CcRosVacbDecRefCount(current);
222 continue;
223 }
224
225 Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
226 current->SharedCacheMap->LazyWriteContext, Wait);
227 if (!Locked)
228 {
229 CcRosVacbDecRefCount(current);
230 continue;
231 }
232
233 ASSERT(current->Dirty);
234
235 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
236
237 Status = CcRosFlushVacb(current);
238
239 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
240 current->SharedCacheMap->LazyWriteContext);
241
242 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
243 CcRosVacbDecRefCount(current);
244
245 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
246 (Status != STATUS_MEDIA_WRITE_PROTECTED))
247 {
248 DPRINT1("CC: Failed to flush VACB.\n");
249 }
250 else
251 {
252 ULONG PagesFreed;
253
254 /* How many pages did we free? */
255 PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
256 (*Count) += PagesFreed;
257
258 /* Make sure we don't overflow target! */
259 if (Target < PagesFreed)
260 {
261 /* If we would have, jump to zero directly */
262 Target = 0;
263 }
264 else
265 {
266 Target -= PagesFreed;
267 }
268 }
269
270 current_entry = DirtyVacbListHead.Flink;
271 }
272
273 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
274 KeLeaveCriticalRegion();
275
276 DPRINT("CcRosFlushDirtyPages() finished\n");
277 return STATUS_SUCCESS;
278 }
279
280 NTSTATUS
281 CcRosTrimCache (
282 ULONG Target,
283 ULONG Priority,
284 PULONG NrFreed)
285 /*
286 * FUNCTION: Try to free some memory from the file cache.
287 * ARGUMENTS:
288 * Target - The number of pages to be freed.
289 * Priority - The priority of free (currently unused).
290 * NrFreed - Points to a variable where the number of pages
291 * actually freed is returned.
292 */
293 {
294 PLIST_ENTRY current_entry;
295 PROS_VACB current;
296 ULONG PagesFreed;
297 KIRQL oldIrql;
298 LIST_ENTRY FreeList;
299 PFN_NUMBER Page;
300 ULONG i;
301 BOOLEAN FlushedPages = FALSE;
302
303 DPRINT("CcRosTrimCache(Target %lu)\n", Target);
304
305 InitializeListHead(&FreeList);
306
307 *NrFreed = 0;
308
309 retry:
310 oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
311
312 current_entry = VacbLruListHead.Flink;
313 while (current_entry != &VacbLruListHead)
314 {
315 ULONG Refs;
316
317 current = CONTAINING_RECORD(current_entry,
318 ROS_VACB,
319 VacbLruListEntry);
320 current_entry = current_entry->Flink;
321
322 KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
323
324 /* Reference the VACB */
325 CcRosVacbIncRefCount(current);
326
327 /* Check if it's mapped and not dirty */
328 if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
329 {
330 /* We have to break these locks because Cc sucks */
331 KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
332 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
333
334 /* Page out the VACB */
335 for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
336 {
337 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
338
339 MmPageOutPhysicalAddress(Page);
340 }
341
342 /* Reacquire the locks */
343 oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
344 KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
345 }
346
347 /* Dereference the VACB */
348 Refs = CcRosVacbDecRefCount(current);
349
350 /* Check if we can free this entry now */
351 if (Refs < 2)
352 {
353 ASSERT(!current->Dirty);
354 ASSERT(!current->MappedCount);
355 ASSERT(Refs == 1);
356
357 RemoveEntryList(&current->CacheMapVacbListEntry);
358 RemoveEntryList(&current->VacbLruListEntry);
359 InitializeListHead(&current->VacbLruListEntry);
360 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
361
362 /* Calculate how many pages we freed for Mm */
363 PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
364 Target -= PagesFreed;
365 (*NrFreed) += PagesFreed;
366 }
367
368 KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
369 }
370
371 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
372
373 /* Try flushing pages if we haven't met our target */
374 if ((Target > 0) && !FlushedPages)
375 {
376 /* Flush dirty pages to disk */
377 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
378 FlushedPages = TRUE;
379
380 /* We can only swap as many pages as we flushed */
381 if (PagesFreed < Target) Target = PagesFreed;
382
383 /* Check if we flushed anything */
384 if (PagesFreed != 0)
385 {
386 /* Try again after flushing dirty pages */
387 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
388 goto retry;
389 }
390 }
391
392 while (!IsListEmpty(&FreeList))
393 {
394 ULONG Refs;
395
396 current_entry = RemoveHeadList(&FreeList);
397 current = CONTAINING_RECORD(current_entry,
398 ROS_VACB,
399 CacheMapVacbListEntry);
400 InitializeListHead(&current->CacheMapVacbListEntry);
401 Refs = CcRosVacbDecRefCount(current);
402 ASSERT(Refs == 0);
403 }
404
405 DPRINT("Evicted %lu cache pages\n", (*NrFreed));
406
407 return STATUS_SUCCESS;
408 }
409
410 NTSTATUS
411 NTAPI
412 CcRosReleaseVacb (
413 PROS_SHARED_CACHE_MAP SharedCacheMap,
414 PROS_VACB Vacb,
415 BOOLEAN Valid,
416 BOOLEAN Dirty,
417 BOOLEAN Mapped)
418 {
419 ULONG Refs;
420 ASSERT(SharedCacheMap);
421
422 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
423 SharedCacheMap, Vacb, Valid);
424
425 Vacb->Valid = Valid;
426
427 if (Dirty && !Vacb->Dirty)
428 {
429 CcRosMarkDirtyVacb(Vacb);
430 }
431
432 if (Mapped)
433 {
434 if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
435 {
436 CcRosVacbIncRefCount(Vacb);
437 }
438 }
439
440 Refs = CcRosVacbDecRefCount(Vacb);
441 ASSERT(Refs > 0);
442
443 return STATUS_SUCCESS;
444 }
445
446 /* Returns with VACB Lock Held! */
447 PROS_VACB
448 NTAPI
449 CcRosLookupVacb (
450 PROS_SHARED_CACHE_MAP SharedCacheMap,
451 LONGLONG FileOffset)
452 {
453 PLIST_ENTRY current_entry;
454 PROS_VACB current;
455 KIRQL oldIrql;
456
457 ASSERT(SharedCacheMap);
458
459 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
460 SharedCacheMap, FileOffset);
461
462 oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
463 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
464
465 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
466 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
467 {
468 current = CONTAINING_RECORD(current_entry,
469 ROS_VACB,
470 CacheMapVacbListEntry);
471 if (IsPointInRange(current->FileOffset.QuadPart,
472 VACB_MAPPING_GRANULARITY,
473 FileOffset))
474 {
475 CcRosVacbIncRefCount(current);
476 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
477 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
478 return current;
479 }
480 if (current->FileOffset.QuadPart > FileOffset)
481 break;
482 current_entry = current_entry->Flink;
483 }
484
485 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
486 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
487
488 return NULL;
489 }
490
491 VOID
492 NTAPI
493 CcRosMarkDirtyVacb (
494 PROS_VACB Vacb)
495 {
496 KIRQL oldIrql;
497 PROS_SHARED_CACHE_MAP SharedCacheMap;
498
499 SharedCacheMap = Vacb->SharedCacheMap;
500
501 oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
502 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
503
504 ASSERT(!Vacb->Dirty);
505
506 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
507 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
508 Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
509 CcRosVacbIncRefCount(Vacb);
510
511 /* Move to the tail of the LRU list */
512 RemoveEntryList(&Vacb->VacbLruListEntry);
513 InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
514
515 Vacb->Dirty = TRUE;
516
517 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
518
519 /* Schedule a lazy writer run to now that we have dirty VACB */
520 if (!LazyWriter.ScanActive)
521 {
522 CcScheduleLazyWriteScan(FALSE);
523 }
524 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
525 }
526
527 VOID
528 NTAPI
529 CcRosUnmarkDirtyVacb (
530 PROS_VACB Vacb,
531 BOOLEAN LockViews)
532 {
533 KIRQL oldIrql;
534 PROS_SHARED_CACHE_MAP SharedCacheMap;
535
536 SharedCacheMap = Vacb->SharedCacheMap;
537
538 if (LockViews)
539 {
540 oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
541 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
542 }
543
544 ASSERT(Vacb->Dirty);
545
546 Vacb->Dirty = FALSE;
547
548 RemoveEntryList(&Vacb->DirtyVacbListEntry);
549 InitializeListHead(&Vacb->DirtyVacbListEntry);
550 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
551 Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
552 CcRosVacbDecRefCount(Vacb);
553
554 if (LockViews)
555 {
556 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
557 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
558 }
559 }
560
561 NTSTATUS
562 NTAPI
563 CcRosMarkDirtyFile (
564 PROS_SHARED_CACHE_MAP SharedCacheMap,
565 LONGLONG FileOffset)
566 {
567 PROS_VACB Vacb;
568
569 ASSERT(SharedCacheMap);
570
571 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
572 SharedCacheMap, FileOffset);
573
574 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
575 if (Vacb == NULL)
576 {
577 KeBugCheck(CACHE_MANAGER);
578 }
579
580 CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);
581
582 return STATUS_SUCCESS;
583 }
584
585 /*
586 * Note: this is not the contrary function of
587 * CcRosMapVacbInKernelSpace()
588 */
589 NTSTATUS
590 NTAPI
591 CcRosUnmapVacb (
592 PROS_SHARED_CACHE_MAP SharedCacheMap,
593 LONGLONG FileOffset,
594 BOOLEAN NowDirty)
595 {
596 PROS_VACB Vacb;
597
598 ASSERT(SharedCacheMap);
599
600 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
601 SharedCacheMap, FileOffset, NowDirty);
602
603 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
604 if (Vacb == NULL)
605 {
606 return STATUS_UNSUCCESSFUL;
607 }
608
609 ASSERT(Vacb->MappedCount != 0);
610 if (InterlockedDecrement((PLONG)&Vacb->MappedCount) == 0)
611 {
612 CcRosVacbDecRefCount(Vacb);
613 }
614
615 CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);
616
617 return STATUS_SUCCESS;
618 }
619
620 static
621 NTSTATUS
622 CcRosMapVacbInKernelSpace(
623 PROS_VACB Vacb)
624 {
625 ULONG i;
626 NTSTATUS Status;
627 ULONG_PTR NumberOfPages;
628 PVOID BaseAddress = NULL;
629
630 /* Create a memory area. */
631 MmLockAddressSpace(MmGetKernelAddressSpace());
632 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
633 0, // nothing checks for VACB mareas, so set to 0
634 &BaseAddress,
635 VACB_MAPPING_GRANULARITY,
636 PAGE_READWRITE,
637 (PMEMORY_AREA*)&Vacb->MemoryArea,
638 0,
639 PAGE_SIZE);
640 ASSERT(Vacb->BaseAddress == NULL);
641 Vacb->BaseAddress = BaseAddress;
642 MmUnlockAddressSpace(MmGetKernelAddressSpace());
643 if (!NT_SUCCESS(Status))
644 {
645 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
646 return Status;
647 }
648
649 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
650 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
651 ASSERT((ULONG_PTR)Vacb->BaseAddress + VACB_MAPPING_GRANULARITY - 1 > (ULONG_PTR)MmSystemRangeStart);
652
653 /* Create a virtual mapping for this memory area */
654 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
655 for (i = 0; i < NumberOfPages; i++)
656 {
657 PFN_NUMBER PageFrameNumber;
658
659 MI_SET_USAGE(MI_USAGE_CACHE);
660 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
661 if (PageFrameNumber == 0)
662 {
663 DPRINT1("Unable to allocate page\n");
664 KeBugCheck(MEMORY_MANAGEMENT);
665 }
666
667 ASSERT(BaseAddress == Vacb->BaseAddress);
668 ASSERT(i * PAGE_SIZE < VACB_MAPPING_GRANULARITY);
669 ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) >= (ULONG_PTR)BaseAddress);
670 ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) > (ULONG_PTR)MmSystemRangeStart);
671
672 Status = MmCreateVirtualMapping(NULL,
673 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
674 PAGE_READWRITE,
675 &PageFrameNumber,
676 1);
677 if (!NT_SUCCESS(Status))
678 {
679 DPRINT1("Unable to create virtual mapping\n");
680 KeBugCheck(MEMORY_MANAGEMENT);
681 }
682 }
683
684 return STATUS_SUCCESS;
685 }
686
687 static
688 BOOLEAN
689 CcRosFreeUnusedVacb (
690 PULONG Count)
691 {
692 ULONG cFreed;
693 BOOLEAN Freed;
694 KIRQL oldIrql;
695 PROS_VACB current;
696 LIST_ENTRY FreeList;
697 PLIST_ENTRY current_entry;
698
699 cFreed = 0;
700 Freed = FALSE;
701 InitializeListHead(&FreeList);
702
703 oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
704
705 /* Browse all the available VACB */
706 current_entry = VacbLruListHead.Flink;
707 while (current_entry != &VacbLruListHead)
708 {
709 ULONG Refs;
710
711 current = CONTAINING_RECORD(current_entry,
712 ROS_VACB,
713 VacbLruListEntry);
714 current_entry = current_entry->Flink;
715
716 KeAcquireSpinLockAtDpcLevel(&current->SharedCacheMap->CacheMapLock);
717
718 /* Only deal with unused VACB, we will free them */
719 Refs = CcRosVacbGetRefCount(current);
720 if (Refs < 2)
721 {
722 ASSERT(!current->Dirty);
723 ASSERT(!current->MappedCount);
724 ASSERT(Refs == 1);
725
726 /* Reset and move to free list */
727 RemoveEntryList(&current->CacheMapVacbListEntry);
728 RemoveEntryList(&current->VacbLruListEntry);
729 InitializeListHead(&current->VacbLruListEntry);
730 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
731 }
732
733 KeReleaseSpinLockFromDpcLevel(&current->SharedCacheMap->CacheMapLock);
734
735 }
736
737 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
738
739 /* And now, free any of the found VACB, that'll free memory! */
740 while (!IsListEmpty(&FreeList))
741 {
742 ULONG Refs;
743
744 current_entry = RemoveHeadList(&FreeList);
745 current = CONTAINING_RECORD(current_entry,
746 ROS_VACB,
747 CacheMapVacbListEntry);
748 InitializeListHead(&current->CacheMapVacbListEntry);
749 Refs = CcRosVacbDecRefCount(current);
750 ASSERT(Refs == 0);
751 ++cFreed;
752 }
753
754 /* If we freed at least one VACB, return success */
755 if (cFreed != 0)
756 {
757 Freed = TRUE;
758 }
759
760 /* If caller asked for free count, return it */
761 if (Count != NULL)
762 {
763 *Count = cFreed;
764 }
765
766 return Freed;
767 }
768
769 static
770 NTSTATUS
771 CcRosCreateVacb (
772 PROS_SHARED_CACHE_MAP SharedCacheMap,
773 LONGLONG FileOffset,
774 PROS_VACB *Vacb)
775 {
776 PROS_VACB current;
777 PROS_VACB previous;
778 PLIST_ENTRY current_entry;
779 NTSTATUS Status;
780 KIRQL oldIrql;
781 ULONG Refs;
782 BOOLEAN Retried;
783
784 ASSERT(SharedCacheMap);
785
786 DPRINT("CcRosCreateVacb()\n");
787
788 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
789 {
790 *Vacb = NULL;
791 return STATUS_INVALID_PARAMETER;
792 }
793
794 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
795 current->BaseAddress = NULL;
796 current->Valid = FALSE;
797 current->Dirty = FALSE;
798 current->PageOut = FALSE;
799 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
800 current->SharedCacheMap = SharedCacheMap;
801 #if DBG
802 if (SharedCacheMap->Trace)
803 {
804 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
805 }
806 #endif
807 current->MappedCount = 0;
808 current->ReferenceCount = 0;
809 InitializeListHead(&current->CacheMapVacbListEntry);
810 InitializeListHead(&current->DirtyVacbListEntry);
811 InitializeListHead(&current->VacbLruListEntry);
812
813 CcRosVacbIncRefCount(current);
814
815 Retried = FALSE;
816 Retry:
817 /* Map VACB in kernel space */
818 Status = CcRosMapVacbInKernelSpace(current);
819 if (!NT_SUCCESS(Status))
820 {
821 ULONG Freed;
822 /* If no space left, try to prune unused VACB
823 * to recover space to map our VACB
824 * If it succeed, retry to map, otherwise
825 * just fail.
826 */
827 if (!Retried && CcRosFreeUnusedVacb(&Freed))
828 {
829 DPRINT("Prunned %d VACB, trying again\n", Freed);
830 Retried = TRUE;
831 goto Retry;
832 }
833
834 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
835 return Status;
836 }
837
838 oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
839
840 *Vacb = current;
841 /* There is window between the call to CcRosLookupVacb
842 * and CcRosCreateVacb. We must check if a VACB for the
843 * file offset exist. If there is a VACB, we release
844 * our newly created VACB and return the existing one.
845 */
846 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
847 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
848 previous = NULL;
849 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
850 {
851 current = CONTAINING_RECORD(current_entry,
852 ROS_VACB,
853 CacheMapVacbListEntry);
854 if (IsPointInRange(current->FileOffset.QuadPart,
855 VACB_MAPPING_GRANULARITY,
856 FileOffset))
857 {
858 CcRosVacbIncRefCount(current);
859 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
860 #if DBG
861 if (SharedCacheMap->Trace)
862 {
863 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
864 SharedCacheMap,
865 (*Vacb),
866 current);
867 }
868 #endif
869 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
870
871 Refs = CcRosVacbDecRefCount(*Vacb);
872 ASSERT(Refs == 0);
873
874 *Vacb = current;
875 return STATUS_SUCCESS;
876 }
877 if (current->FileOffset.QuadPart < FileOffset)
878 {
879 ASSERT(previous == NULL ||
880 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
881 previous = current;
882 }
883 if (current->FileOffset.QuadPart > FileOffset)
884 break;
885 current_entry = current_entry->Flink;
886 }
887 /* There was no existing VACB. */
888 current = *Vacb;
889 if (previous)
890 {
891 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
892 }
893 else
894 {
895 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
896 }
897 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
898 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
899 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
900
901 MI_SET_USAGE(MI_USAGE_CACHE);
902 #if MI_TRACE_PFNS
903 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
904 {
905 PWCHAR pos;
906 ULONG len = 0;
907 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
908 if (pos)
909 {
910 len = wcslen(pos) * sizeof(WCHAR);
911 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
912 }
913 else
914 {
915 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
916 }
917 }
918 #endif
919
920 /* Reference it to allow release */
921 CcRosVacbIncRefCount(current);
922
923 return Status;
924 }
925
926 NTSTATUS
927 NTAPI
928 CcRosGetVacb (
929 PROS_SHARED_CACHE_MAP SharedCacheMap,
930 LONGLONG FileOffset,
931 PLONGLONG BaseOffset,
932 PVOID* BaseAddress,
933 PBOOLEAN UptoDate,
934 PROS_VACB *Vacb)
935 {
936 PROS_VACB current;
937 NTSTATUS Status;
938 ULONG Refs;
939 KIRQL OldIrql;
940
941 ASSERT(SharedCacheMap);
942
943 DPRINT("CcRosGetVacb()\n");
944
945 /*
946 * Look for a VACB already mapping the same data.
947 */
948 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
949 if (current == NULL)
950 {
951 /*
952 * Otherwise create a new VACB.
953 */
954 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
955 if (!NT_SUCCESS(Status))
956 {
957 return Status;
958 }
959 }
960
961 Refs = CcRosVacbGetRefCount(current);
962
963 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
964
965 /* Move to the tail of the LRU list */
966 RemoveEntryList(&current->VacbLruListEntry);
967 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
968
969 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
970
971 /*
972 * Return information about the VACB to the caller.
973 */
974 *UptoDate = current->Valid;
975 *BaseAddress = current->BaseAddress;
976 DPRINT("*BaseAddress %p\n", *BaseAddress);
977 *Vacb = current;
978 *BaseOffset = current->FileOffset.QuadPart;
979
980 ASSERT(Refs > 1);
981
982 return STATUS_SUCCESS;
983 }
984
985 NTSTATUS
986 NTAPI
987 CcRosRequestVacb (
988 PROS_SHARED_CACHE_MAP SharedCacheMap,
989 LONGLONG FileOffset,
990 PVOID* BaseAddress,
991 PBOOLEAN UptoDate,
992 PROS_VACB *Vacb)
993 /*
994 * FUNCTION: Request a page mapping for a shared cache map
995 */
996 {
997 LONGLONG BaseOffset;
998
999 ASSERT(SharedCacheMap);
1000
1001 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
1002 {
1003 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
1004 FileOffset, VACB_MAPPING_GRANULARITY);
1005 KeBugCheck(CACHE_MANAGER);
1006 }
1007
1008 return CcRosGetVacb(SharedCacheMap,
1009 FileOffset,
1010 &BaseOffset,
1011 BaseAddress,
1012 UptoDate,
1013 Vacb);
1014 }
1015
1016 static
1017 VOID
1018 CcFreeCachePage (
1019 PVOID Context,
1020 MEMORY_AREA* MemoryArea,
1021 PVOID Address,
1022 PFN_NUMBER Page,
1023 SWAPENTRY SwapEntry,
1024 BOOLEAN Dirty)
1025 {
1026 ASSERT(SwapEntry == 0);
1027 if (Page != 0)
1028 {
1029 ASSERT(MmGetReferenceCountPage(Page) == 1);
1030 MmReleasePageMemoryConsumer(MC_CACHE, Page);
1031 }
1032 }
1033
1034 NTSTATUS
1035 CcRosInternalFreeVacb (
1036 PROS_VACB Vacb)
1037 /*
1038 * FUNCTION: Releases a VACB associated with a shared cache map
1039 */
1040 {
1041 DPRINT("Freeing VACB 0x%p\n", Vacb);
1042 #if DBG
1043 if (Vacb->SharedCacheMap->Trace)
1044 {
1045 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
1046 }
1047 #endif
1048
1049 MmLockAddressSpace(MmGetKernelAddressSpace());
1050 MmFreeMemoryArea(MmGetKernelAddressSpace(),
1051 Vacb->MemoryArea,
1052 CcFreeCachePage,
1053 NULL);
1054 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1055
1056 if (Vacb->ReferenceCount != 0)
1057 {
1058 DPRINT1("Invalid free: %ld\n", Vacb->ReferenceCount);
1059 if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
1060 {
1061 DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
1062 }
1063 }
1064
1065 ASSERT(Vacb->ReferenceCount == 0);
1066 ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
1067 ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
1068 ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));
1069 RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
1070 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
1071 return STATUS_SUCCESS;
1072 }
1073
1074 /*
1075 * @implemented
1076 */
1077 VOID
1078 NTAPI
1079 CcFlushCache (
1080 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
1081 IN PLARGE_INTEGER FileOffset OPTIONAL,
1082 IN ULONG Length,
1083 OUT PIO_STATUS_BLOCK IoStatus)
1084 {
1085 PROS_SHARED_CACHE_MAP SharedCacheMap;
1086 LARGE_INTEGER Offset;
1087 LONGLONG RemainingLength;
1088 PROS_VACB current;
1089 NTSTATUS Status;
1090
1091 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1092 SectionObjectPointers, FileOffset, Length);
1093
1094 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1095 SectionObjectPointers, FileOffset, Length, IoStatus);
1096
1097 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1098 {
1099 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1100 ASSERT(SharedCacheMap);
1101 if (FileOffset)
1102 {
1103 Offset = *FileOffset;
1104 RemainingLength = Length;
1105 }
1106 else
1107 {
1108 Offset.QuadPart = 0;
1109 RemainingLength = SharedCacheMap->FileSize.QuadPart;
1110 }
1111
1112 if (IoStatus)
1113 {
1114 IoStatus->Status = STATUS_SUCCESS;
1115 IoStatus->Information = 0;
1116 }
1117
1118 while (RemainingLength > 0)
1119 {
1120 current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
1121 if (current != NULL)
1122 {
1123 if (current->Dirty)
1124 {
1125 Status = CcRosFlushVacb(current);
1126 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1127 {
1128 IoStatus->Status = Status;
1129 }
1130 }
1131
1132 CcRosReleaseVacb(SharedCacheMap, current, current->Valid, current->Dirty, FALSE);
1133 }
1134
1135 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
1136 RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
1137 }
1138 }
1139 else
1140 {
1141 if (IoStatus)
1142 {
1143 IoStatus->Status = STATUS_INVALID_PARAMETER;
1144 }
1145 }
1146 }
1147
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PKIRQL OldIrql)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 * ARGUMENTS:
 *     FileObject     - File whose cache map is being torn down.
 *     SharedCacheMap - The shared cache map to delete.
 *     OldIrql        - In/out IRQL for the master queued spinlock, which is
 *                      held on entry and held again on return; the lock is
 *                      dropped and re-acquired several times internally.
 * RETURNS: STATUS_SUCCESS (unconditionally).
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;

    ASSERT(SharedCacheMap);

    /* Pin the map with an extra open count so a concurrent caller cannot
     * delete it while we drop the master lock to flush dirty data. */
    SharedCacheMap->OpenCount++;
    KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    SharedCacheMap->OpenCount--;
    /* Only proceed with the teardown if nobody re-opened the map while the
     * master lock was dropped for the flush. */
    if (SharedCacheMap->OpenCount == 0)
    {
        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            /* Drop the cache-map lock while working on this VACB; it is
             * re-taken at the bottom of the loop before the next pop. */
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            /* Unlink from the global LRU list as well. */
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* CcRosUnmarkDirtyVacb is called with the cache-map lock
                 * held (second argument FALSE: do not lock views). */
                KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
                CcRosUnmarkDirtyVacb(current, FALSE);
                KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
                DPRINT1("Freeing dirty VACB\n");
            }
            if (current->MappedCount != 0)
            {
                /* Drop the reference the mapping held on this VACB; the
                 * count must stay positive, the final release happens via
                 * the FreeList pass below. */
                current->MappedCount = 0;
                NT_VERIFY(CcRosVacbDecRefCount(current) > 0);
                DPRINT1("Freeing mapped VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

        /* Release the final VACB references outside of any spinlock. */
        KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);
        ObDereferenceObject(SharedCacheMap->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            ULONG Refs;

            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            InitializeListHead(&current->CacheMapVacbListEntry);
            Refs = CcRosVacbDecRefCount(current);
#if DBG // CORE-14578
            /* Any reference left at this point is a leak: dump whatever
             * identifying information about the file we can find. */
            if (Refs != 0)
            {
                DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", current, FileObject, current->FileOffset.QuadPart);
                DPRINT1("There are: %d references left\n", Refs);
                DPRINT1("Map: %d\n", current->MappedCount);
                DPRINT1("Dirty: %d\n", current->Dirty);
                if (FileObject->FileName.Length != 0)
                {
                    DPRINT1("File was: %wZ\n", &FileObject->FileName);
                }
                /* NOTE(review): 0x0502/0x1F8 appear to match FastFAT's FCB
                 * node type/size, with the name at FCB offset 0x100 — confirm
                 * against the fastfat driver headers before relying on it. */
                else if (FileObject->FsContext != NULL &&
                         ((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->NodeTypeCode == 0x0502 &&
                         ((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->NodeByteSize == 0x1F8 &&
                         ((PUNICODE_STRING)(((PUCHAR)FileObject->FsContext) + 0x100))->Length != 0)
                {
                    DPRINT1("File was: %wZ (FastFAT)\n", (PUNICODE_STRING)(((PUCHAR)FileObject->FsContext) + 0x100));
                }
                else
                {
                    DPRINT1("No name for the file\n");
                }
            }
#else
            ASSERT(Refs == 0);
#endif
        }

        /* Unlink the map from the global list and free it; the master lock
         * is re-taken a final time to satisfy the caller's contract that it
         * is held on return. */
        *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, *OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        *OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    }
    return STATUS_SUCCESS;
}
1258
1259 VOID
1260 NTAPI
1261 CcRosReferenceCache (
1262 PFILE_OBJECT FileObject)
1263 {
1264 PROS_SHARED_CACHE_MAP SharedCacheMap;
1265 KIRQL OldIrql;
1266
1267 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1268 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1269 ASSERT(SharedCacheMap);
1270 ASSERT(SharedCacheMap->OpenCount != 0);
1271 SharedCacheMap->OpenCount++;
1272 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1273 }
1274
1275 VOID
1276 NTAPI
1277 CcRosRemoveIfClosed (
1278 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1279 {
1280 PROS_SHARED_CACHE_MAP SharedCacheMap;
1281 KIRQL OldIrql;
1282
1283 DPRINT("CcRosRemoveIfClosed()\n");
1284 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1285 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1286 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1287 {
1288 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap, &OldIrql);
1289 }
1290 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1291 }
1292
1293
VOID
NTAPI
CcRosDereferenceCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Drops one open reference on the shared cache map of the given
 * file object. When the count reaches zero, the backing section segments
 * are freed and the cache map itself is deleted.
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KIRQL OldIrql;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    if (SharedCacheMap->OpenCount > 0)
    {
        SharedCacheMap->OpenCount--;
        if (SharedCacheMap->OpenCount == 0)
        {
            /* The master lock must be dropped around
             * MmFreeSectionSegments. */
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            MmFreeSectionSegments(SharedCacheMap->FileObject);

            /* NOTE(review): another thread could re-open the map in this
             * unlocked window; CcRosDeleteFileCache re-checks OpenCount
             * itself before tearing anything down. */
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

            return;
        }
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
}
1322
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed. Tears down the per-handle private cache map and, if
 * this was the last opener, the shared cache map as well.
 * RETURNS: STATUS_SUCCESS (unconditionally).
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map.
         * Before you even try to remove it from the file object, always
         * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing!
         */
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;

        if (PrivateMap != NULL)
        {
            /* Remove it from the file */
            KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);

            /* And free it. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                /* The map embedded in the shared cache map is not freed;
                 * clearing the node type marks it as unused. */
                PrivateMap->NodeTypeCode = 0;
            }

            if (SharedCacheMap->OpenCount > 0)
            {
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
                    /* Last opener: drop the master lock around
                     * MmFreeSectionSegments, then delete the shared cache
                     * map under the re-acquired lock. */
                    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
                    MmFreeSectionSegments(SharedCacheMap->FileObject);

                    OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap, &OldIrql);
                    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

                    return STATUS_SUCCESS;
                }
            }
        }
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    return STATUS_SUCCESS;
}
1387
1388 NTSTATUS
1389 NTAPI
1390 CcRosInitializeFileCache (
1391 PFILE_OBJECT FileObject,
1392 PCC_FILE_SIZES FileSizes,
1393 BOOLEAN PinAccess,
1394 PCACHE_MANAGER_CALLBACKS CallBacks,
1395 PVOID LazyWriterContext)
1396 /*
1397 * FUNCTION: Initializes a shared cache map for a file object
1398 */
1399 {
1400 KIRQL OldIrql;
1401 BOOLEAN Allocated;
1402 PROS_SHARED_CACHE_MAP SharedCacheMap;
1403
1404 DPRINT("CcRosInitializeFileCache(FileObject 0x%p)\n", FileObject);
1405
1406 Allocated = FALSE;
1407 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1408 if (SharedCacheMap == NULL)
1409 {
1410 Allocated = TRUE;
1411 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1412 if (SharedCacheMap == NULL)
1413 {
1414 return STATUS_INSUFFICIENT_RESOURCES;
1415 }
1416 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1417 SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
1418 SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
1419 SharedCacheMap->FileObject = FileObject;
1420 SharedCacheMap->Callbacks = CallBacks;
1421 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1422 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1423 SharedCacheMap->FileSize = FileSizes->FileSize;
1424 SharedCacheMap->PinAccess = PinAccess;
1425 SharedCacheMap->DirtyPageThreshold = 0;
1426 SharedCacheMap->DirtyPages = 0;
1427 InitializeListHead(&SharedCacheMap->PrivateList);
1428 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1429 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1430 InitializeListHead(&SharedCacheMap->BcbList);
1431 }
1432
1433 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1434 if (Allocated)
1435 {
1436 if (FileObject->SectionObjectPointer->SharedCacheMap == NULL)
1437 {
1438 ObReferenceObjectByPointer(FileObject,
1439 FILE_ALL_ACCESS,
1440 NULL,
1441 KernelMode);
1442 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1443
1444 InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
1445 }
1446 else
1447 {
1448 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1449 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1450 }
1451 }
1452 if (FileObject->PrivateCacheMap == NULL)
1453 {
1454 PPRIVATE_CACHE_MAP PrivateMap;
1455
1456 /* Allocate the private cache map for this handle */
1457 if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
1458 {
1459 PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
1460 }
1461 else
1462 {
1463 PrivateMap = &SharedCacheMap->PrivateCacheMap;
1464 }
1465
1466 if (PrivateMap == NULL)
1467 {
1468 /* If we also allocated the shared cache map for this file, kill it */
1469 if (Allocated)
1470 {
1471 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
1472
1473 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1474 ObDereferenceObject(FileObject);
1475 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1476 }
1477
1478 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1479 return STATUS_INSUFFICIENT_RESOURCES;
1480 }
1481
1482 /* Initialize it */
1483 RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
1484 PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
1485 PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
1486 PrivateMap->FileObject = FileObject;
1487 KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);
1488
1489 /* Link it to the file */
1490 KeAcquireSpinLockAtDpcLevel(&SharedCacheMap->CacheMapLock);
1491 InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
1492 KeReleaseSpinLockFromDpcLevel(&SharedCacheMap->CacheMapLock);
1493
1494 FileObject->PrivateCacheMap = PrivateMap;
1495 SharedCacheMap->OpenCount++;
1496 }
1497 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1498
1499 return STATUS_SUCCESS;
1500 }
1501
1502 /*
1503 * @implemented
1504 */
1505 PFILE_OBJECT
1506 NTAPI
1507 CcGetFileObjectFromSectionPtrs (
1508 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1509 {
1510 PROS_SHARED_CACHE_MAP SharedCacheMap;
1511
1512 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1513
1514 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1515 {
1516 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1517 ASSERT(SharedCacheMap);
1518 return SharedCacheMap->FileObject;
1519 }
1520 return NULL;
1521 }
1522
1523 VOID
1524 INIT_FUNCTION
1525 NTAPI
1526 CcInitView (
1527 VOID)
1528 {
1529 DPRINT("CcInitView()\n");
1530
1531 InitializeListHead(&DirtyVacbListHead);
1532 InitializeListHead(&VacbLruListHead);
1533 InitializeListHead(&CcDeferredWrites);
1534 InitializeListHead(&CcCleanSharedCacheMapList);
1535 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1536 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1537 NULL,
1538 NULL,
1539 0,
1540 sizeof(INTERNAL_BCB),
1541 TAG_BCB,
1542 20);
1543 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1544 NULL,
1545 NULL,
1546 0,
1547 sizeof(ROS_SHARED_CACHE_MAP),
1548 TAG_SHARED_CACHE_MAP,
1549 20);
1550 ExInitializeNPagedLookasideList(&VacbLookasideList,
1551 NULL,
1552 NULL,
1553 0,
1554 sizeof(ROS_VACB),
1555 TAG_VACB,
1556 20);
1557
1558 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1559
1560 CcInitCacheZeroPage();
1561 }
1562
1563 #if DBG && defined(KDBG)
1564 BOOLEAN
1565 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1566 {
1567 PLIST_ENTRY ListEntry;
1568 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1569
1570 KdbpPrint(" Usage Summary (in kb)\n");
1571 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1572 /* No need to lock the spin lock here, we're in DBG */
1573 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1574 ListEntry != &CcCleanSharedCacheMapList;
1575 ListEntry = ListEntry->Flink)
1576 {
1577 PLIST_ENTRY Vacbs;
1578 ULONG Valid = 0, Dirty = 0;
1579 PROS_SHARED_CACHE_MAP SharedCacheMap;
1580 PUNICODE_STRING FileName;
1581 PWSTR Extra = L"";
1582
1583 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1584
1585 /* Dirty size */
1586 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1587
1588 /* First, count for all the associated VACB */
1589 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1590 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1591 Vacbs = Vacbs->Flink)
1592 {
1593 PROS_VACB Vacb;
1594
1595 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1596 if (Vacb->Valid)
1597 {
1598 Valid += VACB_MAPPING_GRANULARITY / 1024;
1599 }
1600 }
1601
1602 /* Setup name */
1603 if (SharedCacheMap->FileObject != NULL &&
1604 SharedCacheMap->FileObject->FileName.Length != 0)
1605 {
1606 FileName = &SharedCacheMap->FileObject->FileName;
1607 }
1608 else if (SharedCacheMap->FileObject != NULL &&
1609 SharedCacheMap->FileObject->FsContext != NULL &&
1610 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
1611 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
1612 ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
1613 {
1614 FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
1615 Extra = L" (FastFAT)";
1616 }
1617 else
1618 {
1619 FileName = &NoName;
1620 }
1621
1622 /* And print */
1623 KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap, Valid, Dirty, FileName, Extra);
1624 }
1625
1626 return TRUE;
1627 }
1628
BOOLEAN
ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
/*
 * FUNCTION: KDBG extension that prints the deferred-write/throttling state:
 * dirty page counters, the dirty page threshold, memory manager throttle
 * limits, and a verdict on whether writes would currently be throttled.
 * Argc/Argv are required by the extension interface but unused.
 * RETURNS: TRUE (always handled).
 */
{
    KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
              (CcTotalDirtyPages * PAGE_SIZE) / 1024);
    KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
              (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
    KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
              (MmAvailablePages * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
              (MmThrottleTop * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
              (MmThrottleBottom * PAGE_SIZE) / 1024);
    KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
              (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);

    /* The 64-page margin mirrors the maximum pages a single deferred write
     * can charge against the threshold. */
    if (CcTotalDirtyPages >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
    }
    else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
    }
    else
    {
        KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
    }

    return TRUE;
}
1660 #endif
1661
1662 /* EOF */