[NTOSKRNL] Unmark dirty first, and then write.
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
20 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
54 /* Internal vars (MS):
55 * - Threshold above which lazy writer will start action
56 * - Amount of dirty pages
57 * - List for deferred writes
58 * - Spinlock when dealing with the deferred list
59 * - List for "clean" shared cache maps
60 */
61 ULONG CcDirtyPageThreshold = 0;
62 ULONG CcTotalDirtyPages = 0;
63 LIST_ENTRY CcDeferredWrites;
64 KSPIN_LOCK CcDeferredWriteSpinLock;
65 LIST_ENTRY CcCleanSharedCacheMapList;
66
67 #if DBG
68 ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
69 {
70 ULONG Refs;
71
72 Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
73 if (vacb->SharedCacheMap->Trace)
74 {
75 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
76 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
77 }
78
79 return Refs;
80 }
/* Debug build: atomically drop one VACB reference, emitting a trace line
 * when tracing is enabled. Frees the VACB when the count reaches zero.
 * Returns the new reference count. */
ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
    /* A dirty VACB holds a reference on itself (see CcRosMarkDirtyVacb),
     * so the count must never reach zero while it is still dirty. */
    ASSERT(!(Refs == 0 && vacb->Dirty));
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    /* Last reference gone: release the view and its backing pages. */
    if (Refs == 0)
    {
        CcRosInternalFreeVacb(vacb);
    }

    return Refs;
}
100 ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
101 {
102 ULONG Refs;
103
104 Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
105 if (vacb->SharedCacheMap->Trace)
106 {
107 DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
108 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
109 }
110
111 return Refs;
112 }
113 #endif
114
115
116 /* FUNCTIONS *****************************************************************/
117
/* Enable or disable per-VACB debug tracing for a shared cache map.
 * When enabling, also dumps the current state of every VACB on the map.
 * Compiled to a no-op on non-DBG builds. */
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        /* Lock order used throughout this file: ViewLock first,
         * then the per-map CacheMapLock spinlock. */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
163
164 NTSTATUS
165 NTAPI
166 CcRosFlushVacb (
167 PROS_VACB Vacb)
168 {
169 NTSTATUS Status;
170
171 CcRosUnmarkDirtyVacb(Vacb, TRUE);
172
173 Status = CcWriteVirtualAddress(Vacb);
174 if (!NT_SUCCESS(Status))
175 {
176 CcRosMarkDirtyVacb(Vacb);
177 }
178
179 return Status;
180 }
181
/* Walk the global dirty-VACB list and write views back to disk until
 * roughly Target pages have been flushed.
 *
 * Target         - number of dirty pages to try to flush.
 * Count          - receives the number of pages actually flushed.
 * Wait           - whether AcquireForLazyWrite may block.
 * CalledFromLazy - TRUE when invoked by the lazy writer (temporary
 *                  files are then skipped).
 */
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Keep the VACB alive while we work on it. */
        CcRosVacbIncRefCount(current);

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above */
        Refs = CcRosVacbGetRefCount(current);
        if ((Refs > 3 && current->PinCount == 0) ||
            (Refs > 4 && current->PinCount > 1))
        {
            /* The VACB is busy elsewhere: skip it for this pass. */
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Drop ViewLock across the actual I/O; our reference keeps the
         * VACB valid meanwhile. */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            (*Count) += PagesFreed;

            /* Make sure we don't overflow target! */
            if (Target < PagesFreed)
            {
                /* If we would have, jump to zero directly */
                Target = 0;
            }
            else
            {
                Target -= PagesFreed;
            }
        }

        /* The dirty list may have changed while ViewLock was dropped,
         * so restart the walk from the head. */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
292
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 * Target - The number of pages to be freed.
 * Priority - The priority of free (currently unused).
 * NrFreed - Points to a variable where the number of pages
 * actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    /* Candidates for release are collected here and freed at the end,
     * outside of ViewLock. */
    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Scan in LRU order: least-recently-used views sit at the head. */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (InterlockedCompareExchange((PLONG)&current->MappedCount, 0, 0) > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        Refs = CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            /* Unlink from the cache map and the LRU; the remaining
             * reference is dropped in the FreeList loop below. */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Drop the last reference on each collected VACB, freeing it. */
    while (!IsListEmpty(&FreeList))
    {
        ULONG Refs;

        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        InitializeListHead(&current->CacheMapVacbListEntry);
        Refs = CcRosVacbDecRefCount(current);
        ASSERT(Refs == 0);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
422
423 NTSTATUS
424 NTAPI
425 CcRosReleaseVacb (
426 PROS_SHARED_CACHE_MAP SharedCacheMap,
427 PROS_VACB Vacb,
428 BOOLEAN Valid,
429 BOOLEAN Dirty,
430 BOOLEAN Mapped)
431 {
432 ULONG Refs;
433 ASSERT(SharedCacheMap);
434
435 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
436 SharedCacheMap, Vacb, Valid);
437
438 Vacb->Valid = Valid;
439
440 if (Dirty && !Vacb->Dirty)
441 {
442 CcRosMarkDirtyVacb(Vacb);
443 }
444
445 if (Mapped)
446 {
447 if (InterlockedIncrement((PLONG)&Vacb->MappedCount) == 1)
448 {
449 CcRosVacbIncRefCount(Vacb);
450 }
451 }
452
453 Refs = CcRosVacbDecRefCount(Vacb);
454 ASSERT(Refs > 0);
455
456 return STATUS_SUCCESS;
457 }
458
/* Find the VACB of SharedCacheMap whose view covers FileOffset.
 * On success the VACB is returned with an extra reference added for the
 * caller (no locks are held on return); returns NULL when no view
 * covers the offset. */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            return current;
        }
        /* The list is kept sorted by file offset (see CcRosCreateVacb),
         * so we can stop once we have walked past the target. */
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}
503
/* Put Vacb on the global dirty list and account its pages as dirty.
 * The VACB takes a reference on itself while dirty (dropped again by
 * CcRosUnmarkDirtyVacb). Must not be called on an already-dirty VACB.
 * Also kicks off a lazy-writer scan if none is active. */
VOID
NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* Dirty views pin themselves so they cannot be freed before flush. */
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}
541
/* Remove Vacb from the global dirty list, undo the dirty-page
 * accounting, and drop the self-reference taken by CcRosMarkDirtyVacb.
 * LockViews selects whether ViewLock and the cache-map spinlock are
 * acquired here (pass FALSE when the caller already holds them). */
VOID
NTAPI
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    InitializeListHead(&Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* Drop the reference the dirty state was holding. */
    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }
}
575
576 NTSTATUS
577 NTAPI
578 CcRosMarkDirtyFile (
579 PROS_SHARED_CACHE_MAP SharedCacheMap,
580 LONGLONG FileOffset)
581 {
582 PROS_VACB Vacb;
583
584 ASSERT(SharedCacheMap);
585
586 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
587 SharedCacheMap, FileOffset);
588
589 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
590 if (Vacb == NULL)
591 {
592 KeBugCheck(CACHE_MANAGER);
593 }
594
595 CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);
596
597 return STATUS_SUCCESS;
598 }
599
/*
 * Note: this is not the contrary function of
 * CcRosMapVacbInKernelSpace()
 */
/* Drop one user mapping of the view covering FileOffset; when the last
 * mapping goes away, the extra reference taken on the first mapping is
 * released as well. NowDirty marks the view dirty on release. Returns
 * STATUS_UNSUCCESSFUL when no view covers the offset. */
NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    BOOLEAN NowDirty)
{
    PROS_VACB Vacb;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    ASSERT(Vacb->MappedCount != 0);
    if (InterlockedDecrement((PLONG)&Vacb->MappedCount) == 0)
    {
        /* Last mapping gone: drop the mapping reference. */
        CcRosVacbDecRefCount(Vacb);
    }

    /* Also drops the reference added by the lookup above. */
    CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);

    return STATUS_SUCCESS;
}
634
635 static
636 NTSTATUS
637 CcRosMapVacbInKernelSpace(
638 PROS_VACB Vacb)
639 {
640 ULONG i;
641 NTSTATUS Status;
642 ULONG_PTR NumberOfPages;
643 PVOID BaseAddress = NULL;
644
645 /* Create a memory area. */
646 MmLockAddressSpace(MmGetKernelAddressSpace());
647 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
648 0, // nothing checks for VACB mareas, so set to 0
649 &BaseAddress,
650 VACB_MAPPING_GRANULARITY,
651 PAGE_READWRITE,
652 (PMEMORY_AREA*)&Vacb->MemoryArea,
653 0,
654 PAGE_SIZE);
655 ASSERT(Vacb->BaseAddress == NULL);
656 Vacb->BaseAddress = BaseAddress;
657 MmUnlockAddressSpace(MmGetKernelAddressSpace());
658 if (!NT_SUCCESS(Status))
659 {
660 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
661 return Status;
662 }
663
664 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
665 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
666 ASSERT((ULONG_PTR)Vacb->BaseAddress + VACB_MAPPING_GRANULARITY - 1 > (ULONG_PTR)MmSystemRangeStart);
667
668 /* Create a virtual mapping for this memory area */
669 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
670 for (i = 0; i < NumberOfPages; i++)
671 {
672 PFN_NUMBER PageFrameNumber;
673
674 MI_SET_USAGE(MI_USAGE_CACHE);
675 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
676 if (PageFrameNumber == 0)
677 {
678 DPRINT1("Unable to allocate page\n");
679 KeBugCheck(MEMORY_MANAGEMENT);
680 }
681
682 ASSERT(BaseAddress == Vacb->BaseAddress);
683 ASSERT(i * PAGE_SIZE < VACB_MAPPING_GRANULARITY);
684 ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) >= (ULONG_PTR)BaseAddress);
685 ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) > (ULONG_PTR)MmSystemRangeStart);
686
687 Status = MmCreateVirtualMapping(NULL,
688 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
689 PAGE_READWRITE,
690 &PageFrameNumber,
691 1);
692 if (!NT_SUCCESS(Status))
693 {
694 DPRINT1("Unable to create virtual mapping\n");
695 KeBugCheck(MEMORY_MANAGEMENT);
696 }
697 }
698
699 return STATUS_SUCCESS;
700 }
701
702 static
703 NTSTATUS
704 CcRosCreateVacb (
705 PROS_SHARED_CACHE_MAP SharedCacheMap,
706 LONGLONG FileOffset,
707 PROS_VACB *Vacb)
708 {
709 PROS_VACB current;
710 PROS_VACB previous;
711 PLIST_ENTRY current_entry;
712 NTSTATUS Status;
713 KIRQL oldIrql;
714 ULONG Refs;
715
716 ASSERT(SharedCacheMap);
717
718 DPRINT("CcRosCreateVacb()\n");
719
720 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
721 {
722 *Vacb = NULL;
723 return STATUS_INVALID_PARAMETER;
724 }
725
726 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
727 current->BaseAddress = NULL;
728 current->Valid = FALSE;
729 current->Dirty = FALSE;
730 current->PageOut = FALSE;
731 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
732 current->SharedCacheMap = SharedCacheMap;
733 #if DBG
734 if (SharedCacheMap->Trace)
735 {
736 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
737 }
738 #endif
739 current->MappedCount = 0;
740 current->ReferenceCount = 0;
741 current->PinCount = 0;
742 InitializeListHead(&current->CacheMapVacbListEntry);
743 InitializeListHead(&current->DirtyVacbListEntry);
744 InitializeListHead(&current->VacbLruListEntry);
745
746 CcRosVacbIncRefCount(current);
747
748 Status = CcRosMapVacbInKernelSpace(current);
749 if (!NT_SUCCESS(Status))
750 {
751 CcRosVacbDecRefCount(current);
752 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
753 return Status;
754 }
755
756 KeAcquireGuardedMutex(&ViewLock);
757
758 *Vacb = current;
759 /* There is window between the call to CcRosLookupVacb
760 * and CcRosCreateVacb. We must check if a VACB for the
761 * file offset exist. If there is a VACB, we release
762 * our newly created VACB and return the existing one.
763 */
764 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
765 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
766 previous = NULL;
767 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
768 {
769 current = CONTAINING_RECORD(current_entry,
770 ROS_VACB,
771 CacheMapVacbListEntry);
772 if (IsPointInRange(current->FileOffset.QuadPart,
773 VACB_MAPPING_GRANULARITY,
774 FileOffset))
775 {
776 CcRosVacbIncRefCount(current);
777 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
778 #if DBG
779 if (SharedCacheMap->Trace)
780 {
781 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
782 SharedCacheMap,
783 (*Vacb),
784 current);
785 }
786 #endif
787 KeReleaseGuardedMutex(&ViewLock);
788
789 Refs = CcRosVacbDecRefCount(*Vacb);
790 ASSERT(Refs == 0);
791
792 *Vacb = current;
793 return STATUS_SUCCESS;
794 }
795 if (current->FileOffset.QuadPart < FileOffset)
796 {
797 ASSERT(previous == NULL ||
798 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
799 previous = current;
800 }
801 if (current->FileOffset.QuadPart > FileOffset)
802 break;
803 current_entry = current_entry->Flink;
804 }
805 /* There was no existing VACB. */
806 current = *Vacb;
807 if (previous)
808 {
809 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
810 }
811 else
812 {
813 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
814 }
815 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
816 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
817 KeReleaseGuardedMutex(&ViewLock);
818
819 MI_SET_USAGE(MI_USAGE_CACHE);
820 #if MI_TRACE_PFNS
821 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
822 {
823 PWCHAR pos;
824 ULONG len = 0;
825 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
826 if (pos)
827 {
828 len = wcslen(pos) * sizeof(WCHAR);
829 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
830 }
831 else
832 {
833 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
834 }
835 }
836 #endif
837
838 /* Reference it to allow release */
839 CcRosVacbIncRefCount(current);
840
841 return Status;
842 }
843
/* Return the VACB covering FileOffset, creating one if none exists.
 * The returned VACB carries a reference for the caller. BaseOffset,
 * BaseAddress, UptoDate and Vacb receive the view's description. */
NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PLONGLONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;
    ULONG Refs;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    Refs = CcRosVacbGetRefCount(current);

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the VACB to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *Vacb = current;
    *BaseOffset = current->FileOffset.QuadPart;

    /* The caller's reference plus at least the creation reference. */
    ASSERT(Refs > 1);

    return STATUS_SUCCESS;
}
901
902 NTSTATUS
903 NTAPI
904 CcRosRequestVacb (
905 PROS_SHARED_CACHE_MAP SharedCacheMap,
906 LONGLONG FileOffset,
907 PVOID* BaseAddress,
908 PBOOLEAN UptoDate,
909 PROS_VACB *Vacb)
910 /*
911 * FUNCTION: Request a page mapping for a shared cache map
912 */
913 {
914 LONGLONG BaseOffset;
915
916 ASSERT(SharedCacheMap);
917
918 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
919 {
920 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
921 FileOffset, VACB_MAPPING_GRANULARITY);
922 KeBugCheck(CACHE_MANAGER);
923 }
924
925 return CcRosGetVacb(SharedCacheMap,
926 FileOffset,
927 &BaseOffset,
928 BaseAddress,
929 UptoDate,
930 Vacb);
931 }
932
933 static
934 VOID
935 CcFreeCachePage (
936 PVOID Context,
937 MEMORY_AREA* MemoryArea,
938 PVOID Address,
939 PFN_NUMBER Page,
940 SWAPENTRY SwapEntry,
941 BOOLEAN Dirty)
942 {
943 ASSERT(SwapEntry == 0);
944 if (Page != 0)
945 {
946 ASSERT(MmGetReferenceCountPage(Page) == 1);
947 MmReleasePageMemoryConsumer(MC_CACHE, Page);
948 }
949 }
950
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    /* Tear down the kernel mapping; CcFreeCachePage returns each backing
     * page to the cache memory consumer. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    /* Diagnose callers that free a VACB still in use before asserting. */
    if (Vacb->PinCount != 0 || Vacb->ReferenceCount != 0)
    {
        DPRINT1("Invalid free: %ld, %ld\n", Vacb->ReferenceCount, Vacb->PinCount);
        if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
        {
            DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
        }
    }

    ASSERT(Vacb->PinCount == 0);
    ASSERT(Vacb->ReferenceCount == 0);
    ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
    ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));
    /* Poison the structure to catch use-after-free in debug runs. */
    RtlFillMemory(Vacb, sizeof(*Vacb), 0xfd);
    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
991
/*
 * @implemented
 */
/* Flush the cached dirty views of a file to disk.
 * FileOffset/Length select the byte range (whole file when FileOffset is
 * NULL). IoStatus, when supplied, receives the last failure status or
 * STATUS_SUCCESS; STATUS_INVALID_PARAMETER when no cache map exists. */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
        SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            /* No range given: flush the whole file. */
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one VACB-sized view at a time. */
        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                /* Drop the lookup reference without changing state. */
                CcRosReleaseVacb(SharedCacheMap, current, current->Valid, current->Dirty, FALSE);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1065
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    /* Called with ViewLock held. Pin the map with an open reference while
     * ViewLock is dropped for the flush below. */
    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        KIRQL OldIrql;

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            /* The spinlock is dropped per entry; the list-manipulation
             * below touches only this (already unlinked) VACB. */
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* Drop the dirty self-reference; the data is lost. */
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosUnmarkDirtyVacb(current, FALSE);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                DPRINT1("Freeing dirty VACB\n");
            }
            if (current->MappedCount != 0)
            {
                /* Drop the mapping reference as well. */
                current->MappedCount = 0;
                NT_VERIFY(CcRosVacbDecRefCount(current) > 0);
                DPRINT1("Freeing mapped VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            ULONG Refs;

            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            InitializeListHead(&current->CacheMapVacbListEntry);
            Refs = CcRosVacbDecRefCount(current);
#if DBG // CORE-14578
            /* On debug builds, report leaked VACBs instead of asserting,
             * with a best-effort file name (including a FastFAT FCB
             * layout probe when the file object carries no name). */
            if (Refs != 0)
            {
                DPRINT1("Leaking VACB %p attached to %p (%I64d)\n", current, FileObject, current->FileOffset.QuadPart);
                DPRINT1("There are: %d references left\n", Refs);
                DPRINT1("Pin: %d, Map: %d\n", current->PinCount, current->MappedCount);
                DPRINT1("Dirty: %d\n", current->Dirty);
                if (FileObject->FileName.Length != 0)
                {
                    DPRINT1("File was: %wZ\n", &FileObject->FileName);
                }
                else if (FileObject->FsContext != NULL &&
                         ((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->NodeTypeCode == 0x0502 &&
                         ((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->NodeByteSize == 0x1F8 &&
                         ((PUNICODE_STRING)(((PUCHAR)FileObject->FsContext) + 0x100))->Length != 0)
                {
                    DPRINT1("File was: %wZ (FastFAT)\n", (PUNICODE_STRING)(((PUCHAR)FileObject->FsContext) + 0x100));
                }
                else
                {
                    DPRINT1("No name for the file\n");
                }
            }
#else
            ASSERT(Refs == 0);
#endif
        }

        /* Unlink the map from the global shared-cache-map list. */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        /* Re-acquire ViewLock: the caller expects it held on return. */
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1178
/* Take an additional open reference on the shared cache map of
 * FileObject. The map must already exist and have a non-zero open
 * count. */
VOID
NTAPI
CcRosReferenceCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    ASSERT(SharedCacheMap->OpenCount != 0);
    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);
}
1192
1193 VOID
1194 NTAPI
1195 CcRosRemoveIfClosed (
1196 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1197 {
1198 PROS_SHARED_CACHE_MAP SharedCacheMap;
1199 DPRINT("CcRosRemoveIfClosed()\n");
1200 KeAcquireGuardedMutex(&ViewLock);
1201 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1202 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1203 {
1204 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1205 }
1206 KeReleaseGuardedMutex(&ViewLock);
1207 }
1208
1209
1210 VOID
1211 NTAPI
1212 CcRosDereferenceCache (
1213 PFILE_OBJECT FileObject)
1214 {
1215 PROS_SHARED_CACHE_MAP SharedCacheMap;
1216 KeAcquireGuardedMutex(&ViewLock);
1217 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1218 ASSERT(SharedCacheMap);
1219 if (SharedCacheMap->OpenCount > 0)
1220 {
1221 SharedCacheMap->OpenCount--;
1222 if (SharedCacheMap->OpenCount == 0)
1223 {
1224 MmFreeSectionSegments(SharedCacheMap->FileObject);
1225 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1226 }
1227 }
1228 KeReleaseGuardedMutex(&ViewLock);
1229 }
1230
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed. Detaches and frees the handle's private cache map,
 * and deletes the shared cache map when the last handle goes away.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map
         * Before you even try to remove it from FO, always
         * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing!
         */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        if (PrivateMap != NULL)
        {
            /* Remove it from the file (CacheMapLock serializes the
             * shared map's private map list). */
            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

            /* And free it.
             * One private map is embedded in the shared cache map and
             * must not be returned to pool; clearing its node type
             * marks the embedded slot as free for reuse instead. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            /* This handle owned one open reference on the shared map:
             * drop it, and tear the cache down on the last close. */
            if (SharedCacheMap->OpenCount > 0)
            {
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}
1291
NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object.
 * Creates the shared cache map on first use of the file, and a private
 * cache map for this handle if it does not have one yet.
 * RETURNS: STATUS_SUCCESS, or STATUS_INSUFFICIENT_RESOURCES if either
 * allocation fails (all partial state is rolled back on failure).
 */
{
    KIRQL OldIrql;
    BOOLEAN Allocated;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    Allocated = FALSE;
    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        /* First caller for this file: build the shared cache map and
         * keep a reference on the file object for its lifetime. */
        Allocated = TRUE;
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        /* NOTE(review): status is ignored here; presumably a kernel-mode
         * reference on a live file object cannot fail — confirm. */
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
        SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        InitializeListHead(&SharedCacheMap->PrivateList);
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        /* A fresh cache map has no dirty pages: it goes on the clean
         * list (the master lock guards the shared cache map lists). */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }
    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle.
         * The shared cache map embeds one private map; use it when free
         * (NodeTypeCode == 0), otherwise fall back to pool. */
        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
        {
            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        }
        else
        {
            PrivateMap = &SharedCacheMap->PrivateCacheMap;
        }

        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
                RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
                KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file (CacheMapLock serializes the private map
         * list against concurrent read ahead). */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

        FileObject->PrivateCacheMap = PrivateMap;
        SharedCacheMap->OpenCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}
1398
1399 /*
1400 * @implemented
1401 */
1402 PFILE_OBJECT
1403 NTAPI
1404 CcGetFileObjectFromSectionPtrs (
1405 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1406 {
1407 PROS_SHARED_CACHE_MAP SharedCacheMap;
1408
1409 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1410
1411 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1412 {
1413 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1414 ASSERT(SharedCacheMap);
1415 return SharedCacheMap->FileObject;
1416 }
1417 return NULL;
1418 }
1419
1420 VOID
1421 INIT_FUNCTION
1422 NTAPI
1423 CcInitView (
1424 VOID)
1425 {
1426 DPRINT("CcInitView()\n");
1427
1428 InitializeListHead(&DirtyVacbListHead);
1429 InitializeListHead(&VacbLruListHead);
1430 InitializeListHead(&CcDeferredWrites);
1431 InitializeListHead(&CcCleanSharedCacheMapList);
1432 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1433 KeInitializeGuardedMutex(&ViewLock);
1434 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1435 NULL,
1436 NULL,
1437 0,
1438 sizeof(INTERNAL_BCB),
1439 TAG_BCB,
1440 20);
1441 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1442 NULL,
1443 NULL,
1444 0,
1445 sizeof(ROS_SHARED_CACHE_MAP),
1446 TAG_SHARED_CACHE_MAP,
1447 20);
1448 ExInitializeNPagedLookasideList(&VacbLookasideList,
1449 NULL,
1450 NULL,
1451 0,
1452 sizeof(ROS_VACB),
1453 TAG_VACB,
1454 20);
1455
1456 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1457
1458 CcInitCacheZeroPage();
1459 }
1460
1461 #if DBG && defined(KDBG)
1462 BOOLEAN
1463 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1464 {
1465 PLIST_ENTRY ListEntry;
1466 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1467
1468 KdbpPrint(" Usage Summary (in kb)\n");
1469 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1470 /* No need to lock the spin lock here, we're in DBG */
1471 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1472 ListEntry != &CcCleanSharedCacheMapList;
1473 ListEntry = ListEntry->Flink)
1474 {
1475 PLIST_ENTRY Vacbs;
1476 ULONG Valid = 0, Dirty = 0;
1477 PROS_SHARED_CACHE_MAP SharedCacheMap;
1478 PUNICODE_STRING FileName;
1479 PWSTR Extra = L"";
1480
1481 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1482
1483 /* Dirty size */
1484 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1485
1486 /* First, count for all the associated VACB */
1487 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1488 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1489 Vacbs = Vacbs->Flink)
1490 {
1491 PROS_VACB Vacb;
1492
1493 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1494 if (Vacb->Valid)
1495 {
1496 Valid += VACB_MAPPING_GRANULARITY / 1024;
1497 }
1498 }
1499
1500 /* Setup name */
1501 if (SharedCacheMap->FileObject != NULL &&
1502 SharedCacheMap->FileObject->FileName.Length != 0)
1503 {
1504 FileName = &SharedCacheMap->FileObject->FileName;
1505 }
1506 else if (SharedCacheMap->FileObject != NULL &&
1507 SharedCacheMap->FileObject->FsContext != NULL &&
1508 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeTypeCode == 0x0502 &&
1509 ((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->NodeByteSize == 0x1F8 &&
1510 ((PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100))->Length != 0)
1511 {
1512 FileName = (PUNICODE_STRING)(((PUCHAR)SharedCacheMap->FileObject->FsContext) + 0x100);
1513 Extra = L" (FastFAT)";
1514 }
1515 else
1516 {
1517 FileName = &NoName;
1518 }
1519
1520 /* And print */
1521 KdbpPrint("%p\t%d\t%d\t%wZ%S\n", SharedCacheMap, Valid, Dirty, FileName, Extra);
1522 }
1523
1524 return TRUE;
1525 }
1526
/* KDBG extension: display the dirty page accounting, memory thresholds
 * and a verdict on whether writes are currently throttled (deferred).
 * Argc/Argv are the (unused) debugger command arguments; returns TRUE. */
BOOLEAN
ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
{
    KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
              (CcTotalDirtyPages * PAGE_SIZE) / 1024);
    KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
              (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
    KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
              (MmAvailablePages * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
              (MmThrottleTop * PAGE_SIZE) / 1024);
    KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
              (MmThrottleBottom * PAGE_SIZE) / 1024);
    KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
              (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);

    /* The 64-page margin matches the maximum number of pages a single
     * write can charge against the dirty page threshold. */
    if (CcTotalDirtyPages >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
    }
    else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
    {
        KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
    }
    else
    {
        KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
    }

    return TRUE;
}
1558 #endif
1559
1560 /* EOF */