c835a03a8ecf8f07500360cc3a03de49fb578cfa
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
20 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
/* Global list of all dirty VACBs, protected by ViewLock */
LIST_ENTRY DirtyVacbListHead;
/* LRU list of all VACBs; least-recently-used entries at the head */
static LIST_ENTRY VacbLruListHead;

/* Serializes access to the global VACB lists above */
KGUARDED_MUTEX ViewLock;

/* Lookaside lists for fast non-paged allocation of iBCBs,
 * shared cache maps and VACBs */
NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Internal vars (MS):
 * - Threshold above which lazy writer will start action
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock when dealing with the deferred list
 * - List for "clean" shared cache maps
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;
66
#if DBG
/* Debug-only reference-count helpers: identical interlocked operations to
 * the release-build macros, but they additionally log every change when
 * per-map tracing is enabled (see CcRosTraceCacheMap). */

/* Increment the VACB reference count; returns the new count. */
ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
/* Decrement the VACB reference count; returns the new count.
 * A dirty VACB must never reach zero references, because the dirty list
 * itself owns a reference (see CcRosMarkDirtyVacb). */
ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
    ASSERT(!(Refs == 0 && vacb->Dirty));
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
/* Read the current reference count without changing it (compare-exchange
 * with identical comparand and exchange values). */
ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
#endif
109
110 NTSTATUS
111 CcRosInternalFreeVacb(PROS_VACB Vacb);
112
113
114 /* FUNCTIONS *****************************************************************/
115
/* Enable or disable per-cache-map debug tracing (effective on DBG builds
 * only). When enabling, also dumps the map's current VACB list. */
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        /* Lock order: global ViewLock first, then the per-map spinlock */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        /* Walk and dump every VACB currently attached to this map */
        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
161
162 NTSTATUS
163 NTAPI
164 CcRosFlushVacb (
165 PROS_VACB Vacb)
166 {
167 NTSTATUS Status;
168
169 Status = CcWriteVirtualAddress(Vacb);
170 if (NT_SUCCESS(Status))
171 {
172 CcRosUnmarkDirtyVacb(Vacb, TRUE);
173 }
174
175 return Status;
176 }
177
/* Flush up to Target pages worth of dirty VACBs to disk.
 * ARGUMENTS:
 *   Target - number of dirty pages to try to write back.
 *   Count - receives the number of pages actually written.
 *   Wait - if TRUE, block on locks; if FALSE, skip busy VACBs.
 *   CalledFromLazy - TRUE when invoked from the lazy writer
 *                    (temporary files are then skipped).
 * Always returns STATUS_SUCCESS; individual flush failures are only logged.
 */
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;
    ZeroTimeout.QuadPart = 0;

    /* Enter a critical region so normal kernel APCs cannot interrupt us
     * while we juggle the cache locks */
    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Pin the VACB so it cannot go away while we work on it */
        CcRosVacbIncRefCount(current);

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Let the FSD synchronize for lazy writing; skip on refusal */
        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* With Wait == FALSE a zero timeout makes this a non-blocking try */
        Status = CcRosAcquireVacbLock(current,
                                      Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above */
        if (CcRosVacbGetRefCount(current) > 2)
        {
            /* Somebody else is actively using the VACB - don't flush now */
            CcRosReleaseVacbLock(current);
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Drop ViewLock across the actual disk I/O */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        CcRosReleaseVacbLock(current);
        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            (*Count) += PagesFreed;

            /* Make sure we don't overflow target! */
            if (Target < PagesFreed)
            {
                /* If we would have, jump to zero directly */
                Target = 0;
            }
            else
            {
                Target -= PagesFreed;
            }
        }

        /* The dirty list may have changed while ViewLock was dropped,
         * so restart the walk from the head */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
298
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Walk the LRU list front-to-back so the oldest views go first */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        Refs = CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now (only our temporary
         * reference plus the creation reference remain) */
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Finally destroy the VACBs collected above, outside all locks */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        InitializeListHead(&current->CacheMapVacbListEntry);
        CcRosVacbDecRefCount(current);
        CcRosInternalFreeVacb(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
426
/* Drop the caller's reference to a VACB (obtained via CcRosGetVacb or
 * CcRosLookupVacb) and update its state.
 * ARGUMENTS:
 *   Valid - new validity (up-to-date) state of the view contents.
 *   Dirty - TRUE if the caller modified the view.
 *   Mapped - TRUE if the caller created a new mapping of the view.
 * Also releases the VACB mutex held since lookup/creation. */
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ULONG Refs;
    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    Vacb->Valid = Valid;

    /* Only mark dirty on a clean->dirty transition; the dirty list then
     * takes its own reference */
    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        Vacb->MappedCount++;
    }
    Refs = CcRosVacbDecRefCount(Vacb);
    /* The first mapping keeps an extra reference on the VACB
     * (dropped again in CcRosUnmapVacb) */
    if (Mapped && (Vacb->MappedCount == 1))
    {
        CcRosVacbIncRefCount(Vacb);
    }

    ASSERT(Refs > 0);

    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}
465
/* Returns with VACB Lock Held! */
/* Find the VACB of SharedCacheMap that covers FileOffset.
 * The per-map list is sorted by file offset, so the walk can stop as soon
 * as an entry past the target is seen. On a hit the VACB is referenced and
 * its mutex acquired; returns NULL when no view covers the offset. */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Reference before dropping the list locks so the VACB cannot
             * be freed, then take its mutex outside the spinlock */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            CcRosAcquireVacbLock(current, NULL);
            return current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}
511
/* Put a clean VACB on the global dirty list and account for its pages.
 * The dirty list owns one reference (taken here, released again in
 * CcRosUnmarkDirtyVacb). Caller must not already hold ViewLock. */
VOID
NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* This reference belongs to the dirty list */
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}
549
/* Remove a dirty VACB from the global dirty list, update the page
 * accounting, and drop the dirty-list reference.
 * With LockViews == FALSE the caller must already hold both ViewLock and
 * the cache map spinlock. */
VOID
NTAPI
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    /* Re-init so IsListEmpty checks on this entry stay valid */
    InitializeListHead(&Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* Drop the reference the dirty list was holding */
    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }
}
583
584 NTSTATUS
585 NTAPI
586 CcRosMarkDirtyFile (
587 PROS_SHARED_CACHE_MAP SharedCacheMap,
588 LONGLONG FileOffset)
589 {
590 PROS_VACB Vacb;
591
592 ASSERT(SharedCacheMap);
593
594 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
595 SharedCacheMap, FileOffset);
596
597 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
598 if (Vacb == NULL)
599 {
600 KeBugCheck(CACHE_MANAGER);
601 }
602
603 CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);
604
605 return STATUS_SUCCESS;
606 }
607
/*
 * Drop one mapping of the view at FileOffset and release it,
 * optionally marking it dirty.
 * Note: this is not the inverse of CcRosMapVacbInKernelSpace().
 * Returns STATUS_UNSUCCESSFUL if no view covers the offset.
 */
NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    BOOLEAN NowDirty)
{
    PROS_VACB Vacb;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    ASSERT(Vacb->MappedCount != 0);
    Vacb->MappedCount--;

    /* Dropping the last mapping also drops the extra reference the first
     * mapping took (see CcRosReleaseVacb) */
    if (Vacb->MappedCount == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);

    return STATUS_SUCCESS;
}
644
/* Reserve a VACB_MAPPING_GRANULARITY-sized kernel memory area for the view
 * and back each page with a freshly requested cache page.
 * Returns the MmCreateMemoryArea status on area-creation failure;
 * bugchecks if page allocation or mapping fails afterwards. */
static
NTSTATUS
CcRosMapVacbInKernelSpace(
    PROS_VACB Vacb)
{
    ULONG i;
    NTSTATUS Status;
    ULONG_PTR NumberOfPages;
    PVOID BaseAddress = NULL;

    /* Create a memory area. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
                                0, // nothing checks for VACB mareas, so set to 0
                                &BaseAddress,
                                VACB_MAPPING_GRANULARITY,
                                PAGE_READWRITE,
                                (PMEMORY_AREA*)&Vacb->MemoryArea,
                                0,
                                PAGE_SIZE);
    ASSERT(Vacb->BaseAddress == NULL);
    Vacb->BaseAddress = BaseAddress;
    MmUnlockAddressSpace(MmGetKernelAddressSpace());
    if (!NT_SUCCESS(Status))
    {
        DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
        return Status;
    }

    /* Sanity: view base is page-aligned and lies in kernel space */
    ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
    ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
    ASSERT((ULONG_PTR)Vacb->BaseAddress + VACB_MAPPING_GRANULARITY - 1 > (ULONG_PTR)MmSystemRangeStart);

    /* Create a virtual mapping for this memory area */
    NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
    for (i = 0; i < NumberOfPages; i++)
    {
        PFN_NUMBER PageFrameNumber;

        MI_SET_USAGE(MI_USAGE_CACHE);
        Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
        /* NOTE(review): failure is detected via PageFrameNumber rather than
         * Status - confirm MmRequestPageMemoryConsumer always yields
         * PageFrameNumber == 0 on failure */
        if (PageFrameNumber == 0)
        {
            DPRINT1("Unable to allocate page\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        ASSERT(BaseAddress == Vacb->BaseAddress);
        ASSERT(i * PAGE_SIZE < VACB_MAPPING_GRANULARITY);
        ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) >= (ULONG_PTR)BaseAddress);
        ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) > (ULONG_PTR)MmSystemRangeStart);

        Status = MmCreateVirtualMapping(NULL,
                                        (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
                                        PAGE_READWRITE,
                                        &PageFrameNumber,
                                        1);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Unable to create virtual mapping\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }

    return STATUS_SUCCESS;
}
711
/* Allocate, insert and map a new VACB for FileOffset (rounded down to the
 * mapping granularity). Handles the race with a concurrent creator: if a
 * VACB covering the offset appeared since the caller's lookup, the fresh
 * one is discarded and the existing one returned. On success *Vacb is
 * referenced and its mutex is held. */
static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    /* Refuse views beyond the end of the section */
    if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
    {
        *Vacb = NULL;
        return STATUS_INVALID_PARAMETER;
    }

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    current->BaseAddress = NULL;
    current->Valid = FALSE;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
    }
#endif
    current->MappedCount = 0;
    current->ReferenceCount = 0;
    current->PinCount = 0;
    KeInitializeMutex(&current->Mutex, 0);
    InitializeListHead(&current->CacheMapVacbListEntry);
    InitializeListHead(&current->DirtyVacbListEntry);
    InitializeListHead(&current->VacbLruListEntry);
    CcRosAcquireVacbLock(current, NULL);
    KeAcquireGuardedMutex(&ViewLock);

    *Vacb = current;
    /* There is window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check if a VACB for the
     * file offset exist. If there is a VACB, we release
     * our newly created VACB and return the existing one.
     */
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Lost the race: adopt the existing VACB instead */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            CcRosReleaseVacbLock(*Vacb);
            KeReleaseGuardedMutex(&ViewLock);
            ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
            *Vacb = current;
            CcRosAcquireVacbLock(current, NULL);
            return STATUS_SUCCESS;
        }
        /* The per-map list is kept sorted by file offset; remember the
         * last entry before the insertion point */
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
    CcRosVacbIncRefCount(current);
    KeReleaseGuardedMutex(&ViewLock);

    MI_SET_USAGE(MI_USAGE_CACHE);
#if MI_TRACE_PFNS
    if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
    {
        PWCHAR pos;
        ULONG len = 0;
        pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
        if (pos)
        {
            len = wcslen(pos) * sizeof(WCHAR);
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
        }
        else
        {
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
        }
    }
#endif

    /* Reference it to allow release */
    CcRosVacbIncRefCount(current);

    Status = CcRosMapVacbInKernelSpace(current);
    if (!NT_SUCCESS(Status))
    {
        /* Mapping failed: unlink and destroy the fresh VACB */
        RemoveEntryList(&current->CacheMapVacbListEntry);
        RemoveEntryList(&current->VacbLruListEntry);
        CcRosReleaseVacb(SharedCacheMap, current, FALSE,
                         FALSE, FALSE);
        CcRosVacbDecRefCount(current);
        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
    }

    return Status;
}
854
/* Return (and reference) the VACB covering FileOffset, creating and mapping
 * a new one if none exists. Outputs the view's base file offset, its kernel
 * base address and whether its contents are valid. On success the VACB
 * mutex is held by the caller. */
NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PLONGLONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;
    ULONG Refs;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    Refs = CcRosVacbGetRefCount(current);

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the VACB to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *Vacb = current;
    *BaseOffset = current->FileOffset.QuadPart;

    /* Caller's reference plus at least the creation reference */
    ASSERT(Refs > 1);

    return STATUS_SUCCESS;
}
912
NTSTATUS
NTAPI
CcRosRequestVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
/*
 * FUNCTION: Request a page mapping for a shared cache map
 * Thin wrapper around CcRosGetVacb; FileOffset must be aligned to
 * VACB_MAPPING_GRANULARITY (misalignment is a caller bug -> bugcheck).
 */
{
    LONGLONG BaseOffset;

    ASSERT(SharedCacheMap);

    if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
    {
        DPRINT1("Bad fileoffset %I64x should be multiple of %x",
                FileOffset, VACB_MAPPING_GRANULARITY);
        KeBugCheck(CACHE_MANAGER);
    }

    return CcRosGetVacb(SharedCacheMap,
                        FileOffset,
                        &BaseOffset,
                        BaseAddress,
                        UptoDate,
                        Vacb);
}
943
944 static
945 VOID
946 CcFreeCachePage (
947 PVOID Context,
948 MEMORY_AREA* MemoryArea,
949 PVOID Address,
950 PFN_NUMBER Page,
951 SWAPENTRY SwapEntry,
952 BOOLEAN Dirty)
953 {
954 ASSERT(SwapEntry == 0);
955 if (Page != 0)
956 {
957 ASSERT(MmGetReferenceCountPage(Page) == 1);
958 MmReleasePageMemoryConsumer(MC_CACHE, Page);
959 }
960 }
961
962 NTSTATUS
963 CcRosInternalFreeVacb (
964 PROS_VACB Vacb)
965 /*
966 * FUNCTION: Releases a VACB associated with a shared cache map
967 */
968 {
969 DPRINT("Freeing VACB 0x%p\n", Vacb);
970 #if DBG
971 if (Vacb->SharedCacheMap->Trace)
972 {
973 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
974 }
975 #endif
976
977 MmLockAddressSpace(MmGetKernelAddressSpace());
978 MmFreeMemoryArea(MmGetKernelAddressSpace(),
979 Vacb->MemoryArea,
980 CcFreeCachePage,
981 NULL);
982 MmUnlockAddressSpace(MmGetKernelAddressSpace());
983
984 if (Vacb->PinCount != 0 || Vacb->ReferenceCount != 0)
985 {
986 DPRINT1("Invalid free: %ld, %ld\n", Vacb->ReferenceCount, Vacb->PinCount);
987 if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
988 {
989 DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
990 }
991 }
992
993 ASSERT(Vacb->PinCount == 0);
994 ASSERT(Vacb->ReferenceCount == 0);
995 ASSERT(IsListEmpty(&Vacb->CacheMapVacbListEntry));
996 ASSERT(IsListEmpty(&Vacb->DirtyVacbListEntry));
997 ASSERT(IsListEmpty(&Vacb->VacbLruListEntry));
998 RtlFillMemory(Vacb, sizeof(Vacb), 0xfd);
999 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
1000 return STATUS_SUCCESS;
1001 }
1002
/*
 * @implemented
 */
/* Flush the cached data of a file (or a sub-range of it) to disk.
 * With FileOffset == NULL the whole cached file size is flushed.
 * IoStatus (optional) receives the first flush failure, if any. */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
        SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            /* No offset given: flush the entire cached file */
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one view (VACB granularity) at a time */
        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                CcRosReleaseVacb(SharedCacheMap, current, current->Valid, current->Dirty, FALSE);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1076
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 * Caller holds ViewLock on entry; it is held again on exit.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    /* Temporarily bump OpenCount so the map cannot be deleted recursively
     * while we drop ViewLock for the flush below */
    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        KIRQL OldIrql;

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            /* Drop the spinlock to take the VACB mutex (blocking) */
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosAcquireVacbLock(current, NULL);
            RemoveEntryList(&current->VacbLruListEntry);
            InitializeListHead(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* Should not happen after the flush above */
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosUnmarkDirtyVacb(current, FALSE);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                DPRINT1("Freeing dirty VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
            CcRosReleaseVacbLock(current);

            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        /* Destroy the collected VACBs outside all locks */
        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            InitializeListHead(&current->CacheMapVacbListEntry);
            CcRosVacbDecRefCount(current);
            CcRosInternalFreeVacb(current);
        }

        /* Unlink the map from the global shared cache map list */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        /* Re-take ViewLock: the caller expects it held on return */
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1158
1159 VOID
1160 NTAPI
1161 CcRosReferenceCache (
1162 PFILE_OBJECT FileObject)
1163 {
1164 PROS_SHARED_CACHE_MAP SharedCacheMap;
1165 KeAcquireGuardedMutex(&ViewLock);
1166 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1167 ASSERT(SharedCacheMap);
1168 ASSERT(SharedCacheMap->OpenCount != 0);
1169 SharedCacheMap->OpenCount++;
1170 KeReleaseGuardedMutex(&ViewLock);
1171 }
1172
/* Tear down the shared cache map of a section if no open references
 * remain; no-op when the map is still in use or does not exist. */
VOID
NTAPI
CcRosRemoveIfClosed (
    PSECTION_OBJECT_POINTERS SectionObjectPointer)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    DPRINT("CcRosRemoveIfClosed()\n");
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
    {
        /* CcRosDeleteFileCache expects ViewLock held and returns with it held */
        CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
    }
    KeReleaseGuardedMutex(&ViewLock);
}
1188
1189
1190 VOID
1191 NTAPI
1192 CcRosDereferenceCache (
1193 PFILE_OBJECT FileObject)
1194 {
1195 PROS_SHARED_CACHE_MAP SharedCacheMap;
1196 KeAcquireGuardedMutex(&ViewLock);
1197 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1198 ASSERT(SharedCacheMap);
1199 if (SharedCacheMap->OpenCount > 0)
1200 {
1201 SharedCacheMap->OpenCount--;
1202 if (SharedCacheMap->OpenCount == 0)
1203 {
1204 MmFreeSectionSegments(SharedCacheMap->FileObject);
1205 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1206 }
1207 }
1208 KeReleaseGuardedMutex(&ViewLock);
1209 }
1210
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 *
 * Detaches and frees the per-handle private cache map, and, if this
 * was the last opener of the shared cache map, tears the shared map
 * down as well. Always returns STATUS_SUCCESS.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);

    /* Nothing to do if caching was never initialized for this file */
    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map
         * Before you even try to remove it from FO, always
         * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing!
         */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        if (PrivateMap != NULL)
        {
            /* Remove it from the file (the shared map keeps a list of
             * all private maps, protected by its own spinlock) */
            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

            /* And free it. The first private map is embedded inside the
             * shared cache map and must not be freed to pool; it is
             * recycled by clearing its node type code instead. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            /* Drop this handle's reference on the shared map; the last
             * closer frees the section segments and deletes the cache */
            if (SharedCacheMap->OpenCount > 0)
            {
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}
1271
NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 *
 * Creates the shared cache map on first use of the file, then a
 * private cache map for this particular handle. Both steps are
 * idempotent: an existing shared/private map is reused as-is.
 * Returns STATUS_INSUFFICIENT_RESOURCES on allocation failure,
 * STATUS_SUCCESS otherwise.
 */
{
    KIRQL OldIrql;
    BOOLEAN Allocated;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    /* Allocated tracks whether WE created the shared map in this call,
     * so the failure path below knows whether to undo it */
    Allocated = FALSE;
    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        /* First caller for this file: build the shared cache map */
        Allocated = TRUE;
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        /* The shared map holds a reference on the file object for its
         * whole lifetime; released in CcRosDeleteFileCache */
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
        SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        InitializeListHead(&SharedCacheMap->PrivateList);
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        /* Publish the new map on the global clean list, under the
         * master lock */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }
    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle. The first
         * handle gets the map embedded in the shared cache map (its
         * NodeTypeCode is 0 while free); later handles get pool. */
        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
        {
            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        }
        else
        {
            PrivateMap = &SharedCacheMap->PrivateCacheMap;
        }

        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
                RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
                KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

        /* Each private map counts as one opener of the shared map */
        FileObject->PrivateCacheMap = PrivateMap;
        SharedCacheMap->OpenCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}
1378
1379 /*
1380 * @implemented
1381 */
1382 PFILE_OBJECT
1383 NTAPI
1384 CcGetFileObjectFromSectionPtrs (
1385 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1386 {
1387 PROS_SHARED_CACHE_MAP SharedCacheMap;
1388
1389 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1390
1391 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1392 {
1393 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1394 ASSERT(SharedCacheMap);
1395 return SharedCacheMap->FileObject;
1396 }
1397 return NULL;
1398 }
1399
1400 VOID
1401 INIT_FUNCTION
1402 NTAPI
1403 CcInitView (
1404 VOID)
1405 {
1406 DPRINT("CcInitView()\n");
1407
1408 InitializeListHead(&DirtyVacbListHead);
1409 InitializeListHead(&VacbLruListHead);
1410 InitializeListHead(&CcDeferredWrites);
1411 InitializeListHead(&CcCleanSharedCacheMapList);
1412 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1413 KeInitializeGuardedMutex(&ViewLock);
1414 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1415 NULL,
1416 NULL,
1417 0,
1418 sizeof(INTERNAL_BCB),
1419 TAG_BCB,
1420 20);
1421 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1422 NULL,
1423 NULL,
1424 0,
1425 sizeof(ROS_SHARED_CACHE_MAP),
1426 TAG_SHARED_CACHE_MAP,
1427 20);
1428 ExInitializeNPagedLookasideList(&VacbLookasideList,
1429 NULL,
1430 NULL,
1431 0,
1432 sizeof(ROS_VACB),
1433 TAG_VACB,
1434 20);
1435
1436 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1437
1438 CcInitCacheZeroPage();
1439 }
1440
1441 #if DBG && defined(KDBG)
1442 BOOLEAN
1443 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1444 {
1445 PLIST_ENTRY ListEntry;
1446 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1447
1448 KdbpPrint(" Usage Summary (in kb)\n");
1449 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1450 /* No need to lock the spin lock here, we're in DBG */
1451 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1452 ListEntry != &CcCleanSharedCacheMapList;
1453 ListEntry = ListEntry->Flink)
1454 {
1455 PLIST_ENTRY Vacbs;
1456 ULONG Valid = 0, Dirty = 0;
1457 PROS_SHARED_CACHE_MAP SharedCacheMap;
1458 PUNICODE_STRING FileName;
1459
1460 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1461
1462 /* Dirty size */
1463 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1464
1465 /* First, count for all the associated VACB */
1466 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1467 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1468 Vacbs = Vacbs->Flink)
1469 {
1470 PROS_VACB Vacb;
1471
1472 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1473 if (Vacb->Valid)
1474 {
1475 Valid += VACB_MAPPING_GRANULARITY / 1024;
1476 }
1477 }
1478
1479 /* Setup name */
1480 if (SharedCacheMap->FileObject != NULL &&
1481 SharedCacheMap->FileObject->FileName.Length != 0)
1482 {
1483 FileName = &SharedCacheMap->FileObject->FileName;
1484 }
1485 else
1486 {
1487 FileName = &NoName;
1488 }
1489
1490 /* And print */
1491 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
1492 }
1493
1494 return TRUE;
1495 }
1496
1497 BOOLEAN
1498 ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
1499 {
1500 KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
1501 (CcTotalDirtyPages * PAGE_SIZE) / 1024);
1502 KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
1503 (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
1504 KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
1505 (MmAvailablePages * PAGE_SIZE) / 1024);
1506 KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
1507 (MmThrottleTop * PAGE_SIZE) / 1024);
1508 KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
1509 (MmThrottleBottom * PAGE_SIZE) / 1024);
1510 KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
1511 (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);
1512
1513 if (CcTotalDirtyPages >= CcDirtyPageThreshold)
1514 {
1515 KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
1516 }
1517 else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
1518 {
1519 KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
1520 }
1521 else
1522 {
1523 KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
1524 }
1525
1526 return TRUE;
1527 }
1528 #endif
1529
1530 /* EOF */