[NTOSKRNL] Use interlocked operations for VACB reference counting.
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
19 * (1) If caching for the FCB hasn't been initiated then so do by calling
20 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
27 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
/* GLOBALS *******************************************************************/

/* Global list of all dirty VACBs; walked by the lazy writer to flush them */
LIST_ENTRY DirtyVacbListHead;
/* LRU-ordered list of all VACBs; trimming starts from the head (coldest) */
static LIST_ENTRY VacbLruListHead;

/* Protects the two global VACB lists above and the per-map VACB lists
 * (taken before the per-map CacheMapLock spinlock) */
KGUARDED_MUTEX ViewLock;

/* Lookaside lists for the cache manager's fixed-size allocations */
NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Internal vars (MS):
 * - Threshold above which lazy writer will start action
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock when dealing with the deferred list
 * - List for "clean" shared cache maps
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;
66
#if DBG
/* Debug wrappers around the VACB reference count. All accesses go through
 * Interlocked* primitives so concurrent callers see consistent values, and
 * every transition is traced when the owning cache map has tracing enabled.
 * The file/line of the caller is passed in so traces pinpoint the call site. */

/* Atomically increment the reference count; returns the new value. */
ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}

/* Atomically decrement the reference count; returns the new value.
 * A dirty VACB must never drop to zero references — the dirty list
 * itself holds one (see CcRosMarkDirtyVacb). */
ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
    ASSERT(!(Refs == 0 && vacb->Dirty));
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}

/* Read the current reference count. The compare-exchange with identical
 * comparand/exchange (0, 0) is a no-op write used purely as an atomic read. */
ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
#endif
109
110 NTSTATUS
111 CcRosInternalFreeVacb(PROS_VACB Vacb);
112
113
114 /* FUNCTIONS *****************************************************************/
115
/*
 * Enable or disable per-VACB tracing for a shared cache map (DBG builds only).
 * When enabling, dumps the current state of every VACB of the map under the
 * appropriate locks. On free builds this is a no-op.
 */
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        /* Lock order: global ViewLock first, then the per-map spinlock */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        /* Walk the map's VACB list and dump each entry's state */
        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
161
162 NTSTATUS
163 NTAPI
164 CcRosFlushVacb (
165 PROS_VACB Vacb)
166 {
167 NTSTATUS Status;
168
169 Status = CcWriteVirtualAddress(Vacb);
170 if (NT_SUCCESS(Status))
171 {
172 CcRosUnmarkDirtyVacb(Vacb, TRUE);
173 }
174
175 return Status;
176 }
177
/*
 * Flush dirty VACBs until roughly Target pages have been written back.
 *
 * Target         - number of pages to try to flush.
 * Count          - receives the number of pages actually flushed.
 * Wait           - if TRUE, block on the lazy-write resource and VACB lock;
 *                  if FALSE, skip entries that cannot be acquired immediately.
 * CalledFromLazy - TRUE when invoked by the lazy writer, which must not
 *                  touch temporary files.
 *
 * Runs with ViewLock held, dropping it only around the actual write so
 * other cache operations can proceed; always returns STATUS_SUCCESS.
 */
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;
    ZeroTimeout.QuadPart = 0;

    /* Disable APCs so the guarded mutex / lazy-write callbacks are safe */
    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Pin the VACB so it cannot go away while we drop locks below */
        CcRosVacbIncRefCount(current);

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Let the FS synchronize with its own locking before we write */
        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Non-blocking acquisition when !Wait: zero timeout */
        Status = CcRosAcquireVacbLock(current,
                                      Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above; skip views someone else is using */
        if (CcRosVacbGetRefCount(current) > 2)
        {
            CcRosReleaseVacbLock(current);
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Drop the global lock across the (potentially slow) disk write */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        CcRosReleaseVacbLock(current);
        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        /* EOF / write-protected media are expected, not failures to report */
        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            (*Count) += PagesFreed;

            /* Make sure we don't overflow target! */
            if (Target < PagesFreed)
            {
                /* If we would have, jump to zero directly */
                Target = 0;
            }
            else
            {
                Target -= PagesFreed;
            }
        }

        /* The list may have changed while ViewLock was dropped: restart */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
298
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;   /* only retry the flush pass once */

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Scan from the LRU head, i.e. the coldest views first */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        Refs = CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now: only the cache map's own
         * reference (== 1) may remain for the view to be evictable */
        if (Refs < 2)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            /* Unlink from both lists; actual teardown happens below,
             * outside the locks, via FreeList */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Tear down the evicted views now that no locks are held */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        /* Drop the cache map's own reference before freeing */
        CcRosVacbDecRefCount(current);
        CcRosInternalFreeVacb(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
424
/*
 * Release a VACB previously obtained through CcRosLookupVacb/CcRosGetVacb:
 * update its valid/dirty/mapped state, drop the caller's reference and
 * release the VACB mutex. Always returns STATUS_SUCCESS.
 */
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ULONG Refs;
    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    /* Record whether the view now contains valid data */
    Vacb->Valid = Valid;

    /* First transition to dirty: move onto the global dirty list */
    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        Vacb->MappedCount++;
    }
    Refs = CcRosVacbDecRefCount(Vacb);
    /* The first mapping keeps an extra reference on the VACB,
     * balanced when the last mapping goes away in CcRosUnmapVacb */
    if (Mapped && (Vacb->MappedCount == 1))
    {
        CcRosVacbIncRefCount(Vacb);
    }

    /* The caller's reference can never have been the last one */
    ASSERT(Refs > 0);

    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}
463
/* Returns with VACB Lock Held!
 *
 * Look up the VACB covering FileOffset in the given shared cache map.
 * On success the VACB is returned referenced and with its mutex held;
 * returns NULL when no view covers the offset. The per-map list is kept
 * sorted by file offset, which allows the early break below.
 */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Reference before dropping the locks so the VACB stays alive
             * while we block on its mutex */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            CcRosAcquireVacbLock(current, NULL);
            return current;
        }
        /* List is sorted: once past the offset there can be no match */
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}
509
/*
 * Transition a clean VACB to dirty: put it on the global dirty list,
 * account the dirty pages, take the dirty-list reference, refresh its LRU
 * position and kick the lazy writer. The VACB must not already be dirty.
 */
VOID
NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    /* Account the whole view's pages as dirty, globally and per-map */
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* The dirty list holds its own reference (dropped on unmark) */
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    /* Schedule a lazy writer run to now that we have dirty VACB */
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}
547
/*
 * Transition a dirty VACB back to clean: remove it from the dirty list,
 * undo the dirty-page accounting and drop the dirty-list reference.
 * LockViews selects whether this routine takes ViewLock/CacheMapLock
 * itself (TRUE) or relies on the caller already holding them (FALSE).
 */
VOID
NTAPI
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* Balance the reference taken in CcRosMarkDirtyVacb */
    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }
}
580
581 NTSTATUS
582 NTAPI
583 CcRosMarkDirtyFile (
584 PROS_SHARED_CACHE_MAP SharedCacheMap,
585 LONGLONG FileOffset)
586 {
587 PROS_VACB Vacb;
588
589 ASSERT(SharedCacheMap);
590
591 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
592 SharedCacheMap, FileOffset);
593
594 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
595 if (Vacb == NULL)
596 {
597 KeBugCheck(CACHE_MANAGER);
598 }
599
600 CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);
601
602 return STATUS_SUCCESS;
603 }
604
/*
 * Note: this is not the contrary function of
 * CcRosMapVacbInKernelSpace()
 *
 * Drop one mapping reference from the VACB covering FileOffset. When the
 * last mapping goes away, the extra reference taken on first map (see
 * CcRosReleaseVacb) is released as well. NowDirty marks the view dirty
 * on release. Returns STATUS_UNSUCCESSFUL if no view covers the offset.
 */
NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    BOOLEAN NowDirty)
{
    PROS_VACB Vacb;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    /* Unmapping an unmapped view would be a caller bug */
    ASSERT(Vacb->MappedCount != 0);
    Vacb->MappedCount--;

    /* Last mapping gone: drop the first-map reference */
    if (Vacb->MappedCount == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    /* Also drops the lookup reference and the VACB mutex */
    CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);

    return STATUS_SUCCESS;
}
641
642 static
643 NTSTATUS
644 CcRosMapVacbInKernelSpace(
645 PROS_VACB Vacb)
646 {
647 ULONG i;
648 NTSTATUS Status;
649 ULONG_PTR NumberOfPages;
650 PVOID BaseAddress = NULL;
651
652 /* Create a memory area. */
653 MmLockAddressSpace(MmGetKernelAddressSpace());
654 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
655 0, // nothing checks for VACB mareas, so set to 0
656 &BaseAddress,
657 VACB_MAPPING_GRANULARITY,
658 PAGE_READWRITE,
659 (PMEMORY_AREA*)&Vacb->MemoryArea,
660 0,
661 PAGE_SIZE);
662 ASSERT(Vacb->BaseAddress == NULL);
663 Vacb->BaseAddress = BaseAddress;
664 MmUnlockAddressSpace(MmGetKernelAddressSpace());
665 if (!NT_SUCCESS(Status))
666 {
667 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
668 return Status;
669 }
670
671 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
672 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
673 ASSERT((ULONG_PTR)Vacb->BaseAddress + VACB_MAPPING_GRANULARITY - 1 > (ULONG_PTR)MmSystemRangeStart);
674
675 /* Create a virtual mapping for this memory area */
676 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
677 for (i = 0; i < NumberOfPages; i++)
678 {
679 PFN_NUMBER PageFrameNumber;
680
681 MI_SET_USAGE(MI_USAGE_CACHE);
682 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
683 if (PageFrameNumber == 0)
684 {
685 DPRINT1("Unable to allocate page\n");
686 KeBugCheck(MEMORY_MANAGEMENT);
687 }
688
689 ASSERT(BaseAddress == Vacb->BaseAddress);
690 ASSERT(i * PAGE_SIZE < VACB_MAPPING_GRANULARITY);
691 ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) >= (ULONG_PTR)BaseAddress);
692 ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) > (ULONG_PTR)MmSystemRangeStart);
693
694 Status = MmCreateVirtualMapping(NULL,
695 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
696 PAGE_READWRITE,
697 &PageFrameNumber,
698 1);
699 if (!NT_SUCCESS(Status))
700 {
701 DPRINT1("Unable to create virtual mapping\n");
702 KeBugCheck(MEMORY_MANAGEMENT);
703 }
704 }
705
706 return STATUS_SUCCESS;
707 }
708
709 static
710 NTSTATUS
711 CcRosCreateVacb (
712 PROS_SHARED_CACHE_MAP SharedCacheMap,
713 LONGLONG FileOffset,
714 PROS_VACB *Vacb)
715 {
716 PROS_VACB current;
717 PROS_VACB previous;
718 PLIST_ENTRY current_entry;
719 NTSTATUS Status;
720 KIRQL oldIrql;
721
722 ASSERT(SharedCacheMap);
723
724 DPRINT("CcRosCreateVacb()\n");
725
726 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
727 {
728 *Vacb = NULL;
729 return STATUS_INVALID_PARAMETER;
730 }
731
732 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
733 current->BaseAddress = NULL;
734 current->Valid = FALSE;
735 current->Dirty = FALSE;
736 current->PageOut = FALSE;
737 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
738 current->SharedCacheMap = SharedCacheMap;
739 #if DBG
740 if (SharedCacheMap->Trace)
741 {
742 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
743 }
744 #endif
745 current->MappedCount = 0;
746 current->DirtyVacbListEntry.Flink = NULL;
747 current->DirtyVacbListEntry.Blink = NULL;
748 current->ReferenceCount = 0;
749 current->PinCount = 0;
750 KeInitializeMutex(&current->Mutex, 0);
751 CcRosAcquireVacbLock(current, NULL);
752 KeAcquireGuardedMutex(&ViewLock);
753
754 *Vacb = current;
755 /* There is window between the call to CcRosLookupVacb
756 * and CcRosCreateVacb. We must check if a VACB for the
757 * file offset exist. If there is a VACB, we release
758 * our newly created VACB and return the existing one.
759 */
760 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
761 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
762 previous = NULL;
763 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
764 {
765 current = CONTAINING_RECORD(current_entry,
766 ROS_VACB,
767 CacheMapVacbListEntry);
768 if (IsPointInRange(current->FileOffset.QuadPart,
769 VACB_MAPPING_GRANULARITY,
770 FileOffset))
771 {
772 CcRosVacbIncRefCount(current);
773 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
774 #if DBG
775 if (SharedCacheMap->Trace)
776 {
777 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
778 SharedCacheMap,
779 (*Vacb),
780 current);
781 }
782 #endif
783 CcRosReleaseVacbLock(*Vacb);
784 KeReleaseGuardedMutex(&ViewLock);
785 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
786 *Vacb = current;
787 CcRosAcquireVacbLock(current, NULL);
788 return STATUS_SUCCESS;
789 }
790 if (current->FileOffset.QuadPart < FileOffset)
791 {
792 ASSERT(previous == NULL ||
793 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
794 previous = current;
795 }
796 if (current->FileOffset.QuadPart > FileOffset)
797 break;
798 current_entry = current_entry->Flink;
799 }
800 /* There was no existing VACB. */
801 current = *Vacb;
802 if (previous)
803 {
804 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
805 }
806 else
807 {
808 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
809 }
810 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
811 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
812 CcRosVacbIncRefCount(current);
813 KeReleaseGuardedMutex(&ViewLock);
814
815 MI_SET_USAGE(MI_USAGE_CACHE);
816 #if MI_TRACE_PFNS
817 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
818 {
819 PWCHAR pos;
820 ULONG len = 0;
821 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
822 if (pos)
823 {
824 len = wcslen(pos) * sizeof(WCHAR);
825 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
826 }
827 else
828 {
829 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
830 }
831 }
832 #endif
833
834 Status = CcRosMapVacbInKernelSpace(current);
835 if (!NT_SUCCESS(Status))
836 {
837 RemoveEntryList(&current->CacheMapVacbListEntry);
838 RemoveEntryList(&current->VacbLruListEntry);
839 CcRosReleaseVacbLock(current);
840 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
841 }
842
843 /* Reference it to allow release */
844 CcRosVacbIncRefCount(current);
845
846 return Status;
847 }
848
/*
 * Return the VACB covering FileOffset, creating (and mapping) it if it
 * does not exist yet. On success the VACB is referenced with its mutex
 * held, its LRU position is refreshed, and the caller receives its base
 * mapping address, base file offset and validity flag.
 */
NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PLONGLONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;
    ULONG Refs;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    Refs = CcRosVacbGetRefCount(current);

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the VACB to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *Vacb = current;
    *BaseOffset = current->FileOffset.QuadPart;

    /* Caller's reference plus at least the cache map's own one */
    ASSERT(Refs > 1);

    return STATUS_SUCCESS;
}
906
907 NTSTATUS
908 NTAPI
909 CcRosRequestVacb (
910 PROS_SHARED_CACHE_MAP SharedCacheMap,
911 LONGLONG FileOffset,
912 PVOID* BaseAddress,
913 PBOOLEAN UptoDate,
914 PROS_VACB *Vacb)
915 /*
916 * FUNCTION: Request a page mapping for a shared cache map
917 */
918 {
919 LONGLONG BaseOffset;
920
921 ASSERT(SharedCacheMap);
922
923 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
924 {
925 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
926 FileOffset, VACB_MAPPING_GRANULARITY);
927 KeBugCheck(CACHE_MANAGER);
928 }
929
930 return CcRosGetVacb(SharedCacheMap,
931 FileOffset,
932 &BaseOffset,
933 BaseAddress,
934 UptoDate,
935 Vacb);
936 }
937
938 static
939 VOID
940 CcFreeCachePage (
941 PVOID Context,
942 MEMORY_AREA* MemoryArea,
943 PVOID Address,
944 PFN_NUMBER Page,
945 SWAPENTRY SwapEntry,
946 BOOLEAN Dirty)
947 {
948 ASSERT(SwapEntry == 0);
949 if (Page != 0)
950 {
951 ASSERT(MmGetReferenceCountPage(Page) == 1);
952 MmReleasePageMemoryConsumer(MC_CACHE, Page);
953 }
954 }
955
956 NTSTATUS
957 CcRosInternalFreeVacb (
958 PROS_VACB Vacb)
959 /*
960 * FUNCTION: Releases a VACB associated with a shared cache map
961 */
962 {
963 DPRINT("Freeing VACB 0x%p\n", Vacb);
964 #if DBG
965 if (Vacb->SharedCacheMap->Trace)
966 {
967 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
968 }
969 #endif
970
971 MmLockAddressSpace(MmGetKernelAddressSpace());
972 MmFreeMemoryArea(MmGetKernelAddressSpace(),
973 Vacb->MemoryArea,
974 CcFreeCachePage,
975 NULL);
976 MmUnlockAddressSpace(MmGetKernelAddressSpace());
977
978 if (Vacb->PinCount != 0 || Vacb->ReferenceCount != 0)
979 {
980 DPRINT1("Invalid free: %ld, %ld\n", Vacb->ReferenceCount, Vacb->PinCount);
981 if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
982 {
983 DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
984 }
985 }
986
987 ASSERT(Vacb->PinCount == 0);
988 ASSERT(Vacb->ReferenceCount == 0);
989 RtlFillMemory(Vacb, sizeof(Vacb), 0xfd);
990 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
991 return STATUS_SUCCESS;
992 }
993
/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
/*
 * Flush the cached range [FileOffset, FileOffset + Length) of a file back
 * to disk, one VACB-granularity view at a time. A NULL FileOffset means
 * "flush the whole file". IoStatus, if supplied, receives the status of
 * the last failing write (or STATUS_SUCCESS); its Information field is
 * not updated per-view here.
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
        SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            /* No offset given: flush the entire file */
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        while (RemainingLength > 0)
        {
            /* Ranges without a cached view are simply skipped */
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                /* Drop the lookup reference and the VACB mutex */
                CcRosReleaseVacb(SharedCacheMap, current, current->Valid, current->Dirty, FALSE);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1067
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 *
 * Called with ViewLock held; temporarily drops it to flush the file, then
 * (if the map is still unreferenced) detaches the map from the file object,
 * tears down every VACB and frees the map. ViewLock is held again on exit.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    /* Pin the map so the flush below cannot trigger a recursive delete */
    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        KIRQL OldIrql;

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            /* Drop the spinlock: acquiring the VACB mutex may block */
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosAcquireVacbLock(current, NULL);
            RemoveEntryList(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* Unmark under the spinlock; LockViews == FALSE since
                 * ViewLock is already held by us */
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosUnmarkDirtyVacb(current, FALSE);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                DPRINT1("Freeing dirty VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
            CcRosReleaseVacbLock(current);

            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        /* Free the collected VACBs without holding ViewLock */
        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            /* Drop the cache map's reference before freeing the view */
            CcRosVacbDecRefCount(current);
            CcRosInternalFreeVacb(current);
        }

        /* Unlink the map from the global shared-cache-map list */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        /* Re-establish the caller's ViewLock invariant */
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1147
/*
 * Take an additional open reference on a file's shared cache map.
 * The map must already exist and be referenced (OpenCount != 0).
 */
VOID
NTAPI
CcRosReferenceCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    ASSERT(SharedCacheMap->OpenCount != 0);
    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);
}
1161
/*
 * Delete a file's cache map if no open references remain on it.
 * Used to reap caches whose last handle has gone away.
 */
VOID
NTAPI
CcRosRemoveIfClosed (
    PSECTION_OBJECT_POINTERS SectionObjectPointer)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    DPRINT("CcRosRemoveIfClosed()\n");
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
    {
        /* CcRosDeleteFileCache expects (and returns with) ViewLock held */
        CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
    }
    KeReleaseGuardedMutex(&ViewLock);
}
1177
1178
/*
 * Drop one open reference from a file's shared cache map; when the last
 * reference goes away, release the section segments and delete the cache.
 */
VOID
NTAPI
CcRosDereferenceCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    if (SharedCacheMap->OpenCount > 0)
    {
        SharedCacheMap->OpenCount--;
        if (SharedCacheMap->OpenCount == 0)
        {
            /* Last reference: tear down section data, then the cache map
             * (CcRosDeleteFileCache runs with ViewLock held) */
            MmFreeSectionSegments(SharedCacheMap->FileObject);
            CcRosDeleteFileCache(FileObject, SharedCacheMap);
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
}
1199
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 * Detaches and frees the per-handle private cache map, drops one open
 * reference on the shared cache map, and destroys the shared map when
 * the last reference goes away.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);

    /* Nothing to do if caching was never initialized for this file */
    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map
         * Before you event try to remove it from FO, always
         * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing!
         */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        if (PrivateMap != NULL)
        {
            /* Remove it from the file */
            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

            /* And free it.
             * The first private map is embedded in the shared map and is
             * only marked unused (NodeTypeCode = 0); any additional ones
             * were pool-allocated in CcRosInitializeFileCache. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            /* Drop the open reference this handle held; on the last close,
             * release section segments and delete the whole cache map. */
            if (SharedCacheMap->OpenCount > 0)
            {
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}
1260
NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 * Creates the shared cache map on first call for a file, and a private
 * cache map for each handle. Returns STATUS_INSUFFICIENT_RESOURCES if
 * either allocation fails, STATUS_SUCCESS otherwise.
 */
{
    KIRQL OldIrql;
    BOOLEAN Allocated;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    Allocated = FALSE;
    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        /* First caller for this file: build the shared cache map */
        Allocated = TRUE;
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        /* Keep the file object alive as long as the cache map exists
         * (dereferenced on the rollback path below and, presumably, when
         * the cache map is deleted) */
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
        SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        InitializeListHead(&SharedCacheMap->PrivateList);
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        /* Publish the new map on the global clean list, under the master lock */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }
    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle */
        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
        {
            /* The embedded private map is already taken by another handle,
             * so this one needs a pool allocation */
            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        }
        else
        {
            /* First handle: use the map embedded in the shared cache map */
            PrivateMap = &SharedCacheMap->PrivateCacheMap;
        }

        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
                RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
                KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

        FileObject->PrivateCacheMap = PrivateMap;
        /* Each private cache map holds one open reference on the shared map */
        SharedCacheMap->OpenCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}
1367
1368 /*
1369 * @implemented
1370 */
1371 PFILE_OBJECT
1372 NTAPI
1373 CcGetFileObjectFromSectionPtrs (
1374 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1375 {
1376 PROS_SHARED_CACHE_MAP SharedCacheMap;
1377
1378 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1379
1380 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1381 {
1382 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1383 ASSERT(SharedCacheMap);
1384 return SharedCacheMap->FileObject;
1385 }
1386 return NULL;
1387 }
1388
/* One-time cache manager initialization, run at boot (INIT_FUNCTION):
 * sets up the global VACB/dirty lists, locks, and lookaside lists, then
 * registers the cache memory consumer and the shared zero page. */
VOID
INIT_FUNCTION
NTAPI
CcInitView (
    VOID)
{
    DPRINT("CcInitView()\n");

    /* Global lists: dirty VACBs, LRU ordering, deferred writes,
     * and the list of clean shared cache maps */
    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    InitializeListHead(&CcDeferredWrites);
    InitializeListHead(&CcCleanSharedCacheMapList);
    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
    KeInitializeGuardedMutex(&ViewLock);
    /* Non-paged lookaside lists for the three cache manager allocation
     * types (depth 20 each) */
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    /* Let the memory manager reclaim cache pages through CcRosTrimCache */
    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);

    CcInitCacheZeroPage();
}
1429
1430 #if DBG && defined(KDBG)
1431 BOOLEAN
1432 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1433 {
1434 PLIST_ENTRY ListEntry;
1435 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1436
1437 KdbpPrint(" Usage Summary (in kb)\n");
1438 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1439 /* No need to lock the spin lock here, we're in DBG */
1440 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1441 ListEntry != &CcCleanSharedCacheMapList;
1442 ListEntry = ListEntry->Flink)
1443 {
1444 PLIST_ENTRY Vacbs;
1445 ULONG Valid = 0, Dirty = 0;
1446 PROS_SHARED_CACHE_MAP SharedCacheMap;
1447 PUNICODE_STRING FileName;
1448
1449 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1450
1451 /* Dirty size */
1452 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1453
1454 /* First, count for all the associated VACB */
1455 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1456 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1457 Vacbs = Vacbs->Flink)
1458 {
1459 PROS_VACB Vacb;
1460
1461 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1462 if (Vacb->Valid)
1463 {
1464 Valid += VACB_MAPPING_GRANULARITY / 1024;
1465 }
1466 }
1467
1468 /* Setup name */
1469 if (SharedCacheMap->FileObject != NULL &&
1470 SharedCacheMap->FileObject->FileName.Length != 0)
1471 {
1472 FileName = &SharedCacheMap->FileObject->FileName;
1473 }
1474 else
1475 {
1476 FileName = &NoName;
1477 }
1478
1479 /* And print */
1480 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
1481 }
1482
1483 return TRUE;
1484 }
1485
1486 BOOLEAN
1487 ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
1488 {
1489 KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
1490 (CcTotalDirtyPages * PAGE_SIZE) / 1024);
1491 KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
1492 (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
1493 KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
1494 (MmAvailablePages * PAGE_SIZE) / 1024);
1495 KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
1496 (MmThrottleTop * PAGE_SIZE) / 1024);
1497 KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
1498 (MmThrottleBottom * PAGE_SIZE) / 1024);
1499 KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
1500 (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);
1501
1502 if (CcTotalDirtyPages >= CcDirtyPageThreshold)
1503 {
1504 KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
1505 }
1506 else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
1507 {
1508 KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
1509 }
1510 else
1511 {
1512 KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
1513 }
1514
1515 return TRUE;
1516 }
1517 #endif
1518
1519 /* EOF */