/* [NTOSKRNL] Properly delete VACB in CcRosCreateVacb() when mapping fails. */
/* [reactos.git] / ntoskrnl / cc / view.c */
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
54 /* Internal vars (MS):
55 * - Threshold above which lazy writer will start action
56 * - Amount of dirty pages
57 * - List for deferred writes
58 * - Spinlock when dealing with the deferred list
59 * - List for "clean" shared cache maps
60 */
61 ULONG CcDirtyPageThreshold = 0;
62 ULONG CcTotalDirtyPages = 0;
63 LIST_ENTRY CcDeferredWrites;
64 KSPIN_LOCK CcDeferredWriteSpinLock;
65 LIST_ENTRY CcCleanSharedCacheMapList;
66
#if DBG
/* Debug build: traced wrappers around the VACB reference count.
 * Each returns the new (post-operation) count and, when tracing is enabled
 * on the owning shared cache map, logs the call site and VACB state. */

/* Atomically increment Vacb->ReferenceCount; returns the new count. */
ULONG CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedIncrement((PLONG)&vacb->ReferenceCount);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
/* Atomically decrement Vacb->ReferenceCount; returns the new count.
 * A dirty VACB must never drop to zero references (the dirty list itself
 * holds one) — asserted here. */
ULONG CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedDecrement((PLONG)&vacb->ReferenceCount);
    ASSERT(!(Refs == 0 && vacb->Dirty));
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
/* Read Vacb->ReferenceCount with interlocked semantics (compare-exchange
 * with itself); returns the current count without modifying it. */
ULONG CcRosVacbGetRefCount_(PROS_VACB vacb, PCSTR file, INT line)
{
    ULONG Refs;

    Refs = InterlockedCompareExchange((PLONG)&vacb->ReferenceCount, 0, 0);
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ==RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, Refs, vacb->Dirty, vacb->PageOut);
    }

    return Refs;
}
#endif
109
110 NTSTATUS
111 CcRosInternalFreeVacb(PROS_VACB Vacb);
112
113
114 /* FUNCTIONS *****************************************************************/
115
/*
 * Enable or disable reference-count tracing for a shared cache map.
 * When enabling, also dumps every VACB currently attached to the map.
 * No-op (parameters ignored) on non-debug builds.
 */
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        /* Lock order: ViewLock first, then the per-map spinlock */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        /* Walk the map's VACB list and dump each entry's state */
        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
161
162 NTSTATUS
163 NTAPI
164 CcRosFlushVacb (
165 PROS_VACB Vacb)
166 {
167 NTSTATUS Status;
168
169 Status = CcWriteVirtualAddress(Vacb);
170 if (NT_SUCCESS(Status))
171 {
172 CcRosUnmarkDirtyVacb(Vacb, TRUE);
173 }
174
175 return Status;
176 }
177
/*
 * Flush dirty VACBs until roughly Target pages have been written out.
 *
 * Target         - number of pages we aim to flush.
 * Count          - receives the number of pages actually flushed.
 * Wait           - if TRUE, block on the lazy-write lock and VACB lock;
 *                  otherwise use a zero timeout and skip busy entries.
 * CalledFromLazy - if TRUE, skip temporary files (lazy writer policy).
 *
 * Runs inside a critical region with ViewLock held; the lock is dropped
 * around the actual write and reacquired afterwards.
 */
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;
    ZeroTimeout.QuadPart = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Pin the VACB so it survives while we drop ViewLock below */
        CcRosVacbIncRefCount(current);

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Let the FS synchronize with its own lazy-write locking */
        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Non-blocking when Wait is FALSE: give up immediately if busy */
        Status = CcRosAcquireVacbLock(current,
                                      Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above */
        if (CcRosVacbGetRefCount(current) > 2)
        {
            /* Someone else is using this VACB right now; skip it */
            CcRosReleaseVacbLock(current);
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* The write may block; drop the view lock around it */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        CcRosReleaseVacbLock(current);
        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            (*Count) += PagesFreed;

            /* Make sure we don't overflow target! */
            if (Target < PagesFreed)
            {
                /* If we would have, jump to zero directly */
                Target = 0;
            }
            else
            {
                Target -= PagesFreed;
            }
        }

        /* The list may have changed while unlocked; restart from the head */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
298
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Scan from the front of the LRU list, i.e. least-recently used first */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        ULONG Refs;

        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        Refs = CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (Refs < 2)
        {
            /* Only the cache-map list reference remains: safe to evict */
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);
            ASSERT(Refs == 1);

            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Free the evicted VACBs outside of all locks */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        CcRosVacbDecRefCount(current);
        CcRosInternalFreeVacb(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
424
/*
 * Release a VACB previously obtained via CcRosGetVacb/CcRosLookupVacb.
 *
 * Valid  - new validity state recorded on the VACB.
 * Dirty  - if TRUE and the VACB is not already dirty, mark it dirty
 *          (inserts it on the dirty list and takes a reference).
 * Mapped - if TRUE, account a new user-space mapping on the VACB.
 *
 * Drops the caller's reference and releases the VACB mutex.
 */
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ULONG Refs;
    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    Vacb->Valid = Valid;

    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        Vacb->MappedCount++;
    }
    Refs = CcRosVacbDecRefCount(Vacb);
    /* First mapping: keep an extra reference to pin the VACB while mapped */
    if (Mapped && (Vacb->MappedCount == 1))
    {
        CcRosVacbIncRefCount(Vacb);
    }

    /* The cache-map list still holds a reference, so this can't hit zero */
    ASSERT(Refs > 0);

    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}
463
/* Returns with VACB Lock Held! */
/*
 * Find the VACB (if any) whose VACB_MAPPING_GRANULARITY-sized range
 * contains FileOffset. On a hit, returns the VACB referenced and with
 * its mutex held; on a miss, returns NULL.
 * The cache-map list is kept sorted by file offset, which allows the
 * early break once we pass the target offset.
 */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    /* Lock order: ViewLock, then the per-map spinlock */
    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Reference before dropping the locks so it can't go away */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            /* May block, hence acquired after the spinlock is released */
            CcRosAcquireVacbLock(current, NULL);
            return current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}
509
/*
 * Mark a clean VACB dirty: put it on the global dirty list, update the
 * global and per-map dirty page accounting, take a reference for the
 * dirty list, and refresh its LRU position. Finally kick the lazy
 * writer if its scan isn't already active.
 * The VACB must not already be dirty (asserted).
 */
VOID
NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* The dirty list owns a reference until CcRosUnmarkDirtyVacb */
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}
547
/*
 * Inverse of CcRosMarkDirtyVacb: remove the VACB from the dirty list,
 * fix up dirty-page accounting, and drop the dirty list's reference.
 *
 * LockViews - if TRUE, take ViewLock and the per-map spinlock here;
 *             if FALSE, the caller already holds them.
 *
 * The VACB must currently be dirty (asserted).
 */
VOID
NTAPI
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* Drop the reference that was taken when the VACB was marked dirty */
    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }
}
580
581 NTSTATUS
582 NTAPI
583 CcRosMarkDirtyFile (
584 PROS_SHARED_CACHE_MAP SharedCacheMap,
585 LONGLONG FileOffset)
586 {
587 PROS_VACB Vacb;
588
589 ASSERT(SharedCacheMap);
590
591 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
592 SharedCacheMap, FileOffset);
593
594 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
595 if (Vacb == NULL)
596 {
597 KeBugCheck(CACHE_MANAGER);
598 }
599
600 CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);
601
602 return STATUS_SUCCESS;
603 }
604
/*
 * Note: this is not the contrary function of
 * CcRosMapVacbInKernelSpace()
 */
/*
 * Drop one user mapping of the VACB covering FileOffset.
 * When the last mapping goes away, the extra reference taken when the
 * first mapping was created (see CcRosReleaseVacb) is released.
 *
 * NowDirty - propagate a dirty indication when releasing the VACB.
 *
 * Returns STATUS_UNSUCCESSFUL if no VACB covers the offset.
 */
NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    BOOLEAN NowDirty)
{
    PROS_VACB Vacb;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    ASSERT(Vacb->MappedCount != 0);
    Vacb->MappedCount--;

    /* Last mapping gone: drop the pin that kept the VACB mapped */
    if (Vacb->MappedCount == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);

    return STATUS_SUCCESS;
}
641
642 static
643 NTSTATUS
644 CcRosMapVacbInKernelSpace(
645 PROS_VACB Vacb)
646 {
647 ULONG i;
648 NTSTATUS Status;
649 ULONG_PTR NumberOfPages;
650 PVOID BaseAddress = NULL;
651
652 /* Create a memory area. */
653 MmLockAddressSpace(MmGetKernelAddressSpace());
654 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
655 0, // nothing checks for VACB mareas, so set to 0
656 &BaseAddress,
657 VACB_MAPPING_GRANULARITY,
658 PAGE_READWRITE,
659 (PMEMORY_AREA*)&Vacb->MemoryArea,
660 0,
661 PAGE_SIZE);
662 ASSERT(Vacb->BaseAddress == NULL);
663 Vacb->BaseAddress = BaseAddress;
664 MmUnlockAddressSpace(MmGetKernelAddressSpace());
665 if (!NT_SUCCESS(Status))
666 {
667 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
668 return Status;
669 }
670
671 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
672 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
673 ASSERT((ULONG_PTR)Vacb->BaseAddress + VACB_MAPPING_GRANULARITY - 1 > (ULONG_PTR)MmSystemRangeStart);
674
675 /* Create a virtual mapping for this memory area */
676 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
677 for (i = 0; i < NumberOfPages; i++)
678 {
679 PFN_NUMBER PageFrameNumber;
680
681 MI_SET_USAGE(MI_USAGE_CACHE);
682 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
683 if (PageFrameNumber == 0)
684 {
685 DPRINT1("Unable to allocate page\n");
686 KeBugCheck(MEMORY_MANAGEMENT);
687 }
688
689 ASSERT(BaseAddress == Vacb->BaseAddress);
690 ASSERT(i * PAGE_SIZE < VACB_MAPPING_GRANULARITY);
691 ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) >= (ULONG_PTR)BaseAddress);
692 ASSERT((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE) > (ULONG_PTR)MmSystemRangeStart);
693
694 Status = MmCreateVirtualMapping(NULL,
695 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
696 PAGE_READWRITE,
697 &PageFrameNumber,
698 1);
699 if (!NT_SUCCESS(Status))
700 {
701 DPRINT1("Unable to create virtual mapping\n");
702 KeBugCheck(MEMORY_MANAGEMENT);
703 }
704 }
705
706 return STATUS_SUCCESS;
707 }
708
/*
 * Allocate, link and map a new VACB for FileOffset in the given shared
 * cache map. On success, *Vacb is referenced and its mutex is held.
 *
 * Handles the race where another thread created a VACB for the same
 * range between the caller's CcRosLookupVacb miss and our list insert:
 * in that case the new VACB is discarded and the existing one returned.
 *
 * Returns STATUS_INVALID_PARAMETER if FileOffset lies beyond the
 * section size, or the failure status from the kernel-space mapping.
 */
static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
    {
        *Vacb = NULL;
        return STATUS_INVALID_PARAMETER;
    }

    /* Initialize a fresh VACB; offset is aligned down to view granularity */
    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    current->BaseAddress = NULL;
    current->Valid = FALSE;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
    }
#endif
    current->MappedCount = 0;
    current->DirtyVacbListEntry.Flink = NULL;
    current->DirtyVacbListEntry.Blink = NULL;
    current->ReferenceCount = 0;
    current->PinCount = 0;
    KeInitializeMutex(&current->Mutex, 0);
    CcRosAcquireVacbLock(current, NULL);
    KeAcquireGuardedMutex(&ViewLock);

    *Vacb = current;
    /* There is window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check if a VACB for the
     * file offset exist. If there is a VACB, we release
     * our newly created VACB and return the existing one.
     */
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Lost the race: another VACB already covers this range */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            /* Throw away our never-linked VACB and hand back the winner */
            CcRosReleaseVacbLock(*Vacb);
            KeReleaseGuardedMutex(&ViewLock);
            ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
            *Vacb = current;
            CcRosAcquireVacbLock(current, NULL);
            return STATUS_SUCCESS;
        }
        if (current->FileOffset.QuadPart < FileOffset)
        {
            /* Track the insertion point to keep the list sorted */
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
    /* Reference held by the cache-map list membership */
    CcRosVacbIncRefCount(current);
    KeReleaseGuardedMutex(&ViewLock);

    MI_SET_USAGE(MI_USAGE_CACHE);
#if MI_TRACE_PFNS
    /* Tag allocated PFNs with (part of) the file name for PFN tracing */
    if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
    {
        PWCHAR pos;
        ULONG len = 0;
        pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
        if (pos)
        {
            len = wcslen(pos) * sizeof(WCHAR);
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
        }
        else
        {
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
        }
    }
#endif

    /* Reference it to allow release */
    CcRosVacbIncRefCount(current);

    Status = CcRosMapVacbInKernelSpace(current);
    if (!NT_SUCCESS(Status))
    {
        /* Mapping failed: unlink from both lists, drop the references we
         * took above, and return the VACB to the lookaside list */
        RemoveEntryList(&current->CacheMapVacbListEntry);
        RemoveEntryList(&current->VacbLruListEntry);
        CcRosReleaseVacb(SharedCacheMap, current, FALSE,
                         FALSE, FALSE);
        CcRosVacbDecRefCount(current);
        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
    }

    return Status;
}
850
/*
 * Return (creating if necessary) the VACB covering FileOffset.
 * On success the VACB is referenced, its mutex is held, and its LRU
 * position is refreshed. Output parameters:
 *   BaseOffset  - file offset at which the view starts.
 *   BaseAddress - kernel virtual address of the view.
 *   UptoDate    - whether the view currently holds valid data.
 *   Vacb        - the VACB itself.
 */
NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PLONGLONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;
    ULONG Refs;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    Refs = CcRosVacbGetRefCount(current);

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the VACB to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *Vacb = current;
    *BaseOffset = current->FileOffset.QuadPart;

    /* Caller's reference plus the list's reference must both be present */
    ASSERT(Refs > 1);

    return STATUS_SUCCESS;
}
908
909 NTSTATUS
910 NTAPI
911 CcRosRequestVacb (
912 PROS_SHARED_CACHE_MAP SharedCacheMap,
913 LONGLONG FileOffset,
914 PVOID* BaseAddress,
915 PBOOLEAN UptoDate,
916 PROS_VACB *Vacb)
917 /*
918 * FUNCTION: Request a page mapping for a shared cache map
919 */
920 {
921 LONGLONG BaseOffset;
922
923 ASSERT(SharedCacheMap);
924
925 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
926 {
927 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
928 FileOffset, VACB_MAPPING_GRANULARITY);
929 KeBugCheck(CACHE_MANAGER);
930 }
931
932 return CcRosGetVacb(SharedCacheMap,
933 FileOffset,
934 &BaseOffset,
935 BaseAddress,
936 UptoDate,
937 Vacb);
938 }
939
940 static
941 VOID
942 CcFreeCachePage (
943 PVOID Context,
944 MEMORY_AREA* MemoryArea,
945 PVOID Address,
946 PFN_NUMBER Page,
947 SWAPENTRY SwapEntry,
948 BOOLEAN Dirty)
949 {
950 ASSERT(SwapEntry == 0);
951 if (Page != 0)
952 {
953 ASSERT(MmGetReferenceCountPage(Page) == 1);
954 MmReleasePageMemoryConsumer(MC_CACHE, Page);
955 }
956 }
957
958 NTSTATUS
959 CcRosInternalFreeVacb (
960 PROS_VACB Vacb)
961 /*
962 * FUNCTION: Releases a VACB associated with a shared cache map
963 */
964 {
965 DPRINT("Freeing VACB 0x%p\n", Vacb);
966 #if DBG
967 if (Vacb->SharedCacheMap->Trace)
968 {
969 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
970 }
971 #endif
972
973 MmLockAddressSpace(MmGetKernelAddressSpace());
974 MmFreeMemoryArea(MmGetKernelAddressSpace(),
975 Vacb->MemoryArea,
976 CcFreeCachePage,
977 NULL);
978 MmUnlockAddressSpace(MmGetKernelAddressSpace());
979
980 if (Vacb->PinCount != 0 || Vacb->ReferenceCount != 0)
981 {
982 DPRINT1("Invalid free: %ld, %ld\n", Vacb->ReferenceCount, Vacb->PinCount);
983 if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
984 {
985 DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
986 }
987 }
988
989 ASSERT(Vacb->PinCount == 0);
990 ASSERT(Vacb->ReferenceCount == 0);
991 RtlFillMemory(Vacb, sizeof(Vacb), 0xfd);
992 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
993 return STATUS_SUCCESS;
994 }
995
/*
 * @implemented
 */
/*
 * Flush the cached data of a file (or a sub-range of it) to disk.
 * Walks the range one VACB-granularity view at a time, flushing any
 * dirty VACB found. IoStatus (optional) receives STATUS_SUCCESS or the
 * last flush failure; Information is always 0 here.
 * With no FileOffset, the whole file (up to FileSize) is flushed and
 * Length is ignored.
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
        SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            /* No offset given: flush the entire file */
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                CcRosReleaseVacb(SharedCacheMap, current, current->Valid, current->Dirty, FALSE);
            }

            /* Advance one view; clamp the remainder on the last iteration */
            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1069
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 *
 * Called with ViewLock held; temporarily bumps OpenCount so the map
 * survives while ViewLock is dropped for the flush. If the map is
 * still unreferenced afterwards, all VACBs are unlinked (dirty ones
 * unmarked first), freed outside the locks, and the map itself is
 * removed from the clean list and returned to its lookaside list.
 * Returns with ViewLock reacquired in all cases.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    /* Keep the map alive while we drop ViewLock to flush */
    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        KIRQL OldIrql;

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            /* Drop the spinlock before the (possibly blocking) VACB lock */
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosAcquireVacbLock(current, NULL);
            RemoveEntryList(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* Unmark with LockViews FALSE: we take the spinlock here */
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosUnmarkDirtyVacb(current, FALSE);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                DPRINT1("Freeing dirty VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
            CcRosReleaseVacbLock(current);

            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        /* Free the collected VACBs with no locks held */
        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosVacbDecRefCount(current);
            CcRosInternalFreeVacb(current);
        }

        /* Unlink the map from the global shared-cache-map list */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1149
1150 VOID
1151 NTAPI
1152 CcRosReferenceCache (
1153 PFILE_OBJECT FileObject)
1154 {
1155 PROS_SHARED_CACHE_MAP SharedCacheMap;
1156 KeAcquireGuardedMutex(&ViewLock);
1157 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1158 ASSERT(SharedCacheMap);
1159 ASSERT(SharedCacheMap->OpenCount != 0);
1160 SharedCacheMap->OpenCount++;
1161 KeReleaseGuardedMutex(&ViewLock);
1162 }
1163
1164 VOID
1165 NTAPI
1166 CcRosRemoveIfClosed (
1167 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1168 {
1169 PROS_SHARED_CACHE_MAP SharedCacheMap;
1170 DPRINT("CcRosRemoveIfClosed()\n");
1171 KeAcquireGuardedMutex(&ViewLock);
1172 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1173 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1174 {
1175 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1176 }
1177 KeReleaseGuardedMutex(&ViewLock);
1178 }
1179
1180
1181 VOID
1182 NTAPI
1183 CcRosDereferenceCache (
1184 PFILE_OBJECT FileObject)
1185 {
1186 PROS_SHARED_CACHE_MAP SharedCacheMap;
1187 KeAcquireGuardedMutex(&ViewLock);
1188 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1189 ASSERT(SharedCacheMap);
1190 if (SharedCacheMap->OpenCount > 0)
1191 {
1192 SharedCacheMap->OpenCount--;
1193 if (SharedCacheMap->OpenCount == 0)
1194 {
1195 MmFreeSectionSegments(SharedCacheMap->FileObject);
1196 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1197 }
1198 }
1199 KeReleaseGuardedMutex(&ViewLock);
1200 }
1201
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 *
 * Detaches and frees the handle's private cache map, drops one reference
 * on the shared cache map, and deletes the whole file cache when that was
 * the last reference. Always returns STATUS_SUCCESS.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map
         * Before you even try to remove it from FO, always
         * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing!
         */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        if (PrivateMap != NULL)
        {
            /* Remove it from the file's list of private maps,
             * under the per-file CacheMapLock. */
            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

            /* And free it. Pool-allocated maps are freed; the map embedded
             * in the shared cache map is just marked free for reuse by
             * clearing its node type code. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            /* Drop this handle's reference; on the last one, free the
             * section segments first, then the cache map itself. */
            if (SharedCacheMap->OpenCount > 0)
            {
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}
1262
NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 *
 * On first caching initiation for the file, creates the shared cache map
 * and links it into CcCleanSharedCacheMapList; then creates a per-handle
 * private cache map if this file object does not have one yet.
 * Returns STATUS_SUCCESS or STATUS_INSUFFICIENT_RESOURCES.
 */
{
    KIRQL OldIrql;
    BOOLEAN Allocated;          /* TRUE if *we* created the shared map (drives error cleanup) */
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    Allocated = FALSE;
    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        /* First caching initiation for this file: build the shared map */
        Allocated = TRUE;
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        /* The shared cache map keeps its own reference on the file object;
         * it is dropped again in CcRosDeleteFileCache (or in the error
         * path below). */
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
        SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        InitializeListHead(&SharedCacheMap->PrivateList);
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        /* New map has no dirty pages yet: insert it into the global list
         * of clean shared cache maps, under the master lock. */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }
    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle */
        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
        {
            /* The map embedded in the shared cache map is already in use
             * by another handle: fall back to a pool allocation. */
            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        }
        else
        {
            /* Embedded map is free (NodeTypeCode == 0): reuse it */
            PrivateMap = &SharedCacheMap->PrivateCacheMap;
        }

        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
                RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
                KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

        FileObject->PrivateCacheMap = PrivateMap;
        SharedCacheMap->OpenCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}
1369
1370 /*
1371 * @implemented
1372 */
1373 PFILE_OBJECT
1374 NTAPI
1375 CcGetFileObjectFromSectionPtrs (
1376 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1377 {
1378 PROS_SHARED_CACHE_MAP SharedCacheMap;
1379
1380 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1381
1382 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1383 {
1384 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1385 ASSERT(SharedCacheMap);
1386 return SharedCacheMap->FileObject;
1387 }
1388 return NULL;
1389 }
1390
1391 VOID
1392 INIT_FUNCTION
1393 NTAPI
1394 CcInitView (
1395 VOID)
1396 {
1397 DPRINT("CcInitView()\n");
1398
1399 InitializeListHead(&DirtyVacbListHead);
1400 InitializeListHead(&VacbLruListHead);
1401 InitializeListHead(&CcDeferredWrites);
1402 InitializeListHead(&CcCleanSharedCacheMapList);
1403 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1404 KeInitializeGuardedMutex(&ViewLock);
1405 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1406 NULL,
1407 NULL,
1408 0,
1409 sizeof(INTERNAL_BCB),
1410 TAG_BCB,
1411 20);
1412 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1413 NULL,
1414 NULL,
1415 0,
1416 sizeof(ROS_SHARED_CACHE_MAP),
1417 TAG_SHARED_CACHE_MAP,
1418 20);
1419 ExInitializeNPagedLookasideList(&VacbLookasideList,
1420 NULL,
1421 NULL,
1422 0,
1423 sizeof(ROS_VACB),
1424 TAG_VACB,
1425 20);
1426
1427 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1428
1429 CcInitCacheZeroPage();
1430 }
1431
1432 #if DBG && defined(KDBG)
1433 BOOLEAN
1434 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1435 {
1436 PLIST_ENTRY ListEntry;
1437 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1438
1439 KdbpPrint(" Usage Summary (in kb)\n");
1440 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1441 /* No need to lock the spin lock here, we're in DBG */
1442 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1443 ListEntry != &CcCleanSharedCacheMapList;
1444 ListEntry = ListEntry->Flink)
1445 {
1446 PLIST_ENTRY Vacbs;
1447 ULONG Valid = 0, Dirty = 0;
1448 PROS_SHARED_CACHE_MAP SharedCacheMap;
1449 PUNICODE_STRING FileName;
1450
1451 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1452
1453 /* Dirty size */
1454 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1455
1456 /* First, count for all the associated VACB */
1457 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1458 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1459 Vacbs = Vacbs->Flink)
1460 {
1461 PROS_VACB Vacb;
1462
1463 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1464 if (Vacb->Valid)
1465 {
1466 Valid += VACB_MAPPING_GRANULARITY / 1024;
1467 }
1468 }
1469
1470 /* Setup name */
1471 if (SharedCacheMap->FileObject != NULL &&
1472 SharedCacheMap->FileObject->FileName.Length != 0)
1473 {
1474 FileName = &SharedCacheMap->FileObject->FileName;
1475 }
1476 else
1477 {
1478 FileName = &NoName;
1479 }
1480
1481 /* And print */
1482 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
1483 }
1484
1485 return TRUE;
1486 }
1487
1488 BOOLEAN
1489 ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
1490 {
1491 KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
1492 (CcTotalDirtyPages * PAGE_SIZE) / 1024);
1493 KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
1494 (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
1495 KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
1496 (MmAvailablePages * PAGE_SIZE) / 1024);
1497 KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
1498 (MmThrottleTop * PAGE_SIZE) / 1024);
1499 KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
1500 (MmThrottleBottom * PAGE_SIZE) / 1024);
1501 KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
1502 (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);
1503
1504 if (CcTotalDirtyPages >= CcDirtyPageThreshold)
1505 {
1506 KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
1507 }
1508 else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
1509 {
1510 KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
1511 }
1512 else
1513 {
1514 KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
1515 }
1516
1517 return TRUE;
1518 }
1519 #endif
1520
1521 /* EOF */