[NTOSKRNL] More asserts regarding reference count
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
54 /* Internal vars (MS):
55 * - Threshold above which lazy writer will start action
56 * - Amount of dirty pages
57 * - List for deferred writes
58 * - Spinlock when dealing with the deferred list
59 * - List for "clean" shared cache maps
60 */
61 ULONG CcDirtyPageThreshold = 0;
62 ULONG CcTotalDirtyPages = 0;
63 LIST_ENTRY CcDeferredWrites;
64 KSPIN_LOCK CcDeferredWriteSpinLock;
65 LIST_ENTRY CcCleanSharedCacheMapList;
66
67 #if DBG
68 VOID CcRosVacbIncRefCount_(PROS_VACB vacb, PCSTR file, INT line)
69 {
70 ++vacb->ReferenceCount;
71 if (vacb->SharedCacheMap->Trace)
72 {
73 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
74 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
75 }
76 }
77 VOID CcRosVacbDecRefCount_(PROS_VACB vacb, PCSTR file, INT line)
78 {
79 ASSERT(vacb->ReferenceCount != 0);
80 --vacb->ReferenceCount;
81 ASSERT(!(vacb->ReferenceCount == 0 && vacb->Dirty));
82 if (vacb->SharedCacheMap->Trace)
83 {
84 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
85 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
86 }
87 }
88 #endif
89
90 NTSTATUS
91 CcRosInternalFreeVacb(PROS_VACB Vacb);
92
93
94 /* FUNCTIONS *****************************************************************/
95
96 VOID
97 NTAPI
98 CcRosTraceCacheMap (
99 PROS_SHARED_CACHE_MAP SharedCacheMap,
100 BOOLEAN Trace )
101 {
102 #if DBG
103 KIRQL oldirql;
104 PLIST_ENTRY current_entry;
105 PROS_VACB current;
106
107 if (!SharedCacheMap)
108 return;
109
110 SharedCacheMap->Trace = Trace;
111
112 if (Trace)
113 {
114 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
115
116 KeAcquireGuardedMutex(&ViewLock);
117 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
118
119 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
120 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
121 {
122 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
123 current_entry = current_entry->Flink;
124
125 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
126 current, current->ReferenceCount, current->Dirty, current->PageOut );
127 }
128 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
129 KeReleaseGuardedMutex(&ViewLock);
130 }
131 else
132 {
133 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
134 }
135
136 #else
137 UNREFERENCED_PARAMETER(SharedCacheMap);
138 UNREFERENCED_PARAMETER(Trace);
139 #endif
140 }
141
142 NTSTATUS
143 NTAPI
144 CcRosFlushVacb (
145 PROS_VACB Vacb)
146 {
147 NTSTATUS Status;
148
149 Status = CcWriteVirtualAddress(Vacb);
150 if (NT_SUCCESS(Status))
151 {
152 CcRosUnmarkDirtyVacb(Vacb, TRUE);
153 }
154
155 return Status;
156 }
157
158 NTSTATUS
159 NTAPI
160 CcRosFlushDirtyPages (
161 ULONG Target,
162 PULONG Count,
163 BOOLEAN Wait,
164 BOOLEAN CalledFromLazy)
165 {
166 PLIST_ENTRY current_entry;
167 PROS_VACB current;
168 BOOLEAN Locked;
169 NTSTATUS Status;
170 LARGE_INTEGER ZeroTimeout;
171
172 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
173
174 (*Count) = 0;
175 ZeroTimeout.QuadPart = 0;
176
177 KeEnterCriticalRegion();
178 KeAcquireGuardedMutex(&ViewLock);
179
180 current_entry = DirtyVacbListHead.Flink;
181 if (current_entry == &DirtyVacbListHead)
182 {
183 DPRINT("No Dirty pages\n");
184 }
185
186 while ((current_entry != &DirtyVacbListHead) && (Target > 0))
187 {
188 current = CONTAINING_RECORD(current_entry,
189 ROS_VACB,
190 DirtyVacbListEntry);
191 current_entry = current_entry->Flink;
192
193 CcRosVacbIncRefCount(current);
194
195 /* When performing lazy write, don't handle temporary files */
196 if (CalledFromLazy &&
197 BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
198 {
199 CcRosVacbDecRefCount(current);
200 continue;
201 }
202
203 Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
204 current->SharedCacheMap->LazyWriteContext, Wait);
205 if (!Locked)
206 {
207 CcRosVacbDecRefCount(current);
208 continue;
209 }
210
211 Status = CcRosAcquireVacbLock(current,
212 Wait ? NULL : &ZeroTimeout);
213 if (Status != STATUS_SUCCESS)
214 {
215 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
216 current->SharedCacheMap->LazyWriteContext);
217 CcRosVacbDecRefCount(current);
218 continue;
219 }
220
221 ASSERT(current->Dirty);
222
223 /* One reference is added above */
224 if (current->ReferenceCount > 2)
225 {
226 CcRosReleaseVacbLock(current);
227 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
228 current->SharedCacheMap->LazyWriteContext);
229 CcRosVacbDecRefCount(current);
230 continue;
231 }
232
233 KeReleaseGuardedMutex(&ViewLock);
234
235 Status = CcRosFlushVacb(current);
236
237 CcRosReleaseVacbLock(current);
238 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
239 current->SharedCacheMap->LazyWriteContext);
240
241 KeAcquireGuardedMutex(&ViewLock);
242 CcRosVacbDecRefCount(current);
243
244 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
245 (Status != STATUS_MEDIA_WRITE_PROTECTED))
246 {
247 DPRINT1("CC: Failed to flush VACB.\n");
248 }
249 else
250 {
251 ULONG PagesFreed;
252
253 /* How many pages did we free? */
254 PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
255 (*Count) += PagesFreed;
256
257 /* Make sure we don't overflow target! */
258 if (Target < PagesFreed)
259 {
260 /* If we would have, jump to zero directly */
261 Target = 0;
262 }
263 else
264 {
265 Target -= PagesFreed;
266 }
267 }
268
269 current_entry = DirtyVacbListHead.Flink;
270 }
271
272 KeReleaseGuardedMutex(&ViewLock);
273 KeLeaveCriticalRegion();
274
275 DPRINT("CcRosFlushDirtyPages() finished\n");
276 return STATUS_SUCCESS;
277 }
278
279 NTSTATUS
280 CcRosTrimCache (
281 ULONG Target,
282 ULONG Priority,
283 PULONG NrFreed)
284 /*
285 * FUNCTION: Try to free some memory from the file cache.
286 * ARGUMENTS:
287 * Target - The number of pages to be freed.
288 * Priority - The priority of free (currently unused).
289 * NrFreed - Points to a variable where the number of pages
290 * actually freed is returned.
291 */
292 {
293 PLIST_ENTRY current_entry;
294 PROS_VACB current;
295 ULONG PagesFreed;
296 KIRQL oldIrql;
297 LIST_ENTRY FreeList;
298 PFN_NUMBER Page;
299 ULONG i;
300 BOOLEAN FlushedPages = FALSE;
301
302 DPRINT("CcRosTrimCache(Target %lu)\n", Target);
303
304 InitializeListHead(&FreeList);
305
306 *NrFreed = 0;
307
308 retry:
309 KeAcquireGuardedMutex(&ViewLock);
310
311 current_entry = VacbLruListHead.Flink;
312 while (current_entry != &VacbLruListHead)
313 {
314 current = CONTAINING_RECORD(current_entry,
315 ROS_VACB,
316 VacbLruListEntry);
317 current_entry = current_entry->Flink;
318
319 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
320
321 /* Reference the VACB */
322 CcRosVacbIncRefCount(current);
323
324 /* Check if it's mapped and not dirty */
325 if (current->MappedCount > 0 && !current->Dirty)
326 {
327 /* We have to break these locks because Cc sucks */
328 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
329 KeReleaseGuardedMutex(&ViewLock);
330
331 /* Page out the VACB */
332 for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
333 {
334 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
335
336 MmPageOutPhysicalAddress(Page);
337 }
338
339 /* Reacquire the locks */
340 KeAcquireGuardedMutex(&ViewLock);
341 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
342 }
343
344 /* Dereference the VACB */
345 CcRosVacbDecRefCount(current);
346
347 /* Check if we can free this entry now */
348 if (current->ReferenceCount < 2)
349 {
350 ASSERT(!current->Dirty);
351 ASSERT(!current->MappedCount);
352 ASSERT(current->ReferenceCount == 1);
353
354 RemoveEntryList(&current->CacheMapVacbListEntry);
355 RemoveEntryList(&current->VacbLruListEntry);
356 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
357
358 /* Calculate how many pages we freed for Mm */
359 PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
360 Target -= PagesFreed;
361 (*NrFreed) += PagesFreed;
362 }
363
364 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
365 }
366
367 KeReleaseGuardedMutex(&ViewLock);
368
369 /* Try flushing pages if we haven't met our target */
370 if ((Target > 0) && !FlushedPages)
371 {
372 /* Flush dirty pages to disk */
373 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
374 FlushedPages = TRUE;
375
376 /* We can only swap as many pages as we flushed */
377 if (PagesFreed < Target) Target = PagesFreed;
378
379 /* Check if we flushed anything */
380 if (PagesFreed != 0)
381 {
382 /* Try again after flushing dirty pages */
383 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
384 goto retry;
385 }
386 }
387
388 while (!IsListEmpty(&FreeList))
389 {
390 current_entry = RemoveHeadList(&FreeList);
391 current = CONTAINING_RECORD(current_entry,
392 ROS_VACB,
393 CacheMapVacbListEntry);
394 CcRosVacbDecRefCount(current);
395 CcRosInternalFreeVacb(current);
396 }
397
398 DPRINT("Evicted %lu cache pages\n", (*NrFreed));
399
400 return STATUS_SUCCESS;
401 }
402
403 NTSTATUS
404 NTAPI
405 CcRosReleaseVacb (
406 PROS_SHARED_CACHE_MAP SharedCacheMap,
407 PROS_VACB Vacb,
408 BOOLEAN Valid,
409 BOOLEAN Dirty,
410 BOOLEAN Mapped)
411 {
412 ASSERT(SharedCacheMap);
413
414 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
415 SharedCacheMap, Vacb, Valid);
416
417 Vacb->Valid = Valid;
418
419 if (Dirty && !Vacb->Dirty)
420 {
421 CcRosMarkDirtyVacb(Vacb);
422 }
423
424 if (Mapped)
425 {
426 Vacb->MappedCount++;
427 }
428 CcRosVacbDecRefCount(Vacb);
429 if (Mapped && (Vacb->MappedCount == 1))
430 {
431 CcRosVacbIncRefCount(Vacb);
432 }
433
434 ASSERT(Vacb->ReferenceCount > 0);
435
436 CcRosReleaseVacbLock(Vacb);
437
438 return STATUS_SUCCESS;
439 }
440
441 /* Returns with VACB Lock Held! */
442 PROS_VACB
443 NTAPI
444 CcRosLookupVacb (
445 PROS_SHARED_CACHE_MAP SharedCacheMap,
446 LONGLONG FileOffset)
447 {
448 PLIST_ENTRY current_entry;
449 PROS_VACB current;
450 KIRQL oldIrql;
451
452 ASSERT(SharedCacheMap);
453
454 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
455 SharedCacheMap, FileOffset);
456
457 KeAcquireGuardedMutex(&ViewLock);
458 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
459
460 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
461 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
462 {
463 current = CONTAINING_RECORD(current_entry,
464 ROS_VACB,
465 CacheMapVacbListEntry);
466 if (IsPointInRange(current->FileOffset.QuadPart,
467 VACB_MAPPING_GRANULARITY,
468 FileOffset))
469 {
470 CcRosVacbIncRefCount(current);
471 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
472 KeReleaseGuardedMutex(&ViewLock);
473 CcRosAcquireVacbLock(current, NULL);
474 return current;
475 }
476 if (current->FileOffset.QuadPart > FileOffset)
477 break;
478 current_entry = current_entry->Flink;
479 }
480
481 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
482 KeReleaseGuardedMutex(&ViewLock);
483
484 return NULL;
485 }
486
487 VOID
488 NTAPI
489 CcRosMarkDirtyVacb (
490 PROS_VACB Vacb)
491 {
492 KIRQL oldIrql;
493 PROS_SHARED_CACHE_MAP SharedCacheMap;
494
495 SharedCacheMap = Vacb->SharedCacheMap;
496
497 KeAcquireGuardedMutex(&ViewLock);
498 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
499
500 ASSERT(!Vacb->Dirty);
501
502 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
503 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
504 Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
505 CcRosVacbIncRefCount(Vacb);
506
507 /* Move to the tail of the LRU list */
508 RemoveEntryList(&Vacb->VacbLruListEntry);
509 InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
510
511 Vacb->Dirty = TRUE;
512
513 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
514 KeReleaseGuardedMutex(&ViewLock);
515
516 /* Schedule a lazy writer run to now that we have dirty VACB */
517 oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
518 if (!LazyWriter.ScanActive)
519 {
520 CcScheduleLazyWriteScan(FALSE);
521 }
522 KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
523 }
524
525 VOID
526 NTAPI
527 CcRosUnmarkDirtyVacb (
528 PROS_VACB Vacb,
529 BOOLEAN LockViews)
530 {
531 KIRQL oldIrql;
532 PROS_SHARED_CACHE_MAP SharedCacheMap;
533
534 SharedCacheMap = Vacb->SharedCacheMap;
535
536 if (LockViews)
537 {
538 KeAcquireGuardedMutex(&ViewLock);
539 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
540 }
541
542 ASSERT(Vacb->Dirty);
543
544 Vacb->Dirty = FALSE;
545
546 RemoveEntryList(&Vacb->DirtyVacbListEntry);
547 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
548 Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
549 CcRosVacbDecRefCount(Vacb);
550
551 if (LockViews)
552 {
553 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
554 KeReleaseGuardedMutex(&ViewLock);
555 }
556 }
557
558 NTSTATUS
559 NTAPI
560 CcRosMarkDirtyFile (
561 PROS_SHARED_CACHE_MAP SharedCacheMap,
562 LONGLONG FileOffset)
563 {
564 PROS_VACB Vacb;
565
566 ASSERT(SharedCacheMap);
567
568 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
569 SharedCacheMap, FileOffset);
570
571 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
572 if (Vacb == NULL)
573 {
574 KeBugCheck(CACHE_MANAGER);
575 }
576
577 CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, TRUE, FALSE);
578
579 return STATUS_SUCCESS;
580 }
581
582 /*
583 * Note: this is not the contrary function of
584 * CcRosMapVacbInKernelSpace()
585 */
586 NTSTATUS
587 NTAPI
588 CcRosUnmapVacb (
589 PROS_SHARED_CACHE_MAP SharedCacheMap,
590 LONGLONG FileOffset,
591 BOOLEAN NowDirty)
592 {
593 PROS_VACB Vacb;
594
595 ASSERT(SharedCacheMap);
596
597 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
598 SharedCacheMap, FileOffset, NowDirty);
599
600 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
601 if (Vacb == NULL)
602 {
603 return STATUS_UNSUCCESSFUL;
604 }
605
606 ASSERT(Vacb->MappedCount != 0);
607 Vacb->MappedCount--;
608
609 if (Vacb->MappedCount == 0)
610 {
611 CcRosVacbDecRefCount(Vacb);
612 }
613
614 CcRosReleaseVacb(SharedCacheMap, Vacb, Vacb->Valid, NowDirty, FALSE);
615
616 return STATUS_SUCCESS;
617 }
618
619 static
620 NTSTATUS
621 CcRosMapVacbInKernelSpace(
622 PROS_VACB Vacb)
623 {
624 ULONG i;
625 NTSTATUS Status;
626 ULONG_PTR NumberOfPages;
627
628 /* Create a memory area. */
629 MmLockAddressSpace(MmGetKernelAddressSpace());
630 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
631 0, // nothing checks for VACB mareas, so set to 0
632 &Vacb->BaseAddress,
633 VACB_MAPPING_GRANULARITY,
634 PAGE_READWRITE,
635 (PMEMORY_AREA*)&Vacb->MemoryArea,
636 0,
637 PAGE_SIZE);
638 MmUnlockAddressSpace(MmGetKernelAddressSpace());
639 if (!NT_SUCCESS(Status))
640 {
641 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
642 return Status;
643 }
644
645 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
646 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
647
648 /* Create a virtual mapping for this memory area */
649 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
650 for (i = 0; i < NumberOfPages; i++)
651 {
652 PFN_NUMBER PageFrameNumber;
653
654 MI_SET_USAGE(MI_USAGE_CACHE);
655 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
656 if (PageFrameNumber == 0)
657 {
658 DPRINT1("Unable to allocate page\n");
659 KeBugCheck(MEMORY_MANAGEMENT);
660 }
661
662 Status = MmCreateVirtualMapping(NULL,
663 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
664 PAGE_READWRITE,
665 &PageFrameNumber,
666 1);
667 if (!NT_SUCCESS(Status))
668 {
669 DPRINT1("Unable to create virtual mapping\n");
670 KeBugCheck(MEMORY_MANAGEMENT);
671 }
672 }
673
674 return STATUS_SUCCESS;
675 }
676
677 static
678 NTSTATUS
679 CcRosCreateVacb (
680 PROS_SHARED_CACHE_MAP SharedCacheMap,
681 LONGLONG FileOffset,
682 PROS_VACB *Vacb)
683 {
684 PROS_VACB current;
685 PROS_VACB previous;
686 PLIST_ENTRY current_entry;
687 NTSTATUS Status;
688 KIRQL oldIrql;
689
690 ASSERT(SharedCacheMap);
691
692 DPRINT("CcRosCreateVacb()\n");
693
694 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
695 {
696 *Vacb = NULL;
697 return STATUS_INVALID_PARAMETER;
698 }
699
700 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
701 current->BaseAddress = NULL;
702 current->Valid = FALSE;
703 current->Dirty = FALSE;
704 current->PageOut = FALSE;
705 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
706 current->SharedCacheMap = SharedCacheMap;
707 #if DBG
708 if (SharedCacheMap->Trace)
709 {
710 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
711 }
712 #endif
713 current->MappedCount = 0;
714 current->DirtyVacbListEntry.Flink = NULL;
715 current->DirtyVacbListEntry.Blink = NULL;
716 current->ReferenceCount = 0;
717 current->PinCount = 0;
718 KeInitializeMutex(&current->Mutex, 0);
719 CcRosAcquireVacbLock(current, NULL);
720 KeAcquireGuardedMutex(&ViewLock);
721
722 *Vacb = current;
723 /* There is window between the call to CcRosLookupVacb
724 * and CcRosCreateVacb. We must check if a VACB for the
725 * file offset exist. If there is a VACB, we release
726 * our newly created VACB and return the existing one.
727 */
728 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
729 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
730 previous = NULL;
731 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
732 {
733 current = CONTAINING_RECORD(current_entry,
734 ROS_VACB,
735 CacheMapVacbListEntry);
736 if (IsPointInRange(current->FileOffset.QuadPart,
737 VACB_MAPPING_GRANULARITY,
738 FileOffset))
739 {
740 CcRosVacbIncRefCount(current);
741 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
742 #if DBG
743 if (SharedCacheMap->Trace)
744 {
745 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
746 SharedCacheMap,
747 (*Vacb),
748 current);
749 }
750 #endif
751 CcRosReleaseVacbLock(*Vacb);
752 KeReleaseGuardedMutex(&ViewLock);
753 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
754 *Vacb = current;
755 CcRosAcquireVacbLock(current, NULL);
756 return STATUS_SUCCESS;
757 }
758 if (current->FileOffset.QuadPart < FileOffset)
759 {
760 ASSERT(previous == NULL ||
761 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
762 previous = current;
763 }
764 if (current->FileOffset.QuadPart > FileOffset)
765 break;
766 current_entry = current_entry->Flink;
767 }
768 /* There was no existing VACB. */
769 current = *Vacb;
770 if (previous)
771 {
772 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
773 }
774 else
775 {
776 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
777 }
778 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
779 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
780 CcRosVacbIncRefCount(current);
781 KeReleaseGuardedMutex(&ViewLock);
782
783 MI_SET_USAGE(MI_USAGE_CACHE);
784 #if MI_TRACE_PFNS
785 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
786 {
787 PWCHAR pos;
788 ULONG len = 0;
789 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
790 if (pos)
791 {
792 len = wcslen(pos) * sizeof(WCHAR);
793 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
794 }
795 else
796 {
797 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
798 }
799 }
800 #endif
801
802 Status = CcRosMapVacbInKernelSpace(current);
803 if (!NT_SUCCESS(Status))
804 {
805 RemoveEntryList(&current->CacheMapVacbListEntry);
806 RemoveEntryList(&current->VacbLruListEntry);
807 CcRosReleaseVacbLock(current);
808 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
809 }
810
811 /* Reference it to allow release */
812 CcRosVacbIncRefCount(current);
813
814 return Status;
815 }
816
817 NTSTATUS
818 NTAPI
819 CcRosGetVacb (
820 PROS_SHARED_CACHE_MAP SharedCacheMap,
821 LONGLONG FileOffset,
822 PLONGLONG BaseOffset,
823 PVOID* BaseAddress,
824 PBOOLEAN UptoDate,
825 PROS_VACB *Vacb)
826 {
827 PROS_VACB current;
828 NTSTATUS Status;
829
830 ASSERT(SharedCacheMap);
831
832 DPRINT("CcRosGetVacb()\n");
833
834 /*
835 * Look for a VACB already mapping the same data.
836 */
837 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
838 if (current == NULL)
839 {
840 /*
841 * Otherwise create a new VACB.
842 */
843 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
844 if (!NT_SUCCESS(Status))
845 {
846 return Status;
847 }
848 }
849
850 KeAcquireGuardedMutex(&ViewLock);
851
852 /* Move to the tail of the LRU list */
853 RemoveEntryList(&current->VacbLruListEntry);
854 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
855
856 KeReleaseGuardedMutex(&ViewLock);
857
858 /*
859 * Return information about the VACB to the caller.
860 */
861 *UptoDate = current->Valid;
862 *BaseAddress = current->BaseAddress;
863 DPRINT("*BaseAddress %p\n", *BaseAddress);
864 *Vacb = current;
865 *BaseOffset = current->FileOffset.QuadPart;
866
867 ASSERT(current->ReferenceCount > 1);
868
869 return STATUS_SUCCESS;
870 }
871
872 NTSTATUS
873 NTAPI
874 CcRosRequestVacb (
875 PROS_SHARED_CACHE_MAP SharedCacheMap,
876 LONGLONG FileOffset,
877 PVOID* BaseAddress,
878 PBOOLEAN UptoDate,
879 PROS_VACB *Vacb)
880 /*
881 * FUNCTION: Request a page mapping for a shared cache map
882 */
883 {
884 LONGLONG BaseOffset;
885
886 ASSERT(SharedCacheMap);
887
888 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
889 {
890 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
891 FileOffset, VACB_MAPPING_GRANULARITY);
892 KeBugCheck(CACHE_MANAGER);
893 }
894
895 return CcRosGetVacb(SharedCacheMap,
896 FileOffset,
897 &BaseOffset,
898 BaseAddress,
899 UptoDate,
900 Vacb);
901 }
902
903 static
904 VOID
905 CcFreeCachePage (
906 PVOID Context,
907 MEMORY_AREA* MemoryArea,
908 PVOID Address,
909 PFN_NUMBER Page,
910 SWAPENTRY SwapEntry,
911 BOOLEAN Dirty)
912 {
913 ASSERT(SwapEntry == 0);
914 if (Page != 0)
915 {
916 ASSERT(MmGetReferenceCountPage(Page) == 1);
917 MmReleasePageMemoryConsumer(MC_CACHE, Page);
918 }
919 }
920
921 NTSTATUS
922 CcRosInternalFreeVacb (
923 PROS_VACB Vacb)
924 /*
925 * FUNCTION: Releases a VACB associated with a shared cache map
926 */
927 {
928 DPRINT("Freeing VACB 0x%p\n", Vacb);
929 #if DBG
930 if (Vacb->SharedCacheMap->Trace)
931 {
932 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
933 }
934 #endif
935
936 MmLockAddressSpace(MmGetKernelAddressSpace());
937 MmFreeMemoryArea(MmGetKernelAddressSpace(),
938 Vacb->MemoryArea,
939 CcFreeCachePage,
940 NULL);
941 MmUnlockAddressSpace(MmGetKernelAddressSpace());
942
943 if (Vacb->PinCount != 0 || Vacb->ReferenceCount != 0)
944 {
945 DPRINT1("Invalid free: %ld, %ld\n", Vacb->ReferenceCount, Vacb->PinCount);
946 if (Vacb->SharedCacheMap->FileObject && Vacb->SharedCacheMap->FileObject->FileName.Length)
947 {
948 DPRINT1("For file: %wZ\n", &Vacb->SharedCacheMap->FileObject->FileName);
949 }
950 }
951
952 ASSERT(Vacb->PinCount == 0);
953 ASSERT(Vacb->ReferenceCount == 0);
954 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
955 return STATUS_SUCCESS;
956 }
957
958 /*
959 * @implemented
960 */
961 VOID
962 NTAPI
963 CcFlushCache (
964 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
965 IN PLARGE_INTEGER FileOffset OPTIONAL,
966 IN ULONG Length,
967 OUT PIO_STATUS_BLOCK IoStatus)
968 {
969 PROS_SHARED_CACHE_MAP SharedCacheMap;
970 LARGE_INTEGER Offset;
971 LONGLONG RemainingLength;
972 PROS_VACB current;
973 NTSTATUS Status;
974
975 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
976 SectionObjectPointers, FileOffset, Length);
977
978 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
979 SectionObjectPointers, FileOffset, Length, IoStatus);
980
981 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
982 {
983 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
984 ASSERT(SharedCacheMap);
985 if (FileOffset)
986 {
987 Offset = *FileOffset;
988 RemainingLength = Length;
989 }
990 else
991 {
992 Offset.QuadPart = 0;
993 RemainingLength = SharedCacheMap->FileSize.QuadPart;
994 }
995
996 if (IoStatus)
997 {
998 IoStatus->Status = STATUS_SUCCESS;
999 IoStatus->Information = 0;
1000 }
1001
1002 while (RemainingLength > 0)
1003 {
1004 current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
1005 if (current != NULL)
1006 {
1007 if (current->Dirty)
1008 {
1009 Status = CcRosFlushVacb(current);
1010 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1011 {
1012 IoStatus->Status = Status;
1013 }
1014 }
1015
1016 CcRosReleaseVacb(SharedCacheMap, current, current->Valid, current->Dirty, FALSE);
1017 }
1018
1019 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
1020 RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
1021 }
1022 }
1023 else
1024 {
1025 if (IoStatus)
1026 {
1027 IoStatus->Status = STATUS_INVALID_PARAMETER;
1028 }
1029 }
1030 }
1031
1032 NTSTATUS
1033 NTAPI
1034 CcRosDeleteFileCache (
1035 PFILE_OBJECT FileObject,
1036 PROS_SHARED_CACHE_MAP SharedCacheMap)
1037 /*
1038 * FUNCTION: Releases the shared cache map associated with a file object
1039 */
1040 {
1041 PLIST_ENTRY current_entry;
1042 PROS_VACB current;
1043 LIST_ENTRY FreeList;
1044 KIRQL oldIrql;
1045
1046 ASSERT(SharedCacheMap);
1047
1048 SharedCacheMap->OpenCount++;
1049 KeReleaseGuardedMutex(&ViewLock);
1050
1051 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1052
1053 KeAcquireGuardedMutex(&ViewLock);
1054 SharedCacheMap->OpenCount--;
1055 if (SharedCacheMap->OpenCount == 0)
1056 {
1057 KIRQL OldIrql;
1058
1059 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1060
1061 /*
1062 * Release all VACBs
1063 */
1064 InitializeListHead(&FreeList);
1065 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1066 while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
1067 {
1068 current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
1069 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1070
1071 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1072 CcRosAcquireVacbLock(current, NULL);
1073 RemoveEntryList(&current->VacbLruListEntry);
1074 if (current->Dirty)
1075 {
1076 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1077 CcRosUnmarkDirtyVacb(current, FALSE);
1078 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1079 DPRINT1("Freeing dirty VACB\n");
1080 }
1081 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
1082 CcRosReleaseVacbLock(current);
1083
1084 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1085 }
1086 #if DBG
1087 SharedCacheMap->Trace = FALSE;
1088 #endif
1089 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1090
1091 KeReleaseGuardedMutex(&ViewLock);
1092 ObDereferenceObject(SharedCacheMap->FileObject);
1093
1094 while (!IsListEmpty(&FreeList))
1095 {
1096 current_entry = RemoveTailList(&FreeList);
1097 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1098 CcRosVacbDecRefCount(current);
1099 CcRosInternalFreeVacb(current);
1100 }
1101
1102 OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
1103 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
1104 KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
1105
1106 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1107 KeAcquireGuardedMutex(&ViewLock);
1108 }
1109 return STATUS_SUCCESS;
1110 }
1111
1112 VOID
1113 NTAPI
1114 CcRosReferenceCache (
1115 PFILE_OBJECT FileObject)
1116 {
1117 PROS_SHARED_CACHE_MAP SharedCacheMap;
1118 KeAcquireGuardedMutex(&ViewLock);
1119 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1120 ASSERT(SharedCacheMap);
1121 ASSERT(SharedCacheMap->OpenCount != 0);
1122 SharedCacheMap->OpenCount++;
1123 KeReleaseGuardedMutex(&ViewLock);
1124 }
1125
1126 VOID
1127 NTAPI
1128 CcRosRemoveIfClosed (
1129 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1130 {
1131 PROS_SHARED_CACHE_MAP SharedCacheMap;
1132 DPRINT("CcRosRemoveIfClosed()\n");
1133 KeAcquireGuardedMutex(&ViewLock);
1134 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1135 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1136 {
1137 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1138 }
1139 KeReleaseGuardedMutex(&ViewLock);
1140 }
1141
1142
1143 VOID
1144 NTAPI
1145 CcRosDereferenceCache (
1146 PFILE_OBJECT FileObject)
1147 {
1148 PROS_SHARED_CACHE_MAP SharedCacheMap;
1149 KeAcquireGuardedMutex(&ViewLock);
1150 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1151 ASSERT(SharedCacheMap);
1152 if (SharedCacheMap->OpenCount > 0)
1153 {
1154 SharedCacheMap->OpenCount--;
1155 if (SharedCacheMap->OpenCount == 0)
1156 {
1157 MmFreeSectionSegments(SharedCacheMap->FileObject);
1158 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1159 }
1160 }
1161 KeReleaseGuardedMutex(&ViewLock);
1162 }
1163
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 * Tears down the per-handle private cache map and drops one reference on
 * the shared cache map, deleting the whole file cache when the last
 * reference disappears. Always returns STATUS_SUCCESS.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    /* ViewLock guards the shared cache map and its OpenCount */
    KeAcquireGuardedMutex(&ViewLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map
         * Before you even try to remove it from the FO, always
         * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing!
         */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        if (PrivateMap != NULL)
        {
            /* Remove it from the file's list of private maps,
             * under the shared map's own spinlock */
            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

            /* And free it. The first private map is embedded in the shared
             * map (see CcRosInitializeFileCache); that one is only marked
             * free by zeroing its node type, never pool-freed. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            /* Drop the reference this handle held; on the last close,
             * free the section segments and delete the file cache */
            if (SharedCacheMap->OpenCount > 0)
            {
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}
1224
NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 * Creates the shared cache map on first caller (one per file) and a
 * private cache map for this particular handle. Returns
 * STATUS_INSUFFICIENT_RESOURCES on allocation failure (rolling back the
 * shared map if it was created here), STATUS_SUCCESS otherwise.
 */
{
    KIRQL OldIrql;
    BOOLEAN Allocated;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    /* Allocated tracks whether *we* created the shared map, so the error
     * path below knows whether to tear it back down */
    Allocated = FALSE;
    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        Allocated = TRUE;
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        /* The shared map keeps the file object alive for its whole lifetime.
         * NOTE(review): the return value is ignored here — presumably it
         * cannot fail for a valid kernel-mode pointer; confirm. */
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
        SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        InitializeListHead(&SharedCacheMap->PrivateList);
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        /* Publish the map on the file; from here other threads can see it */
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        /* The global clean-maps list is guarded by the master lock */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }
    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle: the first handle
         * uses the map embedded in the shared map (NodeTypeCode == 0 means
         * it is free); later handles get a pool allocation */
        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
        {
            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        }
        else
        {
            PrivateMap = &SharedCacheMap->PrivateCacheMap;
        }

        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
                RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
                KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

                /* Unpublish, drop our file-object reference, free the map */
                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

        /* Each handle with a private map holds one shared-map reference,
         * released in CcRosReleaseFileCache */
        FileObject->PrivateCacheMap = PrivateMap;
        SharedCacheMap->OpenCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}
1331
1332 /*
1333 * @implemented
1334 */
1335 PFILE_OBJECT
1336 NTAPI
1337 CcGetFileObjectFromSectionPtrs (
1338 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1339 {
1340 PROS_SHARED_CACHE_MAP SharedCacheMap;
1341
1342 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1343
1344 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1345 {
1346 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1347 ASSERT(SharedCacheMap);
1348 return SharedCacheMap->FileObject;
1349 }
1350 return NULL;
1351 }
1352
1353 VOID
1354 INIT_FUNCTION
1355 NTAPI
1356 CcInitView (
1357 VOID)
1358 {
1359 DPRINT("CcInitView()\n");
1360
1361 InitializeListHead(&DirtyVacbListHead);
1362 InitializeListHead(&VacbLruListHead);
1363 InitializeListHead(&CcDeferredWrites);
1364 InitializeListHead(&CcCleanSharedCacheMapList);
1365 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1366 KeInitializeGuardedMutex(&ViewLock);
1367 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1368 NULL,
1369 NULL,
1370 0,
1371 sizeof(INTERNAL_BCB),
1372 TAG_BCB,
1373 20);
1374 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1375 NULL,
1376 NULL,
1377 0,
1378 sizeof(ROS_SHARED_CACHE_MAP),
1379 TAG_SHARED_CACHE_MAP,
1380 20);
1381 ExInitializeNPagedLookasideList(&VacbLookasideList,
1382 NULL,
1383 NULL,
1384 0,
1385 sizeof(ROS_VACB),
1386 TAG_VACB,
1387 20);
1388
1389 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1390
1391 CcInitCacheZeroPage();
1392 }
1393
1394 #if DBG && defined(KDBG)
1395 BOOLEAN
1396 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1397 {
1398 PLIST_ENTRY ListEntry;
1399 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1400
1401 KdbpPrint(" Usage Summary (in kb)\n");
1402 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1403 /* No need to lock the spin lock here, we're in DBG */
1404 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1405 ListEntry != &CcCleanSharedCacheMapList;
1406 ListEntry = ListEntry->Flink)
1407 {
1408 PLIST_ENTRY Vacbs;
1409 ULONG Valid = 0, Dirty = 0;
1410 PROS_SHARED_CACHE_MAP SharedCacheMap;
1411 PUNICODE_STRING FileName;
1412
1413 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1414
1415 /* Dirty size */
1416 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1417
1418 /* First, count for all the associated VACB */
1419 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1420 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1421 Vacbs = Vacbs->Flink)
1422 {
1423 PROS_VACB Vacb;
1424
1425 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1426 if (Vacb->Valid)
1427 {
1428 Valid += VACB_MAPPING_GRANULARITY / 1024;
1429 }
1430 }
1431
1432 /* Setup name */
1433 if (SharedCacheMap->FileObject != NULL &&
1434 SharedCacheMap->FileObject->FileName.Length != 0)
1435 {
1436 FileName = &SharedCacheMap->FileObject->FileName;
1437 }
1438 else
1439 {
1440 FileName = &NoName;
1441 }
1442
1443 /* And print */
1444 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
1445 }
1446
1447 return TRUE;
1448 }
1449
1450 BOOLEAN
1451 ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
1452 {
1453 KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
1454 (CcTotalDirtyPages * PAGE_SIZE) / 1024);
1455 KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
1456 (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
1457 KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
1458 (MmAvailablePages * PAGE_SIZE) / 1024);
1459 KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
1460 (MmThrottleTop * PAGE_SIZE) / 1024);
1461 KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
1462 (MmThrottleBottom * PAGE_SIZE) / 1024);
1463 KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
1464 (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);
1465
1466 if (CcTotalDirtyPages >= CcDirtyPageThreshold)
1467 {
1468 KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
1469 }
1470 else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
1471 {
1472 KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
1473 }
1474 else
1475 {
1476 KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
1477 }
1478
1479 return TRUE;
1480 }
1481 #endif
1482
1483 /* EOF */