cf2d2c5cb75aedda1c8f6056f5256486831e21a9
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
54 /* Internal vars (MS):
55 * - Threshold above which lazy writer will start action
56 * - Amount of dirty pages
57 * - List for deferred writes
58 * - Spinlock when dealing with the deferred list
59 * - List for "clean" shared cache maps
60 */
61 ULONG CcDirtyPageThreshold = 0;
62 ULONG CcTotalDirtyPages = 0;
63 LIST_ENTRY CcDeferredWrites;
64 KSPIN_LOCK CcDeferredWriteSpinLock;
65 LIST_ENTRY CcCleanSharedCacheMapList;
66
#if DBG
/* Checked-build refcount helpers: besides adjusting the count, they log
 * every change (with caller file/line) for VACBs whose shared cache map
 * has tracing enabled via CcRosTraceCacheMap. */
static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
{
    ++vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
    /* A VACB must never be released more often than it was referenced,
     * and a dirty VACB always holds one extra reference (taken in
     * CcRosMarkDirtyVacb), so the count cannot hit zero while dirty. */
    ASSERT(vacb->ReferenceCount != 0);
    --vacb->ReferenceCount;
    ASSERT(!(vacb->ReferenceCount == 0 && vacb->Dirty));
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
/* Free build: bare increment/decrement, no tracing.
 * NOTE(review): these are plain non-atomic ops; callers appear to
 * serialize via ViewLock / the per-map spinlock -- verify before relying
 * on this from any new call site. */
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
94
95 NTSTATUS
96 CcRosInternalFreeVacb(PROS_VACB Vacb);
97
98
99 /* FUNCTIONS *****************************************************************/
100
/*
 * Enable or disable refcount tracing for a shared cache map.
 * Only effective on checked builds (DBG); a no-op otherwise.
 * When enabling, the current state of every VACB of the map is dumped.
 */
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        /* Lock order used throughout this file: ViewLock first,
         * then the per-map CacheMapLock spinlock. */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
146
147 NTSTATUS
148 NTAPI
149 CcRosFlushVacb (
150 PROS_VACB Vacb)
151 {
152 NTSTATUS Status;
153
154 Status = CcWriteVirtualAddress(Vacb);
155 if (NT_SUCCESS(Status))
156 {
157 CcRosUnmarkDirtyVacb(Vacb, TRUE);
158 }
159
160 return Status;
161 }
162
/*
 * Write back up to Target pages worth of dirty VACBs.
 * ARGUMENTS:
 *   Target - Number of dirty pages to try to flush.
 *   Count  - Receives the number of pages actually flushed.
 *   Wait   - If TRUE, block on the lazy-write callback and the VACB lock;
 *            otherwise skip any VACB that cannot be locked immediately.
 *   CalledFromLazy - TRUE when invoked by the lazy writer, which must not
 *                    touch temporary files.
 * Always returns STATUS_SUCCESS; individual flush failures are logged.
 */
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;
    ZeroTimeout.QuadPart = 0;

    /* Disable kernel APCs for the duration of the guarded-mutex region */
    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Keep the VACB alive across the lock drops below */
        CcRosVacbIncRefCount(current);

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Let the owning FS take its lazy-write synchronization first */
        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* With Wait == FALSE, poll the VACB mutex (zero timeout) */
        Status = CcRosAcquireVacbLock(current,
                                      Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above; the dirty state holds a second
         * one. More than two means somebody else is actively using the
         * VACB, so skip it instead of writing data that may change. */
        if (current->ReferenceCount > 2)
        {
            CcRosReleaseVacbLock(current);
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Drop ViewLock across the actual disk I/O */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        CcRosReleaseVacbLock(current);
        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            (*Count) += PagesFreed;

            /* Make sure we don't overflow target! */
            if (Target < PagesFreed)
            {
                /* If we would have, jump to zero directly */
                Target = 0;
            }
            else
            {
                Target -= PagesFreed;
            }
        }

        /* The dirty list may have changed while ViewLock was dropped,
         * so restart from the head instead of using a stale link. */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
283
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 * Walks the LRU list, pages out mapped clean views, collects unreferenced
 * VACBs on a private list and frees them once all locks are released. If
 * the target is not met on the first pass, dirty pages are flushed once
 * and the scan retried.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Scan front-to-back: least recently used views first */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

            /* NOTE(review): current_entry was captured before the locks
             * were dropped above; if the LRU list changed meanwhile, the
             * iteration continues through a possibly stale link -- verify. */
        }

        /* Dereference the VACB */
        CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Unlink it and queue it for freeing after all locks are gone */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Now actually release the unlinked VACBs (no locks held) */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        CcRosInternalFreeVacb(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
405
/*
 * Release a VACB previously obtained via CcRosGetVacb/CcRosRequestVacb.
 * Updates the valid/dirty state, maintains the mapping count, drops the
 * caller's reference and releases the per-VACB lock.
 */
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    Vacb->Valid = Valid;

    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        Vacb->MappedCount++;
    }
    CcRosVacbDecRefCount(Vacb);
    /* The first mapping pins the VACB with an extra reference; it is
     * dropped again in CcRosUnmapVacb once MappedCount returns to zero. */
    if (Mapped && (Vacb->MappedCount == 1))
    {
        CcRosVacbIncRefCount(Vacb);
    }

    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}
441
/* Returns with VACB Lock Held! */
/*
 * Find the VACB covering FileOffset in the given shared cache map.
 * On success the VACB is returned referenced and with its mutex held;
 * returns NULL if no VACB currently maps that offset.
 */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Pin the VACB before dropping the list locks, then take its
             * mutex outside of them since that acquire may block. */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            CcRosAcquireVacbLock(current, NULL);
            return current;
        }
        /* The per-map list is kept sorted by file offset (see
         * CcRosCreateVacb), so the search can stop early. */
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}
487
/*
 * Put a VACB on the global dirty list and update the dirty-page counters.
 * Takes an extra reference (dropped again by CcRosUnmarkDirtyVacb) so a
 * dirty VACB can never be freed, then kicks the lazy writer if idle.
 * The VACB must not already be dirty.
 */
VOID
NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    /* A VACB always accounts for a full mapping granularity of pages */
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}
525
/*
 * Remove a VACB from the global dirty list, fix up the dirty-page
 * accounting and drop the reference taken by CcRosMarkDirtyVacb.
 * LockViews selects whether ViewLock and the cache-map spinlock are
 * acquired here (TRUE) or are already held by the caller (FALSE).
 * The VACB must currently be dirty.
 */
VOID
NTAPI
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }
}
558
559 NTSTATUS
560 NTAPI
561 CcRosMarkDirtyFile (
562 PROS_SHARED_CACHE_MAP SharedCacheMap,
563 LONGLONG FileOffset)
564 {
565 PROS_VACB Vacb;
566
567 ASSERT(SharedCacheMap);
568
569 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
570 SharedCacheMap, FileOffset);
571
572 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
573 if (Vacb == NULL)
574 {
575 KeBugCheck(CACHE_MANAGER);
576 }
577
578 if (!Vacb->Dirty)
579 {
580 CcRosMarkDirtyVacb(Vacb);
581 }
582
583 CcRosReleaseVacbLock(Vacb);
584
585 return STATUS_SUCCESS;
586 }
587
/*
 * Drop one mapping of the VACB at FileOffset, optionally marking it dirty
 * first. When the last mapping goes away, the extra reference taken for
 * the first mapping (see CcRosReleaseVacb) is released as well.
 * Returns STATUS_UNSUCCESSFUL if no VACB maps the offset.
 */
NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    BOOLEAN NowDirty)
{
    PROS_VACB Vacb;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    /* Returns referenced and with the VACB lock held */
    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    if (NowDirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    ASSERT(Vacb->MappedCount != 0);
    Vacb->MappedCount--;

    /* Drop the lookup reference; drop the mapping reference too if this
     * was the last mapping of the view. */
    CcRosVacbDecRefCount(Vacb);
    if (Vacb->MappedCount == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}
626
627 static
628 NTSTATUS
629 CcRosMapVacb(
630 PROS_VACB Vacb)
631 {
632 ULONG i;
633 NTSTATUS Status;
634 ULONG_PTR NumberOfPages;
635
636 /* Create a memory area. */
637 MmLockAddressSpace(MmGetKernelAddressSpace());
638 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
639 0, // nothing checks for VACB mareas, so set to 0
640 &Vacb->BaseAddress,
641 VACB_MAPPING_GRANULARITY,
642 PAGE_READWRITE,
643 (PMEMORY_AREA*)&Vacb->MemoryArea,
644 0,
645 PAGE_SIZE);
646 MmUnlockAddressSpace(MmGetKernelAddressSpace());
647 if (!NT_SUCCESS(Status))
648 {
649 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
650 return Status;
651 }
652
653 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
654 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
655
656 /* Create a virtual mapping for this memory area */
657 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
658 for (i = 0; i < NumberOfPages; i++)
659 {
660 PFN_NUMBER PageFrameNumber;
661
662 MI_SET_USAGE(MI_USAGE_CACHE);
663 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
664 if (PageFrameNumber == 0)
665 {
666 DPRINT1("Unable to allocate page\n");
667 KeBugCheck(MEMORY_MANAGEMENT);
668 }
669
670 Status = MmCreateVirtualMapping(NULL,
671 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
672 PAGE_READWRITE,
673 &PageFrameNumber,
674 1);
675 if (!NT_SUCCESS(Status))
676 {
677 DPRINT1("Unable to create virtual mapping\n");
678 KeBugCheck(MEMORY_MANAGEMENT);
679 }
680 }
681
682 return STATUS_SUCCESS;
683 }
684
/*
 * Allocate, insert and map a new VACB for the given file offset.
 * On success the new VACB is returned locked and referenced. If another
 * thread created a VACB for the same range between the caller's lookup
 * and this call, the freshly allocated one is discarded and the existing
 * one is returned (locked and referenced) instead.
 */
static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    /* Never map beyond the section size */
    if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
    {
        *Vacb = NULL;
        return STATUS_INVALID_PARAMETER;
    }

    /* NOTE(review): the lookaside allocation result is not checked for
     * NULL before being dereferenced -- verify allocation policy. */
    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    current->BaseAddress = NULL;
    current->Valid = FALSE;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    /* Views are always aligned on the mapping granularity */
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
    }
#endif
    current->MappedCount = 0;
    current->DirtyVacbListEntry.Flink = NULL;
    current->DirtyVacbListEntry.Blink = NULL;
    current->ReferenceCount = 1;
    current->PinCount = 0;
    KeInitializeMutex(&current->Mutex, 0);
    CcRosAcquireVacbLock(current, NULL);
    KeAcquireGuardedMutex(&ViewLock);

    *Vacb = current;
    /* There is window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check if a VACB for the
     * file offset exist. If there is a VACB, we release
     * our newly created VACB and return the existing one.
     */
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Lost the race: adopt the existing VACB */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            CcRosReleaseVacbLock(*Vacb);
            KeReleaseGuardedMutex(&ViewLock);
            ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
            *Vacb = current;
            CcRosAcquireVacbLock(current, NULL);
            return STATUS_SUCCESS;
        }
        /* Track the insertion point: the per-map list stays sorted by
         * ascending file offset. */
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
    KeReleaseGuardedMutex(&ViewLock);

    MI_SET_USAGE(MI_USAGE_CACHE);
#if MI_TRACE_PFNS
    /* Tag the PFN tracker with (part of) the file name for diagnostics */
    if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
    {
        PWCHAR pos;
        ULONG len = 0;
        pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
        if (pos)
        {
            len = wcslen(pos) * sizeof(WCHAR);
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
        }
        else
        {
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
        }
    }
#endif

    Status = CcRosMapVacb(current);
    if (!NT_SUCCESS(Status))
    {
        /* Back out the list insertions before freeing the VACB */
        RemoveEntryList(&current->CacheMapVacbListEntry);
        RemoveEntryList(&current->VacbLruListEntry);
        CcRosReleaseVacbLock(current);
        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
    }

    return Status;
}
820
/*
 * Return a locked, referenced VACB mapping FileOffset, creating one if
 * necessary, and refresh its position in the LRU list.
 * OUT: BaseOffset  - start offset of the view within the file;
 *      BaseAddress - kernel virtual address of the view;
 *      UptoDate    - whether the view already contains valid data.
 */
NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PLONGLONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the VACB to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *Vacb = current;
    *BaseOffset = current->FileOffset.QuadPart;
    return STATUS_SUCCESS;
}
872
873 NTSTATUS
874 NTAPI
875 CcRosRequestVacb (
876 PROS_SHARED_CACHE_MAP SharedCacheMap,
877 LONGLONG FileOffset,
878 PVOID* BaseAddress,
879 PBOOLEAN UptoDate,
880 PROS_VACB *Vacb)
881 /*
882 * FUNCTION: Request a page mapping for a shared cache map
883 */
884 {
885 LONGLONG BaseOffset;
886
887 ASSERT(SharedCacheMap);
888
889 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
890 {
891 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
892 FileOffset, VACB_MAPPING_GRANULARITY);
893 KeBugCheck(CACHE_MANAGER);
894 }
895
896 return CcRosGetVacb(SharedCacheMap,
897 FileOffset,
898 &BaseOffset,
899 BaseAddress,
900 UptoDate,
901 Vacb);
902 }
903
904 static
905 VOID
906 CcFreeCachePage (
907 PVOID Context,
908 MEMORY_AREA* MemoryArea,
909 PVOID Address,
910 PFN_NUMBER Page,
911 SWAPENTRY SwapEntry,
912 BOOLEAN Dirty)
913 {
914 ASSERT(SwapEntry == 0);
915 if (Page != 0)
916 {
917 ASSERT(MmGetReferenceCountPage(Page) == 1);
918 MmReleasePageMemoryConsumer(MC_CACHE, Page);
919 }
920 }
921
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 * Tears down the memory area (returning the pages via CcFreeCachePage)
 * and returns the VACB structure to its lookaside list. The caller must
 * have already unlinked the VACB from all lists.
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
947
/*
 * @implemented
 */
/*
 * Flush the dirty cached data of a file (or a sub-range of it) to disk.
 * FileOffset == NULL means flush from offset 0 for the whole FileSize.
 * IoStatus (optional) receives the first flush failure, or success.
 * NOTE(review): IoStatus->Information is set to 0 and never updated with
 * the number of bytes flushed -- confirm whether callers rely on it.
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;
    KIRQL oldIrql;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
        SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            /* No offset given: flush the entire file */
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one view (mapping granularity) at a time */
        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                CcRosReleaseVacbLock(current);

                /* Drop the reference that CcRosLookupVacb took */
                KeAcquireGuardedMutex(&ViewLock);
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosVacbDecRefCount(current);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                KeReleaseGuardedMutex(&ViewLock);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1028
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 * Called with ViewLock held; temporarily drops and reacquires it. Flushes
 * all dirty data, frees every VACB, unlinks the map and returns it to the
 * lookaside list once the open count reaches zero.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    /* Temporarily bump the open count so a concurrent caller cannot
     * start deleting the map again while we drop ViewLock to flush. */
    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        KIRQL OldIrql;

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            /* Drop the spinlock before blocking on the VACB mutex */
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosAcquireVacbLock(current, NULL);
            RemoveEntryList(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* ViewLock already held; only take the map spinlock */
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosUnmarkDirtyVacb(current, FALSE);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                DPRINT1("Freeing dirty VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
            CcRosReleaseVacbLock(current);

            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        /* Free the collected VACBs with no locks held */
        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosInternalFreeVacb(current);
        }

        /* Unlink the map from the global clean-maps list */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        /* Reacquire ViewLock: the caller expects it held on return */
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1107
1108 VOID
1109 NTAPI
1110 CcRosReferenceCache (
1111 PFILE_OBJECT FileObject)
1112 {
1113 PROS_SHARED_CACHE_MAP SharedCacheMap;
1114 KeAcquireGuardedMutex(&ViewLock);
1115 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1116 ASSERT(SharedCacheMap);
1117 ASSERT(SharedCacheMap->OpenCount != 0);
1118 SharedCacheMap->OpenCount++;
1119 KeReleaseGuardedMutex(&ViewLock);
1120 }
1121
1122 VOID
1123 NTAPI
1124 CcRosRemoveIfClosed (
1125 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1126 {
1127 PROS_SHARED_CACHE_MAP SharedCacheMap;
1128 DPRINT("CcRosRemoveIfClosed()\n");
1129 KeAcquireGuardedMutex(&ViewLock);
1130 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1131 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1132 {
1133 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1134 }
1135 KeReleaseGuardedMutex(&ViewLock);
1136 }
1137
1138
/*
 * Drop one open reference on the shared cache map of a file. When the
 * count reaches zero, the section segments are freed and the cache map
 * is deleted.
 */
VOID
NTAPI
CcRosDereferenceCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    if (SharedCacheMap->OpenCount > 0)
    {
        SharedCacheMap->OpenCount--;
        if (SharedCacheMap->OpenCount == 0)
        {
            /* Last reference: tear down section data, then the map.
             * CcRosDeleteFileCache returns with ViewLock still held. */
            MmFreeSectionSegments(SharedCacheMap->FileObject);
            CcRosDeleteFileCache(FileObject, SharedCacheMap);
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
}
1159
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 * Tears down the file object's private cache map and drops one open
 * reference on the shared cache map, deleting it on the last close.
 */
{
    KIRQL OldIrql;
    PPRIVATE_CACHE_MAP PrivateMap;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        /* Closing the handle, so kill the private cache map
         * Before you event try to remove it from FO, always
         * lock the master lock, to be sure not to race
         * with a potential read ahead ongoing!
         */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        PrivateMap = FileObject->PrivateCacheMap;
        FileObject->PrivateCacheMap = NULL;
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

        if (PrivateMap != NULL)
        {
            /* Remove it from the file */
            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

            /* And free it.
             * The first private map is embedded in the shared map and
             * must not be freed to the pool; just mark it unused. */
            if (PrivateMap != &SharedCacheMap->PrivateCacheMap)
            {
                ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);
            }
            else
            {
                PrivateMap->NodeTypeCode = 0;
            }

            if (SharedCacheMap->OpenCount > 0)
            {
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
                    /* Last close: free section data, then the map itself.
                     * CcRosDeleteFileCache returns with ViewLock held. */
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}
1220
NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 * Creates the per-file shared cache map on first use, and a per-handle
 * private cache map for this file object if it does not have one yet.
 * Returns STATUS_SUCCESS, or STATUS_INSUFFICIENT_RESOURCES on
 * allocation failure (rolling back the shared map if it was created
 * by this call).
 */
{
    KIRQL OldIrql;
    BOOLEAN Allocated;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    Allocated = FALSE;
    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        /* First caller for this file: build the shared cache map */
        Allocated = TRUE;
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        /* Keep the file object alive for the lifetime of the cache map.
         * NOTE(review): return status is ignored — presumably this cannot
         * fail for a KernelMode reference; confirm. */
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
        SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->PinAccess = PinAccess;
        /* 0 means "no per-file limit": the global threshold applies */
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        InitializeListHead(&SharedCacheMap->PrivateList);
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        /* Publish the map; ViewLock is held, so no one can race us here */
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        /* A new cache map starts with no dirty pages: put it on the
         * clean list, under the master lock that protects it */
        OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
    }
    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle.
         * A non-zero node type means the map embedded in the shared cache
         * map is already in use by another handle, so fall back to pool. */
        if (SharedCacheMap->PrivateCacheMap.NodeTypeCode != 0)
        {
            PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        }
        else
        {
            PrivateMap = &SharedCacheMap->PrivateCacheMap;
        }

        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
                RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
                KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);

                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                /* Drop the reference taken above */
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        /* Default read-ahead granularity: one page */
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file, under the per-file CacheMapLock */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

        /* Each handle with a private map holds one shared map reference
         * (released in CcRosReleaseFileCache) */
        FileObject->PrivateCacheMap = PrivateMap;
        SharedCacheMap->OpenCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}
1327
1328 /*
1329 * @implemented
1330 */
1331 PFILE_OBJECT
1332 NTAPI
1333 CcGetFileObjectFromSectionPtrs (
1334 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1335 {
1336 PROS_SHARED_CACHE_MAP SharedCacheMap;
1337
1338 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1339
1340 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1341 {
1342 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1343 ASSERT(SharedCacheMap);
1344 return SharedCacheMap->FileObject;
1345 }
1346 return NULL;
1347 }
1348
1349 VOID
1350 INIT_FUNCTION
1351 NTAPI
1352 CcInitView (
1353 VOID)
1354 {
1355 DPRINT("CcInitView()\n");
1356
1357 InitializeListHead(&DirtyVacbListHead);
1358 InitializeListHead(&VacbLruListHead);
1359 InitializeListHead(&CcDeferredWrites);
1360 InitializeListHead(&CcCleanSharedCacheMapList);
1361 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1362 KeInitializeGuardedMutex(&ViewLock);
1363 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1364 NULL,
1365 NULL,
1366 0,
1367 sizeof(INTERNAL_BCB),
1368 TAG_BCB,
1369 20);
1370 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1371 NULL,
1372 NULL,
1373 0,
1374 sizeof(ROS_SHARED_CACHE_MAP),
1375 TAG_SHARED_CACHE_MAP,
1376 20);
1377 ExInitializeNPagedLookasideList(&VacbLookasideList,
1378 NULL,
1379 NULL,
1380 0,
1381 sizeof(ROS_VACB),
1382 TAG_VACB,
1383 20);
1384
1385 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1386
1387 CcInitCacheZeroPage();
1388 }
1389
1390 #if DBG && defined(KDBG)
1391 BOOLEAN
1392 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1393 {
1394 PLIST_ENTRY ListEntry;
1395 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1396
1397 KdbpPrint(" Usage Summary (in kb)\n");
1398 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1399 /* No need to lock the spin lock here, we're in DBG */
1400 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1401 ListEntry != &CcCleanSharedCacheMapList;
1402 ListEntry = ListEntry->Flink)
1403 {
1404 PLIST_ENTRY Vacbs;
1405 ULONG Valid = 0, Dirty = 0;
1406 PROS_SHARED_CACHE_MAP SharedCacheMap;
1407 PUNICODE_STRING FileName;
1408
1409 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1410
1411 /* Dirty size */
1412 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1413
1414 /* First, count for all the associated VACB */
1415 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1416 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1417 Vacbs = Vacbs->Flink)
1418 {
1419 PROS_VACB Vacb;
1420
1421 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1422 if (Vacb->Valid)
1423 {
1424 Valid += VACB_MAPPING_GRANULARITY / 1024;
1425 }
1426 }
1427
1428 /* Setup name */
1429 if (SharedCacheMap->FileObject != NULL &&
1430 SharedCacheMap->FileObject->FileName.Length != 0)
1431 {
1432 FileName = &SharedCacheMap->FileObject->FileName;
1433 }
1434 else
1435 {
1436 FileName = &NoName;
1437 }
1438
1439 /* And print */
1440 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
1441 }
1442
1443 return TRUE;
1444 }
1445
1446 BOOLEAN
1447 ExpKdbgExtDefWrites(ULONG Argc, PCHAR Argv[])
1448 {
1449 KdbpPrint("CcTotalDirtyPages:\t%lu (%lu Kb)\n", CcTotalDirtyPages,
1450 (CcTotalDirtyPages * PAGE_SIZE) / 1024);
1451 KdbpPrint("CcDirtyPageThreshold:\t%lu (%lu Kb)\n", CcDirtyPageThreshold,
1452 (CcDirtyPageThreshold * PAGE_SIZE) / 1024);
1453 KdbpPrint("MmAvailablePages:\t%lu (%lu Kb)\n", MmAvailablePages,
1454 (MmAvailablePages * PAGE_SIZE) / 1024);
1455 KdbpPrint("MmThrottleTop:\t\t%lu (%lu Kb)\n", MmThrottleTop,
1456 (MmThrottleTop * PAGE_SIZE) / 1024);
1457 KdbpPrint("MmThrottleBottom:\t%lu (%lu Kb)\n", MmThrottleBottom,
1458 (MmThrottleBottom * PAGE_SIZE) / 1024);
1459 KdbpPrint("MmModifiedPageListHead.Total:\t%lu (%lu Kb)\n", MmModifiedPageListHead.Total,
1460 (MmModifiedPageListHead.Total * PAGE_SIZE) / 1024);
1461
1462 if (CcTotalDirtyPages >= CcDirtyPageThreshold)
1463 {
1464 KdbpPrint("CcTotalDirtyPages above the threshold, writes should be throttled\n");
1465 }
1466 else if (CcTotalDirtyPages + 64 >= CcDirtyPageThreshold)
1467 {
1468 KdbpPrint("CcTotalDirtyPages within 64 (max charge) pages of the threshold, writes may be throttled\n");
1469 }
1470 else
1471 {
1472 KdbpPrint("CcTotalDirtyPages below the threshold, writes should not be throttled\n");
1473 }
1474
1475 return TRUE;
1476 }
1477 #endif
1478
1479 /* EOF */