[NTOSKRNL] Save system size on init in CcCapturedSystemSize
/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/cc/view.c
 * PURPOSE:         Cache manager
 *
 * PROGRAMMERS:     David Welch (welch@mcmail.com)
 *                  Pierre Schweitzer (pierre@reactos.org)
 */

/* NOTES **********************************************************************
 *
 * This is not the NT implementation of a file cache nor anything much like
 * it.
 *
 * The general procedure for a filesystem to implement a read or write
 * dispatch routine is as follows:
 *
 * (1) If caching for the FCB hasn't been initiated, then do so by calling
 * CcInitializeFileCache.
 *
 * (2) For each 4k region which is being read or written, obtain a cache page
 * by calling CcRequestCachePage.
 *
 * (3) If the page is being read, or is not being completely written, and it
 * is not up to date, then read its data from the underlying medium. If the
 * read fails, call CcReleaseCachePage with VALID as FALSE and return an error.
 *
 * (4) Copy the data into or out of the page as necessary.
 *
 * (5) Release the cache page.
 */
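
/* Illustrative sketch only: a minimal read path following steps (1)-(5) above.
 * The FCB layout, the MyReadFromDisk helper, and the exact prototypes of the
 * CcInitializeFileCache/CcRequestCachePage/CcReleaseCachePage routines named in
 * the note are assumptions for illustration; they are not the interface exported
 * by this file. Offset and Length are assumed 4k-aligned for brevity.
 *
 *   NTSTATUS MyRead(PMY_FCB Fcb, LONGLONG Offset, ULONG Length, PVOID Buffer)
 *   {
 *       PVOID CachePage;
 *       BOOLEAN Valid;
 *
 *       if (!Fcb->CacheInitialized)                         // (1) set up caching once
 *           CcInitializeFileCache(Fcb->FileObject);
 *
 *       while (Length > 0)
 *       {
 *           CcRequestCachePage(Fcb, Offset, &CachePage, &Valid);   // (2) get a page
 *           if (!Valid)                                      // (3) not up to date yet
 *           {
 *               if (!NT_SUCCESS(MyReadFromDisk(Fcb, Offset, CachePage)))
 *               {
 *                   CcReleaseCachePage(Fcb, CachePage, FALSE);
 *                   return STATUS_UNEXPECTED_IO_ERROR;
 *               }
 *           }
 *           RtlCopyMemory(Buffer, CachePage, 4096);          // (4) copy out of the page
 *           CcReleaseCachePage(Fcb, CachePage, TRUE);        // (5) release the page
 *           Offset += 4096; Buffer = (PUCHAR)Buffer + 4096; Length -= 4096;
 *       }
 *       return STATUS_SUCCESS;
 *   }
 */
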
/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#if defined (ALLOC_PRAGMA)
#pragma alloc_text(INIT, CcInitView)
#endif

/* GLOBALS *******************************************************************/

LIST_ENTRY DirtyVacbListHead;
static LIST_ENTRY VacbLruListHead;

KGUARDED_MUTEX ViewLock;

NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Counters:
 * - Amount of pages flushed by the lazy writer
 * - Number of times the lazy writer ran
 */
ULONG CcLazyWritePages = 0;
ULONG CcLazyWriteIos = 0;

/* Internal vars (MS):
 * - Threshold above which the lazy writer will take action
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock protecting the deferred write list
 * - List for "clean" shared cache maps
 * - One second delay for the lazy writer (relative time, in 100-ns units)
 * - System size captured when the system started
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;
LARGE_INTEGER CcIdleDelay = RTL_CONSTANT_LARGE_INTEGER((LONGLONG)-1*1000*1000*10);
MM_SYSTEMSIZE CcCapturedSystemSize;

/* Internal vars (ROS):
 * - Event to notify the lazy writer to shut down
 * - Event to inform watchers that the lazy writer is done for this loop
 * - Lock for the CcCleanSharedCacheMapList list
 */
KEVENT iLazyWriterShutdown;
KEVENT iLazyWriterNotify;
KSPIN_LOCK iSharedCacheMapLock;

#if DBG
static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
{
    ++vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
    ASSERT(vacb->ReferenceCount != 0);
    --vacb->ReferenceCount;
    ASSERT(!(vacb->ReferenceCount == 0 && vacb->Dirty));
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif

NTSTATUS
CcRosInternalFreeVacb(PROS_VACB Vacb);


/* FUNCTIONS *****************************************************************/

VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut);
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}

NTSTATUS
NTAPI
CcRosFlushVacb (
    PROS_VACB Vacb)
{
    NTSTATUS Status;

    Status = CcWriteVirtualAddress(Vacb);
    if (NT_SUCCESS(Status))
    {
        CcRosUnmarkDirtyVacb(Vacb, TRUE);
    }

    return Status;
}

NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;
    ZeroTimeout.QuadPart = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        CcRosVacbIncRefCount(current);

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        Status = CcRosAcquireVacbLock(current,
                                      Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above */
        if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
            (current->ReferenceCount > 3 && current->PinCount > 1))
        {
            CcRosReleaseVacbLock(current);
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        CcRosReleaseVacbLock(current);
        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            (*Count) += PagesFreed;

            /* Make sure we don't overflow target! */
            if (Target < PagesFreed)
            {
                /* If we would have, jump to zero directly */
                Target = 0;
            }
            else
            {
                Target -= PagesFreed;
            }
        }

        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}

/* FIXME: Someday this could somewhat implement write-behind/read-ahead */
VOID
NTAPI
CciLazyWriter(PVOID Unused)
{
    while (TRUE)
    {
        NTSTATUS Status;
        PLIST_ENTRY ListEntry;
        ULONG Target, Count = 0;

        /* Run once per second, or until we have to stop */
        Status = KeWaitForSingleObject(&iLazyWriterShutdown,
                                       Executive,
                                       KernelMode,
                                       FALSE,
                                       &CcIdleDelay);

        /* If the wait succeeded, we have to stop running! */
        if (Status == STATUS_SUCCESS)
        {
            break;
        }

        /* We're not sleeping anymore */
        KeClearEvent(&iLazyWriterNotify);

        /* Our target is one-eighth of the dirty pages */
        Target = CcTotalDirtyPages / 8;
        if (Target != 0)
        {
            /* Flush! */
            DPRINT("Lazy writer starting (%d)\n", Target);
            CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);

            /* And update stats */
            CcLazyWritePages += Count;
            ++CcLazyWriteIos;
            DPRINT("Lazy writer done (%d)\n", Count);
        }

        /* Inform people waiting on us that we're done */
        KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);

        /* Likely not optimal, but let's handle one deferred write now! */
        ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
        if (ListEntry != NULL)
        {
            PDEFERRED_WRITE Context;

            /* Extract the context */
            Context = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);
            ASSERT(Context->NodeTypeCode == NODE_TYPE_DEFERRED_WRITE);

            /* Can we write now? */
            if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, TRUE))
            {
                /* Yes! Do it, and destroy the associated context */
                Context->PostRoutine(Context->Context1, Context->Context2);
                ExFreePoolWithTag(Context, 'CcDw');
            }
            else
            {
                /* Otherwise, requeue it at the tail so that it doesn't block others.
                 * This clearly needs improvement, but given the poor algorithm used
                 * now, it's better than nothing!
                 */
                ExInterlockedInsertTailList(&CcDeferredWrites,
                                            &Context->DeferredWriteLinks,
                                            &CcDeferredWriteSpinLock);
            }
        }
    }
}
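
/* Context for the loop above: the DEFERRED_WRITE entries it drains are queued by
 * CcDeferWrite() when a filesystem asks whether it may write and Cc answers "not
 * yet". A rough sketch of that caller-side pattern is shown below; it is not taken
 * from an in-tree filesystem, and the CcfdPostWrite routine, IrpContext, and
 * ByteOffset/Buffer variables are hypothetical names.
 *
 *   if (!CcCanIWrite(FileObject, BytesToWrite, Wait, FALSE))
 *   {
 *       // Cc is throttling writes: queue a post routine instead of writing now.
 *       // The lazy writer loop above retries it once dirty pages have drained.
 *       CcDeferWrite(FileObject, CcfdPostWrite, IrpContext, NULL, BytesToWrite, FALSE);
 *       return STATUS_PENDING;
 *   }
 *
 *   // Otherwise it is safe to go ahead with the cached write.
 *   CcCopyWrite(FileObject, &ByteOffset, BytesToWrite, Wait, Buffer);
 */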

NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *      Target - The number of pages to be freed.
 *      Priority - The priority of the free operation (currently unused).
 *      NrFreed - Points to a variable where the number of pages
 *                actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        CcRosInternalFreeVacb(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
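
/* Context for the routine above: CcInitView() below registers it as the trim
 * callback for the cache memory consumer via
 * MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache), so the memory balancer,
 * not Cc itself, decides when to reclaim pages. A hypothetical balancer-side
 * invocation, based only on the signature above, would look like:
 *
 *   ULONG Freed;
 *   CcRosTrimCache(128, 0, &Freed);   // ask Cc to give back up to 128 pages
 */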

NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    Vacb->Valid = Valid;

    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        Vacb->MappedCount++;
    }
    CcRosVacbDecRefCount(Vacb);
    if (Mapped && (Vacb->MappedCount == 1))
    {
        CcRosVacbIncRefCount(Vacb);
    }

    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}

/* Returns with VACB Lock Held! */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            CcRosAcquireVacbLock(current, NULL);
            return current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}

VOID
NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
}

VOID
NTAPI
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }
}

NTSTATUS
NTAPI
CcRosMarkDirtyFile (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PROS_VACB Vacb;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosMarkDirtyFile(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }

    if (!Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    BOOLEAN NowDirty)
{
    PROS_VACB Vacb;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    if (NowDirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    ASSERT(Vacb->MappedCount != 0);
    Vacb->MappedCount--;

    CcRosVacbDecRefCount(Vacb);
    if (Vacb->MappedCount == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}

static
NTSTATUS
CcRosMapVacb(
    PROS_VACB Vacb)
{
    ULONG i;
    NTSTATUS Status;
    ULONG_PTR NumberOfPages;

    /* Create a memory area. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
                                0, // nothing checks for VACB mareas, so set to 0
                                &Vacb->BaseAddress,
                                VACB_MAPPING_GRANULARITY,
                                PAGE_READWRITE,
                                (PMEMORY_AREA*)&Vacb->MemoryArea,
                                0,
                                PAGE_SIZE);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());
    if (!NT_SUCCESS(Status))
    {
        DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
        return Status;
    }

    ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
    ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);

    /* Create a virtual mapping for this memory area */
    NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
    for (i = 0; i < NumberOfPages; i++)
    {
        PFN_NUMBER PageFrameNumber;

        MI_SET_USAGE(MI_USAGE_CACHE);
        Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
        if (PageFrameNumber == 0)
        {
            DPRINT1("Unable to allocate page\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        Status = MmCreateVirtualMapping(NULL,
                                        (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
                                        PAGE_READWRITE,
                                        &PageFrameNumber,
                                        1);
        if (!NT_SUCCESS(Status))
        {
            DPRINT1("Unable to create virtual mapping\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }

    return STATUS_SUCCESS;
}

static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
    {
        *Vacb = NULL;
        return STATUS_INVALID_PARAMETER;
    }

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    current->BaseAddress = NULL;
    current->Valid = FALSE;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
    }
#endif
    current->MappedCount = 0;
    current->DirtyVacbListEntry.Flink = NULL;
    current->DirtyVacbListEntry.Blink = NULL;
    current->ReferenceCount = 1;
    current->PinCount = 0;
    KeInitializeMutex(&current->Mutex, 0);
    CcRosAcquireVacbLock(current, NULL);
    KeAcquireGuardedMutex(&ViewLock);

    *Vacb = current;
    /* There is a window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must therefore check whether a
     * VACB for this file offset already exists. If it does, we
     * release our newly created VACB and return the existing one.
     */
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            CcRosReleaseVacbLock(*Vacb);
            KeReleaseGuardedMutex(&ViewLock);
            ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
            *Vacb = current;
            CcRosAcquireVacbLock(current, NULL);
            return STATUS_SUCCESS;
        }
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
    KeReleaseGuardedMutex(&ViewLock);

    MI_SET_USAGE(MI_USAGE_CACHE);
#if MI_TRACE_PFNS
    if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
    {
        PWCHAR pos;
        ULONG len = 0;
        pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
        if (pos)
        {
            len = wcslen(pos) * sizeof(WCHAR);
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
        }
        else
        {
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
        }
    }
#endif

    Status = CcRosMapVacb(current);
    if (!NT_SUCCESS(Status))
    {
        RemoveEntryList(&current->CacheMapVacbListEntry);
        RemoveEntryList(&current->VacbLruListEntry);
        CcRosReleaseVacbLock(current);
        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
    }

    return Status;
}

NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PLONGLONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the VACB to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *Vacb = current;
    *BaseOffset = current->FileOffset.QuadPart;
    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
CcRosRequestVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
/*
 * FUNCTION: Request a page mapping for a shared cache map
 */
{
    LONGLONG BaseOffset;

    ASSERT(SharedCacheMap);

    if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
    {
        DPRINT1("Bad fileoffset %I64x should be multiple of %x",
                FileOffset, VACB_MAPPING_GRANULARITY);
        KeBugCheck(CACHE_MANAGER);
    }

    return CcRosGetVacb(SharedCacheMap,
                        FileOffset,
                        &BaseOffset,
                        BaseAddress,
                        UptoDate,
                        Vacb);
}
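
/* Usage note (sketch only, not lifted from a real caller): CcRosRequestVacb()
 * insists on a view-aligned offset, so a caller is expected to round the file
 * offset down to the VACB granularity first and then work at an offset inside
 * the returned view. The VacbOffset and Data names are invented for illustration.
 *
 *   LONGLONG VacbOffset = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
 *   PVOID BaseAddress;
 *   BOOLEAN UptoDate;
 *   PROS_VACB Vacb;
 *   NTSTATUS Status;
 *
 *   Status = CcRosRequestVacb(SharedCacheMap, VacbOffset, &BaseAddress, &UptoDate, &Vacb);
 *   if (NT_SUCCESS(Status))
 *   {
 *       PVOID Data = (PUCHAR)BaseAddress + (FileOffset - VacbOffset);
 *       // ... read or write through Data, then hand the view back with CcRosReleaseVacb() ...
 *   }
 */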

static
VOID
CcFreeCachePage (
    PVOID Context,
    MEMORY_AREA* MemoryArea,
    PVOID Address,
    PFN_NUMBER Page,
    SWAPENTRY SwapEntry,
    BOOLEAN Dirty)
{
    ASSERT(SwapEntry == 0);
    if (Page != 0)
    {
        ASSERT(MmGetReferenceCountPage(Page) == 1);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }
}

NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}

/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;
    KIRQL oldIrql;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
        SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                CcRosReleaseVacbLock(current);

                KeAcquireGuardedMutex(&ViewLock);
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosVacbDecRefCount(current);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                KeReleaseGuardedMutex(&ViewLock);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
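
/* Caller-side sketch (illustrative only, not taken from an in-tree filesystem):
 * a filesystem typically flushes a byte range of a cached file as shown below and
 * then inspects the returned IO_STATUS_BLOCK. The Fcb layout and variable names
 * are assumptions.
 *
 *   IO_STATUS_BLOCK Iosb;
 *   LARGE_INTEGER FlushOffset;
 *
 *   FlushOffset.QuadPart = 0;
 *   CcFlushCache(Fcb->SectionObjectPointers, &FlushOffset, LengthToFlush, &Iosb);
 *   if (!NT_SUCCESS(Iosb.Status))
 *   {
 *       // at least one view failed to flush; propagate the error
 *       return Iosb.Status;
 *   }
 */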

NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        KIRQL OldIrql;

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosAcquireVacbLock(current, NULL);
            RemoveEntryList(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosUnmarkDirtyVacb(current, FALSE);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                DPRINT1("Freeing dirty VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
            CcRosReleaseVacbLock(current);

            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosInternalFreeVacb(current);
        }

        KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}

VOID
NTAPI
CcRosReferenceCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    ASSERT(SharedCacheMap->OpenCount != 0);
    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);
}

VOID
NTAPI
CcRosRemoveIfClosed (
    PSECTION_OBJECT_POINTERS SectionObjectPointer)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    DPRINT("CcRosRemoveIfClosed()\n");
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
    {
        CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
    }
    KeReleaseGuardedMutex(&ViewLock);
}


VOID
NTAPI
CcRosDereferenceCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    if (SharedCacheMap->OpenCount > 0)
    {
        SharedCacheMap->OpenCount--;
        if (SharedCacheMap->OpenCount == 0)
        {
            MmFreeSectionSegments(SharedCacheMap->FileObject);
            CcRosDeleteFileCache(FileObject, SharedCacheMap);
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
}

NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
        if (FileObject->PrivateCacheMap != NULL)
        {
            FileObject->PrivateCacheMap = NULL;
            if (SharedCacheMap->OpenCount > 0)
            {
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
CcTryToInitializeFileCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    NTSTATUS Status;

    KeAcquireGuardedMutex(&ViewLock);

    ASSERT(FileObject->SectionObjectPointer);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap == NULL)
    {
        Status = STATUS_UNSUCCESSFUL;
    }
    else
    {
        if (FileObject->PrivateCacheMap == NULL)
        {
            FileObject->PrivateCacheMap = SharedCacheMap;
            SharedCacheMap->OpenCount++;
        }
        Status = STATUS_SUCCESS;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return Status;
}


NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        KIRQL OldIrql;

        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
    }
    if (FileObject->PrivateCacheMap == NULL)
    {
        FileObject->PrivateCacheMap = SharedCacheMap;
        SharedCacheMap->OpenCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}
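
/* Filesystem-side sketch (assumption: the public CcInitializeCacheMap() wrapper,
 * implemented elsewhere in Cc, is what ends up calling the routine above). The
 * callback routines and the Fcb layout below are illustrative names only.
 *
 *   CC_FILE_SIZES FileSizes;
 *   CACHE_MANAGER_CALLBACKS Callbacks;
 *
 *   Callbacks.AcquireForLazyWrite  = MyAcquireForLazyWrite;
 *   Callbacks.ReleaseFromLazyWrite = MyReleaseFromLazyWrite;
 *   Callbacks.AcquireForReadAhead  = MyAcquireForReadAhead;
 *   Callbacks.ReleaseFromReadAhead = MyReleaseFromReadAhead;
 *
 *   FileSizes.AllocationSize  = Fcb->AllocationSize;
 *   FileSizes.FileSize        = Fcb->FileSize;
 *   FileSizes.ValidDataLength = Fcb->ValidDataLength;
 *
 *   CcInitializeCacheMap(FileObject, &FileSizes, FALSE, &Callbacks, Fcb);
 */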

/*
 * @implemented
 */
PFILE_OBJECT
NTAPI
CcGetFileObjectFromSectionPtrs (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        return SharedCacheMap->FileObject;
    }
    return NULL;
}

VOID
NTAPI
CcShutdownLazyWriter (
    VOID)
{
    /* Simply set the event; the lazy writer will stop when it's done */
    KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
}

BOOLEAN
INIT_FUNCTION
NTAPI
CcInitView (
    VOID)
{
    HANDLE LazyWriter;
    NTSTATUS Status;
    KPRIORITY Priority;
    OBJECT_ATTRIBUTES ObjectAttributes;

    DPRINT("CcInitView()\n");

    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    InitializeListHead(&CcDeferredWrites);
    InitializeListHead(&CcCleanSharedCacheMapList);
    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
    KeInitializeSpinLock(&iSharedCacheMapLock);
    KeInitializeGuardedMutex(&ViewLock);
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);

    /* Initialize lazy writer events */
    KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
    KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);

    /* Define the lazy writer threshold, depending on the system size */
    CcCapturedSystemSize = MmQuerySystemSize();
    switch (CcCapturedSystemSize)
    {
        case MmSmallSystem:
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
            break;

        case MmMediumSystem:
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
            break;

        case MmLargeSystem:
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
            break;
    }
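
    /* Worked example (figures are illustrative, not a tuning recommendation):
     * with 4 KB pages, a 512 MB machine has MmNumberOfPhysicalPages = 131072.
     * If MmQuerySystemSize() reports it as a medium system, the threshold becomes
     * 131072 / 4 = 32768 pages, i.e. roughly 128 MB of dirty cache data; a small
     * system uses 1/8 and a large one 3/8 of physical memory instead.
     */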

    /* Start the lazy writer thread */
    InitializeObjectAttributes(&ObjectAttributes,
                               NULL,
                               OBJ_KERNEL_HANDLE,
                               NULL,
                               NULL);
    Status = PsCreateSystemThread(&LazyWriter,
                                  THREAD_ALL_ACCESS,
                                  &ObjectAttributes,
                                  NULL,
                                  NULL,
                                  CciLazyWriter,
                                  NULL);
    if (!NT_SUCCESS(Status))
    {
        return FALSE;
    }

    Priority = 27;
    Status = NtSetInformationThread(LazyWriter,
                                    ThreadPriority,
                                    &Priority,
                                    sizeof(Priority));
    ASSERT(NT_SUCCESS(Status));

    /* Handle is not needed */
    ObCloseHandle(LazyWriter, KernelMode);

    CcInitCacheZeroPage();

    return TRUE;
}

#if DBG && defined(KDBG)
BOOLEAN
ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
{
    PLIST_ENTRY ListEntry;
    UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");

    KdbpPrint(" Usage Summary (in kb)\n");
    KdbpPrint("Shared\t\tValid\tDirty\tName\n");
    /* No need to lock the spin lock here, we're in DBG */
    for (ListEntry = CcCleanSharedCacheMapList.Flink;
         ListEntry != &CcCleanSharedCacheMapList;
         ListEntry = ListEntry->Flink)
    {
        PLIST_ENTRY Vacbs;
        ULONG Valid = 0, Dirty = 0;
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PUNICODE_STRING FileName;

        SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);

        /* Dirty size */
        Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;

        /* First, account for all the associated VACBs */
        for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
             Vacbs != &SharedCacheMap->CacheMapVacbListHead;
             Vacbs = Vacbs->Flink)
        {
            PROS_VACB Vacb;

            Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
            if (Vacb->Valid)
            {
                Valid += VACB_MAPPING_GRANULARITY / 1024;
            }
        }

        /* Setup name */
        if (SharedCacheMap->FileObject != NULL &&
            SharedCacheMap->FileObject->FileName.Length != 0)
        {
            FileName = &SharedCacheMap->FileObject->FileName;
        }
        else
        {
            FileName = &NoName;
        }

        /* And print */
        KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
    }

    return TRUE;
}
#endif

/* EOF */