[NTOSKRNL] Contrary to WinDBG !filecache, we don't display CONTROL_AREA but shared...
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
19 * (1) If caching for the FCB hasn't been initiated then so do by calling
20 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
27 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
54 /* Counters:
55 * - Amount of pages flushed by lazy writer
56 * - Number of times lazy writer ran
57 */
58 ULONG CcLazyWritePages = 0;
59 ULONG CcLazyWriteIos = 0;
60
61 /* Internal vars (MS):
62 * - Threshold above which lazy writer will start action
63 * - Amount of dirty pages
64 * - List for deferred writes
65 * - Spinlock when dealing with the deferred list
66 * - List for "clean" shared cache maps
67 */
68 ULONG CcDirtyPageThreshold = 0;
69 ULONG CcTotalDirtyPages = 0;
70 LIST_ENTRY CcDeferredWrites;
71 KSPIN_LOCK CcDeferredWriteSpinLock;
72 LIST_ENTRY CcCleanSharedCacheMapList;
73
74 /* Internal vars (ROS):
75 * - Event to notify lazy writer to shutdown
76 * - Event to inform watchers lazy writer is done for this loop
77 * - Lock for the CcCleanSharedCacheMapList list
78 */
79 KEVENT iLazyWriterShutdown;
80 KEVENT iLazyWriterNotify;
81 KSPIN_LOCK iSharedCacheMapLock;
82
#if DBG
/* Debug builds: instrumented VACB reference counting.  When tracing has been
 * enabled on the owning shared cache map (see CcRosTraceCacheMap), every
 * increment/decrement is logged together with the call site that performed it. */
static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
{
    ++vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
    --vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
/* The macros capture __FILE__/__LINE__ so the helpers can report the caller. */
#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
/* Free builds: plain, untraced increment/decrement. */
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
108
109 NTSTATUS
110 CcRosInternalFreeVacb(PROS_VACB Vacb);
111
112
113 /* FUNCTIONS *****************************************************************/
114
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
/*
 * FUNCTION: Enable or disable reference-count tracing for a shared cache map.
 * ARGUMENTS:
 *     SharedCacheMap - Map to (un)trace; NULL is tolerated and ignored.
 *     Trace - TRUE to enable tracing (also dumps the current VACB list),
 *             FALSE to disable it.
 * NOTE: Only effective on DBG builds; a no-op otherwise.
 */
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        /* Lock order: ViewLock first, then the per-map spinlock */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        /* Dump every VACB currently attached to this map */
        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
160
NTSTATUS
NTAPI
CcRosFlushVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Write a single VACB back to its backing store and, on success,
 * mark it clean.
 * ARGUMENTS:
 *     Vacb - The view to flush; expected to be on the dirty list.
 * RETURNS: Status of the underlying write.
 * NOTE: Must be called WITHOUT ViewLock held - it acquires it itself.
 *       The reference dropped here is presumably the one taken when the
 *       VACB was marked dirty (see CcRosReleaseVacb) - TODO confirm.
 */
{
    NTSTATUS Status;
    KIRQL oldIrql;

    Status = CcWriteVirtualAddress(Vacb);
    if (NT_SUCCESS(Status))
    {
        /* Lock order: ViewLock first, then the per-map spinlock */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);

        /* The data is on disk now: unlink from the dirty list and drop the
         * dirty-state reference and page accounting */
        Vacb->Dirty = FALSE;
        RemoveEntryList(&Vacb->DirtyVacbListEntry);
        CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        CcRosVacbDecRefCount(Vacb);

        KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }

    return Status;
}
186
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
/*
 * FUNCTION: Write back up to Target pages worth of dirty VACBs.
 * ARGUMENTS:
 *     Target - Number of dirty pages to try to flush.
 *     Count - Receives the number of pages actually flushed.
 *     Wait - If TRUE, block on the FSD lazy-write lock and the VACB mutex;
 *            if FALSE, skip any VACB that cannot be acquired immediately.
 *     CalledFromLazy - TRUE when invoked by the lazy writer, in which case
 *                      temporary files are skipped.
 * RETURNS: Always STATUS_SUCCESS; individual VACB flush failures are only
 *          logged.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;
    /* Zero timeout is used for the non-blocking lock attempts (Wait == FALSE) */
    ZeroTimeout.QuadPart = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Keep the VACB alive while we (possibly) drop locks around it */
        CcRosVacbIncRefCount(current);

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Let the FSD serialize against its own writers first */
        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        Status = CcRosAcquireVacbLock(current,
                                      Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above; the dirty state accounts for another
         * (see CcRosReleaseVacb), so a count above 2 means somebody else is
         * actively using this VACB - leave it alone */
        if (current->ReferenceCount > 2)
        {
            CcRosReleaseVacbLock(current);
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Drop ViewLock for the actual I/O; CcRosFlushVacb reacquires it */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        CcRosReleaseVacbLock(current);
        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            (*Count) += PagesFreed;

            /* Make sure we don't overflow target! */
            if (Target < PagesFreed)
            {
                /* If we would have, jump to zero directly */
                Target = 0;
            }
            else
            {
                Target -= PagesFreed;
            }
        }

        /* The dirty list may have changed while ViewLock was dropped, so
         * restart the scan from the head */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
307
/* FIXME: Someday this could somewhat implement write-behind/read-ahead */
VOID
NTAPI
CciLazyWriter(PVOID Unused)
/*
 * FUNCTION: Lazy writer worker thread.  Once per second it flushes one
 * eighth of the dirty pages, then services at most one queued deferred
 * write.  Exits when iLazyWriterShutdown is signaled.
 */
{
    LARGE_INTEGER OneSecond;

    /* Negative value = relative timeout; one second in 100ns units */
    OneSecond.QuadPart = (LONGLONG)-1*1000*1000*10;

    while (TRUE)
    {
        NTSTATUS Status;
        PLIST_ENTRY ListEntry;
        ULONG Target, Count = 0;

        /* One per second or until we have to stop */
        Status = KeWaitForSingleObject(&iLazyWriterShutdown,
                                       Executive,
                                       KernelMode,
                                       FALSE,
                                       &OneSecond);

        /* If we succeeed, we've to stop running! */
        if (Status == STATUS_SUCCESS)
        {
            break;
        }

        /* We're not sleeping anymore */
        KeClearEvent(&iLazyWriterNotify);

        /* Our target is one-eighth of the dirty pages */
        Target = CcTotalDirtyPages / 8;
        if (Target != 0)
        {
            /* Flush! */
            DPRINT("Lazy writer starting (%d)\n", Target);
            CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);

            /* And update stats */
            CcLazyWritePages += Count;
            ++CcLazyWriteIos;
            DPRINT("Lazy writer done (%d)\n", Count);
        }

        /* Inform people waiting on us that we're done */
        KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);

        /* Likely not optimal, but let's handle one deferred write now! */
        ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
        if (ListEntry != NULL)
        {
            PROS_DEFERRED_WRITE_CONTEXT Context;

            /* Extract the context */
            Context = CONTAINING_RECORD(ListEntry, ROS_DEFERRED_WRITE_CONTEXT, CcDeferredWritesEntry);

            /* Can we write now? */
            if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, Context->Retrying))
            {
                /* Yes! Do it, and destroy the associated context */
                Context->PostRoutine(Context->Context1, Context->Context2);
                ExFreePoolWithTag(Context, 'CcDw');
            }
            else
            {
                /* Otherwise, requeue it, but in tail, so that it doesn't block others
                 * This is clearly to improve, but given the poor algorithm used now
                 * It's better than nothing!
                 */
                ExInterlockedInsertTailList(&CcDeferredWrites,
                                            &Context->CcDeferredWritesEntry,
                                            &CcDeferredWriteSpinLock);
            }
        }
    }
}
385
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 * RETURNS: Always STATUS_SUCCESS.
 * NOTE: Walks the global LRU list; unreferenced clean VACBs are unlinked
 *       under the locks and freed afterwards, outside the locks.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Unlink it and queue it for freeing once the locks are dropped;
             * CacheMapVacbListEntry is reused as the FreeList link */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Now actually release the VACBs collected above, with no locks held */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        CcRosInternalFreeVacb(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
507
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
/*
 * FUNCTION: Release a VACB previously obtained via CcRosGetVacb /
 * CcRosRequestVacb, updating its valid/dirty/mapped state.
 * ARGUMENTS:
 *     SharedCacheMap - Owning cache map.
 *     Vacb - The view being released (its lock is held on entry and
 *            released here).
 *     Valid - New value for the up-to-date flag.
 *     Dirty - TRUE if the caller wrote to the view (sticky: a dirty VACB
 *             stays dirty).
 *     Mapped - TRUE if the caller created a user mapping of this view.
 * RETURNS: Always STATUS_SUCCESS.
 * NOTE: Extra references are kept for the "dirty" state and for the first
 *       mapping, so an unreferenced VACB is always clean and unmapped.
 */
{
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    Vacb->Valid = Valid;

    /* Dirty is sticky: once set it is only cleared by a flush */
    WasDirty = Vacb->Dirty;
    Vacb->Dirty = Vacb->Dirty || Dirty;

    if (!WasDirty && Vacb->Dirty)
    {
        /* Transitioned clean -> dirty: account for it globally */
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    if (Mapped)
    {
        Vacb->MappedCount++;
    }
    /* Drop the caller's reference... */
    CcRosVacbDecRefCount(Vacb);
    /* ...but take one back for the first mapping... */
    if (Mapped && (Vacb->MappedCount == 1))
    {
        CcRosVacbIncRefCount(Vacb);
    }
    /* ...and one for the new dirty state (released in CcRosFlushVacb) */
    if (!WasDirty && Vacb->Dirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}
559
/* Returns with VACB Lock Held! */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
/*
 * FUNCTION: Find the VACB covering FileOffset in the given cache map.
 * ARGUMENTS:
 *     SharedCacheMap - Map to search.
 *     FileOffset - Byte offset within the file.
 * RETURNS: A referenced VACB with its lock held, or NULL if no view covers
 *          the offset.
 * NOTE: The per-map VACB list is kept sorted by file offset, which allows
 *       the early exit below.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Reference it before dropping the locks, then acquire its
             * mutex outside of them (the mutex is a waitable object) */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            CcRosAcquireVacbLock(current, NULL);
            return current;
        }
        /* List is sorted: once we are past the offset, it's not there */
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}
605
VOID
NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Mark a VACB dirty and move it to the tail of the LRU list.
 * ARGUMENTS:
 *     Vacb - View to mark; the caller holds a reference on it (e.g. from
 *            CcRosLookupVacb).
 * NOTE: If the VACB was clean, the caller's reference is consumed here as
 *       the dirty-state reference (released later by CcRosFlushVacb); if it
 *       was already dirty, the reference is dropped to avoid counting the
 *       dirty state twice - TODO confirm against all callers.
 */
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    if (!Vacb->Dirty)
    {
        /* Clean -> dirty transition: put it on the dirty list and update
         * the global dirty page count */
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }
    else
    {
        CcRosVacbDecRefCount(Vacb);
    }

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
}
638
639 NTSTATUS
640 NTAPI
641 CcRosMarkDirtyFile (
642 PROS_SHARED_CACHE_MAP SharedCacheMap,
643 LONGLONG FileOffset)
644 {
645 PROS_VACB Vacb;
646
647 ASSERT(SharedCacheMap);
648
649 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
650 SharedCacheMap, FileOffset);
651
652 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
653 if (Vacb == NULL)
654 {
655 KeBugCheck(CACHE_MANAGER);
656 }
657
658 CcRosMarkDirtyVacb(Vacb);
659
660
661 CcRosReleaseVacbLock(Vacb);
662
663 return STATUS_SUCCESS;
664 }
665
NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    BOOLEAN NowDirty)
/*
 * FUNCTION: Remove one user mapping from the VACB covering FileOffset.
 * ARGUMENTS:
 *     SharedCacheMap - Cache map of the file.
 *     FileOffset - Byte offset within the file.
 *     NowDirty - TRUE if the mapping was written to (dirty is sticky).
 * RETURNS: STATUS_SUCCESS, or STATUS_UNSUCCESSFUL if no view covers the
 *          offset.
 */
{
    PROS_VACB Vacb;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    /* Returns the view referenced and locked */
    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    WasDirty = Vacb->Dirty;
    Vacb->Dirty = Vacb->Dirty || NowDirty;

    Vacb->MappedCount--;

    if (!WasDirty && NowDirty)
    {
        /* Clean -> dirty transition: track it globally */
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    /* Drop the lookup reference, then rebalance: +1 for the new dirty
     * state, -1 when the last mapping goes away (mirrors the references
     * taken in CcRosReleaseVacb) */
    CcRosVacbDecRefCount(Vacb);
    if (!WasDirty && NowDirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }
    if (Vacb->MappedCount == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}
718
719 static
720 NTSTATUS
721 CcRosMapVacb(
722 PROS_VACB Vacb)
723 {
724 ULONG i;
725 NTSTATUS Status;
726 ULONG_PTR NumberOfPages;
727
728 /* Create a memory area. */
729 MmLockAddressSpace(MmGetKernelAddressSpace());
730 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
731 0, // nothing checks for VACB mareas, so set to 0
732 &Vacb->BaseAddress,
733 VACB_MAPPING_GRANULARITY,
734 PAGE_READWRITE,
735 (PMEMORY_AREA*)&Vacb->MemoryArea,
736 0,
737 PAGE_SIZE);
738 MmUnlockAddressSpace(MmGetKernelAddressSpace());
739 if (!NT_SUCCESS(Status))
740 {
741 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
742 return Status;
743 }
744
745 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
746 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
747
748 /* Create a virtual mapping for this memory area */
749 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
750 for (i = 0; i < NumberOfPages; i++)
751 {
752 PFN_NUMBER PageFrameNumber;
753
754 MI_SET_USAGE(MI_USAGE_CACHE);
755 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
756 if (PageFrameNumber == 0)
757 {
758 DPRINT1("Unable to allocate page\n");
759 KeBugCheck(MEMORY_MANAGEMENT);
760 }
761
762 Status = MmCreateVirtualMapping(NULL,
763 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
764 PAGE_READWRITE,
765 &PageFrameNumber,
766 1);
767 if (!NT_SUCCESS(Status))
768 {
769 DPRINT1("Unable to create virtual mapping\n");
770 KeBugCheck(MEMORY_MANAGEMENT);
771 }
772 }
773
774 return STATUS_SUCCESS;
775 }
776
777 static
778 NTSTATUS
779 CcRosCreateVacb (
780 PROS_SHARED_CACHE_MAP SharedCacheMap,
781 LONGLONG FileOffset,
782 PROS_VACB *Vacb)
783 {
784 PROS_VACB current;
785 PROS_VACB previous;
786 PLIST_ENTRY current_entry;
787 NTSTATUS Status;
788 KIRQL oldIrql;
789
790 ASSERT(SharedCacheMap);
791
792 DPRINT("CcRosCreateVacb()\n");
793
794 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
795 {
796 *Vacb = NULL;
797 return STATUS_INVALID_PARAMETER;
798 }
799
800 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
801 current->BaseAddress = NULL;
802 current->Valid = FALSE;
803 current->Dirty = FALSE;
804 current->PageOut = FALSE;
805 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
806 current->SharedCacheMap = SharedCacheMap;
807 #if DBG
808 if (SharedCacheMap->Trace)
809 {
810 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
811 }
812 #endif
813 current->MappedCount = 0;
814 current->DirtyVacbListEntry.Flink = NULL;
815 current->DirtyVacbListEntry.Blink = NULL;
816 current->ReferenceCount = 1;
817 current->PinCount = 0;
818 KeInitializeMutex(&current->Mutex, 0);
819 CcRosAcquireVacbLock(current, NULL);
820 KeAcquireGuardedMutex(&ViewLock);
821
822 *Vacb = current;
823 /* There is window between the call to CcRosLookupVacb
824 * and CcRosCreateVacb. We must check if a VACB for the
825 * file offset exist. If there is a VACB, we release
826 * our newly created VACB and return the existing one.
827 */
828 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
829 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
830 previous = NULL;
831 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
832 {
833 current = CONTAINING_RECORD(current_entry,
834 ROS_VACB,
835 CacheMapVacbListEntry);
836 if (IsPointInRange(current->FileOffset.QuadPart,
837 VACB_MAPPING_GRANULARITY,
838 FileOffset))
839 {
840 CcRosVacbIncRefCount(current);
841 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
842 #if DBG
843 if (SharedCacheMap->Trace)
844 {
845 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
846 SharedCacheMap,
847 (*Vacb),
848 current);
849 }
850 #endif
851 CcRosReleaseVacbLock(*Vacb);
852 KeReleaseGuardedMutex(&ViewLock);
853 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
854 *Vacb = current;
855 CcRosAcquireVacbLock(current, NULL);
856 return STATUS_SUCCESS;
857 }
858 if (current->FileOffset.QuadPart < FileOffset)
859 {
860 ASSERT(previous == NULL ||
861 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
862 previous = current;
863 }
864 if (current->FileOffset.QuadPart > FileOffset)
865 break;
866 current_entry = current_entry->Flink;
867 }
868 /* There was no existing VACB. */
869 current = *Vacb;
870 if (previous)
871 {
872 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
873 }
874 else
875 {
876 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
877 }
878 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
879 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
880 KeReleaseGuardedMutex(&ViewLock);
881
882 MI_SET_USAGE(MI_USAGE_CACHE);
883 #if MI_TRACE_PFNS
884 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
885 {
886 PWCHAR pos = NULL;
887 ULONG len = 0;
888 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
889 if (pos)
890 {
891 len = wcslen(pos) * sizeof(WCHAR);
892 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
893 }
894 else
895 {
896 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
897 }
898 }
899 #endif
900
901 Status = CcRosMapVacb(current);
902 if (!NT_SUCCESS(Status))
903 {
904 RemoveEntryList(&current->CacheMapVacbListEntry);
905 RemoveEntryList(&current->VacbLruListEntry);
906 CcRosReleaseVacbLock(current);
907 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
908 }
909
910 return Status;
911 }
912
NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PLONGLONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
/*
 * FUNCTION: Return the VACB covering FileOffset, creating one if needed.
 * ARGUMENTS:
 *     SharedCacheMap - Cache map of the file.
 *     FileOffset - Byte offset within the file.
 *     BaseOffset - Receives the (granularity-aligned) start offset of the view.
 *     BaseAddress - Receives the kernel address of the view.
 *     UptoDate - Receives whether the view already holds valid data.
 *     Vacb - Receives the view, referenced and locked.
 * RETURNS: STATUS_SUCCESS or the CcRosCreateVacb failure status.
 */
{
    PROS_VACB current;
    NTSTATUS Status;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the VACB to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *Vacb = current;
    *BaseOffset = current->FileOffset.QuadPart;
    return STATUS_SUCCESS;
}
964
965 NTSTATUS
966 NTAPI
967 CcRosRequestVacb (
968 PROS_SHARED_CACHE_MAP SharedCacheMap,
969 LONGLONG FileOffset,
970 PVOID* BaseAddress,
971 PBOOLEAN UptoDate,
972 PROS_VACB *Vacb)
973 /*
974 * FUNCTION: Request a page mapping for a shared cache map
975 */
976 {
977 LONGLONG BaseOffset;
978
979 ASSERT(SharedCacheMap);
980
981 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
982 {
983 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
984 FileOffset, VACB_MAPPING_GRANULARITY);
985 KeBugCheck(CACHE_MANAGER);
986 }
987
988 return CcRosGetVacb(SharedCacheMap,
989 FileOffset,
990 &BaseOffset,
991 BaseAddress,
992 UptoDate,
993 Vacb);
994 }
995
996 static
997 VOID
998 CcFreeCachePage (
999 PVOID Context,
1000 MEMORY_AREA* MemoryArea,
1001 PVOID Address,
1002 PFN_NUMBER Page,
1003 SWAPENTRY SwapEntry,
1004 BOOLEAN Dirty)
1005 {
1006 ASSERT(SwapEntry == 0);
1007 if (Page != 0)
1008 {
1009 ASSERT(MmGetReferenceCountPage(Page) == 1);
1010 MmReleasePageMemoryConsumer(MC_CACHE, Page);
1011 }
1012 }
1013
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 * ARGUMENTS:
 *     Vacb - View to destroy; must already be unlinked from all lists.
 * RETURNS: Always STATUS_SUCCESS.
 * NOTE: CcFreeCachePage returns each backing page to the MC_CACHE consumer.
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    /* Tear down the kernel mapping and free the physical pages */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
1039
/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
/*
 * FUNCTION: Flush the cached data of a file (or a byte range of it) to disk.
 * ARGUMENTS:
 *     SectionObjectPointers - Identifies the file; must carry a shared
 *                             cache map or STATUS_INVALID_PARAMETER is
 *                             reported.
 *     FileOffset - Optional start of the range; NULL means the whole file.
 *     Length - Length of the range (ignored when FileOffset is NULL).
 *     IoStatus - Optional; receives the last flush failure status, or
 *                STATUS_SUCCESS.  NOTE: Information is left at 0 rather
 *                than the number of bytes flushed.
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;
    KIRQL oldIrql;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
        SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            /* No offset given: flush from the start of file to its end */
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one VACB-sized view at a time */
        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                CcRosReleaseVacbLock(current);

                /* Drop the reference taken by CcRosLookupVacb */
                KeAcquireGuardedMutex(&ViewLock);
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosVacbDecRefCount(current);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                KeReleaseGuardedMutex(&ViewLock);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1120
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 * ARGUMENTS:
 *     FileObject - File whose cache is being torn down.
 *     SharedCacheMap - Its shared cache map.
 * RETURNS: Always STATUS_SUCCESS.
 * NOTE: Called and returns with ViewLock held (it is temporarily dropped
 *       around the flush and the actual teardown).  The OpenCount bump
 *       below keeps the map alive across that window.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);

    /* Write back everything dirty before destroying the views */
    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        KIRQL OldIrql;

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* Shouldn't normally happen after the flush above */
                RemoveEntryList(&current->DirtyVacbListEntry);
                CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
                DPRINT1("Freeing dirty VACB\n");
            }
            /* CacheMapVacbListEntry is reused as the FreeList link */
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        /* Actually free the views, with no locks held */
        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosInternalFreeVacb(current);
        }

        /* Unlink the map from the global clean-map list and free it */
        KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1192
1193 VOID
1194 NTAPI
1195 CcRosReferenceCache (
1196 PFILE_OBJECT FileObject)
1197 {
1198 PROS_SHARED_CACHE_MAP SharedCacheMap;
1199 KeAcquireGuardedMutex(&ViewLock);
1200 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1201 ASSERT(SharedCacheMap);
1202 ASSERT(SharedCacheMap->OpenCount != 0);
1203 SharedCacheMap->OpenCount++;
1204 KeReleaseGuardedMutex(&ViewLock);
1205 }
1206
1207 VOID
1208 NTAPI
1209 CcRosRemoveIfClosed (
1210 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1211 {
1212 PROS_SHARED_CACHE_MAP SharedCacheMap;
1213 DPRINT("CcRosRemoveIfClosed()\n");
1214 KeAcquireGuardedMutex(&ViewLock);
1215 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1216 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1217 {
1218 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1219 }
1220 KeReleaseGuardedMutex(&ViewLock);
1221 }
1222
1223
1224 VOID
1225 NTAPI
1226 CcRosDereferenceCache (
1227 PFILE_OBJECT FileObject)
1228 {
1229 PROS_SHARED_CACHE_MAP SharedCacheMap;
1230 KeAcquireGuardedMutex(&ViewLock);
1231 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1232 ASSERT(SharedCacheMap);
1233 if (SharedCacheMap->OpenCount > 0)
1234 {
1235 SharedCacheMap->OpenCount--;
1236 if (SharedCacheMap->OpenCount == 0)
1237 {
1238 MmFreeSectionSegments(SharedCacheMap->FileObject);
1239 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1240 }
1241 }
1242 KeReleaseGuardedMutex(&ViewLock);
1243 }
1244
1245 NTSTATUS
1246 NTAPI
1247 CcRosReleaseFileCache (
1248 PFILE_OBJECT FileObject)
1249 /*
1250 * FUNCTION: Called by the file system when a handle to a file object
1251 * has been closed.
1252 */
1253 {
1254 PROS_SHARED_CACHE_MAP SharedCacheMap;
1255
1256 KeAcquireGuardedMutex(&ViewLock);
1257
1258 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1259 {
1260 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1261 if (FileObject->PrivateCacheMap != NULL)
1262 {
1263 FileObject->PrivateCacheMap = NULL;
1264 if (SharedCacheMap->OpenCount > 0)
1265 {
1266 SharedCacheMap->OpenCount--;
1267 if (SharedCacheMap->OpenCount == 0)
1268 {
1269 MmFreeSectionSegments(SharedCacheMap->FileObject);
1270 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1271 }
1272 }
1273 }
1274 }
1275 KeReleaseGuardedMutex(&ViewLock);
1276 return STATUS_SUCCESS;
1277 }
1278
1279 NTSTATUS
1280 NTAPI
1281 CcTryToInitializeFileCache (
1282 PFILE_OBJECT FileObject)
1283 {
1284 PROS_SHARED_CACHE_MAP SharedCacheMap;
1285 NTSTATUS Status;
1286
1287 KeAcquireGuardedMutex(&ViewLock);
1288
1289 ASSERT(FileObject->SectionObjectPointer);
1290 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1291 if (SharedCacheMap == NULL)
1292 {
1293 Status = STATUS_UNSUCCESSFUL;
1294 }
1295 else
1296 {
1297 if (FileObject->PrivateCacheMap == NULL)
1298 {
1299 FileObject->PrivateCacheMap = SharedCacheMap;
1300 SharedCacheMap->OpenCount++;
1301 }
1302 Status = STATUS_SUCCESS;
1303 }
1304 KeReleaseGuardedMutex(&ViewLock);
1305
1306 return Status;
1307 }
1308
1309
1310 NTSTATUS
1311 NTAPI
1312 CcRosInitializeFileCache (
1313 PFILE_OBJECT FileObject,
1314 PCC_FILE_SIZES FileSizes,
1315 BOOLEAN PinAccess,
1316 PCACHE_MANAGER_CALLBACKS CallBacks,
1317 PVOID LazyWriterContext)
1318 /*
1319 * FUNCTION: Initializes a shared cache map for a file object
1320 */
1321 {
1322 PROS_SHARED_CACHE_MAP SharedCacheMap;
1323
1324 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1325 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1326 FileObject, SharedCacheMap);
1327
1328 KeAcquireGuardedMutex(&ViewLock);
1329 if (SharedCacheMap == NULL)
1330 {
1331 KIRQL OldIrql;
1332
1333 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1334 if (SharedCacheMap == NULL)
1335 {
1336 KeReleaseGuardedMutex(&ViewLock);
1337 return STATUS_INSUFFICIENT_RESOURCES;
1338 }
1339 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1340 ObReferenceObjectByPointer(FileObject,
1341 FILE_ALL_ACCESS,
1342 NULL,
1343 KernelMode);
1344 SharedCacheMap->FileObject = FileObject;
1345 SharedCacheMap->Callbacks = CallBacks;
1346 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1347 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1348 SharedCacheMap->FileSize = FileSizes->FileSize;
1349 SharedCacheMap->PinAccess = PinAccess;
1350 SharedCacheMap->DirtyPageThreshold = 0;
1351 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1352 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1353 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1354
1355 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1356 InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
1357 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1358 }
1359 if (FileObject->PrivateCacheMap == NULL)
1360 {
1361 FileObject->PrivateCacheMap = SharedCacheMap;
1362 SharedCacheMap->OpenCount++;
1363 }
1364 KeReleaseGuardedMutex(&ViewLock);
1365
1366 return STATUS_SUCCESS;
1367 }
1368
1369 /*
1370 * @implemented
1371 */
1372 PFILE_OBJECT
1373 NTAPI
1374 CcGetFileObjectFromSectionPtrs (
1375 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1376 {
1377 PROS_SHARED_CACHE_MAP SharedCacheMap;
1378
1379 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1380
1381 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1382 {
1383 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1384 ASSERT(SharedCacheMap);
1385 return SharedCacheMap->FileObject;
1386 }
1387 return NULL;
1388 }
1389
1390 VOID
1391 NTAPI
1392 CcShutdownLazyWriter (
1393 VOID)
1394 {
1395 /* Simply set the event, lazy writer will stop when it's done */
1396 KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
1397 }
1398
BOOLEAN
INIT_FUNCTION
NTAPI
CcInitView (
    VOID)
/*
 * FUNCTION: One-time initialization of the cache manager view machinery:
 * global lists and locks, lookaside lists, lazy writer events/thresholds,
 * and the lazy writer system thread. Returns FALSE only if the lazy
 * writer thread could not be created.
 */
{
    HANDLE LazyWriter;
    NTSTATUS Status;
    KPRIORITY Priority;
    OBJECT_ATTRIBUTES ObjectAttributes;

    DPRINT("CcInitView()\n");

    /* Global VACB/cache-map bookkeeping lists and their locks */
    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    InitializeListHead(&CcDeferredWrites);
    InitializeListHead(&CcCleanSharedCacheMapList);
    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
    KeInitializeSpinLock(&iSharedCacheMapLock);
    KeInitializeGuardedMutex(&ViewLock);
    /* Lookaside lists for the three frequently-allocated cache structures */
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    /* Register the cache as a memory consumer so Mm can trim it on pressure */
    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);

    /* Initialize lazy writer events */
    KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
    KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);

    /* Define lazy writer threshold, depending on system type */
    switch (MmQuerySystemSize())
    {
        case MmSmallSystem:
            /* 1/8 of physical memory may be dirty before the writer kicks in */
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
            break;

        case MmMediumSystem:
            /* 1/4 of physical memory */
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
            break;

        case MmLargeSystem:
            /* 3/8 of physical memory (1/8 + 1/4) */
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
            break;
    }

    /* Start the lazy writer thread */
    InitializeObjectAttributes(&ObjectAttributes,
                               NULL,
                               OBJ_KERNEL_HANDLE,
                               NULL,
                               NULL);
    Status = PsCreateSystemThread(&LazyWriter,
                                  THREAD_ALL_ACCESS,
                                  &ObjectAttributes,
                                  NULL,
                                  NULL,
                                  CciLazyWriter,
                                  NULL);
    if (!NT_SUCCESS(Status))
    {
        return FALSE;
    }

    /* Boost the lazy writer above normal threads so write-behind keeps up;
     * 27 is within the realtime priority range. */
    Priority = 27;
    Status = NtSetInformationThread(LazyWriter,
                                    ThreadPriority,
                                    &Priority,
                                    sizeof(Priority));
    ASSERT(NT_SUCCESS(Status));

    /* Handle is not needed */
    ObCloseHandle(LazyWriter, KernelMode);

    CcInitCacheZeroPage();

    return TRUE;
}
1495
1496 #if DBG && defined(KDBG)
1497 BOOLEAN
1498 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1499 {
1500 PLIST_ENTRY ListEntry;
1501 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1502
1503 KdbpPrint(" Usage Summary (in kb)\n");
1504 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1505 /* No need to lock the spin lock here, we're in DBG */
1506 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1507 ListEntry != &CcCleanSharedCacheMapList;
1508 ListEntry = ListEntry->Flink)
1509 {
1510 PLIST_ENTRY Vacbs;
1511 ULONG Valid = 0, Dirty = 0;
1512 PROS_SHARED_CACHE_MAP SharedCacheMap;
1513 PUNICODE_STRING FileName;
1514
1515 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1516
1517 /* First, count for all the associated VACB */
1518 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1519 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1520 Vacbs = Vacbs->Flink)
1521 {
1522 PROS_VACB Vacb;
1523
1524 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1525 if (Vacb->Dirty)
1526 {
1527 Dirty += VACB_MAPPING_GRANULARITY / 1024;
1528 }
1529 if (Vacb->Valid)
1530 {
1531 Valid += VACB_MAPPING_GRANULARITY / 1024;
1532 }
1533 }
1534
1535 /* Setup name */
1536 if (SharedCacheMap->FileObject != NULL &&
1537 SharedCacheMap->FileObject->FileName.Length != 0)
1538 {
1539 FileName = &SharedCacheMap->FileObject->FileName;
1540 }
1541 else
1542 {
1543 FileName = &NoName;
1544 }
1545
1546 /* And print */
1547 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
1548 }
1549
1550 return TRUE;
1551 }
1552 #endif
1553
1554 /* EOF */