[NTOSKRNL] Allow pinned dirty VACB to be lazy written.
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
/* GLOBALS *******************************************************************/

/* Global list of all dirty VACBs, linked via ROS_VACB::DirtyVacbListEntry */
LIST_ENTRY DirtyVacbListHead;
/* LRU-ordered list of all VACBs; tail is the most recently used entry */
static LIST_ENTRY VacbLruListHead;

/* Cache manager's big lock; held around manipulation of the lists above */
KGUARDED_MUTEX ViewLock;

/* Lookaside lists for the cache manager's fixed-size allocations */
NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Counters:
 * - Amount of pages flushed by lazy writer
 * - Number of times lazy writer ran
 */
ULONG CcLazyWritePages = 0;
ULONG CcLazyWriteIos = 0;

/* Internal vars (MS):
 * - Threshold above which lazy writer will start action
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock when dealing with the deferred list
 * - List for "clean" shared cache maps
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;

/* Internal vars (ROS):
 * - Event to notify lazy writer to shutdown
 * - Event to inform watchers lazy writer is done for this loop
 * - Lock for the CcCleanSharedCacheMapList list
 */
KEVENT iLazyWriterShutdown;
KEVENT iLazyWriterNotify;
KSPIN_LOCK iSharedCacheMapLock;
82
#if DBG
/* Debug build: reference-count helpers that log every transition when
 * per-cache-map tracing (SharedCacheMap->Trace) is enabled. */
static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
{
    ++vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
    --vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
/* Capture call site so the trace shows where the count changed */
#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
/* Free build: plain increments/decrements, no tracing overhead.
 * NOTE(review): these are non-interlocked; callers are expected to hold
 * the appropriate lock — confirm against each call site. */
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
108
109 NTSTATUS
110 CcRosInternalFreeVacb(PROS_VACB Vacb);
111
112
113 /* FUNCTIONS *****************************************************************/
114
115 VOID
116 NTAPI
117 CcRosTraceCacheMap (
118 PROS_SHARED_CACHE_MAP SharedCacheMap,
119 BOOLEAN Trace )
120 {
121 #if DBG
122 KIRQL oldirql;
123 PLIST_ENTRY current_entry;
124 PROS_VACB current;
125
126 if (!SharedCacheMap)
127 return;
128
129 SharedCacheMap->Trace = Trace;
130
131 if (Trace)
132 {
133 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
134
135 KeAcquireGuardedMutex(&ViewLock);
136 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
137
138 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
139 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
140 {
141 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
142 current_entry = current_entry->Flink;
143
144 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
145 current, current->ReferenceCount, current->Dirty, current->PageOut );
146 }
147 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
148 KeReleaseGuardedMutex(&ViewLock);
149 }
150 else
151 {
152 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
153 }
154
155 #else
156 UNREFERENCED_PARAMETER(SharedCacheMap);
157 UNREFERENCED_PARAMETER(Trace);
158 #endif
159 }
160
161 NTSTATUS
162 NTAPI
163 CcRosFlushVacb (
164 PROS_VACB Vacb)
165 {
166 NTSTATUS Status;
167 KIRQL oldIrql;
168
169 Status = CcWriteVirtualAddress(Vacb);
170 if (NT_SUCCESS(Status))
171 {
172 KeAcquireGuardedMutex(&ViewLock);
173 KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
174
175 Vacb->Dirty = FALSE;
176 RemoveEntryList(&Vacb->DirtyVacbListEntry);
177 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
178 CcRosVacbDecRefCount(Vacb);
179
180 KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
181 KeReleaseGuardedMutex(&ViewLock);
182 }
183
184 return Status;
185 }
186
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
/*
 * Walk the global dirty-VACB list and write views back to disk until
 * roughly Target pages have been flushed.
 *
 * Target         - number of dirty pages we aim to write back.
 * Count          - receives the number of pages actually flushed.
 * Wait           - TRUE: block on per-VACB locks; FALSE: skip busy VACBs.
 * CalledFromLazy - TRUE when invoked by the lazy writer thread
 *                  (temporary files are then left alone).
 *
 * Always returns STATUS_SUCCESS; individual flush failures are only logged.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;
    ZeroTimeout.QuadPart = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Pin the VACB while we work on it */
        CcRosVacbIncRefCount(current);

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Let the file system synchronize with its own lazy-write path */
        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Zero timeout in the no-wait case: skip instead of blocking */
        Status = CcRosAcquireVacbLock(current,
                                      Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above.
         * Skip VACBs that are actively used elsewhere: beyond our own
         * reference and the dirty-list one, extra references mean other
         * users (the thresholds differ for pinned views).
         * NOTE(review): a VACB with PinCount == 1 matches neither arm and
         * is therefore always flushed regardless of ReferenceCount —
         * confirm this asymmetry is intended. */
        if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
            (current->ReferenceCount > 3 && current->PinCount > 1))
        {
            CcRosReleaseVacbLock(current);
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Drop the global lock for the (potentially long) disk write */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        CcRosReleaseVacbLock(current);
        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            (*Count) += PagesFreed;

            /* Make sure we don't overflow target! */
            if (Target < PagesFreed)
            {
                /* If we would have, jump to zero directly */
                Target = 0;
            }
            else
            {
                Target -= PagesFreed;
            }
        }

        /* The dirty list may have changed while ViewLock was dropped;
         * restart from the head rather than trusting a stale link. */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
308
/* FIXME: Someday this could somewhat implement write-behind/read-ahead */
VOID
NTAPI
CciLazyWriter(PVOID Unused)
/*
 * System thread: once per second flushes one-eighth of the dirty pages,
 * signals iLazyWriterNotify for waiters, and retries one queued deferred
 * write. Exits when iLazyWriterShutdown is signaled.
 */
{
    LARGE_INTEGER OneSecond;

    /* Relative timeout of 1 second (negative value, 100ns units) */
    OneSecond.QuadPart = (LONGLONG)-1*1000*1000*10;

    while (TRUE)
    {
        NTSTATUS Status;
        PLIST_ENTRY ListEntry;
        ULONG Target, Count = 0;

        /* One per second or until we have to stop */
        Status = KeWaitForSingleObject(&iLazyWriterShutdown,
                                       Executive,
                                       KernelMode,
                                       FALSE,
                                       &OneSecond);

        /* If we succeed, we've to stop running! */
        if (Status == STATUS_SUCCESS)
        {
            break;
        }

        /* We're not sleeping anymore */
        KeClearEvent(&iLazyWriterNotify);

        /* Our target is one-eighth of the dirty pages */
        Target = CcTotalDirtyPages / 8;
        if (Target != 0)
        {
            /* Flush! */
            DPRINT("Lazy writer starting (%d)\n", Target);
            CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);

            /* And update stats */
            CcLazyWritePages += Count;
            ++CcLazyWriteIos;
            DPRINT("Lazy writer done (%d)\n", Count);
        }

        /* Inform people waiting on us that we're done */
        KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);

        /* Likely not optimal, but let's handle one deferred write now! */
        ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
        if (ListEntry != NULL)
        {
            PROS_DEFERRED_WRITE_CONTEXT Context;

            /* Extract the context */
            Context = CONTAINING_RECORD(ListEntry, ROS_DEFERRED_WRITE_CONTEXT, CcDeferredWritesEntry);

            /* Can we write now? */
            if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, Context->Retrying))
            {
                /* Yes! Do it, and destroy the associated context */
                Context->PostRoutine(Context->Context1, Context->Context2);
                ExFreePoolWithTag(Context, 'CcDw');
            }
            else
            {
                /* Otherwise, requeue it, but in tail, so that it doesn't block others
                 * This is clearly to improve, but given the poor algorithm used now
                 * It's better than nothing!
                 */
                ExInterlockedInsertTailList(&CcDeferredWrites,
                                            &Context->CcDeferredWritesEntry,
                                            &CcDeferredWriteSpinLock);
            }
        }
    }
}
386
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Walk the LRU list from its head, i.e. least recently used first */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks.
             * NOTE(review): current_entry was captured before the locks
             * were dropped and may now be stale — confirm whether the LRU
             * list can change underneath us here. */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now: no references left means
         * nobody maps it and it isn't dirty (asserted below) */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Unlink it everywhere and park it on the local free list;
             * the actual teardown happens after the locks are dropped */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Now actually destroy the VACBs collected above */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        CcRosInternalFreeVacb(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
508
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
/*
 * Release a VACB previously obtained through CcRosGetVacb/CcRosRequestVacb.
 *   Valid  - whether the view's contents are now up to date.
 *   Dirty  - TRUE if the caller modified the view (sticky; never cleared here).
 *   Mapped - TRUE if the caller established a mapping of the view.
 * Drops the caller's reference and releases the VACB mutex.
 */
{
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    Vacb->Valid = Valid;

    WasDirty = Vacb->Dirty;
    Vacb->Dirty = Vacb->Dirty || Dirty;

    /* Newly dirtied: put it on the global dirty list and account its pages */
    if (!WasDirty && Vacb->Dirty)
    {
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    if (Mapped)
    {
        Vacb->MappedCount++;
    }
    /* Drop the caller's reference... */
    CcRosVacbDecRefCount(Vacb);
    /* ...but add one back for the first mapping of this view... */
    if (Mapped && (Vacb->MappedCount == 1))
    {
        CcRosVacbIncRefCount(Vacb);
    }
    /* ...and one for the dirty-list membership established above */
    if (!WasDirty && Vacb->Dirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}
560
561 /* Returns with VACB Lock Held! */
562 PROS_VACB
563 NTAPI
564 CcRosLookupVacb (
565 PROS_SHARED_CACHE_MAP SharedCacheMap,
566 LONGLONG FileOffset)
567 {
568 PLIST_ENTRY current_entry;
569 PROS_VACB current;
570 KIRQL oldIrql;
571
572 ASSERT(SharedCacheMap);
573
574 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
575 SharedCacheMap, FileOffset);
576
577 KeAcquireGuardedMutex(&ViewLock);
578 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
579
580 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
581 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
582 {
583 current = CONTAINING_RECORD(current_entry,
584 ROS_VACB,
585 CacheMapVacbListEntry);
586 if (IsPointInRange(current->FileOffset.QuadPart,
587 VACB_MAPPING_GRANULARITY,
588 FileOffset))
589 {
590 CcRosVacbIncRefCount(current);
591 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
592 KeReleaseGuardedMutex(&ViewLock);
593 CcRosAcquireVacbLock(current, NULL);
594 return current;
595 }
596 if (current->FileOffset.QuadPart > FileOffset)
597 break;
598 current_entry = current_entry->Flink;
599 }
600
601 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
602 KeReleaseGuardedMutex(&ViewLock);
603
604 return NULL;
605 }
606
VOID
NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
/*
 * Mark a referenced VACB dirty. The caller's reference is consumed:
 * it becomes the dirty-list reference when the VACB was clean, or is
 * simply dropped when the VACB is already on the dirty list.
 */
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    if (!Vacb->Dirty)
    {
        /* First dirtying: queue on the global dirty list, account pages;
         * the caller's reference is kept as the list's reference */
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }
    else
    {
        /* Already dirty: the list holds a reference, drop the caller's */
        CcRosVacbDecRefCount(Vacb);
    }

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
}
639
640 NTSTATUS
641 NTAPI
642 CcRosMarkDirtyFile (
643 PROS_SHARED_CACHE_MAP SharedCacheMap,
644 LONGLONG FileOffset)
645 {
646 PROS_VACB Vacb;
647
648 ASSERT(SharedCacheMap);
649
650 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
651 SharedCacheMap, FileOffset);
652
653 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
654 if (Vacb == NULL)
655 {
656 KeBugCheck(CACHE_MANAGER);
657 }
658
659 CcRosMarkDirtyVacb(Vacb);
660
661
662 CcRosReleaseVacbLock(Vacb);
663
664 return STATUS_SUCCESS;
665 }
666
NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    BOOLEAN NowDirty)
/*
 * Drop one mapping of the view covering FileOffset, optionally marking
 * it dirty. Returns STATUS_UNSUCCESSFUL when no VACB covers the offset.
 */
{
    PROS_VACB Vacb;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    /* Lookup references the VACB and returns with its lock held */
    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    WasDirty = Vacb->Dirty;
    Vacb->Dirty = Vacb->Dirty || NowDirty;

    Vacb->MappedCount--;

    /* Newly dirtied: queue on the global dirty list and account pages */
    if (!WasDirty && NowDirty)
    {
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    /* Drop the lookup reference... */
    CcRosVacbDecRefCount(Vacb);
    /* ...add one back for the new dirty-list membership... */
    if (!WasDirty && NowDirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }
    /* ...and drop the mapping reference once no mappings remain */
    if (Vacb->MappedCount == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}
719
720 static
721 NTSTATUS
722 CcRosMapVacb(
723 PROS_VACB Vacb)
724 {
725 ULONG i;
726 NTSTATUS Status;
727 ULONG_PTR NumberOfPages;
728
729 /* Create a memory area. */
730 MmLockAddressSpace(MmGetKernelAddressSpace());
731 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
732 0, // nothing checks for VACB mareas, so set to 0
733 &Vacb->BaseAddress,
734 VACB_MAPPING_GRANULARITY,
735 PAGE_READWRITE,
736 (PMEMORY_AREA*)&Vacb->MemoryArea,
737 0,
738 PAGE_SIZE);
739 MmUnlockAddressSpace(MmGetKernelAddressSpace());
740 if (!NT_SUCCESS(Status))
741 {
742 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
743 return Status;
744 }
745
746 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
747 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
748
749 /* Create a virtual mapping for this memory area */
750 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
751 for (i = 0; i < NumberOfPages; i++)
752 {
753 PFN_NUMBER PageFrameNumber;
754
755 MI_SET_USAGE(MI_USAGE_CACHE);
756 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
757 if (PageFrameNumber == 0)
758 {
759 DPRINT1("Unable to allocate page\n");
760 KeBugCheck(MEMORY_MANAGEMENT);
761 }
762
763 Status = MmCreateVirtualMapping(NULL,
764 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
765 PAGE_READWRITE,
766 &PageFrameNumber,
767 1);
768 if (!NT_SUCCESS(Status))
769 {
770 DPRINT1("Unable to create virtual mapping\n");
771 KeBugCheck(MEMORY_MANAGEMENT);
772 }
773 }
774
775 return STATUS_SUCCESS;
776 }
777
778 static
779 NTSTATUS
780 CcRosCreateVacb (
781 PROS_SHARED_CACHE_MAP SharedCacheMap,
782 LONGLONG FileOffset,
783 PROS_VACB *Vacb)
784 {
785 PROS_VACB current;
786 PROS_VACB previous;
787 PLIST_ENTRY current_entry;
788 NTSTATUS Status;
789 KIRQL oldIrql;
790
791 ASSERT(SharedCacheMap);
792
793 DPRINT("CcRosCreateVacb()\n");
794
795 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
796 {
797 *Vacb = NULL;
798 return STATUS_INVALID_PARAMETER;
799 }
800
801 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
802 current->BaseAddress = NULL;
803 current->Valid = FALSE;
804 current->Dirty = FALSE;
805 current->PageOut = FALSE;
806 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
807 current->SharedCacheMap = SharedCacheMap;
808 #if DBG
809 if (SharedCacheMap->Trace)
810 {
811 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
812 }
813 #endif
814 current->MappedCount = 0;
815 current->DirtyVacbListEntry.Flink = NULL;
816 current->DirtyVacbListEntry.Blink = NULL;
817 current->ReferenceCount = 1;
818 current->PinCount = 0;
819 KeInitializeMutex(&current->Mutex, 0);
820 CcRosAcquireVacbLock(current, NULL);
821 KeAcquireGuardedMutex(&ViewLock);
822
823 *Vacb = current;
824 /* There is window between the call to CcRosLookupVacb
825 * and CcRosCreateVacb. We must check if a VACB for the
826 * file offset exist. If there is a VACB, we release
827 * our newly created VACB and return the existing one.
828 */
829 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
830 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
831 previous = NULL;
832 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
833 {
834 current = CONTAINING_RECORD(current_entry,
835 ROS_VACB,
836 CacheMapVacbListEntry);
837 if (IsPointInRange(current->FileOffset.QuadPart,
838 VACB_MAPPING_GRANULARITY,
839 FileOffset))
840 {
841 CcRosVacbIncRefCount(current);
842 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
843 #if DBG
844 if (SharedCacheMap->Trace)
845 {
846 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
847 SharedCacheMap,
848 (*Vacb),
849 current);
850 }
851 #endif
852 CcRosReleaseVacbLock(*Vacb);
853 KeReleaseGuardedMutex(&ViewLock);
854 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
855 *Vacb = current;
856 CcRosAcquireVacbLock(current, NULL);
857 return STATUS_SUCCESS;
858 }
859 if (current->FileOffset.QuadPart < FileOffset)
860 {
861 ASSERT(previous == NULL ||
862 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
863 previous = current;
864 }
865 if (current->FileOffset.QuadPart > FileOffset)
866 break;
867 current_entry = current_entry->Flink;
868 }
869 /* There was no existing VACB. */
870 current = *Vacb;
871 if (previous)
872 {
873 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
874 }
875 else
876 {
877 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
878 }
879 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
880 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
881 KeReleaseGuardedMutex(&ViewLock);
882
883 MI_SET_USAGE(MI_USAGE_CACHE);
884 #if MI_TRACE_PFNS
885 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
886 {
887 PWCHAR pos;
888 ULONG len = 0;
889 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
890 if (pos)
891 {
892 len = wcslen(pos) * sizeof(WCHAR);
893 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
894 }
895 else
896 {
897 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
898 }
899 }
900 #endif
901
902 Status = CcRosMapVacb(current);
903 if (!NT_SUCCESS(Status))
904 {
905 RemoveEntryList(&current->CacheMapVacbListEntry);
906 RemoveEntryList(&current->VacbLruListEntry);
907 CcRosReleaseVacbLock(current);
908 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
909 }
910
911 return Status;
912 }
913
914 NTSTATUS
915 NTAPI
916 CcRosGetVacb (
917 PROS_SHARED_CACHE_MAP SharedCacheMap,
918 LONGLONG FileOffset,
919 PLONGLONG BaseOffset,
920 PVOID* BaseAddress,
921 PBOOLEAN UptoDate,
922 PROS_VACB *Vacb)
923 {
924 PROS_VACB current;
925 NTSTATUS Status;
926
927 ASSERT(SharedCacheMap);
928
929 DPRINT("CcRosGetVacb()\n");
930
931 /*
932 * Look for a VACB already mapping the same data.
933 */
934 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
935 if (current == NULL)
936 {
937 /*
938 * Otherwise create a new VACB.
939 */
940 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
941 if (!NT_SUCCESS(Status))
942 {
943 return Status;
944 }
945 }
946
947 KeAcquireGuardedMutex(&ViewLock);
948
949 /* Move to the tail of the LRU list */
950 RemoveEntryList(&current->VacbLruListEntry);
951 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
952
953 KeReleaseGuardedMutex(&ViewLock);
954
955 /*
956 * Return information about the VACB to the caller.
957 */
958 *UptoDate = current->Valid;
959 *BaseAddress = current->BaseAddress;
960 DPRINT("*BaseAddress %p\n", *BaseAddress);
961 *Vacb = current;
962 *BaseOffset = current->FileOffset.QuadPart;
963 return STATUS_SUCCESS;
964 }
965
966 NTSTATUS
967 NTAPI
968 CcRosRequestVacb (
969 PROS_SHARED_CACHE_MAP SharedCacheMap,
970 LONGLONG FileOffset,
971 PVOID* BaseAddress,
972 PBOOLEAN UptoDate,
973 PROS_VACB *Vacb)
974 /*
975 * FUNCTION: Request a page mapping for a shared cache map
976 */
977 {
978 LONGLONG BaseOffset;
979
980 ASSERT(SharedCacheMap);
981
982 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
983 {
984 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
985 FileOffset, VACB_MAPPING_GRANULARITY);
986 KeBugCheck(CACHE_MANAGER);
987 }
988
989 return CcRosGetVacb(SharedCacheMap,
990 FileOffset,
991 &BaseOffset,
992 BaseAddress,
993 UptoDate,
994 Vacb);
995 }
996
997 static
998 VOID
999 CcFreeCachePage (
1000 PVOID Context,
1001 MEMORY_AREA* MemoryArea,
1002 PVOID Address,
1003 PFN_NUMBER Page,
1004 SWAPENTRY SwapEntry,
1005 BOOLEAN Dirty)
1006 {
1007 ASSERT(SwapEntry == 0);
1008 if (Page != 0)
1009 {
1010 ASSERT(MmGetReferenceCountPage(Page) == 1);
1011 MmReleasePageMemoryConsumer(MC_CACHE, Page);
1012 }
1013 }
1014
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 * The VACB must already be unlinked from all lists; its memory area is
 * torn down (returning each page via CcFreeCachePage) and the structure
 * goes back to the lookaside list.
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
1040
/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
/*
 * Write dirty cached views of a file back to disk. With FileOffset NULL
 * the whole cached file (up to FileSize) is flushed; otherwise Length
 * bytes starting at *FileOffset. IoStatus (optional) receives the last
 * flush failure status, or STATUS_INVALID_PARAMETER when the file has
 * no shared cache map.
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;
    KIRQL oldIrql;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
        SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            /* No range given: flush the entire cached file */
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one view (VACB_MAPPING_GRANULARITY) at a time */
        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                CcRosReleaseVacbLock(current);

                /* Drop the reference taken by CcRosLookupVacb */
                KeAcquireGuardedMutex(&ViewLock);
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosVacbDecRefCount(current);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                KeReleaseGuardedMutex(&ViewLock);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1121
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 * Must be entered with ViewLock held (it is temporarily released around
 * the flush) and returns with ViewLock held again.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    /* Temporarily bump OpenCount so the map cannot be torn down
     * reentrantly while we flush with ViewLock dropped */
    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        KIRQL OldIrql;

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* Still dirty despite the flush above: unaccount it.
                 * NOTE(review): the view is freed without being written
                 * back here — possible data loss, hence the warning. */
                RemoveEntryList(&current->DirtyVacbListEntry);
                CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
                DPRINT1("Freeing dirty VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        /* Actually destroy the collected VACBs outside the locks */
        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosInternalFreeVacb(current);
        }

        /* Unlink the map from the global shared-cache-map list */
        KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1193
1194 VOID
1195 NTAPI
1196 CcRosReferenceCache (
1197 PFILE_OBJECT FileObject)
1198 {
1199 PROS_SHARED_CACHE_MAP SharedCacheMap;
1200 KeAcquireGuardedMutex(&ViewLock);
1201 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1202 ASSERT(SharedCacheMap);
1203 ASSERT(SharedCacheMap->OpenCount != 0);
1204 SharedCacheMap->OpenCount++;
1205 KeReleaseGuardedMutex(&ViewLock);
1206 }
1207
1208 VOID
1209 NTAPI
1210 CcRosRemoveIfClosed (
1211 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1212 {
1213 PROS_SHARED_CACHE_MAP SharedCacheMap;
1214 DPRINT("CcRosRemoveIfClosed()\n");
1215 KeAcquireGuardedMutex(&ViewLock);
1216 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1217 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1218 {
1219 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1220 }
1221 KeReleaseGuardedMutex(&ViewLock);
1222 }
1223
1224
1225 VOID
1226 NTAPI
1227 CcRosDereferenceCache (
1228 PFILE_OBJECT FileObject)
1229 {
1230 PROS_SHARED_CACHE_MAP SharedCacheMap;
1231 KeAcquireGuardedMutex(&ViewLock);
1232 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1233 ASSERT(SharedCacheMap);
1234 if (SharedCacheMap->OpenCount > 0)
1235 {
1236 SharedCacheMap->OpenCount--;
1237 if (SharedCacheMap->OpenCount == 0)
1238 {
1239 MmFreeSectionSegments(SharedCacheMap->FileObject);
1240 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1241 }
1242 }
1243 KeReleaseGuardedMutex(&ViewLock);
1244 }
1245
1246 NTSTATUS
1247 NTAPI
1248 CcRosReleaseFileCache (
1249 PFILE_OBJECT FileObject)
1250 /*
1251 * FUNCTION: Called by the file system when a handle to a file object
1252 * has been closed.
1253 */
1254 {
1255 PROS_SHARED_CACHE_MAP SharedCacheMap;
1256
1257 KeAcquireGuardedMutex(&ViewLock);
1258
1259 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1260 {
1261 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1262 if (FileObject->PrivateCacheMap != NULL)
1263 {
1264 FileObject->PrivateCacheMap = NULL;
1265 if (SharedCacheMap->OpenCount > 0)
1266 {
1267 SharedCacheMap->OpenCount--;
1268 if (SharedCacheMap->OpenCount == 0)
1269 {
1270 MmFreeSectionSegments(SharedCacheMap->FileObject);
1271 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1272 }
1273 }
1274 }
1275 }
1276 KeReleaseGuardedMutex(&ViewLock);
1277 return STATUS_SUCCESS;
1278 }
1279
1280 NTSTATUS
1281 NTAPI
1282 CcTryToInitializeFileCache (
1283 PFILE_OBJECT FileObject)
1284 {
1285 PROS_SHARED_CACHE_MAP SharedCacheMap;
1286 NTSTATUS Status;
1287
1288 KeAcquireGuardedMutex(&ViewLock);
1289
1290 ASSERT(FileObject->SectionObjectPointer);
1291 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1292 if (SharedCacheMap == NULL)
1293 {
1294 Status = STATUS_UNSUCCESSFUL;
1295 }
1296 else
1297 {
1298 if (FileObject->PrivateCacheMap == NULL)
1299 {
1300 FileObject->PrivateCacheMap = SharedCacheMap;
1301 SharedCacheMap->OpenCount++;
1302 }
1303 Status = STATUS_SUCCESS;
1304 }
1305 KeReleaseGuardedMutex(&ViewLock);
1306
1307 return Status;
1308 }
1309
1310
1311 NTSTATUS
1312 NTAPI
1313 CcRosInitializeFileCache (
1314 PFILE_OBJECT FileObject,
1315 PCC_FILE_SIZES FileSizes,
1316 BOOLEAN PinAccess,
1317 PCACHE_MANAGER_CALLBACKS CallBacks,
1318 PVOID LazyWriterContext)
1319 /*
1320 * FUNCTION: Initializes a shared cache map for a file object
1321 */
1322 {
1323 PROS_SHARED_CACHE_MAP SharedCacheMap;
1324
1325 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1326 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1327 FileObject, SharedCacheMap);
1328
1329 KeAcquireGuardedMutex(&ViewLock);
1330 if (SharedCacheMap == NULL)
1331 {
1332 KIRQL OldIrql;
1333
1334 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1335 if (SharedCacheMap == NULL)
1336 {
1337 KeReleaseGuardedMutex(&ViewLock);
1338 return STATUS_INSUFFICIENT_RESOURCES;
1339 }
1340 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1341 ObReferenceObjectByPointer(FileObject,
1342 FILE_ALL_ACCESS,
1343 NULL,
1344 KernelMode);
1345 SharedCacheMap->FileObject = FileObject;
1346 SharedCacheMap->Callbacks = CallBacks;
1347 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1348 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1349 SharedCacheMap->FileSize = FileSizes->FileSize;
1350 SharedCacheMap->PinAccess = PinAccess;
1351 SharedCacheMap->DirtyPageThreshold = 0;
1352 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1353 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1354 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1355
1356 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1357 InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
1358 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1359 }
1360 if (FileObject->PrivateCacheMap == NULL)
1361 {
1362 FileObject->PrivateCacheMap = SharedCacheMap;
1363 SharedCacheMap->OpenCount++;
1364 }
1365 KeReleaseGuardedMutex(&ViewLock);
1366
1367 return STATUS_SUCCESS;
1368 }
1369
1370 /*
1371 * @implemented
1372 */
1373 PFILE_OBJECT
1374 NTAPI
1375 CcGetFileObjectFromSectionPtrs (
1376 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1377 {
1378 PROS_SHARED_CACHE_MAP SharedCacheMap;
1379
1380 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1381
1382 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1383 {
1384 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1385 ASSERT(SharedCacheMap);
1386 return SharedCacheMap->FileObject;
1387 }
1388 return NULL;
1389 }
1390
VOID
NTAPI
CcShutdownLazyWriter (
    VOID)
/*
 * FUNCTION: Requests termination of the lazy writer by signaling its
 * shutdown event. Does not wait for the thread to actually exit.
 */
{
    /* Simply set the event, lazy writer will stop when it's done */
    KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
}
1399
1400 BOOLEAN
1401 INIT_FUNCTION
1402 NTAPI
1403 CcInitView (
1404 VOID)
1405 {
1406 HANDLE LazyWriter;
1407 NTSTATUS Status;
1408 KPRIORITY Priority;
1409 OBJECT_ATTRIBUTES ObjectAttributes;
1410
1411 DPRINT("CcInitView()\n");
1412
1413 InitializeListHead(&DirtyVacbListHead);
1414 InitializeListHead(&VacbLruListHead);
1415 InitializeListHead(&CcDeferredWrites);
1416 InitializeListHead(&CcCleanSharedCacheMapList);
1417 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1418 KeInitializeSpinLock(&iSharedCacheMapLock);
1419 KeInitializeGuardedMutex(&ViewLock);
1420 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1421 NULL,
1422 NULL,
1423 0,
1424 sizeof(INTERNAL_BCB),
1425 TAG_BCB,
1426 20);
1427 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1428 NULL,
1429 NULL,
1430 0,
1431 sizeof(ROS_SHARED_CACHE_MAP),
1432 TAG_SHARED_CACHE_MAP,
1433 20);
1434 ExInitializeNPagedLookasideList(&VacbLookasideList,
1435 NULL,
1436 NULL,
1437 0,
1438 sizeof(ROS_VACB),
1439 TAG_VACB,
1440 20);
1441
1442 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1443
1444 /* Initialize lazy writer events */
1445 KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
1446 KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);
1447
1448 /* Define lazy writer threshold, depending on system type */
1449 switch (MmQuerySystemSize())
1450 {
1451 case MmSmallSystem:
1452 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
1453 break;
1454
1455 case MmMediumSystem:
1456 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
1457 break;
1458
1459 case MmLargeSystem:
1460 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
1461 break;
1462 }
1463
1464 /* Start the lazy writer thread */
1465 InitializeObjectAttributes(&ObjectAttributes,
1466 NULL,
1467 OBJ_KERNEL_HANDLE,
1468 NULL,
1469 NULL);
1470 Status = PsCreateSystemThread(&LazyWriter,
1471 THREAD_ALL_ACCESS,
1472 &ObjectAttributes,
1473 NULL,
1474 NULL,
1475 CciLazyWriter,
1476 NULL);
1477 if (!NT_SUCCESS(Status))
1478 {
1479 return FALSE;
1480 }
1481
1482 Priority = 27;
1483 Status = NtSetInformationThread(LazyWriter,
1484 ThreadPriority,
1485 &Priority,
1486 sizeof(Priority));
1487 ASSERT(NT_SUCCESS(Status));
1488
1489 /* Handle is not needed */
1490 ObCloseHandle(LazyWriter, KernelMode);
1491
1492 CcInitCacheZeroPage();
1493
1494 return TRUE;
1495 }
1496
1497 #if DBG && defined(KDBG)
1498 BOOLEAN
1499 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1500 {
1501 PLIST_ENTRY ListEntry;
1502 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1503
1504 KdbpPrint(" Usage Summary (in kb)\n");
1505 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1506 /* No need to lock the spin lock here, we're in DBG */
1507 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1508 ListEntry != &CcCleanSharedCacheMapList;
1509 ListEntry = ListEntry->Flink)
1510 {
1511 PLIST_ENTRY Vacbs;
1512 ULONG Valid = 0, Dirty = 0;
1513 PROS_SHARED_CACHE_MAP SharedCacheMap;
1514 PUNICODE_STRING FileName;
1515
1516 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1517
1518 /* First, count for all the associated VACB */
1519 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1520 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1521 Vacbs = Vacbs->Flink)
1522 {
1523 PROS_VACB Vacb;
1524
1525 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1526 if (Vacb->Dirty)
1527 {
1528 Dirty += VACB_MAPPING_GRANULARITY / 1024;
1529 }
1530 if (Vacb->Valid)
1531 {
1532 Valid += VACB_MAPPING_GRANULARITY / 1024;
1533 }
1534 }
1535
1536 /* Setup name */
1537 if (SharedCacheMap->FileObject != NULL &&
1538 SharedCacheMap->FileObject->FileName.Length != 0)
1539 {
1540 FileName = &SharedCacheMap->FileObject->FileName;
1541 }
1542 else
1543 {
1544 FileName = &NoName;
1545 }
1546
1547 /* And print */
1548 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
1549 }
1550
1551 return TRUE;
1552 }
1553 #endif
1554
1555 /* EOF */