[NTOSKRNL] Add an assert against VACB reference count overflow.
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
19 * (1) If caching for the FCB hasn't been initiated then do so by calling
20 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If the page is either being read or not being completely written, and it is
26 * not up to date, then read its data from the underlying medium. If the read
27 * fails, call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
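/*
 * A rough, non-compiled sketch of the procedure above. The routine names and
 * parameter lists are placeholders (CcRequestCachePage and CcReleaseCachePage
 * come from the historical comment, ReadRegionFromDisk is a hypothetical
 * helper); the current internal entry points are CcRosRequestVacb and
 * CcRosReleaseVacb further down in this file.
 *
 *   CcInitializeFileCache(FileObject, ...);                          // step (1), once per FCB
 *   for (each cached region covered by the request)
 *   {
 *       CcRequestCachePage(..., &BaseAddress, &UpToDate, &CachePage);        // step (2)
 *       if ((reading || partial write) && !UpToDate &&
 *           !NT_SUCCESS(ReadRegionFromDisk(...)))                            // step (3)
 *       {
 *           CcReleaseCachePage(CachePage, FALSE, ...);                       // VALID == FALSE
 *           return Status;
 *       }
 *       copy data into or out of BaseAddress;                                // step (4)
 *       CcReleaseCachePage(CachePage, TRUE, ...);                            // step (5)
 *   }
 */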
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
54 /* Counters:
55 * - Amount of pages flushed by lazy writer
56 * - Number of times lazy writer ran
57 */
58 ULONG CcLazyWritePages = 0;
59 ULONG CcLazyWriteIos = 0;
60
61 /* Internal vars (MS):
62 * - Threshold above which lazy writer will start action
63 * - Amount of dirty pages
64 * - List for deferred writes
65 * - Spinlock when dealing with the deferred list
66 * - List for "clean" shared cache maps
67 * - One second delay for lazy writer
68 */
69 ULONG CcDirtyPageThreshold = 0;
70 ULONG CcTotalDirtyPages = 0;
71 LIST_ENTRY CcDeferredWrites;
72 KSPIN_LOCK CcDeferredWriteSpinLock;
73 LIST_ENTRY CcCleanSharedCacheMapList;
74 LARGE_INTEGER CcIdleDelay = RTL_CONSTANT_LARGE_INTEGER((LONGLONG)-1*1000*1000*10);
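/* Note: a negative value means a relative timeout, expressed in 100-nanosecond
 * units: -10,000,000 * 100 ns = 1 second, matching the comment above. */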
75
76 /* Internal vars (ROS):
77 * - Event to notify the lazy writer to shut down
78 * - Event to inform watchers that the lazy writer is done for this loop
79 * - Lock for the CcCleanSharedCacheMapList list
80 */
81 KEVENT iLazyWriterShutdown;
82 KEVENT iLazyWriterNotify;
83 KSPIN_LOCK iSharedCacheMapLock;
84
85 #if DBG
86 static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
87 {
88 ++vacb->ReferenceCount;
    /* Catch a wrap-around of the reference count */
    ASSERT(vacb->ReferenceCount != 0);
89 if (vacb->SharedCacheMap->Trace)
90 {
91 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
92 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
93 }
94 }
95 static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
96 {
97 ASSERT(vacb->ReferenceCount != 0);
98 --vacb->ReferenceCount;
99 ASSERT(!(vacb->ReferenceCount == 0 && vacb->Dirty));
100 if (vacb->SharedCacheMap->Trace)
101 {
102 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
103 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
104 }
105 }
106 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
107 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
108 #else
109 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
110 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
111 #endif
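/* Note that in non-DBG builds the helpers above reduce to bare increments and
 * decrements: no tracing and no reference count assertions are performed. */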
112
113 NTSTATUS
114 CcRosInternalFreeVacb(PROS_VACB Vacb);
115
116
117 /* FUNCTIONS *****************************************************************/
118
119 VOID
120 NTAPI
121 CcRosTraceCacheMap (
122 PROS_SHARED_CACHE_MAP SharedCacheMap,
123 BOOLEAN Trace )
124 {
125 #if DBG
126 KIRQL oldirql;
127 PLIST_ENTRY current_entry;
128 PROS_VACB current;
129
130 if (!SharedCacheMap)
131 return;
132
133 SharedCacheMap->Trace = Trace;
134
135 if (Trace)
136 {
137 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
138
139 KeAcquireGuardedMutex(&ViewLock);
140 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
141
142 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
143 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
144 {
145 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
146 current_entry = current_entry->Flink;
147
148 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
149 current, current->ReferenceCount, current->Dirty, current->PageOut );
150 }
151 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
152 KeReleaseGuardedMutex(&ViewLock);
153 }
154 else
155 {
156 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
157 }
158
159 #else
160 UNREFERENCED_PARAMETER(SharedCacheMap);
161 UNREFERENCED_PARAMETER(Trace);
162 #endif
163 }
164
165 NTSTATUS
166 NTAPI
167 CcRosFlushVacb (
168 PROS_VACB Vacb)
169 {
170 NTSTATUS Status;
171
172 Status = CcWriteVirtualAddress(Vacb);
173 if (NT_SUCCESS(Status))
174 {
175 CcRosUnmarkDirtyVacb(Vacb, TRUE);
176 }
177
178 return Status;
179 }
180
181 NTSTATUS
182 NTAPI
183 CcRosFlushDirtyPages (
184 ULONG Target,
185 PULONG Count,
186 BOOLEAN Wait,
187 BOOLEAN CalledFromLazy)
188 {
189 PLIST_ENTRY current_entry;
190 PROS_VACB current;
191 BOOLEAN Locked;
192 NTSTATUS Status;
193 LARGE_INTEGER ZeroTimeout;
194
195 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
196
197 (*Count) = 0;
198 ZeroTimeout.QuadPart = 0;
199
200 KeEnterCriticalRegion();
201 KeAcquireGuardedMutex(&ViewLock);
202
203 current_entry = DirtyVacbListHead.Flink;
204 if (current_entry == &DirtyVacbListHead)
205 {
206 DPRINT("No Dirty pages\n");
207 }
208
209 while ((current_entry != &DirtyVacbListHead) && (Target > 0))
210 {
211 current = CONTAINING_RECORD(current_entry,
212 ROS_VACB,
213 DirtyVacbListEntry);
214 current_entry = current_entry->Flink;
215
216 CcRosVacbIncRefCount(current);
217
218 /* When performing lazy write, don't handle temporary files */
219 if (CalledFromLazy &&
220 BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
221 {
222 CcRosVacbDecRefCount(current);
223 continue;
224 }
225
226 Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
227 current->SharedCacheMap->LazyWriteContext, Wait);
228 if (!Locked)
229 {
230 CcRosVacbDecRefCount(current);
231 continue;
232 }
233
234 Status = CcRosAcquireVacbLock(current,
235 Wait ? NULL : &ZeroTimeout);
236 if (Status != STATUS_SUCCESS)
237 {
238 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
239 current->SharedCacheMap->LazyWriteContext);
240 CcRosVacbDecRefCount(current);
241 continue;
242 }
243
244 ASSERT(current->Dirty);
245
246 /* One reference is added above */
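        /* (A dirty VACB also carries a reference of its own, taken in
         * CcRosMarkDirtyVacb, so anything beyond that means the view is still
         * in use elsewhere and is skipped for now.) */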
247 if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
248 (current->ReferenceCount > 3 && current->PinCount > 1))
249 {
250 CcRosReleaseVacbLock(current);
251 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
252 current->SharedCacheMap->LazyWriteContext);
253 CcRosVacbDecRefCount(current);
254 continue;
255 }
256
257 KeReleaseGuardedMutex(&ViewLock);
258
259 Status = CcRosFlushVacb(current);
260
261 CcRosReleaseVacbLock(current);
262 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
263 current->SharedCacheMap->LazyWriteContext);
264
265 KeAcquireGuardedMutex(&ViewLock);
266 CcRosVacbDecRefCount(current);
267
268 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
269 (Status != STATUS_MEDIA_WRITE_PROTECTED))
270 {
271 DPRINT1("CC: Failed to flush VACB.\n");
272 }
273 else
274 {
275 ULONG PagesFreed;
276
277 /* How many pages did we free? */
278 PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
279 (*Count) += PagesFreed;
280
281 /* Make sure Target doesn't wrap below zero! */
282 if (Target < PagesFreed)
283 {
284 /* If we would have, jump to zero directly */
285 Target = 0;
286 }
287 else
288 {
289 Target -= PagesFreed;
290 }
291 }
292
293 current_entry = DirtyVacbListHead.Flink;
294 }
295
296 KeReleaseGuardedMutex(&ViewLock);
297 KeLeaveCriticalRegion();
298
299 DPRINT("CcRosFlushDirtyPages() finished\n");
300 return STATUS_SUCCESS;
301 }
302
303 /* FIXME: Someday this could somewhat implement write-behind/read-ahead */
304 VOID
305 NTAPI
306 CciLazyWriter(PVOID Unused)
307 {
308 while (TRUE)
309 {
310 NTSTATUS Status;
311 PLIST_ENTRY ListEntry;
312 ULONG Target, Count = 0;
313
314 /* One per second or until we have to stop */
315 Status = KeWaitForSingleObject(&iLazyWriterShutdown,
316 Executive,
317 KernelMode,
318 FALSE,
319 &CcIdleDelay);
320
321 /* If we succeed, we have to stop running! */
322 if (Status == STATUS_SUCCESS)
323 {
324 break;
325 }
326
327 /* We're not sleeping anymore */
328 KeClearEvent(&iLazyWriterNotify);
329
330 /* Our target is one-eighth of the dirty pages */
331 Target = CcTotalDirtyPages / 8;
332 if (Target != 0)
333 {
334 /* Flush! */
335 DPRINT("Lazy writer starting (%d)\n", Target);
336 CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);
337
338 /* And update stats */
339 CcLazyWritePages += Count;
340 ++CcLazyWriteIos;
341 DPRINT("Lazy writer done (%d)\n", Count);
342 }
343
344 /* Inform people waiting on us that we're done */
345 KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
346
347 /* Likely not optimal, but let's handle one deferred write now! */
348 ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
349 if (ListEntry != NULL)
350 {
351 PDEFERRED_WRITE Context;
352
353 /* Extract the context */
354 Context = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);
355 ASSERT(Context->NodeTypeCode == NODE_TYPE_DEFERRED_WRITE);
356
357 /* Can we write now? */
358 if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, TRUE))
359 {
360 /* Yes! Do it, and destroy the associated context */
361 Context->PostRoutine(Context->Context1, Context->Context2);
362 ExFreePoolWithTag(Context, 'CcDw');
363 }
364 else
365 {
366 /* Otherwise, requeue it at the tail so that it doesn't block others.
367 * This clearly needs improving, but given the poor algorithm used now,
368 * it's better than nothing!
369 */
370 ExInterlockedInsertTailList(&CcDeferredWrites,
371 &Context->DeferredWriteLinks,
372 &CcDeferredWriteSpinLock);
373 }
374 }
375 }
376 }
377
378 NTSTATUS
379 CcRosTrimCache (
380 ULONG Target,
381 ULONG Priority,
382 PULONG NrFreed)
383 /*
384 * FUNCTION: Try to free some memory from the file cache.
385 * ARGUMENTS:
386 * Target - The number of pages to be freed.
387 * Priority - The priority of free (currently unused).
388 * NrFreed - Points to a variable where the number of pages
389 * actually freed is returned.
390 */
391 {
392 PLIST_ENTRY current_entry;
393 PROS_VACB current;
394 ULONG PagesFreed;
395 KIRQL oldIrql;
396 LIST_ENTRY FreeList;
397 PFN_NUMBER Page;
398 ULONG i;
399 BOOLEAN FlushedPages = FALSE;
400
401 DPRINT("CcRosTrimCache(Target %lu)\n", Target);
402
403 InitializeListHead(&FreeList);
404
405 *NrFreed = 0;
406
407 retry:
408 KeAcquireGuardedMutex(&ViewLock);
409
410 current_entry = VacbLruListHead.Flink;
411 while (current_entry != &VacbLruListHead)
412 {
413 current = CONTAINING_RECORD(current_entry,
414 ROS_VACB,
415 VacbLruListEntry);
416 current_entry = current_entry->Flink;
417
418 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
419
420 /* Reference the VACB */
421 CcRosVacbIncRefCount(current);
422
423 /* Check if it's mapped and not dirty */
424 if (current->MappedCount > 0 && !current->Dirty)
425 {
426 /* We have to break these locks because Cc sucks */
427 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
428 KeReleaseGuardedMutex(&ViewLock);
429
430 /* Page out the VACB */
431 for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
432 {
433 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
434
435 MmPageOutPhysicalAddress(Page);
436 }
437
438 /* Reacquire the locks */
439 KeAcquireGuardedMutex(&ViewLock);
440 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
441 }
442
443 /* Dereference the VACB */
444 CcRosVacbDecRefCount(current);
445
446 /* Check if we can free this entry now */
447 if (current->ReferenceCount == 0)
448 {
449 ASSERT(!current->Dirty);
450 ASSERT(!current->MappedCount);
451
452 RemoveEntryList(&current->CacheMapVacbListEntry);
453 RemoveEntryList(&current->VacbLruListEntry);
454 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
455
456 /* Calculate how many pages we freed for Mm */
457 PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
458 Target -= PagesFreed;
459 (*NrFreed) += PagesFreed;
460 }
461
462 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
463 }
464
465 KeReleaseGuardedMutex(&ViewLock);
466
467 /* Try flushing pages if we haven't met our target */
468 if ((Target > 0) && !FlushedPages)
469 {
470 /* Flush dirty pages to disk */
471 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
472 FlushedPages = TRUE;
473
474 /* We can only swap as many pages as we flushed */
475 if (PagesFreed < Target) Target = PagesFreed;
476
477 /* Check if we flushed anything */
478 if (PagesFreed != 0)
479 {
480 /* Try again after flushing dirty pages */
481 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
482 goto retry;
483 }
484 }
485
486 while (!IsListEmpty(&FreeList))
487 {
488 current_entry = RemoveHeadList(&FreeList);
489 current = CONTAINING_RECORD(current_entry,
490 ROS_VACB,
491 CacheMapVacbListEntry);
492 CcRosInternalFreeVacb(current);
493 }
494
495 DPRINT("Evicted %lu cache pages\n", (*NrFreed));
496
497 return STATUS_SUCCESS;
498 }
499
500 NTSTATUS
501 NTAPI
502 CcRosReleaseVacb (
503 PROS_SHARED_CACHE_MAP SharedCacheMap,
504 PROS_VACB Vacb,
505 BOOLEAN Valid,
506 BOOLEAN Dirty,
507 BOOLEAN Mapped)
508 {
509 ASSERT(SharedCacheMap);
510
511 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
512 SharedCacheMap, Vacb, Valid);
513
514 Vacb->Valid = Valid;
515
516 if (Dirty && !Vacb->Dirty)
517 {
518 CcRosMarkDirtyVacb(Vacb);
519 }
520
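    /* A mapped view must keep its VACB referenced: the first mapping takes an
     * extra reference below, and CcRosUnmapVacb drops it again once
     * MappedCount falls back to zero. */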
521 if (Mapped)
522 {
523 Vacb->MappedCount++;
524 }
525 CcRosVacbDecRefCount(Vacb);
526 if (Mapped && (Vacb->MappedCount == 1))
527 {
528 CcRosVacbIncRefCount(Vacb);
529 }
530
531 CcRosReleaseVacbLock(Vacb);
532
533 return STATUS_SUCCESS;
534 }
535
536 /* Returns with VACB Lock Held! */
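/* The per-map VACB list is kept sorted by file offset (see the insertion logic
 * in CcRosCreateVacb), which is why the walk below can stop as soon as it has
 * passed the requested offset. */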
537 PROS_VACB
538 NTAPI
539 CcRosLookupVacb (
540 PROS_SHARED_CACHE_MAP SharedCacheMap,
541 LONGLONG FileOffset)
542 {
543 PLIST_ENTRY current_entry;
544 PROS_VACB current;
545 KIRQL oldIrql;
546
547 ASSERT(SharedCacheMap);
548
549 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
550 SharedCacheMap, FileOffset);
551
552 KeAcquireGuardedMutex(&ViewLock);
553 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
554
555 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
556 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
557 {
558 current = CONTAINING_RECORD(current_entry,
559 ROS_VACB,
560 CacheMapVacbListEntry);
561 if (IsPointInRange(current->FileOffset.QuadPart,
562 VACB_MAPPING_GRANULARITY,
563 FileOffset))
564 {
565 CcRosVacbIncRefCount(current);
566 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
567 KeReleaseGuardedMutex(&ViewLock);
568 CcRosAcquireVacbLock(current, NULL);
569 return current;
570 }
571 if (current->FileOffset.QuadPart > FileOffset)
572 break;
573 current_entry = current_entry->Flink;
574 }
575
576 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
577 KeReleaseGuardedMutex(&ViewLock);
578
579 return NULL;
580 }
581
582 VOID
583 NTAPI
584 CcRosMarkDirtyVacb (
585 PROS_VACB Vacb)
586 {
587 KIRQL oldIrql;
588 PROS_SHARED_CACHE_MAP SharedCacheMap;
589
590 SharedCacheMap = Vacb->SharedCacheMap;
591
592 KeAcquireGuardedMutex(&ViewLock);
593 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
594
595 ASSERT(!Vacb->Dirty);
596
597 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
598 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
599 Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
600 CcRosVacbIncRefCount(Vacb);
601
602 /* Move to the tail of the LRU list */
603 RemoveEntryList(&Vacb->VacbLruListEntry);
604 InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
605
606 Vacb->Dirty = TRUE;
607
608 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
609 KeReleaseGuardedMutex(&ViewLock);
610 }
611
612 VOID
613 NTAPI
614 CcRosUnmarkDirtyVacb (
615 PROS_VACB Vacb,
616 BOOLEAN LockViews)
617 {
618 KIRQL oldIrql;
619 PROS_SHARED_CACHE_MAP SharedCacheMap;
620
621 SharedCacheMap = Vacb->SharedCacheMap;
622
623 if (LockViews)
624 {
625 KeAcquireGuardedMutex(&ViewLock);
626 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
627 }
628
629 ASSERT(Vacb->Dirty);
630
631 Vacb->Dirty = FALSE;
632
633 RemoveEntryList(&Vacb->DirtyVacbListEntry);
634 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
635 Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
636 CcRosVacbDecRefCount(Vacb);
637
638 if (LockViews)
639 {
640 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
641 KeReleaseGuardedMutex(&ViewLock);
642 }
643 }
644
645 NTSTATUS
646 NTAPI
647 CcRosMarkDirtyFile (
648 PROS_SHARED_CACHE_MAP SharedCacheMap,
649 LONGLONG FileOffset)
650 {
651 PROS_VACB Vacb;
652
653 ASSERT(SharedCacheMap);
654
655 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
656 SharedCacheMap, FileOffset);
657
658 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
659 if (Vacb == NULL)
660 {
661 KeBugCheck(CACHE_MANAGER);
662 }
663
664 if (!Vacb->Dirty)
665 {
666 CcRosMarkDirtyVacb(Vacb);
667 }
668
669 CcRosReleaseVacbLock(Vacb);
670
671 return STATUS_SUCCESS;
672 }
673
674 NTSTATUS
675 NTAPI
676 CcRosUnmapVacb (
677 PROS_SHARED_CACHE_MAP SharedCacheMap,
678 LONGLONG FileOffset,
679 BOOLEAN NowDirty)
680 {
681 PROS_VACB Vacb;
682
683 ASSERT(SharedCacheMap);
684
685 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
686 SharedCacheMap, FileOffset, NowDirty);
687
688 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
689 if (Vacb == NULL)
690 {
691 return STATUS_UNSUCCESSFUL;
692 }
693
694 if (NowDirty && !Vacb->Dirty)
695 {
696 CcRosMarkDirtyVacb(Vacb);
697 }
698
699 Vacb->MappedCount--;
700
701 CcRosVacbDecRefCount(Vacb);
702 if (Vacb->MappedCount == 0)
703 {
704 CcRosVacbDecRefCount(Vacb);
705 }
706
707 CcRosReleaseVacbLock(Vacb);
708
709 return STATUS_SUCCESS;
710 }
711
712 static
713 NTSTATUS
714 CcRosMapVacb(
715 PROS_VACB Vacb)
716 {
717 ULONG i;
718 NTSTATUS Status;
719 ULONG_PTR NumberOfPages;
720
721 /* Create a memory area. */
722 MmLockAddressSpace(MmGetKernelAddressSpace());
723 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
724 0, // nothing checks for VACB mareas, so set to 0
725 &Vacb->BaseAddress,
726 VACB_MAPPING_GRANULARITY,
727 PAGE_READWRITE,
728 (PMEMORY_AREA*)&Vacb->MemoryArea,
729 0,
730 PAGE_SIZE);
731 MmUnlockAddressSpace(MmGetKernelAddressSpace());
732 if (!NT_SUCCESS(Status))
733 {
734 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
735 return Status;
736 }
737
738 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
739 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
740
741 /* Create a virtual mapping for this memory area */
742 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
743 for (i = 0; i < NumberOfPages; i++)
744 {
745 PFN_NUMBER PageFrameNumber;
746
747 MI_SET_USAGE(MI_USAGE_CACHE);
748 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
749 if (PageFrameNumber == 0)
750 {
751 DPRINT1("Unable to allocate page\n");
752 KeBugCheck(MEMORY_MANAGEMENT);
753 }
754
755 Status = MmCreateVirtualMapping(NULL,
756 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
757 PAGE_READWRITE,
758 &PageFrameNumber,
759 1);
760 if (!NT_SUCCESS(Status))
761 {
762 DPRINT1("Unable to create virtual mapping\n");
763 KeBugCheck(MEMORY_MANAGEMENT);
764 }
765 }
766
767 return STATUS_SUCCESS;
768 }
769
770 static
771 NTSTATUS
772 CcRosCreateVacb (
773 PROS_SHARED_CACHE_MAP SharedCacheMap,
774 LONGLONG FileOffset,
775 PROS_VACB *Vacb)
776 {
777 PROS_VACB current;
778 PROS_VACB previous;
779 PLIST_ENTRY current_entry;
780 NTSTATUS Status;
781 KIRQL oldIrql;
782
783 ASSERT(SharedCacheMap);
784
785 DPRINT("CcRosCreateVacb()\n");
786
787 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
788 {
789 *Vacb = NULL;
790 return STATUS_INVALID_PARAMETER;
791 }
792
793 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
794 current->BaseAddress = NULL;
795 current->Valid = FALSE;
796 current->Dirty = FALSE;
797 current->PageOut = FALSE;
798 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
799 current->SharedCacheMap = SharedCacheMap;
800 #if DBG
801 if (SharedCacheMap->Trace)
802 {
803 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
804 }
805 #endif
806 current->MappedCount = 0;
807 current->DirtyVacbListEntry.Flink = NULL;
808 current->DirtyVacbListEntry.Blink = NULL;
809 current->ReferenceCount = 1;
810 current->PinCount = 0;
811 KeInitializeMutex(&current->Mutex, 0);
812 CcRosAcquireVacbLock(current, NULL);
813 KeAcquireGuardedMutex(&ViewLock);
814
815 *Vacb = current;
816 /* There is a window between the call to CcRosLookupVacb
817 * and CcRosCreateVacb. We must check if a VACB for the
818 * file offset already exists. If it does, we release
819 * our newly created VACB and return the existing one.
820 */
821 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
822 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
823 previous = NULL;
824 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
825 {
826 current = CONTAINING_RECORD(current_entry,
827 ROS_VACB,
828 CacheMapVacbListEntry);
829 if (IsPointInRange(current->FileOffset.QuadPart,
830 VACB_MAPPING_GRANULARITY,
831 FileOffset))
832 {
833 CcRosVacbIncRefCount(current);
834 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
835 #if DBG
836 if (SharedCacheMap->Trace)
837 {
838 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
839 SharedCacheMap,
840 (*Vacb),
841 current);
842 }
843 #endif
844 CcRosReleaseVacbLock(*Vacb);
845 KeReleaseGuardedMutex(&ViewLock);
846 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
847 *Vacb = current;
848 CcRosAcquireVacbLock(current, NULL);
849 return STATUS_SUCCESS;
850 }
851 if (current->FileOffset.QuadPart < FileOffset)
852 {
853 ASSERT(previous == NULL ||
854 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
855 previous = current;
856 }
857 if (current->FileOffset.QuadPart > FileOffset)
858 break;
859 current_entry = current_entry->Flink;
860 }
861 /* There was no existing VACB. */
862 current = *Vacb;
863 if (previous)
864 {
865 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
866 }
867 else
868 {
869 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
870 }
871 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
872 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
873 KeReleaseGuardedMutex(&ViewLock);
874
875 MI_SET_USAGE(MI_USAGE_CACHE);
876 #if MI_TRACE_PFNS
877 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
878 {
879 PWCHAR pos;
880 ULONG len = 0;
881 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
882 if (pos)
883 {
884 len = wcslen(pos) * sizeof(WCHAR);
885 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
886 }
887 else
888 {
889 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
890 }
891 }
892 #endif
893
894 Status = CcRosMapVacb(current);
895 if (!NT_SUCCESS(Status))
896 {
897 RemoveEntryList(&current->CacheMapVacbListEntry);
898 RemoveEntryList(&current->VacbLruListEntry);
899 CcRosReleaseVacbLock(current);
900 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
901 }
902
903 return Status;
904 }
905
906 NTSTATUS
907 NTAPI
908 CcRosGetVacb (
909 PROS_SHARED_CACHE_MAP SharedCacheMap,
910 LONGLONG FileOffset,
911 PLONGLONG BaseOffset,
912 PVOID* BaseAddress,
913 PBOOLEAN UptoDate,
914 PROS_VACB *Vacb)
915 {
916 PROS_VACB current;
917 NTSTATUS Status;
918
919 ASSERT(SharedCacheMap);
920
921 DPRINT("CcRosGetVacb()\n");
922
923 /*
924 * Look for a VACB already mapping the same data.
925 */
926 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
927 if (current == NULL)
928 {
929 /*
930 * Otherwise create a new VACB.
931 */
932 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
933 if (!NT_SUCCESS(Status))
934 {
935 return Status;
936 }
937 }
938
939 KeAcquireGuardedMutex(&ViewLock);
940
941 /* Move to the tail of the LRU list */
942 RemoveEntryList(&current->VacbLruListEntry);
943 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
944
945 KeReleaseGuardedMutex(&ViewLock);
946
947 /*
948 * Return information about the VACB to the caller.
949 */
950 *UptoDate = current->Valid;
951 *BaseAddress = current->BaseAddress;
952 DPRINT("*BaseAddress %p\n", *BaseAddress);
953 *Vacb = current;
954 *BaseOffset = current->FileOffset.QuadPart;
955 return STATUS_SUCCESS;
956 }
957
958 NTSTATUS
959 NTAPI
960 CcRosRequestVacb (
961 PROS_SHARED_CACHE_MAP SharedCacheMap,
962 LONGLONG FileOffset,
963 PVOID* BaseAddress,
964 PBOOLEAN UptoDate,
965 PROS_VACB *Vacb)
966 /*
967 * FUNCTION: Request a page mapping for a shared cache map
968 */
969 {
970 LONGLONG BaseOffset;
971
972 ASSERT(SharedCacheMap);
973
974 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
975 {
976 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
977 FileOffset, VACB_MAPPING_GRANULARITY);
978 KeBugCheck(CACHE_MANAGER);
979 }
980
981 return CcRosGetVacb(SharedCacheMap,
982 FileOffset,
983 &BaseOffset,
984 BaseAddress,
985 UptoDate,
986 Vacb);
987 }
988
989 static
990 VOID
991 CcFreeCachePage (
992 PVOID Context,
993 MEMORY_AREA* MemoryArea,
994 PVOID Address,
995 PFN_NUMBER Page,
996 SWAPENTRY SwapEntry,
997 BOOLEAN Dirty)
998 {
999 ASSERT(SwapEntry == 0);
1000 if (Page != 0)
1001 {
1002 ASSERT(MmGetReferenceCountPage(Page) == 1);
1003 MmReleasePageMemoryConsumer(MC_CACHE, Page);
1004 }
1005 }
1006
1007 NTSTATUS
1008 CcRosInternalFreeVacb (
1009 PROS_VACB Vacb)
1010 /*
1011 * FUNCTION: Releases a VACB associated with a shared cache map
1012 */
1013 {
1014 DPRINT("Freeing VACB 0x%p\n", Vacb);
1015 #if DBG
1016 if (Vacb->SharedCacheMap->Trace)
1017 {
1018 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
1019 }
1020 #endif
1021
1022 MmLockAddressSpace(MmGetKernelAddressSpace());
1023 MmFreeMemoryArea(MmGetKernelAddressSpace(),
1024 Vacb->MemoryArea,
1025 CcFreeCachePage,
1026 NULL);
1027 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1028
1029 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
1030 return STATUS_SUCCESS;
1031 }
1032
1033 /*
1034 * @implemented
1035 */
1036 VOID
1037 NTAPI
1038 CcFlushCache (
1039 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
1040 IN PLARGE_INTEGER FileOffset OPTIONAL,
1041 IN ULONG Length,
1042 OUT PIO_STATUS_BLOCK IoStatus)
1043 {
1044 PROS_SHARED_CACHE_MAP SharedCacheMap;
1045 LARGE_INTEGER Offset;
1046 LONGLONG RemainingLength;
1047 PROS_VACB current;
1048 NTSTATUS Status;
1049 KIRQL oldIrql;
1050
1051 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1052 SectionObjectPointers, FileOffset, Length);
1053
1054 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1055 SectionObjectPointers, FileOffset, Length, IoStatus);
1056
1057 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1058 {
1059 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1060 ASSERT(SharedCacheMap);
1061 if (FileOffset)
1062 {
1063 Offset = *FileOffset;
1064 RemainingLength = Length;
1065 }
1066 else
1067 {
1068 Offset.QuadPart = 0;
1069 RemainingLength = SharedCacheMap->FileSize.QuadPart;
1070 }
1071
1072 if (IoStatus)
1073 {
1074 IoStatus->Status = STATUS_SUCCESS;
1075 IoStatus->Information = 0;
1076 }
1077
1078 while (RemainingLength > 0)
1079 {
1080 current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
1081 if (current != NULL)
1082 {
1083 if (current->Dirty)
1084 {
1085 Status = CcRosFlushVacb(current);
1086 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1087 {
1088 IoStatus->Status = Status;
1089 }
1090 }
1091
1092 CcRosReleaseVacbLock(current);
1093
1094 KeAcquireGuardedMutex(&ViewLock);
1095 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1096 CcRosVacbDecRefCount(current);
1097 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1098 KeReleaseGuardedMutex(&ViewLock);
1099 }
1100
1101 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
1102 RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
1103 }
1104 }
1105 else
1106 {
1107 if (IoStatus)
1108 {
1109 IoStatus->Status = STATUS_INVALID_PARAMETER;
1110 }
1111 }
1112 }
1113
1114 NTSTATUS
1115 NTAPI
1116 CcRosDeleteFileCache (
1117 PFILE_OBJECT FileObject,
1118 PROS_SHARED_CACHE_MAP SharedCacheMap)
1119 /*
1120 * FUNCTION: Releases the shared cache map associated with a file object
1121 */
1122 {
1123 PLIST_ENTRY current_entry;
1124 PROS_VACB current;
1125 LIST_ENTRY FreeList;
1126 KIRQL oldIrql;
1127
1128 ASSERT(SharedCacheMap);
1129
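    /* We are called with ViewLock held. Temporarily bump the open count so the
     * shared cache map cannot be torn down while ViewLock is dropped for the
     * flush below. */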
1130 SharedCacheMap->OpenCount++;
1131 KeReleaseGuardedMutex(&ViewLock);
1132
1133 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1134
1135 KeAcquireGuardedMutex(&ViewLock);
1136 SharedCacheMap->OpenCount--;
1137 if (SharedCacheMap->OpenCount == 0)
1138 {
1139 KIRQL OldIrql;
1140
1141 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1142
1143 /*
1144 * Release all VACBs
1145 */
1146 InitializeListHead(&FreeList);
1147 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1148 while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
1149 {
1150 current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
1151 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1152
1153 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1154 CcRosAcquireVacbLock(current, NULL);
1155 RemoveEntryList(&current->VacbLruListEntry);
1156 if (current->Dirty)
1157 {
1158 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1159 CcRosUnmarkDirtyVacb(current, FALSE);
1160 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1161 DPRINT1("Freeing dirty VACB\n");
1162 }
1163 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
1164 CcRosReleaseVacbLock(current);
1165
1166 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1167 }
1168 #if DBG
1169 SharedCacheMap->Trace = FALSE;
1170 #endif
1171 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1172
1173 KeReleaseGuardedMutex(&ViewLock);
1174 ObDereferenceObject(SharedCacheMap->FileObject);
1175
1176 while (!IsListEmpty(&FreeList))
1177 {
1178 current_entry = RemoveTailList(&FreeList);
1179 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1180 CcRosInternalFreeVacb(current);
1181 }
1182
1183 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1184 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
1185 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1186
1187 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1188 KeAcquireGuardedMutex(&ViewLock);
1189 }
1190 return STATUS_SUCCESS;
1191 }
1192
1193 VOID
1194 NTAPI
1195 CcRosReferenceCache (
1196 PFILE_OBJECT FileObject)
1197 {
1198 PROS_SHARED_CACHE_MAP SharedCacheMap;
1199 KeAcquireGuardedMutex(&ViewLock);
1200 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1201 ASSERT(SharedCacheMap);
1202 ASSERT(SharedCacheMap->OpenCount != 0);
1203 SharedCacheMap->OpenCount++;
1204 KeReleaseGuardedMutex(&ViewLock);
1205 }
1206
1207 VOID
1208 NTAPI
1209 CcRosRemoveIfClosed (
1210 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1211 {
1212 PROS_SHARED_CACHE_MAP SharedCacheMap;
1213 DPRINT("CcRosRemoveIfClosed()\n");
1214 KeAcquireGuardedMutex(&ViewLock);
1215 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1216 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1217 {
1218 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1219 }
1220 KeReleaseGuardedMutex(&ViewLock);
1221 }
1222
1223
1224 VOID
1225 NTAPI
1226 CcRosDereferenceCache (
1227 PFILE_OBJECT FileObject)
1228 {
1229 PROS_SHARED_CACHE_MAP SharedCacheMap;
1230 KeAcquireGuardedMutex(&ViewLock);
1231 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1232 ASSERT(SharedCacheMap);
1233 if (SharedCacheMap->OpenCount > 0)
1234 {
1235 SharedCacheMap->OpenCount--;
1236 if (SharedCacheMap->OpenCount == 0)
1237 {
1238 MmFreeSectionSegments(SharedCacheMap->FileObject);
1239 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1240 }
1241 }
1242 KeReleaseGuardedMutex(&ViewLock);
1243 }
1244
1245 NTSTATUS
1246 NTAPI
1247 CcRosReleaseFileCache (
1248 PFILE_OBJECT FileObject)
1249 /*
1250 * FUNCTION: Called by the file system when a handle to a file object
1251 * has been closed.
1252 */
1253 {
1254 PROS_SHARED_CACHE_MAP SharedCacheMap;
1255
1256 KeAcquireGuardedMutex(&ViewLock);
1257
1258 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1259 {
1260 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1261 if (FileObject->PrivateCacheMap != NULL)
1262 {
1263 FileObject->PrivateCacheMap = NULL;
1264 if (SharedCacheMap->OpenCount > 0)
1265 {
1266 SharedCacheMap->OpenCount--;
1267 if (SharedCacheMap->OpenCount == 0)
1268 {
1269 MmFreeSectionSegments(SharedCacheMap->FileObject);
1270 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1271 }
1272 }
1273 }
1274 }
1275 KeReleaseGuardedMutex(&ViewLock);
1276 return STATUS_SUCCESS;
1277 }
1278
1279 NTSTATUS
1280 NTAPI
1281 CcTryToInitializeFileCache (
1282 PFILE_OBJECT FileObject)
1283 {
1284 PROS_SHARED_CACHE_MAP SharedCacheMap;
1285 NTSTATUS Status;
1286
1287 KeAcquireGuardedMutex(&ViewLock);
1288
1289 ASSERT(FileObject->SectionObjectPointer);
1290 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1291 if (SharedCacheMap == NULL)
1292 {
1293 Status = STATUS_UNSUCCESSFUL;
1294 }
1295 else
1296 {
1297 if (FileObject->PrivateCacheMap == NULL)
1298 {
1299 FileObject->PrivateCacheMap = SharedCacheMap;
1300 SharedCacheMap->OpenCount++;
1301 }
1302 Status = STATUS_SUCCESS;
1303 }
1304 KeReleaseGuardedMutex(&ViewLock);
1305
1306 return Status;
1307 }
1308
1309
1310 NTSTATUS
1311 NTAPI
1312 CcRosInitializeFileCache (
1313 PFILE_OBJECT FileObject,
1314 PCC_FILE_SIZES FileSizes,
1315 BOOLEAN PinAccess,
1316 PCACHE_MANAGER_CALLBACKS CallBacks,
1317 PVOID LazyWriterContext)
1318 /*
1319 * FUNCTION: Initializes a shared cache map for a file object
1320 */
1321 {
1322 PROS_SHARED_CACHE_MAP SharedCacheMap;
1323
1324 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1325 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1326 FileObject, SharedCacheMap);
1327
1328 KeAcquireGuardedMutex(&ViewLock);
1329 if (SharedCacheMap == NULL)
1330 {
1331 KIRQL OldIrql;
1332
1333 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1334 if (SharedCacheMap == NULL)
1335 {
1336 KeReleaseGuardedMutex(&ViewLock);
1337 return STATUS_INSUFFICIENT_RESOURCES;
1338 }
1339 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1340 ObReferenceObjectByPointer(FileObject,
1341 FILE_ALL_ACCESS,
1342 NULL,
1343 KernelMode);
1344 SharedCacheMap->FileObject = FileObject;
1345 SharedCacheMap->Callbacks = CallBacks;
1346 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1347 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1348 SharedCacheMap->FileSize = FileSizes->FileSize;
1349 SharedCacheMap->PinAccess = PinAccess;
1350 SharedCacheMap->DirtyPageThreshold = 0;
1351 SharedCacheMap->DirtyPages = 0;
1352 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1353 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1354 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1355
1356 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1357 InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
1358 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1359 }
1360 if (FileObject->PrivateCacheMap == NULL)
1361 {
1362 FileObject->PrivateCacheMap = SharedCacheMap;
1363 SharedCacheMap->OpenCount++;
1364 }
1365 KeReleaseGuardedMutex(&ViewLock);
1366
1367 return STATUS_SUCCESS;
1368 }
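/*
 * For reference, a filesystem normally reaches the routine above through the
 * documented CcInitializeCacheMap API on its create path. A minimal sketch,
 * assuming hypothetical FCB fields and a callback table:
 *
 *   CC_FILE_SIZES FileSizes;
 *   FileSizes.AllocationSize = Fcb->AllocationSize;
 *   FileSizes.FileSize = Fcb->FileSize;
 *   FileSizes.ValidDataLength = Fcb->ValidDataLength;
 *   CcInitializeCacheMap(FileObject, &FileSizes, FALSE, &FsdCallbacks, Fcb);
 */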
1369
1370 /*
1371 * @implemented
1372 */
1373 PFILE_OBJECT
1374 NTAPI
1375 CcGetFileObjectFromSectionPtrs (
1376 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1377 {
1378 PROS_SHARED_CACHE_MAP SharedCacheMap;
1379
1380 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1381
1382 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1383 {
1384 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1385 ASSERT(SharedCacheMap);
1386 return SharedCacheMap->FileObject;
1387 }
1388 return NULL;
1389 }
1390
1391 VOID
1392 NTAPI
1393 CcShutdownLazyWriter (
1394 VOID)
1395 {
1396 /* Simply set the event, lazy writer will stop when it's done */
1397 KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
1398 }
1399
1400 BOOLEAN
1401 INIT_FUNCTION
1402 NTAPI
1403 CcInitView (
1404 VOID)
1405 {
1406 HANDLE LazyWriter;
1407 NTSTATUS Status;
1408 KPRIORITY Priority;
1409 OBJECT_ATTRIBUTES ObjectAttributes;
1410
1411 DPRINT("CcInitView()\n");
1412
1413 InitializeListHead(&DirtyVacbListHead);
1414 InitializeListHead(&VacbLruListHead);
1415 InitializeListHead(&CcDeferredWrites);
1416 InitializeListHead(&CcCleanSharedCacheMapList);
1417 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1418 KeInitializeSpinLock(&iSharedCacheMapLock);
1419 KeInitializeGuardedMutex(&ViewLock);
1420 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1421 NULL,
1422 NULL,
1423 0,
1424 sizeof(INTERNAL_BCB),
1425 TAG_BCB,
1426 20);
1427 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1428 NULL,
1429 NULL,
1430 0,
1431 sizeof(ROS_SHARED_CACHE_MAP),
1432 TAG_SHARED_CACHE_MAP,
1433 20);
1434 ExInitializeNPagedLookasideList(&VacbLookasideList,
1435 NULL,
1436 NULL,
1437 0,
1438 sizeof(ROS_VACB),
1439 TAG_VACB,
1440 20);
1441
1442 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1443
1444 /* Initialize lazy writer events */
1445 KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
1446 KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);
1447
1448 /* Define lazy writer threshold, depending on system type */
1449 switch (MmQuerySystemSize())
1450 {
1451 case MmSmallSystem:
1452 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
1453 break;
1454
1455 case MmMediumSystem:
1456 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
1457 break;
1458
1459 case MmLargeSystem:
1460 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
1461 break;
1462 }
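    /* For example, with 32768 physical pages (128 MB of RAM at 4 KB per page)
     * the threshold becomes 4096, 8192 or 12288 dirty pages respectively. */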
1463
1464 /* Start the lazy writer thread */
1465 InitializeObjectAttributes(&ObjectAttributes,
1466 NULL,
1467 OBJ_KERNEL_HANDLE,
1468 NULL,
1469 NULL);
1470 Status = PsCreateSystemThread(&LazyWriter,
1471 THREAD_ALL_ACCESS,
1472 &ObjectAttributes,
1473 NULL,
1474 NULL,
1475 CciLazyWriter,
1476 NULL);
1477 if (!NT_SUCCESS(Status))
1478 {
1479 return FALSE;
1480 }
1481
1482 Priority = 27;
1483 Status = NtSetInformationThread(LazyWriter,
1484 ThreadPriority,
1485 &Priority,
1486 sizeof(Priority));
1487 ASSERT(NT_SUCCESS(Status));
1488
1489 /* Handle is not needed */
1490 ObCloseHandle(LazyWriter, KernelMode);
1491
1492 CcInitCacheZeroPage();
1493
1494 return TRUE;
1495 }
1496
1497 #if DBG && defined(KDBG)
1498 BOOLEAN
1499 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1500 {
1501 PLIST_ENTRY ListEntry;
1502 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1503
1504 KdbpPrint(" Usage Summary (in kb)\n");
1505 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1506 /* No need to lock the spin lock here, we're in DBG */
1507 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1508 ListEntry != &CcCleanSharedCacheMapList;
1509 ListEntry = ListEntry->Flink)
1510 {
1511 PLIST_ENTRY Vacbs;
1512 ULONG Valid = 0, Dirty = 0;
1513 PROS_SHARED_CACHE_MAP SharedCacheMap;
1514 PUNICODE_STRING FileName;
1515
1516 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1517
1518 /* Dirty size */
1519 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1520
1521 /* First, count for all the associated VACB */
1522 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1523 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1524 Vacbs = Vacbs->Flink)
1525 {
1526 PROS_VACB Vacb;
1527
1528 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1529 if (Vacb->Valid)
1530 {
1531 Valid += VACB_MAPPING_GRANULARITY / 1024;
1532 }
1533 }
1534
1535 /* Setup name */
1536 if (SharedCacheMap->FileObject != NULL &&
1537 SharedCacheMap->FileObject->FileName.Length != 0)
1538 {
1539 FileName = &SharedCacheMap->FileObject->FileName;
1540 }
1541 else
1542 {
1543 FileName = &NoName;
1544 }
1545
1546 /* And print */
1547 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
1548 }
1549
1550 return TRUE;
1551 }
1552 #endif
1553
1554 /* EOF */