1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
19  * (1) If caching for the FCB hasn't been initiated then do so by calling
20  * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
27  * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
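/*
 * Illustrative sketch (not part of the build): the per-view loop described in
 * the NOTES above, expressed with the ROS-prefixed helpers defined later in
 * this file (CcRosRequestVacb/CcRosReleaseVacb). The routine name is
 * hypothetical and error handling is abbreviated; a real caller would also
 * read the backing medium into the view when it is not yet up to date.
 *
 *   static NTSTATUS SketchCachedRead(PROS_SHARED_CACHE_MAP SharedCacheMap,
 *                                    LONGLONG FileOffset,
 *                                    ULONG Length,
 *                                    PVOID Buffer)
 *   {
 *       while (Length > 0)
 *       {
 *           PROS_VACB Vacb;
 *           PVOID BaseAddress;
 *           BOOLEAN UptoDate;
 *           NTSTATUS Status;
 *           LONGLONG ViewOffset = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
 *           ULONG OffsetInView = (ULONG)(FileOffset - ViewOffset);
 *           ULONG Chunk = min(Length, VACB_MAPPING_GRANULARITY - OffsetInView);
 *
 *           // (2) Obtain the view covering this offset
 *           Status = CcRosRequestVacb(SharedCacheMap, ViewOffset,
 *                                     &BaseAddress, &UptoDate, &Vacb);
 *           if (!NT_SUCCESS(Status))
 *               return Status;
 *
 *           // (3) If !UptoDate, read the underlying medium into the view
 *           //     here; on failure, release it with Valid == FALSE and
 *           //     return the error.
 *
 *           // (4) Copy the data out of the (now valid) view
 *           RtlCopyMemory(Buffer, (PUCHAR)BaseAddress + OffsetInView, Chunk);
 *
 *           // (5) Release the view: valid, not dirty, not newly mapped
 *           CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
 *
 *           FileOffset += Chunk;
 *           Buffer = (PUCHAR)Buffer + Chunk;
 *           Length -= Chunk;
 *       }
 *       return STATUS_SUCCESS;
 *   }
 */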
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
54 /* Counters:
55  * - Number of pages flushed by the lazy writer
56  * - Number of times the lazy writer ran
57 */
58 ULONG CcLazyWritePages = 0;
59 ULONG CcLazyWriteIos = 0;
60
61 /* Internal vars (MS):
62  * - Threshold above which the lazy writer starts flushing
63  * - Number of dirty pages
64  * - List for deferred writes
65  * - Spin lock protecting the deferred write list
66 * - List for "clean" shared cache maps
67 * - One second delay for lazy writer
68 */
69 ULONG CcDirtyPageThreshold = 0;
70 ULONG CcTotalDirtyPages = 0;
71 LIST_ENTRY CcDeferredWrites;
72 KSPIN_LOCK CcDeferredWriteSpinLock;
73 LIST_ENTRY CcCleanSharedCacheMapList;
74 LARGE_INTEGER CcIdleDelay = RTL_CONSTANT_LARGE_INTEGER((LONGLONG)-1*1000*1000*10);
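/* Note: the CcIdleDelay constant above is a relative (negative) timeout in
 * 100-nanosecond units: 10 * 1000 * 1000 * 100 ns = 1 second. */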
75
76 /* Internal vars (ROS):
77  * - Event to notify the lazy writer to shut down
78  * - Event to inform watchers that the lazy writer is done for this loop
79  * - Lock for the CcCleanSharedCacheMapList list
80 */
81 KEVENT iLazyWriterShutdown;
82 KEVENT iLazyWriterNotify;
83 KSPIN_LOCK iSharedCacheMapLock;
84
85 #if DBG
86 static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
87 {
88 ++vacb->ReferenceCount;
89 if (vacb->SharedCacheMap->Trace)
90 {
91 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
92 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
93 }
94 }
95 static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
96 {
97 ASSERT(vacb->ReferenceCount != 0);
98 --vacb->ReferenceCount;
99 ASSERT(!(vacb->ReferenceCount == 0 && vacb->Dirty));
100 if (vacb->SharedCacheMap->Trace)
101 {
102 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
103 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
104 }
105 }
106 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
107 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
108 #else
109 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
110 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
111 #endif
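/* Illustrative usage sketch (not part of the build), based on the pattern in
 * CcRosFlushDirtyPages below: a reference is taken while ViewLock is held,
 * the lock is dropped around blocking work, and the reference is released
 * only once ViewLock has been reacquired.
 *
 *   KeAcquireGuardedMutex(&ViewLock);
 *   CcRosVacbIncRefCount(Vacb);          // keep the VACB alive
 *   KeReleaseGuardedMutex(&ViewLock);
 *
 *   // ... blocking work on the VACB (flush, page out, ...) ...
 *
 *   KeAcquireGuardedMutex(&ViewLock);
 *   CcRosVacbDecRefCount(Vacb);          // may now be freed by its owner
 *   KeReleaseGuardedMutex(&ViewLock);
 */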
112
113 NTSTATUS
114 CcRosInternalFreeVacb(PROS_VACB Vacb);
115
116
117 /* FUNCTIONS *****************************************************************/
118
119 VOID
120 NTAPI
121 CcRosTraceCacheMap (
122 PROS_SHARED_CACHE_MAP SharedCacheMap,
123 BOOLEAN Trace )
124 {
125 #if DBG
126 KIRQL oldirql;
127 PLIST_ENTRY current_entry;
128 PROS_VACB current;
129
130 if (!SharedCacheMap)
131 return;
132
133 SharedCacheMap->Trace = Trace;
134
135 if (Trace)
136 {
137 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
138
139 KeAcquireGuardedMutex(&ViewLock);
140 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
141
142 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
143 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
144 {
145 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
146 current_entry = current_entry->Flink;
147
148 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
149 current, current->ReferenceCount, current->Dirty, current->PageOut );
150 }
151 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
152 KeReleaseGuardedMutex(&ViewLock);
153 }
154 else
155 {
156 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
157 }
158
159 #else
160 UNREFERENCED_PARAMETER(SharedCacheMap);
161 UNREFERENCED_PARAMETER(Trace);
162 #endif
163 }
164
165 NTSTATUS
166 NTAPI
167 CcRosFlushVacb (
168 PROS_VACB Vacb)
169 {
170 NTSTATUS Status;
171
172 Status = CcWriteVirtualAddress(Vacb);
173 if (NT_SUCCESS(Status))
174 {
175 CcRosUnmarkDirtyVacb(Vacb, TRUE);
176 }
177
178 return Status;
179 }
180
181 NTSTATUS
182 NTAPI
183 CcRosFlushDirtyPages (
184 ULONG Target,
185 PULONG Count,
186 BOOLEAN Wait,
187 BOOLEAN CalledFromLazy)
188 {
189 PLIST_ENTRY current_entry;
190 PROS_VACB current;
191 BOOLEAN Locked;
192 NTSTATUS Status;
193 LARGE_INTEGER ZeroTimeout;
194
195 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
196
197 (*Count) = 0;
198 ZeroTimeout.QuadPart = 0;
199
200 KeEnterCriticalRegion();
201 KeAcquireGuardedMutex(&ViewLock);
202
203 current_entry = DirtyVacbListHead.Flink;
204 if (current_entry == &DirtyVacbListHead)
205 {
206 DPRINT("No Dirty pages\n");
207 }
208
209 while ((current_entry != &DirtyVacbListHead) && (Target > 0))
210 {
211 current = CONTAINING_RECORD(current_entry,
212 ROS_VACB,
213 DirtyVacbListEntry);
214 current_entry = current_entry->Flink;
215
216 CcRosVacbIncRefCount(current);
217
218 /* When performing lazy write, don't handle temporary files */
219 if (CalledFromLazy &&
220 BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
221 {
222 CcRosVacbDecRefCount(current);
223 continue;
224 }
225
226 Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
227 current->SharedCacheMap->LazyWriteContext, Wait);
228 if (!Locked)
229 {
230 CcRosVacbDecRefCount(current);
231 continue;
232 }
233
234 Status = CcRosAcquireVacbLock(current,
235 Wait ? NULL : &ZeroTimeout);
236 if (Status != STATUS_SUCCESS)
237 {
238 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
239 current->SharedCacheMap->LazyWriteContext);
240 CcRosVacbDecRefCount(current);
241 continue;
242 }
243
244 ASSERT(current->Dirty);
245
246 /* One reference is added above */
247 if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
248 (current->ReferenceCount > 3 && current->PinCount > 1))
249 {
250 CcRosReleaseVacbLock(current);
251 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
252 current->SharedCacheMap->LazyWriteContext);
253 CcRosVacbDecRefCount(current);
254 continue;
255 }
256
257 KeReleaseGuardedMutex(&ViewLock);
258
259 Status = CcRosFlushVacb(current);
260
261 CcRosReleaseVacbLock(current);
262 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
263 current->SharedCacheMap->LazyWriteContext);
264
265 KeAcquireGuardedMutex(&ViewLock);
266 CcRosVacbDecRefCount(current);
267
268 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
269 (Status != STATUS_MEDIA_WRITE_PROTECTED))
270 {
271 DPRINT1("CC: Failed to flush VACB.\n");
272 }
273 else
274 {
275 ULONG PagesFreed;
276
277 /* How many pages did we free? */
278 PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
279 (*Count) += PagesFreed;
280
281             /* Make sure we don't underflow Target! */
282 if (Target < PagesFreed)
283 {
284 /* If we would have, jump to zero directly */
285 Target = 0;
286 }
287 else
288 {
289 Target -= PagesFreed;
290 }
291 }
292
293 current_entry = DirtyVacbListHead.Flink;
294 }
295
296 KeReleaseGuardedMutex(&ViewLock);
297 KeLeaveCriticalRegion();
298
299 DPRINT("CcRosFlushDirtyPages() finished\n");
300 return STATUS_SUCCESS;
301 }
302
303 /* FIXME: Someday this could somewhat implement write-behind/read-ahead */
304 VOID
305 NTAPI
306 CciLazyWriter(PVOID Unused)
307 {
308 while (TRUE)
309 {
310 NTSTATUS Status;
311 PLIST_ENTRY ListEntry;
312 ULONG Target, Count = 0;
313
314         /* Wait up to one second, or return early if we are asked to stop */
315 Status = KeWaitForSingleObject(&iLazyWriterShutdown,
316 Executive,
317 KernelMode,
318 FALSE,
319 &CcIdleDelay);
320
321         /* If the wait succeeded, we have to stop running! */
322 if (Status == STATUS_SUCCESS)
323 {
324 break;
325 }
326
327 /* We're not sleeping anymore */
328 KeClearEvent(&iLazyWriterNotify);
329
330 /* Our target is one-eighth of the dirty pages */
331 Target = CcTotalDirtyPages / 8;
332 if (Target != 0)
333 {
334 /* Flush! */
335 DPRINT("Lazy writer starting (%d)\n", Target);
336 CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);
337
338 /* And update stats */
339 CcLazyWritePages += Count;
340 ++CcLazyWriteIos;
341 DPRINT("Lazy writer done (%d)\n", Count);
342 }
343
344 /* Inform people waiting on us that we're done */
345 KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
346
347 /* Likely not optimal, but let's handle one deferred write now! */
348 ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
349 if (ListEntry != NULL)
350 {
351 PDEFERRED_WRITE Context;
352
353 /* Extract the context */
354 Context = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);
355 ASSERT(Context->NodeTypeCode == NODE_TYPE_DEFERRED_WRITE);
356
357 /* Can we write now? */
358 if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, TRUE))
359 {
360 /* Yes! Do it, and destroy the associated context */
361 Context->PostRoutine(Context->Context1, Context->Context2);
362 ExFreePoolWithTag(Context, 'CcDw');
363 }
364 else
365 {
366                 /* Otherwise, requeue it at the tail so that it doesn't block the others.
367                  * This clearly needs improvement, but given the poor algorithm used now,
368                  * it's better than nothing!
369 */
370 ExInterlockedInsertTailList(&CcDeferredWrites,
371 &Context->DeferredWriteLinks,
372 &CcDeferredWriteSpinLock);
373 }
374 }
375 }
376 }
377
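/* Note: CcRosTrimCache below is registered as the MC_CACHE memory consumer's
 * trim callback via MmInitializeMemoryConsumer() in CcInitView(), so Mm
 * invokes it when it needs cache pages back. */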
378 NTSTATUS
379 CcRosTrimCache (
380 ULONG Target,
381 ULONG Priority,
382 PULONG NrFreed)
383 /*
384 * FUNCTION: Try to free some memory from the file cache.
385 * ARGUMENTS:
386 * Target - The number of pages to be freed.
387  * Priority - The priority of the free operation (currently unused).
388 * NrFreed - Points to a variable where the number of pages
389 * actually freed is returned.
390 */
391 {
392 PLIST_ENTRY current_entry;
393 PROS_VACB current;
394 ULONG PagesFreed;
395 KIRQL oldIrql;
396 LIST_ENTRY FreeList;
397 PFN_NUMBER Page;
398 ULONG i;
399 BOOLEAN FlushedPages = FALSE;
400
401 DPRINT("CcRosTrimCache(Target %lu)\n", Target);
402
403 InitializeListHead(&FreeList);
404
405 *NrFreed = 0;
406
407 retry:
408 KeAcquireGuardedMutex(&ViewLock);
409
410 current_entry = VacbLruListHead.Flink;
411 while (current_entry != &VacbLruListHead)
412 {
413 current = CONTAINING_RECORD(current_entry,
414 ROS_VACB,
415 VacbLruListEntry);
416 current_entry = current_entry->Flink;
417
418 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
419
420 /* Reference the VACB */
421 CcRosVacbIncRefCount(current);
422
423 /* Check if it's mapped and not dirty */
424 if (current->MappedCount > 0 && !current->Dirty)
425 {
426 /* We have to break these locks because Cc sucks */
427 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
428 KeReleaseGuardedMutex(&ViewLock);
429
430 /* Page out the VACB */
431 for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
432 {
433 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
434
435 MmPageOutPhysicalAddress(Page);
436 }
437
438 /* Reacquire the locks */
439 KeAcquireGuardedMutex(&ViewLock);
440 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
441 }
442
443 /* Dereference the VACB */
444 CcRosVacbDecRefCount(current);
445
446 /* Check if we can free this entry now */
447 if (current->ReferenceCount == 0)
448 {
449 ASSERT(!current->Dirty);
450 ASSERT(!current->MappedCount);
451
452 RemoveEntryList(&current->CacheMapVacbListEntry);
453 RemoveEntryList(&current->VacbLruListEntry);
454 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
455
456 /* Calculate how many pages we freed for Mm */
457 PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
458 Target -= PagesFreed;
459 (*NrFreed) += PagesFreed;
460 }
461
462 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
463 }
464
465 KeReleaseGuardedMutex(&ViewLock);
466
467 /* Try flushing pages if we haven't met our target */
468 if ((Target > 0) && !FlushedPages)
469 {
470 /* Flush dirty pages to disk */
471 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
472 FlushedPages = TRUE;
473
474 /* We can only swap as many pages as we flushed */
475 if (PagesFreed < Target) Target = PagesFreed;
476
477 /* Check if we flushed anything */
478 if (PagesFreed != 0)
479 {
480 /* Try again after flushing dirty pages */
481 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
482 goto retry;
483 }
484 }
485
486 while (!IsListEmpty(&FreeList))
487 {
488 current_entry = RemoveHeadList(&FreeList);
489 current = CONTAINING_RECORD(current_entry,
490 ROS_VACB,
491 CacheMapVacbListEntry);
492 CcRosInternalFreeVacb(current);
493 }
494
495 DPRINT("Evicted %lu cache pages\n", (*NrFreed));
496
497 return STATUS_SUCCESS;
498 }
499
500 NTSTATUS
501 NTAPI
502 CcRosReleaseVacb (
503 PROS_SHARED_CACHE_MAP SharedCacheMap,
504 PROS_VACB Vacb,
505 BOOLEAN Valid,
506 BOOLEAN Dirty,
507 BOOLEAN Mapped)
508 {
509 ASSERT(SharedCacheMap);
510
511 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
512 SharedCacheMap, Vacb, Valid);
513
514 Vacb->Valid = Valid;
515
516 if (Dirty && !Vacb->Dirty)
517 {
518 CcRosMarkDirtyVacb(Vacb);
519 }
520
521 if (Mapped)
522 {
523 Vacb->MappedCount++;
524 }
525 CcRosVacbDecRefCount(Vacb);
526 if (Mapped && (Vacb->MappedCount == 1))
527 {
528 CcRosVacbIncRefCount(Vacb);
529 }
530
531 CcRosReleaseVacbLock(Vacb);
532
533 return STATUS_SUCCESS;
534 }
535
536 /* Returns with VACB Lock Held! */
537 PROS_VACB
538 NTAPI
539 CcRosLookupVacb (
540 PROS_SHARED_CACHE_MAP SharedCacheMap,
541 LONGLONG FileOffset)
542 {
543 PLIST_ENTRY current_entry;
544 PROS_VACB current;
545 KIRQL oldIrql;
546
547 ASSERT(SharedCacheMap);
548
549 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
550 SharedCacheMap, FileOffset);
551
552 KeAcquireGuardedMutex(&ViewLock);
553 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
554
555 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
556 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
557 {
558 current = CONTAINING_RECORD(current_entry,
559 ROS_VACB,
560 CacheMapVacbListEntry);
561 if (IsPointInRange(current->FileOffset.QuadPart,
562 VACB_MAPPING_GRANULARITY,
563 FileOffset))
564 {
565 CcRosVacbIncRefCount(current);
566 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
567 KeReleaseGuardedMutex(&ViewLock);
568 CcRosAcquireVacbLock(current, NULL);
569 return current;
570 }
571 if (current->FileOffset.QuadPart > FileOffset)
572 break;
573 current_entry = current_entry->Flink;
574 }
575
576 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
577 KeReleaseGuardedMutex(&ViewLock);
578
579 return NULL;
580 }
581
582 VOID
583 NTAPI
584 CcRosMarkDirtyVacb (
585 PROS_VACB Vacb)
586 {
587 KIRQL oldIrql;
588 PROS_SHARED_CACHE_MAP SharedCacheMap;
589
590 SharedCacheMap = Vacb->SharedCacheMap;
591
592 KeAcquireGuardedMutex(&ViewLock);
593 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
594
595 ASSERT(!Vacb->Dirty);
596
597 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
598 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
599 Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
600 CcRosVacbIncRefCount(Vacb);
601
602 /* Move to the tail of the LRU list */
603 RemoveEntryList(&Vacb->VacbLruListEntry);
604 InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
605
606 Vacb->Dirty = TRUE;
607
608 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
609 KeReleaseGuardedMutex(&ViewLock);
610 }
611
612 VOID
613 NTAPI
614 CcRosUnmarkDirtyVacb (
615 PROS_VACB Vacb,
616 BOOLEAN LockViews)
617 {
618 KIRQL oldIrql;
619 PROS_SHARED_CACHE_MAP SharedCacheMap;
620
621 SharedCacheMap = Vacb->SharedCacheMap;
622
623 if (LockViews)
624 {
625 KeAcquireGuardedMutex(&ViewLock);
626 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
627 }
628
629 ASSERT(Vacb->Dirty);
630
631 Vacb->Dirty = FALSE;
632
633 RemoveEntryList(&Vacb->DirtyVacbListEntry);
634 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
635 Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
636 CcRosVacbDecRefCount(Vacb);
637
638 if (LockViews)
639 {
640 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
641 KeReleaseGuardedMutex(&ViewLock);
642 }
643 }
644
645 NTSTATUS
646 NTAPI
647 CcRosMarkDirtyFile (
648 PROS_SHARED_CACHE_MAP SharedCacheMap,
649 LONGLONG FileOffset)
650 {
651 PROS_VACB Vacb;
652
653 ASSERT(SharedCacheMap);
654
655 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
656 SharedCacheMap, FileOffset);
657
658 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
659 if (Vacb == NULL)
660 {
661 KeBugCheck(CACHE_MANAGER);
662 }
663
664 if (!Vacb->Dirty)
665 {
666 CcRosMarkDirtyVacb(Vacb);
667 }
668
669 CcRosReleaseVacbLock(Vacb);
670
671 return STATUS_SUCCESS;
672 }
673
674 NTSTATUS
675 NTAPI
676 CcRosUnmapVacb (
677 PROS_SHARED_CACHE_MAP SharedCacheMap,
678 LONGLONG FileOffset,
679 BOOLEAN NowDirty)
680 {
681 PROS_VACB Vacb;
682
683 ASSERT(SharedCacheMap);
684
685 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
686 SharedCacheMap, FileOffset, NowDirty);
687
688 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
689 if (Vacb == NULL)
690 {
691 return STATUS_UNSUCCESSFUL;
692 }
693
694 if (NowDirty && !Vacb->Dirty)
695 {
696 CcRosMarkDirtyVacb(Vacb);
697 }
698
699 ASSERT(Vacb->MappedCount != 0);
700 Vacb->MappedCount--;
701
702 CcRosVacbDecRefCount(Vacb);
703 if (Vacb->MappedCount == 0)
704 {
705 CcRosVacbDecRefCount(Vacb);
706 }
707
708 CcRosReleaseVacbLock(Vacb);
709
710 return STATUS_SUCCESS;
711 }
712
713 static
714 NTSTATUS
715 CcRosMapVacb(
716 PROS_VACB Vacb)
717 {
718 ULONG i;
719 NTSTATUS Status;
720 ULONG_PTR NumberOfPages;
721
722 /* Create a memory area. */
723 MmLockAddressSpace(MmGetKernelAddressSpace());
724 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
725 0, // nothing checks for VACB mareas, so set to 0
726 &Vacb->BaseAddress,
727 VACB_MAPPING_GRANULARITY,
728 PAGE_READWRITE,
729 (PMEMORY_AREA*)&Vacb->MemoryArea,
730 0,
731 PAGE_SIZE);
732 MmUnlockAddressSpace(MmGetKernelAddressSpace());
733 if (!NT_SUCCESS(Status))
734 {
735 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
736 return Status;
737 }
738
739 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
740 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
741
742 /* Create a virtual mapping for this memory area */
743 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
744 for (i = 0; i < NumberOfPages; i++)
745 {
746 PFN_NUMBER PageFrameNumber;
747
748 MI_SET_USAGE(MI_USAGE_CACHE);
749 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
750 if (PageFrameNumber == 0)
751 {
752 DPRINT1("Unable to allocate page\n");
753 KeBugCheck(MEMORY_MANAGEMENT);
754 }
755
756 Status = MmCreateVirtualMapping(NULL,
757 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
758 PAGE_READWRITE,
759 &PageFrameNumber,
760 1);
761 if (!NT_SUCCESS(Status))
762 {
763 DPRINT1("Unable to create virtual mapping\n");
764 KeBugCheck(MEMORY_MANAGEMENT);
765 }
766 }
767
768 return STATUS_SUCCESS;
769 }
770
771 static
772 NTSTATUS
773 CcRosCreateVacb (
774 PROS_SHARED_CACHE_MAP SharedCacheMap,
775 LONGLONG FileOffset,
776 PROS_VACB *Vacb)
777 {
778 PROS_VACB current;
779 PROS_VACB previous;
780 PLIST_ENTRY current_entry;
781 NTSTATUS Status;
782 KIRQL oldIrql;
783
784 ASSERT(SharedCacheMap);
785
786 DPRINT("CcRosCreateVacb()\n");
787
788 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
789 {
790 *Vacb = NULL;
791 return STATUS_INVALID_PARAMETER;
792 }
793
794 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
795 current->BaseAddress = NULL;
796 current->Valid = FALSE;
797 current->Dirty = FALSE;
798 current->PageOut = FALSE;
799 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
800 current->SharedCacheMap = SharedCacheMap;
801 #if DBG
802 if (SharedCacheMap->Trace)
803 {
804 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
805 }
806 #endif
807 current->MappedCount = 0;
808 current->DirtyVacbListEntry.Flink = NULL;
809 current->DirtyVacbListEntry.Blink = NULL;
810 current->ReferenceCount = 1;
811 current->PinCount = 0;
812 KeInitializeMutex(&current->Mutex, 0);
813 CcRosAcquireVacbLock(current, NULL);
814 KeAcquireGuardedMutex(&ViewLock);
815
816 *Vacb = current;
817     /* There is a window between the call to CcRosLookupVacb
818      * and CcRosCreateVacb. We must check whether a VACB for this
819      * file offset already exists. If there is one, we release
820      * our newly created VACB and return the existing one.
821 */
822 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
823 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
824 previous = NULL;
825 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
826 {
827 current = CONTAINING_RECORD(current_entry,
828 ROS_VACB,
829 CacheMapVacbListEntry);
830 if (IsPointInRange(current->FileOffset.QuadPart,
831 VACB_MAPPING_GRANULARITY,
832 FileOffset))
833 {
834 CcRosVacbIncRefCount(current);
835 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
836 #if DBG
837 if (SharedCacheMap->Trace)
838 {
839 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
840 SharedCacheMap,
841 (*Vacb),
842 current);
843 }
844 #endif
845 CcRosReleaseVacbLock(*Vacb);
846 KeReleaseGuardedMutex(&ViewLock);
847 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
848 *Vacb = current;
849 CcRosAcquireVacbLock(current, NULL);
850 return STATUS_SUCCESS;
851 }
852 if (current->FileOffset.QuadPart < FileOffset)
853 {
854 ASSERT(previous == NULL ||
855 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
856 previous = current;
857 }
858 if (current->FileOffset.QuadPart > FileOffset)
859 break;
860 current_entry = current_entry->Flink;
861 }
862 /* There was no existing VACB. */
863 current = *Vacb;
864 if (previous)
865 {
866 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
867 }
868 else
869 {
870 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
871 }
872 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
873 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
874 KeReleaseGuardedMutex(&ViewLock);
875
876 MI_SET_USAGE(MI_USAGE_CACHE);
877 #if MI_TRACE_PFNS
878 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
879 {
880 PWCHAR pos;
881 ULONG len = 0;
882 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
883 if (pos)
884 {
885 len = wcslen(pos) * sizeof(WCHAR);
886 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
887 }
888 else
889 {
890 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
891 }
892 }
893 #endif
894
895 Status = CcRosMapVacb(current);
896 if (!NT_SUCCESS(Status))
897 {
898 RemoveEntryList(&current->CacheMapVacbListEntry);
899 RemoveEntryList(&current->VacbLruListEntry);
900 CcRosReleaseVacbLock(current);
901 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
902 }
903
904 return Status;
905 }
906
907 NTSTATUS
908 NTAPI
909 CcRosGetVacb (
910 PROS_SHARED_CACHE_MAP SharedCacheMap,
911 LONGLONG FileOffset,
912 PLONGLONG BaseOffset,
913 PVOID* BaseAddress,
914 PBOOLEAN UptoDate,
915 PROS_VACB *Vacb)
916 {
917 PROS_VACB current;
918 NTSTATUS Status;
919
920 ASSERT(SharedCacheMap);
921
922 DPRINT("CcRosGetVacb()\n");
923
924 /*
925 * Look for a VACB already mapping the same data.
926 */
927 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
928 if (current == NULL)
929 {
930 /*
931 * Otherwise create a new VACB.
932 */
933 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
934 if (!NT_SUCCESS(Status))
935 {
936 return Status;
937 }
938 }
939
940 KeAcquireGuardedMutex(&ViewLock);
941
942 /* Move to the tail of the LRU list */
943 RemoveEntryList(&current->VacbLruListEntry);
944 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
945
946 KeReleaseGuardedMutex(&ViewLock);
947
948 /*
949 * Return information about the VACB to the caller.
950 */
951 *UptoDate = current->Valid;
952 *BaseAddress = current->BaseAddress;
953 DPRINT("*BaseAddress %p\n", *BaseAddress);
954 *Vacb = current;
955 *BaseOffset = current->FileOffset.QuadPart;
956 return STATUS_SUCCESS;
957 }
958
959 NTSTATUS
960 NTAPI
961 CcRosRequestVacb (
962 PROS_SHARED_CACHE_MAP SharedCacheMap,
963 LONGLONG FileOffset,
964 PVOID* BaseAddress,
965 PBOOLEAN UptoDate,
966 PROS_VACB *Vacb)
967 /*
968 * FUNCTION: Request a page mapping for a shared cache map
969 */
970 {
971 LONGLONG BaseOffset;
972
973 ASSERT(SharedCacheMap);
974
975 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
976 {
977 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
978 FileOffset, VACB_MAPPING_GRANULARITY);
979 KeBugCheck(CACHE_MANAGER);
980 }
981
982 return CcRosGetVacb(SharedCacheMap,
983 FileOffset,
984 &BaseOffset,
985 BaseAddress,
986 UptoDate,
987 Vacb);
988 }
989
990 static
991 VOID
992 CcFreeCachePage (
993 PVOID Context,
994 MEMORY_AREA* MemoryArea,
995 PVOID Address,
996 PFN_NUMBER Page,
997 SWAPENTRY SwapEntry,
998 BOOLEAN Dirty)
999 {
1000 ASSERT(SwapEntry == 0);
1001 if (Page != 0)
1002 {
1003 ASSERT(MmGetReferenceCountPage(Page) == 1);
1004 MmReleasePageMemoryConsumer(MC_CACHE, Page);
1005 }
1006 }
1007
1008 NTSTATUS
1009 CcRosInternalFreeVacb (
1010 PROS_VACB Vacb)
1011 /*
1012 * FUNCTION: Releases a VACB associated with a shared cache map
1013 */
1014 {
1015 DPRINT("Freeing VACB 0x%p\n", Vacb);
1016 #if DBG
1017 if (Vacb->SharedCacheMap->Trace)
1018 {
1019 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
1020 }
1021 #endif
1022
1023 MmLockAddressSpace(MmGetKernelAddressSpace());
1024 MmFreeMemoryArea(MmGetKernelAddressSpace(),
1025 Vacb->MemoryArea,
1026 CcFreeCachePage,
1027 NULL);
1028 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1029
1030 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
1031 return STATUS_SUCCESS;
1032 }
1033
1034 /*
1035 * @implemented
1036 */
1037 VOID
1038 NTAPI
1039 CcFlushCache (
1040 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
1041 IN PLARGE_INTEGER FileOffset OPTIONAL,
1042 IN ULONG Length,
1043 OUT PIO_STATUS_BLOCK IoStatus)
1044 {
1045 PROS_SHARED_CACHE_MAP SharedCacheMap;
1046 LARGE_INTEGER Offset;
1047 LONGLONG RemainingLength;
1048 PROS_VACB current;
1049 NTSTATUS Status;
1050 KIRQL oldIrql;
1051
1052 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1053 SectionObjectPointers, FileOffset, Length);
1054
1055 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1056 SectionObjectPointers, FileOffset, Length, IoStatus);
1057
1058 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1059 {
1060 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1061 ASSERT(SharedCacheMap);
1062 if (FileOffset)
1063 {
1064 Offset = *FileOffset;
1065 RemainingLength = Length;
1066 }
1067 else
1068 {
1069 Offset.QuadPart = 0;
1070 RemainingLength = SharedCacheMap->FileSize.QuadPart;
1071 }
1072
1073 if (IoStatus)
1074 {
1075 IoStatus->Status = STATUS_SUCCESS;
1076 IoStatus->Information = 0;
1077 }
1078
1079 while (RemainingLength > 0)
1080 {
1081 current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
1082 if (current != NULL)
1083 {
1084 if (current->Dirty)
1085 {
1086 Status = CcRosFlushVacb(current);
1087 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1088 {
1089 IoStatus->Status = Status;
1090 }
1091 }
1092
1093 CcRosReleaseVacbLock(current);
1094
1095 KeAcquireGuardedMutex(&ViewLock);
1096 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1097 CcRosVacbDecRefCount(current);
1098 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1099 KeReleaseGuardedMutex(&ViewLock);
1100 }
1101
1102 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
1103 RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
1104 }
1105 }
1106 else
1107 {
1108 if (IoStatus)
1109 {
1110 IoStatus->Status = STATUS_INVALID_PARAMETER;
1111 }
1112 }
1113 }
1114
1115 NTSTATUS
1116 NTAPI
1117 CcRosDeleteFileCache (
1118 PFILE_OBJECT FileObject,
1119 PROS_SHARED_CACHE_MAP SharedCacheMap)
1120 /*
1121 * FUNCTION: Releases the shared cache map associated with a file object
1122 */
1123 {
1124 PLIST_ENTRY current_entry;
1125 PROS_VACB current;
1126 LIST_ENTRY FreeList;
1127 KIRQL oldIrql;
1128
1129 ASSERT(SharedCacheMap);
1130
1131 SharedCacheMap->OpenCount++;
1132 KeReleaseGuardedMutex(&ViewLock);
1133
1134 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1135
1136 KeAcquireGuardedMutex(&ViewLock);
1137 SharedCacheMap->OpenCount--;
1138 if (SharedCacheMap->OpenCount == 0)
1139 {
1140 KIRQL OldIrql;
1141
1142 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1143
1144 /*
1145 * Release all VACBs
1146 */
1147 InitializeListHead(&FreeList);
1148 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1149 while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
1150 {
1151 current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
1152 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1153
1154 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1155 CcRosAcquireVacbLock(current, NULL);
1156 RemoveEntryList(&current->VacbLruListEntry);
1157 if (current->Dirty)
1158 {
1159 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1160 CcRosUnmarkDirtyVacb(current, FALSE);
1161 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1162 DPRINT1("Freeing dirty VACB\n");
1163 }
1164 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
1165 CcRosReleaseVacbLock(current);
1166
1167 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1168 }
1169 #if DBG
1170 SharedCacheMap->Trace = FALSE;
1171 #endif
1172 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1173
1174 KeReleaseGuardedMutex(&ViewLock);
1175 ObDereferenceObject(SharedCacheMap->FileObject);
1176
1177 while (!IsListEmpty(&FreeList))
1178 {
1179 current_entry = RemoveTailList(&FreeList);
1180 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1181 CcRosInternalFreeVacb(current);
1182 }
1183
1184 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1185 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
1186 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1187
1188 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1189 KeAcquireGuardedMutex(&ViewLock);
1190 }
1191 return STATUS_SUCCESS;
1192 }
1193
1194 VOID
1195 NTAPI
1196 CcRosReferenceCache (
1197 PFILE_OBJECT FileObject)
1198 {
1199 PROS_SHARED_CACHE_MAP SharedCacheMap;
1200 KeAcquireGuardedMutex(&ViewLock);
1201 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1202 ASSERT(SharedCacheMap);
1203 ASSERT(SharedCacheMap->OpenCount != 0);
1204 SharedCacheMap->OpenCount++;
1205 KeReleaseGuardedMutex(&ViewLock);
1206 }
1207
1208 VOID
1209 NTAPI
1210 CcRosRemoveIfClosed (
1211 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1212 {
1213 PROS_SHARED_CACHE_MAP SharedCacheMap;
1214 DPRINT("CcRosRemoveIfClosed()\n");
1215 KeAcquireGuardedMutex(&ViewLock);
1216 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1217 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1218 {
1219 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1220 }
1221 KeReleaseGuardedMutex(&ViewLock);
1222 }
1223
1224
1225 VOID
1226 NTAPI
1227 CcRosDereferenceCache (
1228 PFILE_OBJECT FileObject)
1229 {
1230 PROS_SHARED_CACHE_MAP SharedCacheMap;
1231 KeAcquireGuardedMutex(&ViewLock);
1232 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1233 ASSERT(SharedCacheMap);
1234 if (SharedCacheMap->OpenCount > 0)
1235 {
1236 SharedCacheMap->OpenCount--;
1237 if (SharedCacheMap->OpenCount == 0)
1238 {
1239 MmFreeSectionSegments(SharedCacheMap->FileObject);
1240 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1241 }
1242 }
1243 KeReleaseGuardedMutex(&ViewLock);
1244 }
1245
1246 NTSTATUS
1247 NTAPI
1248 CcRosReleaseFileCache (
1249 PFILE_OBJECT FileObject)
1250 /*
1251 * FUNCTION: Called by the file system when a handle to a file object
1252 * has been closed.
1253 */
1254 {
1255 PROS_SHARED_CACHE_MAP SharedCacheMap;
1256
1257 KeAcquireGuardedMutex(&ViewLock);
1258
1259 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1260 {
1261 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1262 if (FileObject->PrivateCacheMap != NULL)
1263 {
1264 FileObject->PrivateCacheMap = NULL;
1265 if (SharedCacheMap->OpenCount > 0)
1266 {
1267 SharedCacheMap->OpenCount--;
1268 if (SharedCacheMap->OpenCount == 0)
1269 {
1270 MmFreeSectionSegments(SharedCacheMap->FileObject);
1271 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1272 }
1273 }
1274 }
1275 }
1276 KeReleaseGuardedMutex(&ViewLock);
1277 return STATUS_SUCCESS;
1278 }
1279
1280 NTSTATUS
1281 NTAPI
1282 CcTryToInitializeFileCache (
1283 PFILE_OBJECT FileObject)
1284 {
1285 PROS_SHARED_CACHE_MAP SharedCacheMap;
1286 NTSTATUS Status;
1287
1288 KeAcquireGuardedMutex(&ViewLock);
1289
1290 ASSERT(FileObject->SectionObjectPointer);
1291 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1292 if (SharedCacheMap == NULL)
1293 {
1294 Status = STATUS_UNSUCCESSFUL;
1295 }
1296 else
1297 {
1298 if (FileObject->PrivateCacheMap == NULL)
1299 {
1300 FileObject->PrivateCacheMap = SharedCacheMap;
1301 SharedCacheMap->OpenCount++;
1302 }
1303 Status = STATUS_SUCCESS;
1304 }
1305 KeReleaseGuardedMutex(&ViewLock);
1306
1307 return Status;
1308 }
1309
1310
1311 NTSTATUS
1312 NTAPI
1313 CcRosInitializeFileCache (
1314 PFILE_OBJECT FileObject,
1315 PCC_FILE_SIZES FileSizes,
1316 BOOLEAN PinAccess,
1317 PCACHE_MANAGER_CALLBACKS CallBacks,
1318 PVOID LazyWriterContext)
1319 /*
1320 * FUNCTION: Initializes a shared cache map for a file object
1321 */
1322 {
1323 PROS_SHARED_CACHE_MAP SharedCacheMap;
1324
1325 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1326 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1327 FileObject, SharedCacheMap);
1328
1329 KeAcquireGuardedMutex(&ViewLock);
1330 if (SharedCacheMap == NULL)
1331 {
1332 KIRQL OldIrql;
1333
1334 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1335 if (SharedCacheMap == NULL)
1336 {
1337 KeReleaseGuardedMutex(&ViewLock);
1338 return STATUS_INSUFFICIENT_RESOURCES;
1339 }
1340 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1341 ObReferenceObjectByPointer(FileObject,
1342 FILE_ALL_ACCESS,
1343 NULL,
1344 KernelMode);
1345 SharedCacheMap->FileObject = FileObject;
1346 SharedCacheMap->Callbacks = CallBacks;
1347 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1348 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1349 SharedCacheMap->FileSize = FileSizes->FileSize;
1350 SharedCacheMap->PinAccess = PinAccess;
1351 SharedCacheMap->DirtyPageThreshold = 0;
1352 SharedCacheMap->DirtyPages = 0;
1353 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1354 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1355 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1356
1357 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1358 InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
1359 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1360 }
1361 if (FileObject->PrivateCacheMap == NULL)
1362 {
1363 FileObject->PrivateCacheMap = SharedCacheMap;
1364 SharedCacheMap->OpenCount++;
1365 }
1366 KeReleaseGuardedMutex(&ViewLock);
1367
1368 return STATUS_SUCCESS;
1369 }
1370
1371 /*
1372 * @implemented
1373 */
1374 PFILE_OBJECT
1375 NTAPI
1376 CcGetFileObjectFromSectionPtrs (
1377 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1378 {
1379 PROS_SHARED_CACHE_MAP SharedCacheMap;
1380
1381 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1382
1383 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1384 {
1385 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1386 ASSERT(SharedCacheMap);
1387 return SharedCacheMap->FileObject;
1388 }
1389 return NULL;
1390 }
1391
1392 VOID
1393 NTAPI
1394 CcShutdownLazyWriter (
1395 VOID)
1396 {
1397 /* Simply set the event, lazy writer will stop when it's done */
1398 KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
1399 }
1400
1401 BOOLEAN
1402 INIT_FUNCTION
1403 NTAPI
1404 CcInitView (
1405 VOID)
1406 {
1407 HANDLE LazyWriter;
1408 NTSTATUS Status;
1409 KPRIORITY Priority;
1410 OBJECT_ATTRIBUTES ObjectAttributes;
1411
1412 DPRINT("CcInitView()\n");
1413
1414 InitializeListHead(&DirtyVacbListHead);
1415 InitializeListHead(&VacbLruListHead);
1416 InitializeListHead(&CcDeferredWrites);
1417 InitializeListHead(&CcCleanSharedCacheMapList);
1418 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1419 KeInitializeSpinLock(&iSharedCacheMapLock);
1420 KeInitializeGuardedMutex(&ViewLock);
1421 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1422 NULL,
1423 NULL,
1424 0,
1425 sizeof(INTERNAL_BCB),
1426 TAG_BCB,
1427 20);
1428 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1429 NULL,
1430 NULL,
1431 0,
1432 sizeof(ROS_SHARED_CACHE_MAP),
1433 TAG_SHARED_CACHE_MAP,
1434 20);
1435 ExInitializeNPagedLookasideList(&VacbLookasideList,
1436 NULL,
1437 NULL,
1438 0,
1439 sizeof(ROS_VACB),
1440 TAG_VACB,
1441 20);
1442
1443 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1444
1445 /* Initialize lazy writer events */
1446 KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
1447 KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);
1448
1449 /* Define lazy writer threshold, depending on system type */
1450 switch (MmQuerySystemSize())
1451 {
1452 case MmSmallSystem:
1453 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
1454 break;
1455
1456 case MmMediumSystem:
1457 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
1458 break;
1459
1460 case MmLargeSystem:
1461 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
1462 break;
1463 }
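    /* For example, a "large" system with 512 MiB of RAM and 4 KiB pages
     * (131072 pages) gets a threshold of 16384 + 32768 = 49152 dirty pages,
     * i.e. 3/8 of physical memory. */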
1464
1465 /* Start the lazy writer thread */
1466 InitializeObjectAttributes(&ObjectAttributes,
1467 NULL,
1468 OBJ_KERNEL_HANDLE,
1469 NULL,
1470 NULL);
1471 Status = PsCreateSystemThread(&LazyWriter,
1472 THREAD_ALL_ACCESS,
1473 &ObjectAttributes,
1474 NULL,
1475 NULL,
1476 CciLazyWriter,
1477 NULL);
1478 if (!NT_SUCCESS(Status))
1479 {
1480 return FALSE;
1481 }
1482
1483 Priority = 27;
1484 Status = NtSetInformationThread(LazyWriter,
1485 ThreadPriority,
1486 &Priority,
1487 sizeof(Priority));
1488 ASSERT(NT_SUCCESS(Status));
1489
1490 /* Handle is not needed */
1491 ObCloseHandle(LazyWriter, KernelMode);
1492
1493 CcInitCacheZeroPage();
1494
1495 return TRUE;
1496 }
1497
1498 #if DBG && defined(KDBG)
1499 BOOLEAN
1500 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1501 {
1502 PLIST_ENTRY ListEntry;
1503 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1504
1505 KdbpPrint(" Usage Summary (in kb)\n");
1506 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1507     /* No need to acquire the spin lock here, we're running in the kernel debugger */
1508 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1509 ListEntry != &CcCleanSharedCacheMapList;
1510 ListEntry = ListEntry->Flink)
1511 {
1512 PLIST_ENTRY Vacbs;
1513 ULONG Valid = 0, Dirty = 0;
1514 PROS_SHARED_CACHE_MAP SharedCacheMap;
1515 PUNICODE_STRING FileName;
1516
1517 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1518
1519 /* Dirty size */
1520 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1521
1522 /* First, count for all the associated VACB */
1523 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1524 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1525 Vacbs = Vacbs->Flink)
1526 {
1527 PROS_VACB Vacb;
1528
1529 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1530 if (Vacb->Valid)
1531 {
1532 Valid += VACB_MAPPING_GRANULARITY / 1024;
1533 }
1534 }
1535
1536 /* Setup name */
1537 if (SharedCacheMap->FileObject != NULL &&
1538 SharedCacheMap->FileObject->FileName.Length != 0)
1539 {
1540 FileName = &SharedCacheMap->FileObject->FileName;
1541 }
1542 else
1543 {
1544 FileName = &NoName;
1545 }
1546
1547 /* And print */
1548 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
1549 }
1550
1551 return TRUE;
1552 }
1553 #endif
1554
1555 /* EOF */