1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
 19  * (1) If caching for the FCB hasn't been initiated then do so by calling
 20  *     CcInitializeFileCache.
21 *
 22  * (2) For each 4k region which is being read or written, obtain a cache page
23 * by calling CcRequestCachePage.
24 *
 25  * (3) If the page is being read, or is only partially written, and it is not
 26  * up to date, then read its data from the underlying medium. If the read fails
 27  * then call CcReleaseCachePage with VALID set to FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
 31  * (5) Release the cache page (an illustrative sketch follows this comment block).
 32  */
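
/*
 * Illustrative sketch only (not part of ReactOS): a minimal read path that
 * follows the five steps above. The FCB layout, MyFsdReadPageFromDisk and the
 * exact prototypes of CcInitializeFileCache, CcRequestCachePage and
 * CcReleaseCachePage are assumptions made for the example; only the overall
 * flow is meant to match the procedure described in the notes.
 *
 *   NTSTATUS
 *   MyFsdCopyRead(PMY_FCB Fcb, LONGLONG Offset, ULONG Length, PVOID Buffer)
 *   {
 *       NTSTATUS Status;
 *
 *       // (1) Set up caching for this FCB on first use
 *       if (!Fcb->CacheInitialized)
 *       {
 *           CcInitializeFileCache(Fcb->FileObject, &Fcb->FileSizes);
 *           Fcb->CacheInitialized = TRUE;
 *       }
 *
 *       while (Length > 0)
 *       {
 *           PVOID CachePage;
 *           BOOLEAN Valid;
 *           ULONG PageOffset = (ULONG)(Offset % 4096);
 *           ULONG Chunk = min(Length, 4096 - PageOffset);
 *
 *           // (2) Obtain the cache page covering this 4k region
 *           CcRequestCachePage(Fcb, Offset, &CachePage, &Valid);
 *
 *           // (3) The page is being read and is not up to date:
 *           //     fetch its contents from the underlying medium first
 *           if (!Valid)
 *           {
 *               Status = MyFsdReadPageFromDisk(Fcb, Offset - PageOffset, CachePage);
 *               if (!NT_SUCCESS(Status))
 *               {
 *                   CcReleaseCachePage(Fcb, CachePage, FALSE); // VALID == FALSE
 *                   return Status;
 *               }
 *           }
 *
 *           // (4) Copy the data out of the page
 *           RtlCopyMemory(Buffer, (PUCHAR)CachePage + PageOffset, Chunk);
 *
 *           // (5) Release the cache page
 *           CcReleaseCachePage(Fcb, CachePage, TRUE);
 *
 *           Offset += Chunk;
 *           Buffer = (PUCHAR)Buffer + Chunk;
 *           Length -= Chunk;
 *       }
 *
 *       return STATUS_SUCCESS;
 *   }
 */
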
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
 54  /* Counters:
 55   * - Number of pages flushed by the lazy writer
 56   * - Number of times the lazy writer ran
 57   */
58 ULONG CcLazyWritePages = 0;
59 ULONG CcLazyWriteIos = 0;
60
 61  /* Internal vars (MS):
 62   * - Threshold above which the lazy writer will take action
 63   * - Number of dirty pages
 64   * - List for deferred writes
 65   * - Spinlock protecting the deferred write list
 66   * - List of "clean" shared cache maps
 67   */
68 ULONG CcDirtyPageThreshold = 0;
69 ULONG CcTotalDirtyPages = 0;
70 LIST_ENTRY CcDeferredWrites;
71 KSPIN_LOCK CcDeferredWriteSpinLock;
72 LIST_ENTRY CcCleanSharedCacheMapList;
73
 74  /* Internal vars (ROS):
 75   * - Event to notify the lazy writer to shut down
 76   * - Event to inform watchers that the lazy writer is done for this loop
 77   * - Lock for CcCleanSharedCacheMapList
 78   */
79 KEVENT iLazyWriterShutdown;
80 KEVENT iLazyWriterNotify;
81 KSPIN_LOCK iSharedCacheMapLock;
82
83 #if DBG
84 static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
85 {
86 ++vacb->ReferenceCount;
87 if (vacb->SharedCacheMap->Trace)
88 {
89 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
90 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
91 }
92 }
93 static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
94 {
95 --vacb->ReferenceCount;
96 if (vacb->SharedCacheMap->Trace)
97 {
98 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
99 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
100 }
101 }
102 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
103 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
104 #else
105 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
106 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
107 #endif
108
109 NTSTATUS
110 CcRosInternalFreeVacb(PROS_VACB Vacb);
111
112
113 /* FUNCTIONS *****************************************************************/
114
115 VOID
116 NTAPI
117 CcRosTraceCacheMap (
118 PROS_SHARED_CACHE_MAP SharedCacheMap,
119 BOOLEAN Trace )
120 {
121 #if DBG
122 KIRQL oldirql;
123 PLIST_ENTRY current_entry;
124 PROS_VACB current;
125
126 if (!SharedCacheMap)
127 return;
128
129 SharedCacheMap->Trace = Trace;
130
131 if (Trace)
132 {
133 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
134
135 KeAcquireGuardedMutex(&ViewLock);
136 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
137
138 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
139 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
140 {
141 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
142 current_entry = current_entry->Flink;
143
144 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
145 current, current->ReferenceCount, current->Dirty, current->PageOut );
146 }
147 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
148 KeReleaseGuardedMutex(&ViewLock);
149 }
150 else
151 {
152 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
153 }
154
155 #else
156 UNREFERENCED_PARAMETER(SharedCacheMap);
157 UNREFERENCED_PARAMETER(Trace);
158 #endif
159 }
160
161 NTSTATUS
162 NTAPI
163 CcRosFlushVacb (
164 PROS_VACB Vacb)
165 {
166 NTSTATUS Status;
167 KIRQL oldIrql;
168
169 Status = CcWriteVirtualAddress(Vacb);
170 if (NT_SUCCESS(Status))
171 {
172 KeAcquireGuardedMutex(&ViewLock);
173 KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
174
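        /* The view is clean again: remove it from the dirty list, adjust the
         * dirty page counters, and drop the reference held for the dirty state */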
175 Vacb->Dirty = FALSE;
176 RemoveEntryList(&Vacb->DirtyVacbListEntry);
177 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
178 Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
179 CcRosVacbDecRefCount(Vacb);
180
181 KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
182 KeReleaseGuardedMutex(&ViewLock);
183 }
184
185 return Status;
186 }
187
188 NTSTATUS
189 NTAPI
190 CcRosFlushDirtyPages (
191 ULONG Target,
192 PULONG Count,
193 BOOLEAN Wait,
194 BOOLEAN CalledFromLazy)
195 {
196 PLIST_ENTRY current_entry;
197 PROS_VACB current;
198 BOOLEAN Locked;
199 NTSTATUS Status;
200 LARGE_INTEGER ZeroTimeout;
201
202 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
203
204 (*Count) = 0;
205 ZeroTimeout.QuadPart = 0;
206
207 KeEnterCriticalRegion();
208 KeAcquireGuardedMutex(&ViewLock);
209
210 current_entry = DirtyVacbListHead.Flink;
211 if (current_entry == &DirtyVacbListHead)
212 {
213 DPRINT("No Dirty pages\n");
214 }
215
216 while ((current_entry != &DirtyVacbListHead) && (Target > 0))
217 {
218 current = CONTAINING_RECORD(current_entry,
219 ROS_VACB,
220 DirtyVacbListEntry);
221 current_entry = current_entry->Flink;
222
223 CcRosVacbIncRefCount(current);
224
225 /* When performing lazy write, don't handle temporary files */
226 if (CalledFromLazy &&
227 BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
228 {
229 CcRosVacbDecRefCount(current);
230 continue;
231 }
232
233 Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
234 current->SharedCacheMap->LazyWriteContext, Wait);
235 if (!Locked)
236 {
237 CcRosVacbDecRefCount(current);
238 continue;
239 }
240
241 Status = CcRosAcquireVacbLock(current,
242 Wait ? NULL : &ZeroTimeout);
243 if (Status != STATUS_SUCCESS)
244 {
245 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
246 current->SharedCacheMap->LazyWriteContext);
247 CcRosVacbDecRefCount(current);
248 continue;
249 }
250
251 ASSERT(current->Dirty);
252
 253         /* One reference was added above; skip the VACB if it is still referenced or pinned by someone else on top of that */
254 if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
255 (current->ReferenceCount > 3 && current->PinCount > 1))
256 {
257 CcRosReleaseVacbLock(current);
258 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
259 current->SharedCacheMap->LazyWriteContext);
260 CcRosVacbDecRefCount(current);
261 continue;
262 }
263
264 KeReleaseGuardedMutex(&ViewLock);
265
266 Status = CcRosFlushVacb(current);
267
268 CcRosReleaseVacbLock(current);
269 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
270 current->SharedCacheMap->LazyWriteContext);
271
272 KeAcquireGuardedMutex(&ViewLock);
273 CcRosVacbDecRefCount(current);
274
275 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
276 (Status != STATUS_MEDIA_WRITE_PROTECTED))
277 {
278 DPRINT1("CC: Failed to flush VACB.\n");
279 }
280 else
281 {
282 ULONG PagesFreed;
283
284 /* How many pages did we free? */
285 PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
286 (*Count) += PagesFreed;
287
288 /* Make sure we don't overflow target! */
289 if (Target < PagesFreed)
290 {
291 /* If we would have, jump to zero directly */
292 Target = 0;
293 }
294 else
295 {
296 Target -= PagesFreed;
297 }
298 }
299
300 current_entry = DirtyVacbListHead.Flink;
301 }
302
303 KeReleaseGuardedMutex(&ViewLock);
304 KeLeaveCriticalRegion();
305
306 DPRINT("CcRosFlushDirtyPages() finished\n");
307 return STATUS_SUCCESS;
308 }
309
310 /* FIXME: Someday this could somewhat implement write-behind/read-ahead */
311 VOID
312 NTAPI
313 CciLazyWriter(PVOID Unused)
314 {
315 LARGE_INTEGER OneSecond;
316
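    /* Relative timeouts are negative and expressed in 100ns units:
     * -1 * 1000 * 1000 * 10 = -10,000,000 * 100ns = one second */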
317 OneSecond.QuadPart = (LONGLONG)-1*1000*1000*10;
318
319 while (TRUE)
320 {
321 NTSTATUS Status;
322 PLIST_ENTRY ListEntry;
323 ULONG Target, Count = 0;
324
 325         /* Wake up once per second, or earlier if we're asked to stop */
326 Status = KeWaitForSingleObject(&iLazyWriterShutdown,
327 Executive,
328 KernelMode,
329 FALSE,
330 &OneSecond);
331
 332         /* If the wait succeeded, the shutdown event was signaled: stop running */
333 if (Status == STATUS_SUCCESS)
334 {
335 break;
336 }
337
338 /* We're not sleeping anymore */
339 KeClearEvent(&iLazyWriterNotify);
340
341 /* Our target is one-eighth of the dirty pages */
342 Target = CcTotalDirtyPages / 8;
343 if (Target != 0)
344 {
345 /* Flush! */
346 DPRINT("Lazy writer starting (%d)\n", Target);
347 CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);
348
349 /* And update stats */
350 CcLazyWritePages += Count;
351 ++CcLazyWriteIos;
352 DPRINT("Lazy writer done (%d)\n", Count);
353 }
354
355 /* Inform people waiting on us that we're done */
356 KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
357
358 /* Likely not optimal, but let's handle one deferred write now! */
359 ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
360 if (ListEntry != NULL)
361 {
362 PROS_DEFERRED_WRITE_CONTEXT Context;
363
364 /* Extract the context */
365 Context = CONTAINING_RECORD(ListEntry, ROS_DEFERRED_WRITE_CONTEXT, CcDeferredWritesEntry);
366
367 /* Can we write now? */
368 if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, Context->Retrying))
369 {
370 /* Yes! Do it, and destroy the associated context */
371 Context->PostRoutine(Context->Context1, Context->Context2);
372 ExFreePoolWithTag(Context, 'CcDw');
373 }
374 else
375 {
 376             /* Otherwise, requeue it at the tail so that it doesn't block others.
 377              * This clearly needs improvement, but given the poor algorithm
 378              * currently used, it's better than nothing!
 379              */
380 ExInterlockedInsertTailList(&CcDeferredWrites,
381 &Context->CcDeferredWritesEntry,
382 &CcDeferredWriteSpinLock);
383 }
384 }
385 }
386 }
387
388 NTSTATUS
389 CcRosTrimCache (
390 ULONG Target,
391 ULONG Priority,
392 PULONG NrFreed)
393 /*
394 * FUNCTION: Try to free some memory from the file cache.
395 * ARGUMENTS:
396 * Target - The number of pages to be freed.
397 * Priority - The priority of free (currently unused).
398 * NrFreed - Points to a variable where the number of pages
399 * actually freed is returned.
400 */
401 {
402 PLIST_ENTRY current_entry;
403 PROS_VACB current;
404 ULONG PagesFreed;
405 KIRQL oldIrql;
406 LIST_ENTRY FreeList;
407 PFN_NUMBER Page;
408 ULONG i;
409 BOOLEAN FlushedPages = FALSE;
410
411 DPRINT("CcRosTrimCache(Target %lu)\n", Target);
412
413 InitializeListHead(&FreeList);
414
415 *NrFreed = 0;
416
417 retry:
418 KeAcquireGuardedMutex(&ViewLock);
419
420 current_entry = VacbLruListHead.Flink;
421 while (current_entry != &VacbLruListHead)
422 {
423 current = CONTAINING_RECORD(current_entry,
424 ROS_VACB,
425 VacbLruListEntry);
426 current_entry = current_entry->Flink;
427
428 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
429
430 /* Reference the VACB */
431 CcRosVacbIncRefCount(current);
432
433 /* Check if it's mapped and not dirty */
434 if (current->MappedCount > 0 && !current->Dirty)
435 {
436 /* We have to break these locks because Cc sucks */
437 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
438 KeReleaseGuardedMutex(&ViewLock);
439
440 /* Page out the VACB */
441 for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
442 {
443 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
444
445 MmPageOutPhysicalAddress(Page);
446 }
447
448 /* Reacquire the locks */
449 KeAcquireGuardedMutex(&ViewLock);
450 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
451 }
452
453 /* Dereference the VACB */
454 CcRosVacbDecRefCount(current);
455
456 /* Check if we can free this entry now */
457 if (current->ReferenceCount == 0)
458 {
459 ASSERT(!current->Dirty);
460 ASSERT(!current->MappedCount);
461
462 RemoveEntryList(&current->CacheMapVacbListEntry);
463 RemoveEntryList(&current->VacbLruListEntry);
464 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
465
466 /* Calculate how many pages we freed for Mm */
467 PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
468 Target -= PagesFreed;
469 (*NrFreed) += PagesFreed;
470 }
471
472 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
473 }
474
475 KeReleaseGuardedMutex(&ViewLock);
476
477 /* Try flushing pages if we haven't met our target */
478 if ((Target > 0) && !FlushedPages)
479 {
480 /* Flush dirty pages to disk */
481 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
482 FlushedPages = TRUE;
483
484 /* We can only swap as many pages as we flushed */
485 if (PagesFreed < Target) Target = PagesFreed;
486
487 /* Check if we flushed anything */
488 if (PagesFreed != 0)
489 {
490 /* Try again after flushing dirty pages */
491 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
492 goto retry;
493 }
494 }
495
496 while (!IsListEmpty(&FreeList))
497 {
498 current_entry = RemoveHeadList(&FreeList);
499 current = CONTAINING_RECORD(current_entry,
500 ROS_VACB,
501 CacheMapVacbListEntry);
502 CcRosInternalFreeVacb(current);
503 }
504
505 DPRINT("Evicted %lu cache pages\n", (*NrFreed));
506
507 return STATUS_SUCCESS;
508 }
509
510 NTSTATUS
511 NTAPI
512 CcRosReleaseVacb (
513 PROS_SHARED_CACHE_MAP SharedCacheMap,
514 PROS_VACB Vacb,
515 BOOLEAN Valid,
516 BOOLEAN Dirty,
517 BOOLEAN Mapped)
518 {
519 BOOLEAN WasDirty;
520
521 ASSERT(SharedCacheMap);
522
523 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
524 SharedCacheMap, Vacb, Valid);
525
526 Vacb->Valid = Valid;
527
528 WasDirty = FALSE;
529 if (Dirty)
530 {
531 if (!Vacb->Dirty)
532 {
533 CcRosMarkDirtyVacb(Vacb);
534 }
535 else
536 {
537 WasDirty = TRUE;
538 }
539 }
540
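    /* Balance the reference counts: the caller's reference is dropped here,
     * while the first mapping of the view and a dirty view each keep an extra
     * reference of their own */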
541 if (Mapped)
542 {
543 Vacb->MappedCount++;
544 }
545 CcRosVacbDecRefCount(Vacb);
546 if (Mapped && (Vacb->MappedCount == 1))
547 {
548 CcRosVacbIncRefCount(Vacb);
549 }
550 if (!WasDirty && Vacb->Dirty)
551 {
552 CcRosVacbIncRefCount(Vacb);
553 }
554
555 CcRosReleaseVacbLock(Vacb);
556
557 return STATUS_SUCCESS;
558 }
559
560 /* Returns with VACB Lock Held! */
561 PROS_VACB
562 NTAPI
563 CcRosLookupVacb (
564 PROS_SHARED_CACHE_MAP SharedCacheMap,
565 LONGLONG FileOffset)
566 {
567 PLIST_ENTRY current_entry;
568 PROS_VACB current;
569 KIRQL oldIrql;
570
571 ASSERT(SharedCacheMap);
572
573 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
574 SharedCacheMap, FileOffset);
575
576 KeAcquireGuardedMutex(&ViewLock);
577 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
578
579 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
580 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
581 {
582 current = CONTAINING_RECORD(current_entry,
583 ROS_VACB,
584 CacheMapVacbListEntry);
585 if (IsPointInRange(current->FileOffset.QuadPart,
586 VACB_MAPPING_GRANULARITY,
587 FileOffset))
588 {
589 CcRosVacbIncRefCount(current);
590 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
591 KeReleaseGuardedMutex(&ViewLock);
592 CcRosAcquireVacbLock(current, NULL);
593 return current;
594 }
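        /* The per-file VACB list is kept sorted by file offset, so we can stop
         * searching once we have walked past the requested offset */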
595 if (current->FileOffset.QuadPart > FileOffset)
596 break;
597 current_entry = current_entry->Flink;
598 }
599
600 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
601 KeReleaseGuardedMutex(&ViewLock);
602
603 return NULL;
604 }
605
606 VOID
607 NTAPI
608 CcRosMarkDirtyVacb (
609 PROS_VACB Vacb)
610 {
611 KIRQL oldIrql;
612 PROS_SHARED_CACHE_MAP SharedCacheMap;
613
614 SharedCacheMap = Vacb->SharedCacheMap;
615
616 KeAcquireGuardedMutex(&ViewLock);
617 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
618
619 if (!Vacb->Dirty)
620 {
621 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
622 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
623 Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
624 }
625 else
626 {
627 CcRosVacbDecRefCount(Vacb);
628 }
629
630 /* Move to the tail of the LRU list */
631 RemoveEntryList(&Vacb->VacbLruListEntry);
632 InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
633
634 Vacb->Dirty = TRUE;
635
636 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
637 KeReleaseGuardedMutex(&ViewLock);
638 }
639
640 NTSTATUS
641 NTAPI
642 CcRosMarkDirtyFile (
643 PROS_SHARED_CACHE_MAP SharedCacheMap,
644 LONGLONG FileOffset)
645 {
646 PROS_VACB Vacb;
647
648 ASSERT(SharedCacheMap);
649
 650     DPRINT("CcRosMarkDirtyFile(SharedCacheMap 0x%p, FileOffset %I64u)\n",
651 SharedCacheMap, FileOffset);
652
653 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
654 if (Vacb == NULL)
655 {
656 KeBugCheck(CACHE_MANAGER);
657 }
658
659 CcRosMarkDirtyVacb(Vacb);
660
661 CcRosReleaseVacbLock(Vacb);
662
663 return STATUS_SUCCESS;
664 }
665
666 NTSTATUS
667 NTAPI
668 CcRosUnmapVacb (
669 PROS_SHARED_CACHE_MAP SharedCacheMap,
670 LONGLONG FileOffset,
671 BOOLEAN NowDirty)
672 {
673 PROS_VACB Vacb;
674 BOOLEAN WasDirty;
675
676 ASSERT(SharedCacheMap);
677
678 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
679 SharedCacheMap, FileOffset, NowDirty);
680
681 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
682 if (Vacb == NULL)
683 {
684 return STATUS_UNSUCCESSFUL;
685 }
686
687 WasDirty = FALSE;
688 if (NowDirty)
689 {
690 if (!Vacb->Dirty)
691 {
692 CcRosMarkDirtyVacb(Vacb);
693 }
694 else
695 {
696 WasDirty = TRUE;
697 }
698 }
699
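    /* Drop this mapping; the last unmap also releases the reference held for
     * being mapped, while a newly dirtied view keeps one for the dirty list */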
700 Vacb->MappedCount--;
701
702 CcRosVacbDecRefCount(Vacb);
703 if (!WasDirty && NowDirty)
704 {
705 CcRosVacbIncRefCount(Vacb);
706 }
707 if (Vacb->MappedCount == 0)
708 {
709 CcRosVacbDecRefCount(Vacb);
710 }
711
712 CcRosReleaseVacbLock(Vacb);
713
714 return STATUS_SUCCESS;
715 }
716
717 static
718 NTSTATUS
719 CcRosMapVacb(
720 PROS_VACB Vacb)
721 {
722 ULONG i;
723 NTSTATUS Status;
724 ULONG_PTR NumberOfPages;
725
726 /* Create a memory area. */
727 MmLockAddressSpace(MmGetKernelAddressSpace());
728 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
729 0, // nothing checks for VACB mareas, so set to 0
730 &Vacb->BaseAddress,
731 VACB_MAPPING_GRANULARITY,
732 PAGE_READWRITE,
733 (PMEMORY_AREA*)&Vacb->MemoryArea,
734 0,
735 PAGE_SIZE);
736 MmUnlockAddressSpace(MmGetKernelAddressSpace());
737 if (!NT_SUCCESS(Status))
738 {
739 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
740 return Status;
741 }
742
743 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
744 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
745
746 /* Create a virtual mapping for this memory area */
747 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
748 for (i = 0; i < NumberOfPages; i++)
749 {
750 PFN_NUMBER PageFrameNumber;
751
752 MI_SET_USAGE(MI_USAGE_CACHE);
753 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
754 if (PageFrameNumber == 0)
755 {
756 DPRINT1("Unable to allocate page\n");
757 KeBugCheck(MEMORY_MANAGEMENT);
758 }
759
760 Status = MmCreateVirtualMapping(NULL,
761 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
762 PAGE_READWRITE,
763 &PageFrameNumber,
764 1);
765 if (!NT_SUCCESS(Status))
766 {
767 DPRINT1("Unable to create virtual mapping\n");
768 KeBugCheck(MEMORY_MANAGEMENT);
769 }
770 }
771
772 return STATUS_SUCCESS;
773 }
774
775 static
776 NTSTATUS
777 CcRosCreateVacb (
778 PROS_SHARED_CACHE_MAP SharedCacheMap,
779 LONGLONG FileOffset,
780 PROS_VACB *Vacb)
781 {
782 PROS_VACB current;
783 PROS_VACB previous;
784 PLIST_ENTRY current_entry;
785 NTSTATUS Status;
786 KIRQL oldIrql;
787
788 ASSERT(SharedCacheMap);
789
790 DPRINT("CcRosCreateVacb()\n");
791
792 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
793 {
794 *Vacb = NULL;
795 return STATUS_INVALID_PARAMETER;
796 }
797
798 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
799 current->BaseAddress = NULL;
800 current->Valid = FALSE;
801 current->Dirty = FALSE;
802 current->PageOut = FALSE;
803 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
804 current->SharedCacheMap = SharedCacheMap;
805 #if DBG
806 if (SharedCacheMap->Trace)
807 {
808 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
809 }
810 #endif
811 current->MappedCount = 0;
812 current->DirtyVacbListEntry.Flink = NULL;
813 current->DirtyVacbListEntry.Blink = NULL;
814 current->ReferenceCount = 1;
815 current->PinCount = 0;
816 KeInitializeMutex(&current->Mutex, 0);
817 CcRosAcquireVacbLock(current, NULL);
818 KeAcquireGuardedMutex(&ViewLock);
819
820 *Vacb = current;
 821     /* There is a window between the call to CcRosLookupVacb
 822      * and CcRosCreateVacb. We must check whether a VACB for the
 823      * file offset already exists. If there is one, we release
 824      * our newly created VACB and return the existing one.
 825      */
826 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
827 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
828 previous = NULL;
829 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
830 {
831 current = CONTAINING_RECORD(current_entry,
832 ROS_VACB,
833 CacheMapVacbListEntry);
834 if (IsPointInRange(current->FileOffset.QuadPart,
835 VACB_MAPPING_GRANULARITY,
836 FileOffset))
837 {
838 CcRosVacbIncRefCount(current);
839 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
840 #if DBG
841 if (SharedCacheMap->Trace)
842 {
843 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
844 SharedCacheMap,
845 (*Vacb),
846 current);
847 }
848 #endif
849 CcRosReleaseVacbLock(*Vacb);
850 KeReleaseGuardedMutex(&ViewLock);
851 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
852 *Vacb = current;
853 CcRosAcquireVacbLock(current, NULL);
854 return STATUS_SUCCESS;
855 }
856 if (current->FileOffset.QuadPart < FileOffset)
857 {
858 ASSERT(previous == NULL ||
859 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
860 previous = current;
861 }
862 if (current->FileOffset.QuadPart > FileOffset)
863 break;
864 current_entry = current_entry->Flink;
865 }
866 /* There was no existing VACB. */
867 current = *Vacb;
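    /* Insert the new VACB right after its predecessor (or at the list head)
     * so the per-file list stays sorted by file offset */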
868 if (previous)
869 {
870 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
871 }
872 else
873 {
874 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
875 }
876 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
877 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
878 KeReleaseGuardedMutex(&ViewLock);
879
880 MI_SET_USAGE(MI_USAGE_CACHE);
881 #if MI_TRACE_PFNS
882 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
883 {
884 PWCHAR pos;
885 ULONG len = 0;
886 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
887 if (pos)
888 {
889 len = wcslen(pos) * sizeof(WCHAR);
890 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
891 }
892 else
893 {
894 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
895 }
896 }
897 #endif
898
899 Status = CcRosMapVacb(current);
900 if (!NT_SUCCESS(Status))
901 {
902 RemoveEntryList(&current->CacheMapVacbListEntry);
903 RemoveEntryList(&current->VacbLruListEntry);
904 CcRosReleaseVacbLock(current);
905 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
906 }
907
908 return Status;
909 }
910
911 NTSTATUS
912 NTAPI
913 CcRosGetVacb (
914 PROS_SHARED_CACHE_MAP SharedCacheMap,
915 LONGLONG FileOffset,
916 PLONGLONG BaseOffset,
917 PVOID* BaseAddress,
918 PBOOLEAN UptoDate,
919 PROS_VACB *Vacb)
920 {
921 PROS_VACB current;
922 NTSTATUS Status;
923
924 ASSERT(SharedCacheMap);
925
926 DPRINT("CcRosGetVacb()\n");
927
928 /*
929 * Look for a VACB already mapping the same data.
930 */
931 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
932 if (current == NULL)
933 {
934 /*
935 * Otherwise create a new VACB.
936 */
937 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
938 if (!NT_SUCCESS(Status))
939 {
940 return Status;
941 }
942 }
943
944 KeAcquireGuardedMutex(&ViewLock);
945
946 /* Move to the tail of the LRU list */
947 RemoveEntryList(&current->VacbLruListEntry);
948 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
949
950 KeReleaseGuardedMutex(&ViewLock);
951
952 /*
953 * Return information about the VACB to the caller.
954 */
955 *UptoDate = current->Valid;
956 *BaseAddress = current->BaseAddress;
957 DPRINT("*BaseAddress %p\n", *BaseAddress);
958 *Vacb = current;
959 *BaseOffset = current->FileOffset.QuadPart;
960 return STATUS_SUCCESS;
961 }
962
963 NTSTATUS
964 NTAPI
965 CcRosRequestVacb (
966 PROS_SHARED_CACHE_MAP SharedCacheMap,
967 LONGLONG FileOffset,
968 PVOID* BaseAddress,
969 PBOOLEAN UptoDate,
970 PROS_VACB *Vacb)
971 /*
972 * FUNCTION: Request a page mapping for a shared cache map
973 */
974 {
975 LONGLONG BaseOffset;
976
977 ASSERT(SharedCacheMap);
978
979 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
980 {
 981         DPRINT1("Bad fileoffset %I64x should be multiple of %x\n",
982 FileOffset, VACB_MAPPING_GRANULARITY);
983 KeBugCheck(CACHE_MANAGER);
984 }
985
986 return CcRosGetVacb(SharedCacheMap,
987 FileOffset,
988 &BaseOffset,
989 BaseAddress,
990 UptoDate,
991 Vacb);
992 }
993
994 static
995 VOID
996 CcFreeCachePage (
997 PVOID Context,
998 MEMORY_AREA* MemoryArea,
999 PVOID Address,
1000 PFN_NUMBER Page,
1001 SWAPENTRY SwapEntry,
1002 BOOLEAN Dirty)
1003 {
1004 ASSERT(SwapEntry == 0);
1005 if (Page != 0)
1006 {
1007 ASSERT(MmGetReferenceCountPage(Page) == 1);
1008 MmReleasePageMemoryConsumer(MC_CACHE, Page);
1009 }
1010 }
1011
1012 NTSTATUS
1013 CcRosInternalFreeVacb (
1014 PROS_VACB Vacb)
1015 /*
1016 * FUNCTION: Releases a VACB associated with a shared cache map
1017 */
1018 {
1019 DPRINT("Freeing VACB 0x%p\n", Vacb);
1020 #if DBG
1021 if (Vacb->SharedCacheMap->Trace)
1022 {
1023 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
1024 }
1025 #endif
1026
1027 MmLockAddressSpace(MmGetKernelAddressSpace());
1028 MmFreeMemoryArea(MmGetKernelAddressSpace(),
1029 Vacb->MemoryArea,
1030 CcFreeCachePage,
1031 NULL);
1032 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1033
1034 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
1035 return STATUS_SUCCESS;
1036 }
1037
1038 /*
1039 * @implemented
1040 */
1041 VOID
1042 NTAPI
1043 CcFlushCache (
1044 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
1045 IN PLARGE_INTEGER FileOffset OPTIONAL,
1046 IN ULONG Length,
1047 OUT PIO_STATUS_BLOCK IoStatus)
1048 {
1049 PROS_SHARED_CACHE_MAP SharedCacheMap;
1050 LARGE_INTEGER Offset;
1051 LONGLONG RemainingLength;
1052 PROS_VACB current;
1053 NTSTATUS Status;
1054 KIRQL oldIrql;
1055
1056 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1057 SectionObjectPointers, FileOffset, Length);
1058
1059 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1060 SectionObjectPointers, FileOffset, Length, IoStatus);
1061
1062 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1063 {
1064 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1065 ASSERT(SharedCacheMap);
1066 if (FileOffset)
1067 {
1068 Offset = *FileOffset;
1069 RemainingLength = Length;
1070 }
1071 else
1072 {
1073 Offset.QuadPart = 0;
1074 RemainingLength = SharedCacheMap->FileSize.QuadPart;
1075 }
1076
1077 if (IoStatus)
1078 {
1079 IoStatus->Status = STATUS_SUCCESS;
1080 IoStatus->Information = 0;
1081 }
1082
1083 while (RemainingLength > 0)
1084 {
1085 current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
1086 if (current != NULL)
1087 {
1088 if (current->Dirty)
1089 {
1090 Status = CcRosFlushVacb(current);
1091 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1092 {
1093 IoStatus->Status = Status;
1094 }
1095 }
1096
1097 CcRosReleaseVacbLock(current);
1098
1099 KeAcquireGuardedMutex(&ViewLock);
1100 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1101 CcRosVacbDecRefCount(current);
1102 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1103 KeReleaseGuardedMutex(&ViewLock);
1104 }
1105
1106 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
1107 RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
1108 }
1109 }
1110 else
1111 {
1112 if (IoStatus)
1113 {
1114 IoStatus->Status = STATUS_INVALID_PARAMETER;
1115 }
1116 }
1117 }
1118
1119 NTSTATUS
1120 NTAPI
1121 CcRosDeleteFileCache (
1122 PFILE_OBJECT FileObject,
1123 PROS_SHARED_CACHE_MAP SharedCacheMap)
1124 /*
1125 * FUNCTION: Releases the shared cache map associated with a file object
1126 */
1127 {
1128 PLIST_ENTRY current_entry;
1129 PROS_VACB current;
1130 LIST_ENTRY FreeList;
1131 KIRQL oldIrql;
1132
1133 ASSERT(SharedCacheMap);
1134
1135 SharedCacheMap->OpenCount++;
1136 KeReleaseGuardedMutex(&ViewLock);
1137
1138 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1139
1140 KeAcquireGuardedMutex(&ViewLock);
1141 SharedCacheMap->OpenCount--;
1142 if (SharedCacheMap->OpenCount == 0)
1143 {
1144 KIRQL OldIrql;
1145
1146 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1147
1148 /*
1149 * Release all VACBs
1150 */
1151 InitializeListHead(&FreeList);
1152 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1153 while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
1154 {
1155 current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
1156 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1157
1158 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1159 CcRosAcquireVacbLock(current, NULL);
1160 RemoveEntryList(&current->VacbLruListEntry);
1161 if (current->Dirty)
1162 {
1163 RemoveEntryList(&current->DirtyVacbListEntry);
1164 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
1165 current->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
1166 DPRINT1("Freeing dirty VACB\n");
1167 }
1168 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
1169 CcRosReleaseVacbLock(current);
1170
1171 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1172 }
1173 #if DBG
1174 SharedCacheMap->Trace = FALSE;
1175 #endif
1176 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1177
1178 KeReleaseGuardedMutex(&ViewLock);
1179 ObDereferenceObject(SharedCacheMap->FileObject);
1180
1181 while (!IsListEmpty(&FreeList))
1182 {
1183 current_entry = RemoveTailList(&FreeList);
1184 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1185 CcRosInternalFreeVacb(current);
1186 }
1187
1188 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1189 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
1190 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1191
1192 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1193 KeAcquireGuardedMutex(&ViewLock);
1194 }
1195 return STATUS_SUCCESS;
1196 }
1197
1198 VOID
1199 NTAPI
1200 CcRosReferenceCache (
1201 PFILE_OBJECT FileObject)
1202 {
1203 PROS_SHARED_CACHE_MAP SharedCacheMap;
1204 KeAcquireGuardedMutex(&ViewLock);
1205 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1206 ASSERT(SharedCacheMap);
1207 ASSERT(SharedCacheMap->OpenCount != 0);
1208 SharedCacheMap->OpenCount++;
1209 KeReleaseGuardedMutex(&ViewLock);
1210 }
1211
1212 VOID
1213 NTAPI
1214 CcRosRemoveIfClosed (
1215 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1216 {
1217 PROS_SHARED_CACHE_MAP SharedCacheMap;
1218 DPRINT("CcRosRemoveIfClosed()\n");
1219 KeAcquireGuardedMutex(&ViewLock);
1220 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1221 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1222 {
1223 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1224 }
1225 KeReleaseGuardedMutex(&ViewLock);
1226 }
1227
1228
1229 VOID
1230 NTAPI
1231 CcRosDereferenceCache (
1232 PFILE_OBJECT FileObject)
1233 {
1234 PROS_SHARED_CACHE_MAP SharedCacheMap;
1235 KeAcquireGuardedMutex(&ViewLock);
1236 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1237 ASSERT(SharedCacheMap);
1238 if (SharedCacheMap->OpenCount > 0)
1239 {
1240 SharedCacheMap->OpenCount--;
1241 if (SharedCacheMap->OpenCount == 0)
1242 {
1243 MmFreeSectionSegments(SharedCacheMap->FileObject);
1244 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1245 }
1246 }
1247 KeReleaseGuardedMutex(&ViewLock);
1248 }
1249
1250 NTSTATUS
1251 NTAPI
1252 CcRosReleaseFileCache (
1253 PFILE_OBJECT FileObject)
1254 /*
1255 * FUNCTION: Called by the file system when a handle to a file object
1256 * has been closed.
1257 */
1258 {
1259 PROS_SHARED_CACHE_MAP SharedCacheMap;
1260
1261 KeAcquireGuardedMutex(&ViewLock);
1262
1263 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1264 {
1265 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1266 if (FileObject->PrivateCacheMap != NULL)
1267 {
1268 FileObject->PrivateCacheMap = NULL;
1269 if (SharedCacheMap->OpenCount > 0)
1270 {
1271 SharedCacheMap->OpenCount--;
1272 if (SharedCacheMap->OpenCount == 0)
1273 {
1274 MmFreeSectionSegments(SharedCacheMap->FileObject);
1275 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1276 }
1277 }
1278 }
1279 }
1280 KeReleaseGuardedMutex(&ViewLock);
1281 return STATUS_SUCCESS;
1282 }
1283
1284 NTSTATUS
1285 NTAPI
1286 CcTryToInitializeFileCache (
1287 PFILE_OBJECT FileObject)
1288 {
1289 PROS_SHARED_CACHE_MAP SharedCacheMap;
1290 NTSTATUS Status;
1291
1292 KeAcquireGuardedMutex(&ViewLock);
1293
1294 ASSERT(FileObject->SectionObjectPointer);
1295 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1296 if (SharedCacheMap == NULL)
1297 {
1298 Status = STATUS_UNSUCCESSFUL;
1299 }
1300 else
1301 {
1302 if (FileObject->PrivateCacheMap == NULL)
1303 {
1304 FileObject->PrivateCacheMap = SharedCacheMap;
1305 SharedCacheMap->OpenCount++;
1306 }
1307 Status = STATUS_SUCCESS;
1308 }
1309 KeReleaseGuardedMutex(&ViewLock);
1310
1311 return Status;
1312 }
1313
1314
1315 NTSTATUS
1316 NTAPI
1317 CcRosInitializeFileCache (
1318 PFILE_OBJECT FileObject,
1319 PCC_FILE_SIZES FileSizes,
1320 BOOLEAN PinAccess,
1321 PCACHE_MANAGER_CALLBACKS CallBacks,
1322 PVOID LazyWriterContext)
1323 /*
1324 * FUNCTION: Initializes a shared cache map for a file object
1325 */
1326 {
1327 PROS_SHARED_CACHE_MAP SharedCacheMap;
1328
1329 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1330 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1331 FileObject, SharedCacheMap);
1332
1333 KeAcquireGuardedMutex(&ViewLock);
1334 if (SharedCacheMap == NULL)
1335 {
1336 KIRQL OldIrql;
1337
1338 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1339 if (SharedCacheMap == NULL)
1340 {
1341 KeReleaseGuardedMutex(&ViewLock);
1342 return STATUS_INSUFFICIENT_RESOURCES;
1343 }
1344 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1345 ObReferenceObjectByPointer(FileObject,
1346 FILE_ALL_ACCESS,
1347 NULL,
1348 KernelMode);
1349 SharedCacheMap->FileObject = FileObject;
1350 SharedCacheMap->Callbacks = CallBacks;
1351 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1352 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1353 SharedCacheMap->FileSize = FileSizes->FileSize;
1354 SharedCacheMap->PinAccess = PinAccess;
1355 SharedCacheMap->DirtyPageThreshold = 0;
1356 SharedCacheMap->DirtyPages = 0;
1357 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1358 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1359 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1360
1361 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1362 InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
1363 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1364 }
1365 if (FileObject->PrivateCacheMap == NULL)
1366 {
1367 FileObject->PrivateCacheMap = SharedCacheMap;
1368 SharedCacheMap->OpenCount++;
1369 }
1370 KeReleaseGuardedMutex(&ViewLock);
1371
1372 return STATUS_SUCCESS;
1373 }
1374
1375 /*
1376 * @implemented
1377 */
1378 PFILE_OBJECT
1379 NTAPI
1380 CcGetFileObjectFromSectionPtrs (
1381 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1382 {
1383 PROS_SHARED_CACHE_MAP SharedCacheMap;
1384
1385 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1386
1387 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1388 {
1389 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1390 ASSERT(SharedCacheMap);
1391 return SharedCacheMap->FileObject;
1392 }
1393 return NULL;
1394 }
1395
1396 VOID
1397 NTAPI
1398 CcShutdownLazyWriter (
1399 VOID)
1400 {
1401 /* Simply set the event, lazy writer will stop when it's done */
1402 KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
1403 }
1404
1405 BOOLEAN
1406 INIT_FUNCTION
1407 NTAPI
1408 CcInitView (
1409 VOID)
1410 {
1411 HANDLE LazyWriter;
1412 NTSTATUS Status;
1413 KPRIORITY Priority;
1414 OBJECT_ATTRIBUTES ObjectAttributes;
1415
1416 DPRINT("CcInitView()\n");
1417
1418 InitializeListHead(&DirtyVacbListHead);
1419 InitializeListHead(&VacbLruListHead);
1420 InitializeListHead(&CcDeferredWrites);
1421 InitializeListHead(&CcCleanSharedCacheMapList);
1422 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1423 KeInitializeSpinLock(&iSharedCacheMapLock);
1424 KeInitializeGuardedMutex(&ViewLock);
1425 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1426 NULL,
1427 NULL,
1428 0,
1429 sizeof(INTERNAL_BCB),
1430 TAG_BCB,
1431 20);
1432 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1433 NULL,
1434 NULL,
1435 0,
1436 sizeof(ROS_SHARED_CACHE_MAP),
1437 TAG_SHARED_CACHE_MAP,
1438 20);
1439 ExInitializeNPagedLookasideList(&VacbLookasideList,
1440 NULL,
1441 NULL,
1442 0,
1443 sizeof(ROS_VACB),
1444 TAG_VACB,
1445 20);
1446
1447 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1448
1449 /* Initialize lazy writer events */
1450 KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
1451 KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);
1452
1453 /* Define lazy writer threshold, depending on system type */
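    /* Small systems: 1/8 of physical pages, medium: 1/4, large: 3/8 */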
1454 switch (MmQuerySystemSize())
1455 {
1456 case MmSmallSystem:
1457 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
1458 break;
1459
1460 case MmMediumSystem:
1461 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
1462 break;
1463
1464 case MmLargeSystem:
1465 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
1466 break;
1467 }
1468
1469 /* Start the lazy writer thread */
1470 InitializeObjectAttributes(&ObjectAttributes,
1471 NULL,
1472 OBJ_KERNEL_HANDLE,
1473 NULL,
1474 NULL);
1475 Status = PsCreateSystemThread(&LazyWriter,
1476 THREAD_ALL_ACCESS,
1477 &ObjectAttributes,
1478 NULL,
1479 NULL,
1480 CciLazyWriter,
1481 NULL);
1482 if (!NT_SUCCESS(Status))
1483 {
1484 return FALSE;
1485 }
1486
1487 Priority = 27;
1488 Status = NtSetInformationThread(LazyWriter,
1489 ThreadPriority,
1490 &Priority,
1491 sizeof(Priority));
1492 ASSERT(NT_SUCCESS(Status));
1493
1494 /* Handle is not needed */
1495 ObCloseHandle(LazyWriter, KernelMode);
1496
1497 CcInitCacheZeroPage();
1498
1499 return TRUE;
1500 }
1501
1502 #if DBG && defined(KDBG)
1503 BOOLEAN
1504 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1505 {
1506 PLIST_ENTRY ListEntry;
1507 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1508
1509 KdbpPrint(" Usage Summary (in kb)\n");
1510 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
 1511     /* No need to acquire the spin lock here, we're running in the kernel debugger */
1512 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1513 ListEntry != &CcCleanSharedCacheMapList;
1514 ListEntry = ListEntry->Flink)
1515 {
1516 PLIST_ENTRY Vacbs;
1517 ULONG Valid = 0, Dirty = 0;
1518 PROS_SHARED_CACHE_MAP SharedCacheMap;
1519 PUNICODE_STRING FileName;
1520
1521 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1522
1523 /* Dirty size */
1524 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1525
 1526         /* First, add up the valid size over all the associated VACBs */
1527 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1528 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1529 Vacbs = Vacbs->Flink)
1530 {
1531 PROS_VACB Vacb;
1532
1533 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1534 if (Vacb->Valid)
1535 {
1536 Valid += VACB_MAPPING_GRANULARITY / 1024;
1537 }
1538 }
1539
1540 /* Setup name */
1541 if (SharedCacheMap->FileObject != NULL &&
1542 SharedCacheMap->FileObject->FileName.Length != 0)
1543 {
1544 FileName = &SharedCacheMap->FileObject->FileName;
1545 }
1546 else
1547 {
1548 FileName = &NoName;
1549 }
1550
1551 /* And print */
 1552         KdbpPrint("%p\t%lu\t%lu\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
1553 }
1554
1555 return TRUE;
1556 }
1557 #endif
1558
1559 /* EOF */