1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
 17  * dispatch routine is as follows:
18 *
 19  * (1) If caching for the FCB hasn't been initiated, then do so by calling
 20  * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
 25  * (3) If the page is being read, or is not being completely written, and it is
 26  * not up to date, then read its data from the underlying medium. If the read
 27  * fails, then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
 31  * (5) Release the cache page by calling CcReleaseCachePage.
32 */
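/* Illustrative sketch of the read side of the procedure above, expressed with
 * the CcRos* routines defined in this file rather than the historical names
 * used in the notes. The SharedCacheMap, ReadOffset, ReadLength and Buffer
 * variables and the MyReadViewFromDisk helper are hypothetical, and the read
 * is assumed to fit within a single view:
 *
 *     LONGLONG BaseOffset = ROUND_DOWN(ReadOffset, VACB_MAPPING_GRANULARITY);
 *     PVOID BaseAddress;
 *     BOOLEAN UptoDate;
 *     PROS_VACB Vacb;
 *     NTSTATUS Status;
 *
 *     // (2) Obtain the view covering this offset (returns with the VACB lock held)
 *     Status = CcRosRequestVacb(SharedCacheMap, BaseOffset,
 *                               &BaseAddress, &UptoDate, &Vacb);
 *     if (!NT_SUCCESS(Status)) return Status;
 *
 *     // (3) The view is not up to date: read it from the underlying medium
 *     if (!UptoDate)
 *     {
 *         Status = MyReadViewFromDisk(SharedCacheMap->FileObject, BaseOffset,
 *                                     BaseAddress, VACB_MAPPING_GRANULARITY);
 *         if (!NT_SUCCESS(Status))
 *         {
 *             // Release with Valid == FALSE and fail the request
 *             CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
 *             return Status;
 *         }
 *     }
 *
 *     // (4) Copy the data out of the view
 *     RtlCopyMemory(Buffer,
 *                   (PUCHAR)BaseAddress + (ReadOffset - BaseOffset),
 *                   ReadLength);
 *
 *     // (5) Release the view: it is now valid, and a plain read does not dirty it
 *     CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
 */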
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
54 /* Counters:
 55  * - Number of pages flushed by the lazy writer
 56  * - Number of times the lazy writer ran
57 */
58 ULONG CcLazyWritePages = 0;
59 ULONG CcLazyWriteIos = 0;
60
61 /* Internal vars (MS):
 62  * - Threshold above which the lazy writer starts flushing
 63  * - Number of dirty pages
 64  * - List of deferred writes
 65  * - Spin lock protecting the deferred write list
 66  * - List of "clean" shared cache maps
 67  * - One-second idle delay for the lazy writer (relative time, in 100-ns units)
68 */
69 ULONG CcDirtyPageThreshold = 0;
70 ULONG CcTotalDirtyPages = 0;
71 LIST_ENTRY CcDeferredWrites;
72 KSPIN_LOCK CcDeferredWriteSpinLock;
73 LIST_ENTRY CcCleanSharedCacheMapList;
74 #ifndef _MSC_VER
75 LARGE_INTEGER CcIdleDelay = {.QuadPart = (LONGLONG)-1*1000*1000*10};
76 #else
77 LARGE_INTEGER CcIdleDelay = {(LONGLONG)-1*1000*1000*10};
78 #endif
79
80 /* Internal vars (ROS):
 81  * - Event to notify the lazy writer to shut down
 82  * - Event to inform watchers that the lazy writer is done for this loop
 83  * - Lock protecting CcCleanSharedCacheMapList
84 */
85 KEVENT iLazyWriterShutdown;
86 KEVENT iLazyWriterNotify;
87 KSPIN_LOCK iSharedCacheMapLock;
88
89 #if DBG
90 static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
91 {
92 ++vacb->ReferenceCount;
93 if (vacb->SharedCacheMap->Trace)
94 {
95 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
96 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
97 }
98 }
99 static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
100 {
101 --vacb->ReferenceCount;
102 ASSERT(!(vacb->ReferenceCount == 0 && vacb->Dirty));
103 if (vacb->SharedCacheMap->Trace)
104 {
105 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
106 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
107 }
108 }
109 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
110 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
111 #else
112 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
113 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
114 #endif
115
116 NTSTATUS
117 CcRosInternalFreeVacb(PROS_VACB Vacb);
118
119
120 /* FUNCTIONS *****************************************************************/
121
122 VOID
123 NTAPI
124 CcRosTraceCacheMap (
125 PROS_SHARED_CACHE_MAP SharedCacheMap,
126 BOOLEAN Trace )
127 {
128 #if DBG
129 KIRQL oldirql;
130 PLIST_ENTRY current_entry;
131 PROS_VACB current;
132
133 if (!SharedCacheMap)
134 return;
135
136 SharedCacheMap->Trace = Trace;
137
138 if (Trace)
139 {
140 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
141
142 KeAcquireGuardedMutex(&ViewLock);
143 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
144
145 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
146 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
147 {
148 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
149 current_entry = current_entry->Flink;
150
151 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
152 current, current->ReferenceCount, current->Dirty, current->PageOut );
153 }
154 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
155 KeReleaseGuardedMutex(&ViewLock);
156 }
157 else
158 {
159 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
160 }
161
162 #else
163 UNREFERENCED_PARAMETER(SharedCacheMap);
164 UNREFERENCED_PARAMETER(Trace);
165 #endif
166 }
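/* Usage sketch (illustrative): a debug session can toggle per-cache-map tracing
 * around a suspect code path; on non-DBG builds the routine does nothing:
 *
 *     CcRosTraceCacheMap(SharedCacheMap, TRUE);   // dump current VACBs, enable tracing
 *     ... exercise the cache ...
 *     CcRosTraceCacheMap(SharedCacheMap, FALSE);  // disable tracing again
 */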
167
168 NTSTATUS
169 NTAPI
170 CcRosFlushVacb (
171 PROS_VACB Vacb)
172 {
173 NTSTATUS Status;
174 KIRQL oldIrql;
175
176 Status = CcWriteVirtualAddress(Vacb);
177 if (NT_SUCCESS(Status))
178 {
179 KeAcquireGuardedMutex(&ViewLock);
180 KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
181
182 Vacb->Dirty = FALSE;
183 RemoveEntryList(&Vacb->DirtyVacbListEntry);
184 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
185 Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
186 CcRosVacbDecRefCount(Vacb);
187
188 KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
189 KeReleaseGuardedMutex(&ViewLock);
190 }
191
192 return Status;
193 }
194
195 NTSTATUS
196 NTAPI
197 CcRosFlushDirtyPages (
198 ULONG Target,
199 PULONG Count,
200 BOOLEAN Wait,
201 BOOLEAN CalledFromLazy)
202 {
203 PLIST_ENTRY current_entry;
204 PROS_VACB current;
205 BOOLEAN Locked;
206 NTSTATUS Status;
207 LARGE_INTEGER ZeroTimeout;
208
209 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
210
211 (*Count) = 0;
212 ZeroTimeout.QuadPart = 0;
213
214 KeEnterCriticalRegion();
215 KeAcquireGuardedMutex(&ViewLock);
216
217 current_entry = DirtyVacbListHead.Flink;
218 if (current_entry == &DirtyVacbListHead)
219 {
220 DPRINT("No Dirty pages\n");
221 }
222
223 while ((current_entry != &DirtyVacbListHead) && (Target > 0))
224 {
225 current = CONTAINING_RECORD(current_entry,
226 ROS_VACB,
227 DirtyVacbListEntry);
228 current_entry = current_entry->Flink;
229
230 CcRosVacbIncRefCount(current);
231
232 /* When performing lazy write, don't handle temporary files */
233 if (CalledFromLazy &&
234 BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
235 {
236 CcRosVacbDecRefCount(current);
237 continue;
238 }
239
240 Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
241 current->SharedCacheMap->LazyWriteContext, Wait);
242 if (!Locked)
243 {
244 CcRosVacbDecRefCount(current);
245 continue;
246 }
247
248 Status = CcRosAcquireVacbLock(current,
249 Wait ? NULL : &ZeroTimeout);
250 if (Status != STATUS_SUCCESS)
251 {
252 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
253 current->SharedCacheMap->LazyWriteContext);
254 CcRosVacbDecRefCount(current);
255 continue;
256 }
257
258 ASSERT(current->Dirty);
259
 260          /* One reference was added above; skip the VACB if it is still in use elsewhere */
261 if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
262 (current->ReferenceCount > 3 && current->PinCount > 1))
263 {
264 CcRosReleaseVacbLock(current);
265 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
266 current->SharedCacheMap->LazyWriteContext);
267 CcRosVacbDecRefCount(current);
268 continue;
269 }
270
271 KeReleaseGuardedMutex(&ViewLock);
272
273 Status = CcRosFlushVacb(current);
274
275 CcRosReleaseVacbLock(current);
276 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
277 current->SharedCacheMap->LazyWriteContext);
278
279 KeAcquireGuardedMutex(&ViewLock);
280 CcRosVacbDecRefCount(current);
281
282 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
283 (Status != STATUS_MEDIA_WRITE_PROTECTED))
284 {
285 DPRINT1("CC: Failed to flush VACB.\n");
286 }
287 else
288 {
289 ULONG PagesFreed;
290
291 /* How many pages did we free? */
292 PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
293 (*Count) += PagesFreed;
294
 295              /* Make sure we don't underflow Target! */
296 if (Target < PagesFreed)
297 {
298 /* If we would have, jump to zero directly */
299 Target = 0;
300 }
301 else
302 {
303 Target -= PagesFreed;
304 }
305 }
306
307 current_entry = DirtyVacbListHead.Flink;
308 }
309
310 KeReleaseGuardedMutex(&ViewLock);
311 KeLeaveCriticalRegion();
312
313 DPRINT("CcRosFlushDirtyPages() finished\n");
314 return STATUS_SUCCESS;
315 }
316
317 /* FIXME: Someday this could somewhat implement write-behind/read-ahead */
318 VOID
319 NTAPI
320 CciLazyWriter(PVOID Unused)
321 {
322 while (TRUE)
323 {
324 NTSTATUS Status;
325 PLIST_ENTRY ListEntry;
326 ULONG Target, Count = 0;
327
 328          /* Run once per second, or exit if we are asked to stop */
329 Status = KeWaitForSingleObject(&iLazyWriterShutdown,
330 Executive,
331 KernelMode,
332 FALSE,
333 &CcIdleDelay);
334
 335          /* If the wait succeeded, the shutdown event was signaled: we have to stop running! */
336 if (Status == STATUS_SUCCESS)
337 {
338 break;
339 }
340
341 /* We're not sleeping anymore */
342 KeClearEvent(&iLazyWriterNotify);
343
344 /* Our target is one-eighth of the dirty pages */
345 Target = CcTotalDirtyPages / 8;
346 if (Target != 0)
347 {
348 /* Flush! */
349 DPRINT("Lazy writer starting (%d)\n", Target);
350 CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);
351
352 /* And update stats */
353 CcLazyWritePages += Count;
354 ++CcLazyWriteIos;
355 DPRINT("Lazy writer done (%d)\n", Count);
356 }
357
358 /* Inform people waiting on us that we're done */
359 KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
360
361 /* Likely not optimal, but let's handle one deferred write now! */
362 ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
363 if (ListEntry != NULL)
364 {
365 PDEFERRED_WRITE Context;
366
367 /* Extract the context */
368 Context = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);
369 ASSERT(Context->NodeTypeCode == NODE_TYPE_DEFERRED_WRITE);
370
371 /* Can we write now? */
372 if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, TRUE))
373 {
374 /* Yes! Do it, and destroy the associated context */
375 Context->PostRoutine(Context->Context1, Context->Context2);
376 ExFreePoolWithTag(Context, 'CcDw');
377 }
378 else
379 {
 380              /* Otherwise, requeue it at the tail so that it doesn't block the others.
 381               * This could clearly be improved, but given the poor algorithm used for now,
 382               * it's better than nothing!
383 */
384 ExInterlockedInsertTailList(&CcDeferredWrites,
385 &Context->DeferredWriteLinks,
386 &CcDeferredWriteSpinLock);
387 }
388 }
389 }
390 }
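/* Caller-side sketch (illustrative, assuming the documented CcCanIWrite /
 * CcDeferWrite pair, which is implemented elsewhere in Cc): a write path that
 * is not allowed to write immediately queues a DEFERRED_WRITE entry on
 * CcDeferredWrites, which the loop above later drains. PostWriteRoutine,
 * Context1 and Context2 are hypothetical names:
 *
 *     if (!CcCanIWrite(FileObject, BytesToWrite, Wait, FALSE))
 *     {
 *         // Queues a DEFERRED_WRITE; the lazy writer invokes PostWriteRoutine
 *         // once CcCanIWrite() accepts the write.
 *         CcDeferWrite(FileObject, PostWriteRoutine, Context1, Context2,
 *                      BytesToWrite, FALSE);
 *         return STATUS_PENDING;
 *     }
 */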
391
392 NTSTATUS
393 CcRosTrimCache (
394 ULONG Target,
395 ULONG Priority,
396 PULONG NrFreed)
397 /*
398 * FUNCTION: Try to free some memory from the file cache.
399 * ARGUMENTS:
400 * Target - The number of pages to be freed.
 401  *       Priority - The priority of the free operation (currently unused).
402 * NrFreed - Points to a variable where the number of pages
403 * actually freed is returned.
404 */
405 {
406 PLIST_ENTRY current_entry;
407 PROS_VACB current;
408 ULONG PagesFreed;
409 KIRQL oldIrql;
410 LIST_ENTRY FreeList;
411 PFN_NUMBER Page;
412 ULONG i;
413 BOOLEAN FlushedPages = FALSE;
414
415 DPRINT("CcRosTrimCache(Target %lu)\n", Target);
416
417 InitializeListHead(&FreeList);
418
419 *NrFreed = 0;
420
421 retry:
422 KeAcquireGuardedMutex(&ViewLock);
423
424 current_entry = VacbLruListHead.Flink;
425 while (current_entry != &VacbLruListHead)
426 {
427 current = CONTAINING_RECORD(current_entry,
428 ROS_VACB,
429 VacbLruListEntry);
430 current_entry = current_entry->Flink;
431
432 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
433
434 /* Reference the VACB */
435 CcRosVacbIncRefCount(current);
436
437 /* Check if it's mapped and not dirty */
438 if (current->MappedCount > 0 && !current->Dirty)
439 {
440 /* We have to break these locks because Cc sucks */
441 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
442 KeReleaseGuardedMutex(&ViewLock);
443
444 /* Page out the VACB */
445 for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
446 {
447 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
448
449 MmPageOutPhysicalAddress(Page);
450 }
451
452 /* Reacquire the locks */
453 KeAcquireGuardedMutex(&ViewLock);
454 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
455 }
456
457 /* Dereference the VACB */
458 CcRosVacbDecRefCount(current);
459
460 /* Check if we can free this entry now */
461 if (current->ReferenceCount == 0)
462 {
463 ASSERT(!current->Dirty);
464 ASSERT(!current->MappedCount);
465
466 RemoveEntryList(&current->CacheMapVacbListEntry);
467 RemoveEntryList(&current->VacbLruListEntry);
468 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
469
470 /* Calculate how many pages we freed for Mm */
471 PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
472 Target -= PagesFreed;
473 (*NrFreed) += PagesFreed;
474 }
475
476 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
477 }
478
479 KeReleaseGuardedMutex(&ViewLock);
480
481 /* Try flushing pages if we haven't met our target */
482 if ((Target > 0) && !FlushedPages)
483 {
484 /* Flush dirty pages to disk */
485 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
486 FlushedPages = TRUE;
487
488 /* We can only swap as many pages as we flushed */
489 if (PagesFreed < Target) Target = PagesFreed;
490
491 /* Check if we flushed anything */
492 if (PagesFreed != 0)
493 {
494 /* Try again after flushing dirty pages */
495 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
496 goto retry;
497 }
498 }
499
500 while (!IsListEmpty(&FreeList))
501 {
502 current_entry = RemoveHeadList(&FreeList);
503 current = CONTAINING_RECORD(current_entry,
504 ROS_VACB,
505 CacheMapVacbListEntry);
506 CcRosInternalFreeVacb(current);
507 }
508
509 DPRINT("Evicted %lu cache pages\n", (*NrFreed));
510
511 return STATUS_SUCCESS;
512 }
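/* Usage sketch: CcRosTrimCache is registered in CcInitView below as the trim
 * callback of the MC_CACHE memory consumer, so Mm normally drives it under
 * memory pressure. A direct call would look like this (the target of 64 pages
 * is illustrative):
 *
 *     ULONG Freed;
 *     CcRosTrimCache(64, 0, &Freed);
 *     DPRINT("Cache gave back %lu pages\n", Freed);
 */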
513
514 NTSTATUS
515 NTAPI
516 CcRosReleaseVacb (
517 PROS_SHARED_CACHE_MAP SharedCacheMap,
518 PROS_VACB Vacb,
519 BOOLEAN Valid,
520 BOOLEAN Dirty,
521 BOOLEAN Mapped)
522 {
523 ASSERT(SharedCacheMap);
524
525 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
526 SharedCacheMap, Vacb, Valid);
527
528 Vacb->Valid = Valid;
529
530 if (Dirty && !Vacb->Dirty)
531 {
532 CcRosMarkDirtyVacb(Vacb);
533 }
534
535 if (Mapped)
536 {
537 Vacb->MappedCount++;
538 }
539 CcRosVacbDecRefCount(Vacb);
540 if (Mapped && (Vacb->MappedCount == 1))
541 {
542 CcRosVacbIncRefCount(Vacb);
543 }
544
545 CcRosReleaseVacbLock(Vacb);
546
547 return STATUS_SUCCESS;
548 }
549
550 /* Returns with VACB Lock Held! */
551 PROS_VACB
552 NTAPI
553 CcRosLookupVacb (
554 PROS_SHARED_CACHE_MAP SharedCacheMap,
555 LONGLONG FileOffset)
556 {
557 PLIST_ENTRY current_entry;
558 PROS_VACB current;
559 KIRQL oldIrql;
560
561 ASSERT(SharedCacheMap);
562
563 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
564 SharedCacheMap, FileOffset);
565
566 KeAcquireGuardedMutex(&ViewLock);
567 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
568
569 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
570 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
571 {
572 current = CONTAINING_RECORD(current_entry,
573 ROS_VACB,
574 CacheMapVacbListEntry);
575 if (IsPointInRange(current->FileOffset.QuadPart,
576 VACB_MAPPING_GRANULARITY,
577 FileOffset))
578 {
579 CcRosVacbIncRefCount(current);
580 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
581 KeReleaseGuardedMutex(&ViewLock);
582 CcRosAcquireVacbLock(current, NULL);
583 return current;
584 }
585 if (current->FileOffset.QuadPart > FileOffset)
586 break;
587 current_entry = current_entry->Flink;
588 }
589
590 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
591 KeReleaseGuardedMutex(&ViewLock);
592
593 return NULL;
594 }
595
596 VOID
597 NTAPI
598 CcRosMarkDirtyVacb (
599 PROS_VACB Vacb)
600 {
601 KIRQL oldIrql;
602 PROS_SHARED_CACHE_MAP SharedCacheMap;
603
604 SharedCacheMap = Vacb->SharedCacheMap;
605
606 KeAcquireGuardedMutex(&ViewLock);
607 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
608
609 ASSERT(!Vacb->Dirty);
610
611 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
612 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
613 Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
614 CcRosVacbIncRefCount(Vacb);
615
616 /* Move to the tail of the LRU list */
617 RemoveEntryList(&Vacb->VacbLruListEntry);
618 InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
619
620 Vacb->Dirty = TRUE;
621
622 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
623 KeReleaseGuardedMutex(&ViewLock);
624 }
625
626 NTSTATUS
627 NTAPI
628 CcRosMarkDirtyFile (
629 PROS_SHARED_CACHE_MAP SharedCacheMap,
630 LONGLONG FileOffset)
631 {
632 PROS_VACB Vacb;
633
634 ASSERT(SharedCacheMap);
635
 636      DPRINT("CcRosMarkDirtyFile(SharedCacheMap 0x%p, FileOffset %I64u)\n",
637 SharedCacheMap, FileOffset);
638
639 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
640 if (Vacb == NULL)
641 {
642 KeBugCheck(CACHE_MANAGER);
643 }
644
645 if (!Vacb->Dirty)
646 {
647 CcRosMarkDirtyVacb(Vacb);
648 }
649
650 CcRosReleaseVacbLock(Vacb);
651
652 return STATUS_SUCCESS;
653 }
654
655 NTSTATUS
656 NTAPI
657 CcRosUnmapVacb (
658 PROS_SHARED_CACHE_MAP SharedCacheMap,
659 LONGLONG FileOffset,
660 BOOLEAN NowDirty)
661 {
662 PROS_VACB Vacb;
663
664 ASSERT(SharedCacheMap);
665
666 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
667 SharedCacheMap, FileOffset, NowDirty);
668
669 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
670 if (Vacb == NULL)
671 {
672 return STATUS_UNSUCCESSFUL;
673 }
674
675 if (NowDirty && !Vacb->Dirty)
676 {
677 CcRosMarkDirtyVacb(Vacb);
678 }
679
680 Vacb->MappedCount--;
681
682 CcRosVacbDecRefCount(Vacb);
683 if (Vacb->MappedCount == 0)
684 {
685 CcRosVacbDecRefCount(Vacb);
686 }
687
688 CcRosReleaseVacbLock(Vacb);
689
690 return STATUS_SUCCESS;
691 }
692
693 static
694 NTSTATUS
695 CcRosMapVacb(
696 PROS_VACB Vacb)
697 {
698 ULONG i;
699 NTSTATUS Status;
700 ULONG_PTR NumberOfPages;
701
702 /* Create a memory area. */
703 MmLockAddressSpace(MmGetKernelAddressSpace());
704 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
705 0, // nothing checks for VACB mareas, so set to 0
706 &Vacb->BaseAddress,
707 VACB_MAPPING_GRANULARITY,
708 PAGE_READWRITE,
709 (PMEMORY_AREA*)&Vacb->MemoryArea,
710 0,
711 PAGE_SIZE);
712 MmUnlockAddressSpace(MmGetKernelAddressSpace());
713 if (!NT_SUCCESS(Status))
714 {
715 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
716 return Status;
717 }
718
719 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
720 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
721
722 /* Create a virtual mapping for this memory area */
723 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
724 for (i = 0; i < NumberOfPages; i++)
725 {
726 PFN_NUMBER PageFrameNumber;
727
728 MI_SET_USAGE(MI_USAGE_CACHE);
729 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
730 if (PageFrameNumber == 0)
731 {
732 DPRINT1("Unable to allocate page\n");
733 KeBugCheck(MEMORY_MANAGEMENT);
734 }
735
736 Status = MmCreateVirtualMapping(NULL,
737 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
738 PAGE_READWRITE,
739 &PageFrameNumber,
740 1);
741 if (!NT_SUCCESS(Status))
742 {
743 DPRINT1("Unable to create virtual mapping\n");
744 KeBugCheck(MEMORY_MANAGEMENT);
745 }
746 }
747
748 return STATUS_SUCCESS;
749 }
750
751 static
752 NTSTATUS
753 CcRosCreateVacb (
754 PROS_SHARED_CACHE_MAP SharedCacheMap,
755 LONGLONG FileOffset,
756 PROS_VACB *Vacb)
757 {
758 PROS_VACB current;
759 PROS_VACB previous;
760 PLIST_ENTRY current_entry;
761 NTSTATUS Status;
762 KIRQL oldIrql;
763
764 ASSERT(SharedCacheMap);
765
766 DPRINT("CcRosCreateVacb()\n");
767
768 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
769 {
770 *Vacb = NULL;
771 return STATUS_INVALID_PARAMETER;
772 }
773
774 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
775 current->BaseAddress = NULL;
776 current->Valid = FALSE;
777 current->Dirty = FALSE;
778 current->PageOut = FALSE;
779 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
780 current->SharedCacheMap = SharedCacheMap;
781 #if DBG
782 if (SharedCacheMap->Trace)
783 {
784 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
785 }
786 #endif
787 current->MappedCount = 0;
788 current->DirtyVacbListEntry.Flink = NULL;
789 current->DirtyVacbListEntry.Blink = NULL;
790 current->ReferenceCount = 1;
791 current->PinCount = 0;
792 KeInitializeMutex(&current->Mutex, 0);
793 CcRosAcquireVacbLock(current, NULL);
794 KeAcquireGuardedMutex(&ViewLock);
795
796 *Vacb = current;
 797      /* There is a window between the call to CcRosLookupVacb
 798       * and CcRosCreateVacb. We must check whether a VACB for this
 799       * file offset already exists. If one does, we release
 800       * our newly created VACB and return the existing one.
801 */
802 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
803 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
804 previous = NULL;
805 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
806 {
807 current = CONTAINING_RECORD(current_entry,
808 ROS_VACB,
809 CacheMapVacbListEntry);
810 if (IsPointInRange(current->FileOffset.QuadPart,
811 VACB_MAPPING_GRANULARITY,
812 FileOffset))
813 {
814 CcRosVacbIncRefCount(current);
815 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
816 #if DBG
817 if (SharedCacheMap->Trace)
818 {
819 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
820 SharedCacheMap,
821 (*Vacb),
822 current);
823 }
824 #endif
825 CcRosReleaseVacbLock(*Vacb);
826 KeReleaseGuardedMutex(&ViewLock);
827 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
828 *Vacb = current;
829 CcRosAcquireVacbLock(current, NULL);
830 return STATUS_SUCCESS;
831 }
832 if (current->FileOffset.QuadPart < FileOffset)
833 {
834 ASSERT(previous == NULL ||
835 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
836 previous = current;
837 }
838 if (current->FileOffset.QuadPart > FileOffset)
839 break;
840 current_entry = current_entry->Flink;
841 }
842 /* There was no existing VACB. */
843 current = *Vacb;
844 if (previous)
845 {
846 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
847 }
848 else
849 {
850 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
851 }
852 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
853 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
854 KeReleaseGuardedMutex(&ViewLock);
855
856 MI_SET_USAGE(MI_USAGE_CACHE);
857 #if MI_TRACE_PFNS
858 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
859 {
860 PWCHAR pos;
861 ULONG len = 0;
862 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
863 if (pos)
864 {
865 len = wcslen(pos) * sizeof(WCHAR);
866 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
867 }
868 else
869 {
870 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
871 }
872 }
873 #endif
874
875 Status = CcRosMapVacb(current);
876 if (!NT_SUCCESS(Status))
877 {
878 RemoveEntryList(&current->CacheMapVacbListEntry);
879 RemoveEntryList(&current->VacbLruListEntry);
880 CcRosReleaseVacbLock(current);
881 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
882 }
883
884 return Status;
885 }
886
887 NTSTATUS
888 NTAPI
889 CcRosGetVacb (
890 PROS_SHARED_CACHE_MAP SharedCacheMap,
891 LONGLONG FileOffset,
892 PLONGLONG BaseOffset,
893 PVOID* BaseAddress,
894 PBOOLEAN UptoDate,
895 PROS_VACB *Vacb)
896 {
897 PROS_VACB current;
898 NTSTATUS Status;
899
900 ASSERT(SharedCacheMap);
901
902 DPRINT("CcRosGetVacb()\n");
903
904 /*
905 * Look for a VACB already mapping the same data.
906 */
907 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
908 if (current == NULL)
909 {
910 /*
911 * Otherwise create a new VACB.
912 */
913 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
914 if (!NT_SUCCESS(Status))
915 {
916 return Status;
917 }
918 }
919
920 KeAcquireGuardedMutex(&ViewLock);
921
922 /* Move to the tail of the LRU list */
923 RemoveEntryList(&current->VacbLruListEntry);
924 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
925
926 KeReleaseGuardedMutex(&ViewLock);
927
928 /*
929 * Return information about the VACB to the caller.
930 */
931 *UptoDate = current->Valid;
932 *BaseAddress = current->BaseAddress;
933 DPRINT("*BaseAddress %p\n", *BaseAddress);
934 *Vacb = current;
935 *BaseOffset = current->FileOffset.QuadPart;
936 return STATUS_SUCCESS;
937 }
938
939 NTSTATUS
940 NTAPI
941 CcRosRequestVacb (
942 PROS_SHARED_CACHE_MAP SharedCacheMap,
943 LONGLONG FileOffset,
944 PVOID* BaseAddress,
945 PBOOLEAN UptoDate,
946 PROS_VACB *Vacb)
947 /*
948 * FUNCTION: Request a page mapping for a shared cache map
949 */
950 {
951 LONGLONG BaseOffset;
952
953 ASSERT(SharedCacheMap);
954
955 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
956 {
 957          DPRINT1("Bad fileoffset %I64x, should be a multiple of %x\n",
958 FileOffset, VACB_MAPPING_GRANULARITY);
959 KeBugCheck(CACHE_MANAGER);
960 }
961
962 return CcRosGetVacb(SharedCacheMap,
963 FileOffset,
964 &BaseOffset,
965 BaseAddress,
966 UptoDate,
967 Vacb);
968 }
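/* Usage sketch: callers must pass a view-aligned file offset or the routine
 * bugchecks, so a typical caller rounds down first (variable names are
 * illustrative):
 *
 *     LONGLONG BaseOffset = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
 *     PVOID BaseAddress;
 *     BOOLEAN UptoDate;
 *     PROS_VACB Vacb;
 *
 *     Status = CcRosRequestVacb(SharedCacheMap, BaseOffset,
 *                               &BaseAddress, &UptoDate, &Vacb);
 */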
969
970 static
971 VOID
972 CcFreeCachePage (
973 PVOID Context,
974 MEMORY_AREA* MemoryArea,
975 PVOID Address,
976 PFN_NUMBER Page,
977 SWAPENTRY SwapEntry,
978 BOOLEAN Dirty)
979 {
980 ASSERT(SwapEntry == 0);
981 if (Page != 0)
982 {
983 ASSERT(MmGetReferenceCountPage(Page) == 1);
984 MmReleasePageMemoryConsumer(MC_CACHE, Page);
985 }
986 }
987
988 NTSTATUS
989 CcRosInternalFreeVacb (
990 PROS_VACB Vacb)
991 /*
992 * FUNCTION: Releases a VACB associated with a shared cache map
993 */
994 {
995 DPRINT("Freeing VACB 0x%p\n", Vacb);
996 #if DBG
997 if (Vacb->SharedCacheMap->Trace)
998 {
999 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
1000 }
1001 #endif
1002
1003 MmLockAddressSpace(MmGetKernelAddressSpace());
1004 MmFreeMemoryArea(MmGetKernelAddressSpace(),
1005 Vacb->MemoryArea,
1006 CcFreeCachePage,
1007 NULL);
1008 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1009
1010 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
1011 return STATUS_SUCCESS;
1012 }
1013
1014 /*
1015 * @implemented
1016 */
1017 VOID
1018 NTAPI
1019 CcFlushCache (
1020 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
1021 IN PLARGE_INTEGER FileOffset OPTIONAL,
1022 IN ULONG Length,
1023 OUT PIO_STATUS_BLOCK IoStatus)
1024 {
1025 PROS_SHARED_CACHE_MAP SharedCacheMap;
1026 LARGE_INTEGER Offset;
1027 LONGLONG RemainingLength;
1028 PROS_VACB current;
1029 NTSTATUS Status;
1030 KIRQL oldIrql;
1031
1032 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1033 SectionObjectPointers, FileOffset, Length);
1034
1035 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1036 SectionObjectPointers, FileOffset, Length, IoStatus);
1037
1038 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1039 {
1040 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1041 ASSERT(SharedCacheMap);
1042 if (FileOffset)
1043 {
1044 Offset = *FileOffset;
1045 RemainingLength = Length;
1046 }
1047 else
1048 {
1049 Offset.QuadPart = 0;
1050 RemainingLength = SharedCacheMap->FileSize.QuadPart;
1051 }
1052
1053 if (IoStatus)
1054 {
1055 IoStatus->Status = STATUS_SUCCESS;
1056 IoStatus->Information = 0;
1057 }
1058
1059 while (RemainingLength > 0)
1060 {
1061 current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
1062 if (current != NULL)
1063 {
1064 if (current->Dirty)
1065 {
1066 Status = CcRosFlushVacb(current);
1067 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1068 {
1069 IoStatus->Status = Status;
1070 }
1071 }
1072
1073 CcRosReleaseVacbLock(current);
1074
1075 KeAcquireGuardedMutex(&ViewLock);
1076 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1077 CcRosVacbDecRefCount(current);
1078 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1079 KeReleaseGuardedMutex(&ViewLock);
1080 }
1081
1082 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
1083 RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
1084 }
1085 }
1086 else
1087 {
1088 if (IoStatus)
1089 {
1090 IoStatus->Status = STATUS_INVALID_PARAMETER;
1091 }
1092 }
1093 }
1094
1095 NTSTATUS
1096 NTAPI
1097 CcRosDeleteFileCache (
1098 PFILE_OBJECT FileObject,
1099 PROS_SHARED_CACHE_MAP SharedCacheMap)
1100 /*
1101 * FUNCTION: Releases the shared cache map associated with a file object
1102 */
1103 {
1104 PLIST_ENTRY current_entry;
1105 PROS_VACB current;
1106 LIST_ENTRY FreeList;
1107 KIRQL oldIrql;
1108
1109 ASSERT(SharedCacheMap);
1110
1111 SharedCacheMap->OpenCount++;
1112 KeReleaseGuardedMutex(&ViewLock);
1113
1114 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1115
1116 KeAcquireGuardedMutex(&ViewLock);
1117 SharedCacheMap->OpenCount--;
1118 if (SharedCacheMap->OpenCount == 0)
1119 {
1120 KIRQL OldIrql;
1121
1122 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1123
1124 /*
1125 * Release all VACBs
1126 */
1127 InitializeListHead(&FreeList);
1128 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1129 while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
1130 {
1131 current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
1132 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1133
1134 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1135 CcRosAcquireVacbLock(current, NULL);
1136 RemoveEntryList(&current->VacbLruListEntry);
1137 if (current->Dirty)
1138 {
1139 RemoveEntryList(&current->DirtyVacbListEntry);
1140 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
1141 current->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
1142 DPRINT1("Freeing dirty VACB\n");
1143 }
1144 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
1145 CcRosReleaseVacbLock(current);
1146
1147 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1148 }
1149 #if DBG
1150 SharedCacheMap->Trace = FALSE;
1151 #endif
1152 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1153
1154 KeReleaseGuardedMutex(&ViewLock);
1155 ObDereferenceObject(SharedCacheMap->FileObject);
1156
1157 while (!IsListEmpty(&FreeList))
1158 {
1159 current_entry = RemoveTailList(&FreeList);
1160 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1161 CcRosInternalFreeVacb(current);
1162 }
1163
1164 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1165 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
1166 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1167
1168 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1169 KeAcquireGuardedMutex(&ViewLock);
1170 }
1171 return STATUS_SUCCESS;
1172 }
1173
1174 VOID
1175 NTAPI
1176 CcRosReferenceCache (
1177 PFILE_OBJECT FileObject)
1178 {
1179 PROS_SHARED_CACHE_MAP SharedCacheMap;
1180 KeAcquireGuardedMutex(&ViewLock);
1181 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1182 ASSERT(SharedCacheMap);
1183 ASSERT(SharedCacheMap->OpenCount != 0);
1184 SharedCacheMap->OpenCount++;
1185 KeReleaseGuardedMutex(&ViewLock);
1186 }
1187
1188 VOID
1189 NTAPI
1190 CcRosRemoveIfClosed (
1191 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1192 {
1193 PROS_SHARED_CACHE_MAP SharedCacheMap;
1194 DPRINT("CcRosRemoveIfClosed()\n");
1195 KeAcquireGuardedMutex(&ViewLock);
1196 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1197 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1198 {
1199 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1200 }
1201 KeReleaseGuardedMutex(&ViewLock);
1202 }
1203
1204
1205 VOID
1206 NTAPI
1207 CcRosDereferenceCache (
1208 PFILE_OBJECT FileObject)
1209 {
1210 PROS_SHARED_CACHE_MAP SharedCacheMap;
1211 KeAcquireGuardedMutex(&ViewLock);
1212 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1213 ASSERT(SharedCacheMap);
1214 if (SharedCacheMap->OpenCount > 0)
1215 {
1216 SharedCacheMap->OpenCount--;
1217 if (SharedCacheMap->OpenCount == 0)
1218 {
1219 MmFreeSectionSegments(SharedCacheMap->FileObject);
1220 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1221 }
1222 }
1223 KeReleaseGuardedMutex(&ViewLock);
1224 }
1225
1226 NTSTATUS
1227 NTAPI
1228 CcRosReleaseFileCache (
1229 PFILE_OBJECT FileObject)
1230 /*
1231 * FUNCTION: Called by the file system when a handle to a file object
1232 * has been closed.
1233 */
1234 {
1235 PROS_SHARED_CACHE_MAP SharedCacheMap;
1236
1237 KeAcquireGuardedMutex(&ViewLock);
1238
1239 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1240 {
1241 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1242 if (FileObject->PrivateCacheMap != NULL)
1243 {
1244 FileObject->PrivateCacheMap = NULL;
1245 if (SharedCacheMap->OpenCount > 0)
1246 {
1247 SharedCacheMap->OpenCount--;
1248 if (SharedCacheMap->OpenCount == 0)
1249 {
1250 MmFreeSectionSegments(SharedCacheMap->FileObject);
1251 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1252 }
1253 }
1254 }
1255 }
1256 KeReleaseGuardedMutex(&ViewLock);
1257 return STATUS_SUCCESS;
1258 }
1259
1260 NTSTATUS
1261 NTAPI
1262 CcTryToInitializeFileCache (
1263 PFILE_OBJECT FileObject)
1264 {
1265 PROS_SHARED_CACHE_MAP SharedCacheMap;
1266 NTSTATUS Status;
1267
1268 KeAcquireGuardedMutex(&ViewLock);
1269
1270 ASSERT(FileObject->SectionObjectPointer);
1271 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1272 if (SharedCacheMap == NULL)
1273 {
1274 Status = STATUS_UNSUCCESSFUL;
1275 }
1276 else
1277 {
1278 if (FileObject->PrivateCacheMap == NULL)
1279 {
1280 FileObject->PrivateCacheMap = SharedCacheMap;
1281 SharedCacheMap->OpenCount++;
1282 }
1283 Status = STATUS_SUCCESS;
1284 }
1285 KeReleaseGuardedMutex(&ViewLock);
1286
1287 return Status;
1288 }
1289
1290
1291 NTSTATUS
1292 NTAPI
1293 CcRosInitializeFileCache (
1294 PFILE_OBJECT FileObject,
1295 PCC_FILE_SIZES FileSizes,
1296 BOOLEAN PinAccess,
1297 PCACHE_MANAGER_CALLBACKS CallBacks,
1298 PVOID LazyWriterContext)
1299 /*
1300 * FUNCTION: Initializes a shared cache map for a file object
1301 */
1302 {
1303 PROS_SHARED_CACHE_MAP SharedCacheMap;
1304
1305 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1306 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1307 FileObject, SharedCacheMap);
1308
1309 KeAcquireGuardedMutex(&ViewLock);
1310 if (SharedCacheMap == NULL)
1311 {
1312 KIRQL OldIrql;
1313
1314 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1315 if (SharedCacheMap == NULL)
1316 {
1317 KeReleaseGuardedMutex(&ViewLock);
1318 return STATUS_INSUFFICIENT_RESOURCES;
1319 }
1320 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1321 ObReferenceObjectByPointer(FileObject,
1322 FILE_ALL_ACCESS,
1323 NULL,
1324 KernelMode);
1325 SharedCacheMap->FileObject = FileObject;
1326 SharedCacheMap->Callbacks = CallBacks;
1327 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1328 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1329 SharedCacheMap->FileSize = FileSizes->FileSize;
1330 SharedCacheMap->PinAccess = PinAccess;
1331 SharedCacheMap->DirtyPageThreshold = 0;
1332 SharedCacheMap->DirtyPages = 0;
1333 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1334 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1335 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1336
1337 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1338 InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
1339 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1340 }
1341 if (FileObject->PrivateCacheMap == NULL)
1342 {
1343 FileObject->PrivateCacheMap = SharedCacheMap;
1344 SharedCacheMap->OpenCount++;
1345 }
1346 KeReleaseGuardedMutex(&ViewLock);
1347
1348 return STATUS_SUCCESS;
1349 }
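/* Caller-side sketch: a filesystem reaches this routine through the public
 * CcInitializeCacheMap API (implemented elsewhere in Cc). The Fcb fields and
 * the My* callback routines below are hypothetical:
 *
 *     CC_FILE_SIZES FileSizes;
 *     CACHE_MANAGER_CALLBACKS Callbacks;
 *
 *     FileSizes.AllocationSize = Fcb->AllocationSize;
 *     FileSizes.FileSize = Fcb->FileSize;
 *     FileSizes.ValidDataLength = Fcb->ValidDataLength;
 *
 *     Callbacks.AcquireForLazyWrite = MyAcquireForLazyWrite;
 *     Callbacks.ReleaseFromLazyWrite = MyReleaseFromLazyWrite;
 *     Callbacks.AcquireForReadAhead = MyAcquireForReadAhead;
 *     Callbacks.ReleaseFromReadAhead = MyReleaseFromReadAhead;
 *
 *     CcInitializeCacheMap(FileObject, &FileSizes, FALSE, &Callbacks, Fcb);
 */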
1350
1351 /*
1352 * @implemented
1353 */
1354 PFILE_OBJECT
1355 NTAPI
1356 CcGetFileObjectFromSectionPtrs (
1357 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1358 {
1359 PROS_SHARED_CACHE_MAP SharedCacheMap;
1360
1361 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1362
1363 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1364 {
1365 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1366 ASSERT(SharedCacheMap);
1367 return SharedCacheMap->FileObject;
1368 }
1369 return NULL;
1370 }
1371
1372 VOID
1373 NTAPI
1374 CcShutdownLazyWriter (
1375 VOID)
1376 {
1377 /* Simply set the event, lazy writer will stop when it's done */
1378 KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
1379 }
1380
1381 BOOLEAN
1382 INIT_FUNCTION
1383 NTAPI
1384 CcInitView (
1385 VOID)
1386 {
1387 HANDLE LazyWriter;
1388 NTSTATUS Status;
1389 KPRIORITY Priority;
1390 OBJECT_ATTRIBUTES ObjectAttributes;
1391
1392 DPRINT("CcInitView()\n");
1393
1394 InitializeListHead(&DirtyVacbListHead);
1395 InitializeListHead(&VacbLruListHead);
1396 InitializeListHead(&CcDeferredWrites);
1397 InitializeListHead(&CcCleanSharedCacheMapList);
1398 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1399 KeInitializeSpinLock(&iSharedCacheMapLock);
1400 KeInitializeGuardedMutex(&ViewLock);
1401 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1402 NULL,
1403 NULL,
1404 0,
1405 sizeof(INTERNAL_BCB),
1406 TAG_BCB,
1407 20);
1408 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1409 NULL,
1410 NULL,
1411 0,
1412 sizeof(ROS_SHARED_CACHE_MAP),
1413 TAG_SHARED_CACHE_MAP,
1414 20);
1415 ExInitializeNPagedLookasideList(&VacbLookasideList,
1416 NULL,
1417 NULL,
1418 0,
1419 sizeof(ROS_VACB),
1420 TAG_VACB,
1421 20);
1422
1423 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1424
1425 /* Initialize lazy writer events */
1426 KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
1427 KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);
1428
1429 /* Define lazy writer threshold, depending on system type */
1430 switch (MmQuerySystemSize())
1431 {
1432 case MmSmallSystem:
1433 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
1434 break;
1435
1436 case MmMediumSystem:
1437 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
1438 break;
1439
1440 case MmLargeSystem:
1441 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
1442 break;
1443 }
1444
1445 /* Start the lazy writer thread */
1446 InitializeObjectAttributes(&ObjectAttributes,
1447 NULL,
1448 OBJ_KERNEL_HANDLE,
1449 NULL,
1450 NULL);
1451 Status = PsCreateSystemThread(&LazyWriter,
1452 THREAD_ALL_ACCESS,
1453 &ObjectAttributes,
1454 NULL,
1455 NULL,
1456 CciLazyWriter,
1457 NULL);
1458 if (!NT_SUCCESS(Status))
1459 {
1460 return FALSE;
1461 }
1462
1463 Priority = 27;
1464 Status = NtSetInformationThread(LazyWriter,
1465 ThreadPriority,
1466 &Priority,
1467 sizeof(Priority));
1468 ASSERT(NT_SUCCESS(Status));
1469
1470 /* Handle is not needed */
1471 ObCloseHandle(LazyWriter, KernelMode);
1472
1473 CcInitCacheZeroPage();
1474
1475 return TRUE;
1476 }
1477
1478 #if DBG && defined(KDBG)
1479 BOOLEAN
1480 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1481 {
1482 PLIST_ENTRY ListEntry;
1483 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1484
1485 KdbpPrint(" Usage Summary (in kb)\n");
1486 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1487 /* No need to lock the spin lock here, we're in DBG */
1488 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1489 ListEntry != &CcCleanSharedCacheMapList;
1490 ListEntry = ListEntry->Flink)
1491 {
1492 PLIST_ENTRY Vacbs;
1493 ULONG Valid = 0, Dirty = 0;
1494 PROS_SHARED_CACHE_MAP SharedCacheMap;
1495 PUNICODE_STRING FileName;
1496
1497 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1498
1499 /* Dirty size */
1500 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1501
1502 /* First, count for all the associated VACB */
1503 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1504 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1505 Vacbs = Vacbs->Flink)
1506 {
1507 PROS_VACB Vacb;
1508
1509 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1510 if (Vacb->Valid)
1511 {
1512 Valid += VACB_MAPPING_GRANULARITY / 1024;
1513 }
1514 }
1515
1516 /* Setup name */
1517 if (SharedCacheMap->FileObject != NULL &&
1518 SharedCacheMap->FileObject->FileName.Length != 0)
1519 {
1520 FileName = &SharedCacheMap->FileObject->FileName;
1521 }
1522 else
1523 {
1524 FileName = &NoName;
1525 }
1526
1527 /* And print */
1528 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
1529 }
1530
1531 return TRUE;
1532 }
1533 #endif
1534
1535 /* EOF */