1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
19  * (1) If caching for the FCB hasn't been initiated then do so by calling
20  * CcInitializeFileCache.
21 *
22  * (2) For each 4k region that is being read or written, obtain a cache page
23  * by calling CcRequestCachePage.
24 *
25  * (3) If the page is being read, or only partially written, and it is not
26  * up to date, then read its data from the underlying medium. If the read
27  * fails, call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
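
/* Illustrative sketch (comment only, not compiled): the body of the per-region
 * loop from steps (2)-(5) above, expressed with the internal helpers actually
 * defined later in this file (CcRosRequestVacb / CcRosReleaseVacb). The
 * ReadFromDisk helper, the FileObject/Buffer/Length/Dirty variables and the
 * error handling are placeholders for illustration, not real APIs.
 *
 *   LONGLONG Base;
 *   PVOID BaseAddress;
 *   BOOLEAN UptoDate;
 *   PROS_VACB Vacb;
 *
 *   // Views are VACB_MAPPING_GRANULARITY-aligned, so round the offset down
 *   Base = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
 *   Status = CcRosRequestVacb(SharedCacheMap, Base, &BaseAddress, &UptoDate, &Vacb);
 *   if (!NT_SUCCESS(Status)) return Status;
 *
 *   if (!UptoDate)
 *   {
 *       // Not valid yet: fetch the backing data first (placeholder call)
 *       Status = ReadFromDisk(FileObject, Base, BaseAddress, VACB_MAPPING_GRANULARITY);
 *       if (!NT_SUCCESS(Status))
 *       {
 *           // Release with Valid == FALSE and fail the request
 *           CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
 *           return Status;
 *       }
 *   }
 *
 *   // Copy into or out of the view as needed, then release it
 *   RtlCopyMemory(Buffer, (PUCHAR)BaseAddress + (FileOffset - Base), Length);
 *   CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, Dirty, FALSE);
 */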
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
54 /* Counters:
55  * - Number of pages flushed by the lazy writer
56  * - Number of times the lazy writer ran
57 */
58 ULONG CcLazyWritePages = 0;
59 ULONG CcLazyWriteIos = 0;
60
61 /* Internal vars (MS):
62  * - Threshold above which the lazy writer will start taking action
63  * - Number of dirty pages
64  * - List of deferred writes
65  * - Spinlock protecting the deferred write list
66 */
67 ULONG CcDirtyPageThreshold = 0;
68 ULONG CcTotalDirtyPages = 0;
69 LIST_ENTRY CcDeferredWrites;
70 KSPIN_LOCK CcDeferredWriteSpinLock;
71
72 /* Internal vars (ROS):
73  * - Event used to notify the lazy writer to shut down
74  * - Event used to inform waiters that the lazy writer is done with this iteration
75 */
76 KEVENT iLazyWriterShutdown;
77 KEVENT iLazyWriterNotify;
78
79 #if DBG
80 static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
81 {
82 ++vacb->ReferenceCount;
83 if (vacb->SharedCacheMap->Trace)
84 {
85 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
86 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
87 }
88 }
89 static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
90 {
91 --vacb->ReferenceCount;
92 if (vacb->SharedCacheMap->Trace)
93 {
94 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
95 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
96 }
97 }
98 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
99 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
100 #else
101 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
102 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
103 #endif
104
105 NTSTATUS
106 CcRosInternalFreeVacb(PROS_VACB Vacb);
107
108
109 /* FUNCTIONS *****************************************************************/
110
111 VOID
112 NTAPI
113 CcRosTraceCacheMap (
114 PROS_SHARED_CACHE_MAP SharedCacheMap,
115 BOOLEAN Trace )
116 {
117 #if DBG
118 KIRQL oldirql;
119 PLIST_ENTRY current_entry;
120 PROS_VACB current;
121
122 if (!SharedCacheMap)
123 return;
124
125 SharedCacheMap->Trace = Trace;
126
127 if (Trace)
128 {
129 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
130
131 KeAcquireGuardedMutex(&ViewLock);
132 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
133
134 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
135 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
136 {
137 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
138 current_entry = current_entry->Flink;
139
140 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
141 current, current->ReferenceCount, current->Dirty, current->PageOut );
142 }
143 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
144 KeReleaseGuardedMutex(&ViewLock);
145 }
146 else
147 {
148 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
149 }
150
151 #else
152 UNREFERENCED_PARAMETER(SharedCacheMap);
153 UNREFERENCED_PARAMETER(Trace);
154 #endif
155 }
156
157 NTSTATUS
158 NTAPI
159 CcRosFlushVacb (
160 PROS_VACB Vacb)
161 {
162 NTSTATUS Status;
163 KIRQL oldIrql;
164
165 Status = CcWriteVirtualAddress(Vacb);
166 if (NT_SUCCESS(Status))
167 {
168 KeAcquireGuardedMutex(&ViewLock);
169 KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
170
171 Vacb->Dirty = FALSE;
172 RemoveEntryList(&Vacb->DirtyVacbListEntry);
173 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
174 CcRosVacbDecRefCount(Vacb);
175
176 KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
177 KeReleaseGuardedMutex(&ViewLock);
178 }
179
180 return Status;
181 }
182
183 NTSTATUS
184 NTAPI
185 CcRosFlushDirtyPages (
186 ULONG Target,
187 PULONG Count,
188 BOOLEAN Wait,
189 BOOLEAN CalledFromLazy)
190 {
191 PLIST_ENTRY current_entry;
192 PROS_VACB current;
193 BOOLEAN Locked;
194 NTSTATUS Status;
195 LARGE_INTEGER ZeroTimeout;
196
197 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
198
199 (*Count) = 0;
200 ZeroTimeout.QuadPart = 0;
201
202 KeEnterCriticalRegion();
203 KeAcquireGuardedMutex(&ViewLock);
204
205 current_entry = DirtyVacbListHead.Flink;
206 if (current_entry == &DirtyVacbListHead)
207 {
208 DPRINT("No Dirty pages\n");
209 }
210
211 while ((current_entry != &DirtyVacbListHead) && (Target > 0))
212 {
213 current = CONTAINING_RECORD(current_entry,
214 ROS_VACB,
215 DirtyVacbListEntry);
216 current_entry = current_entry->Flink;
217
218 CcRosVacbIncRefCount(current);
219
220 /* When performing lazy write, don't handle temporary files */
221 if (CalledFromLazy &&
222 BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
223 {
224 CcRosVacbDecRefCount(current);
225 continue;
226 }
227
228 Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
229 current->SharedCacheMap->LazyWriteContext, Wait);
230 if (!Locked)
231 {
232 CcRosVacbDecRefCount(current);
233 continue;
234 }
235
236 Status = CcRosAcquireVacbLock(current,
237 Wait ? NULL : &ZeroTimeout);
238 if (Status != STATUS_SUCCESS)
239 {
240 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
241 current->SharedCacheMap->LazyWriteContext);
242 CcRosVacbDecRefCount(current);
243 continue;
244 }
245
246 ASSERT(current->Dirty);
247
248 /* One reference was added above; a dirty VACB holds another, so more than two means it is also in use elsewhere */
249 if (current->ReferenceCount > 2)
250 {
251 CcRosReleaseVacbLock(current);
252 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
253 current->SharedCacheMap->LazyWriteContext);
254 CcRosVacbDecRefCount(current);
255 continue;
256 }
257
258 KeReleaseGuardedMutex(&ViewLock);
259
260 Status = CcRosFlushVacb(current);
261
262 CcRosReleaseVacbLock(current);
263 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
264 current->SharedCacheMap->LazyWriteContext);
265
266 KeAcquireGuardedMutex(&ViewLock);
267 CcRosVacbDecRefCount(current);
268
269 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
270 (Status != STATUS_MEDIA_WRITE_PROTECTED))
271 {
272 DPRINT1("CC: Failed to flush VACB.\n");
273 }
274 else
275 {
276 ULONG PagesFreed;
277
278 /* How many pages did we free? */
279 PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
280 (*Count) += PagesFreed;
281
282 /* Make sure Target doesn't wrap around below zero! */
283 if (Target < PagesFreed)
284 {
285 /* If we would have, jump to zero directly */
286 Target = 0;
287 }
288 else
289 {
290 Target -= PagesFreed;
291 }
292 }
293
294 current_entry = DirtyVacbListHead.Flink;
295 }
296
297 KeReleaseGuardedMutex(&ViewLock);
298 KeLeaveCriticalRegion();
299
300 DPRINT("CcRosFlushDirtyPages() finished\n");
301 return STATUS_SUCCESS;
302 }
303
304 /* FIXME: Someday this could somewhat implement write-behind/read-ahead */
305 VOID
306 NTAPI
307 CciLazyWriter(PVOID Unused)
308 {
309 LARGE_INTEGER OneSecond;
310
311 OneSecond.QuadPart = (LONGLONG)-1*1000*1000*10;
312
313 while (TRUE)
314 {
315 NTSTATUS Status;
316 PLIST_ENTRY ListEntry;
317 ULONG Target, Count = 0;
318
319 /* Wake up once per second, or earlier if we are asked to stop */
320 Status = KeWaitForSingleObject(&iLazyWriterShutdown,
321 Executive,
322 KernelMode,
323 FALSE,
324 &OneSecond);
325
326 /* If the wait succeeded, we have to stop running! */
327 if (Status == STATUS_SUCCESS)
328 {
329 break;
330 }
331
332 /* We're not sleeping anymore */
333 KeClearEvent(&iLazyWriterNotify);
334
335 /* Our target is one-eighth of the dirty pages */
336 Target = CcTotalDirtyPages / 8;
337 if (Target != 0)
338 {
339 /* Flush! */
340 DPRINT("Lazy writer starting (%d)\n", Target);
341 CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);
342
343 /* And update stats */
344 CcLazyWritePages += Count;
345 ++CcLazyWriteIos;
346 DPRINT("Lazy writer done (%d)\n", Count);
347 }
348
349 /* Inform people waiting on us that we're done */
350 KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
351
352 /* Likely not optimal, but let's handle one deferred write now! */
353 ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
354 if (ListEntry != NULL)
355 {
356 PROS_DEFERRED_WRITE_CONTEXT Context;
357
358 /* Extract the context */
359 Context = CONTAINING_RECORD(ListEntry, ROS_DEFERRED_WRITE_CONTEXT, CcDeferredWritesEntry);
360
361 /* Can we write now? */
362 if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, Context->Retrying))
363 {
364 /* Yes! Do it, and destroy the associated context */
365 Context->PostRoutine(Context->Context1, Context->Context2);
366 ExFreePoolWithTag(Context, 'CcDw');
367 }
368 else
369 {
370 /* Otherwise, requeue it at the tail so that it doesn't block others.
371 * This clearly needs improvement, but given the poor algorithm used
372 * now, it's better than nothing!
373 */
374 ExInterlockedInsertTailList(&CcDeferredWrites,
375 &Context->CcDeferredWritesEntry,
376 &CcDeferredWriteSpinLock);
377 }
378 }
379 }
380 }
381
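/* Illustrative sketch (comment only, not compiled): how a caller might wait for
 * the lazy writer to finish its current pass, using the notification event
 * defined above. This is an assumption about intended usage, not an existing
 * exported routine; iLazyWriterNotify is cleared while a pass runs and set
 * again once its flush is done.
 *
 *   KeWaitForSingleObject(&iLazyWriterNotify,
 *                         Executive,
 *                         KernelMode,
 *                         FALSE,
 *                         NULL);
 */
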
382 NTSTATUS
383 CcRosTrimCache (
384 ULONG Target,
385 ULONG Priority,
386 PULONG NrFreed)
387 /*
388 * FUNCTION: Try to free some memory from the file cache.
389 * ARGUMENTS:
390 * Target - The number of pages to be freed.
391 * Priority - The priority of the free operation (currently unused).
392 * NrFreed - Points to a variable where the number of pages
393 * actually freed is returned.
394 */
395 {
396 PLIST_ENTRY current_entry;
397 PROS_VACB current;
398 ULONG PagesFreed;
399 KIRQL oldIrql;
400 LIST_ENTRY FreeList;
401 PFN_NUMBER Page;
402 ULONG i;
403 BOOLEAN FlushedPages = FALSE;
404
405 DPRINT("CcRosTrimCache(Target %lu)\n", Target);
406
407 InitializeListHead(&FreeList);
408
409 *NrFreed = 0;
410
411 retry:
412 KeAcquireGuardedMutex(&ViewLock);
413
414 current_entry = VacbLruListHead.Flink;
415 while (current_entry != &VacbLruListHead)
416 {
417 current = CONTAINING_RECORD(current_entry,
418 ROS_VACB,
419 VacbLruListEntry);
420 current_entry = current_entry->Flink;
421
422 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
423
424 /* Reference the VACB */
425 CcRosVacbIncRefCount(current);
426
427 /* Check if it's mapped and not dirty */
428 if (current->MappedCount > 0 && !current->Dirty)
429 {
430 /* We have to break these locks because Cc sucks */
431 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
432 KeReleaseGuardedMutex(&ViewLock);
433
434 /* Page out the VACB */
435 for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
436 {
437 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
438
439 MmPageOutPhysicalAddress(Page);
440 }
441
442 /* Reacquire the locks */
443 KeAcquireGuardedMutex(&ViewLock);
444 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
445 }
446
447 /* Dereference the VACB */
448 CcRosVacbDecRefCount(current);
449
450 /* Check if we can free this entry now */
451 if (current->ReferenceCount == 0)
452 {
453 ASSERT(!current->Dirty);
454 ASSERT(!current->MappedCount);
455
456 RemoveEntryList(&current->CacheMapVacbListEntry);
457 RemoveEntryList(&current->VacbLruListEntry);
458 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
459
460 /* Calculate how many pages we freed for Mm */
461 PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
462 Target -= PagesFreed;
463 (*NrFreed) += PagesFreed;
464 }
465
466 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
467 }
468
469 KeReleaseGuardedMutex(&ViewLock);
470
471 /* Try flushing pages if we haven't met our target */
472 if ((Target > 0) && !FlushedPages)
473 {
474 /* Flush dirty pages to disk */
475 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
476 FlushedPages = TRUE;
477
478 /* We can only swap as many pages as we flushed */
479 if (PagesFreed < Target) Target = PagesFreed;
480
481 /* Check if we flushed anything */
482 if (PagesFreed != 0)
483 {
484 /* Try again after flushing dirty pages */
485 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
486 goto retry;
487 }
488 }
489
490 while (!IsListEmpty(&FreeList))
491 {
492 current_entry = RemoveHeadList(&FreeList);
493 current = CONTAINING_RECORD(current_entry,
494 ROS_VACB,
495 CacheMapVacbListEntry);
496 CcRosInternalFreeVacb(current);
497 }
498
499 DPRINT("Evicted %lu cache pages\n", (*NrFreed));
500
501 return STATUS_SUCCESS;
502 }
503
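/* Illustrative sketch (comment only, not compiled): CcRosTrimCache is
 * registered as the MC_CACHE consumer's trim routine in CcInitView (see
 * MmInitializeMemoryConsumer below), so under memory pressure Mm ends up
 * invoking it roughly like this; the Target value here is arbitrary.
 *
 *   ULONG Freed;
 *
 *   // Ask the cache to give back up to 128 pages; Freed reports how many
 *   // pages were actually evicted.
 *   CcRosTrimCache(128, 0, &Freed);
 */
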
504 NTSTATUS
505 NTAPI
506 CcRosReleaseVacb (
507 PROS_SHARED_CACHE_MAP SharedCacheMap,
508 PROS_VACB Vacb,
509 BOOLEAN Valid,
510 BOOLEAN Dirty,
511 BOOLEAN Mapped)
512 {
513 BOOLEAN WasDirty;
514 KIRQL oldIrql;
515
516 ASSERT(SharedCacheMap);
517
518 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
519 SharedCacheMap, Vacb, Valid);
520
521 KeAcquireGuardedMutex(&ViewLock);
522 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
523
524 Vacb->Valid = Valid;
525
526 WasDirty = Vacb->Dirty;
527 Vacb->Dirty = Vacb->Dirty || Dirty;
528
529 if (!WasDirty && Vacb->Dirty)
530 {
531 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
532 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
533 }
534
535 if (Mapped)
536 {
537 Vacb->MappedCount++;
538 }
539 CcRosVacbDecRefCount(Vacb);
540 if (Mapped && (Vacb->MappedCount == 1))
541 {
542 CcRosVacbIncRefCount(Vacb);
543 }
544 if (!WasDirty && Vacb->Dirty)
545 {
546 CcRosVacbIncRefCount(Vacb);
547 }
548
549 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
550 KeReleaseGuardedMutex(&ViewLock);
551 CcRosReleaseVacbLock(Vacb);
552
553 return STATUS_SUCCESS;
554 }
555
556 /* Returns with VACB Lock Held! */
557 PROS_VACB
558 NTAPI
559 CcRosLookupVacb (
560 PROS_SHARED_CACHE_MAP SharedCacheMap,
561 LONGLONG FileOffset)
562 {
563 PLIST_ENTRY current_entry;
564 PROS_VACB current;
565 KIRQL oldIrql;
566
567 ASSERT(SharedCacheMap);
568
569 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
570 SharedCacheMap, FileOffset);
571
572 KeAcquireGuardedMutex(&ViewLock);
573 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
574
575 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
576 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
577 {
578 current = CONTAINING_RECORD(current_entry,
579 ROS_VACB,
580 CacheMapVacbListEntry);
581 if (IsPointInRange(current->FileOffset.QuadPart,
582 VACB_MAPPING_GRANULARITY,
583 FileOffset))
584 {
585 CcRosVacbIncRefCount(current);
586 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
587 KeReleaseGuardedMutex(&ViewLock);
588 CcRosAcquireVacbLock(current, NULL);
589 return current;
590 }
591 if (current->FileOffset.QuadPart > FileOffset)
592 break;
593 current_entry = current_entry->Flink;
594 }
595
596 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
597 KeReleaseGuardedMutex(&ViewLock);
598
599 return NULL;
600 }
601
602 VOID
603 NTAPI
604 CcRosMarkDirtyVacb (
605 PROS_VACB Vacb)
606 {
607 KIRQL oldIrql;
608 PROS_SHARED_CACHE_MAP SharedCacheMap;
609
610 SharedCacheMap = Vacb->SharedCacheMap;
611
612 KeAcquireGuardedMutex(&ViewLock);
613 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
614
615 if (!Vacb->Dirty)
616 {
617 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
618 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
619 }
620 else
621 {
622 CcRosVacbDecRefCount(Vacb);
623 }
624
625 /* Move to the tail of the LRU list */
626 RemoveEntryList(&Vacb->VacbLruListEntry);
627 InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
628
629 Vacb->Dirty = TRUE;
630
631 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
632 KeReleaseGuardedMutex(&ViewLock);
633 }
634
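/* Note (added for clarity, based on the reference counting visible in this
 * file): a VACB sitting on DirtyVacbListHead holds one extra reference, taken
 * when it first becomes dirty and dropped again in CcRosFlushVacb. If the VACB
 * was not dirty yet, the caller's lookup reference is effectively handed over
 * to the dirty list; if it was already dirty, that extra reference exists
 * already, so the caller's reference is dropped above instead.
 */
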
635 NTSTATUS
636 NTAPI
637 CcRosMarkDirtyFile (
638 PROS_SHARED_CACHE_MAP SharedCacheMap,
639 LONGLONG FileOffset)
640 {
641 PROS_VACB Vacb;
642
643 ASSERT(SharedCacheMap);
644
645 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
646 SharedCacheMap, FileOffset);
647
648 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
649 if (Vacb == NULL)
650 {
651 KeBugCheck(CACHE_MANAGER);
652 }
653
654 CcRosMarkDirtyVacb(Vacb);
655
656
657 CcRosReleaseVacbLock(Vacb);
658
659 return STATUS_SUCCESS;
660 }
661
662 NTSTATUS
663 NTAPI
664 CcRosUnmapVacb (
665 PROS_SHARED_CACHE_MAP SharedCacheMap,
666 LONGLONG FileOffset,
667 BOOLEAN NowDirty)
668 {
669 PROS_VACB Vacb;
670 BOOLEAN WasDirty;
671 KIRQL oldIrql;
672
673 ASSERT(SharedCacheMap);
674
675 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
676 SharedCacheMap, FileOffset, NowDirty);
677
678 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
679 if (Vacb == NULL)
680 {
681 return STATUS_UNSUCCESSFUL;
682 }
683
684 KeAcquireGuardedMutex(&ViewLock);
685 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
686
687 WasDirty = Vacb->Dirty;
688 Vacb->Dirty = Vacb->Dirty || NowDirty;
689
690 Vacb->MappedCount--;
691
692 if (!WasDirty && NowDirty)
693 {
694 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
695 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
696 }
697
698 CcRosVacbDecRefCount(Vacb);
699 if (!WasDirty && NowDirty)
700 {
701 CcRosVacbIncRefCount(Vacb);
702 }
703 if (Vacb->MappedCount == 0)
704 {
705 CcRosVacbDecRefCount(Vacb);
706 }
707
708 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
709 KeReleaseGuardedMutex(&ViewLock);
710 CcRosReleaseVacbLock(Vacb);
711
712 return STATUS_SUCCESS;
713 }
714
715 static
716 NTSTATUS
717 CcRosMapVacb(
718 PROS_VACB Vacb)
719 {
720 ULONG i;
721 NTSTATUS Status;
722 ULONG_PTR NumberOfPages;
723
724 /* Create a memory area. */
725 MmLockAddressSpace(MmGetKernelAddressSpace());
726 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
727 0, // nothing checks for VACB mareas, so set to 0
728 &Vacb->BaseAddress,
729 VACB_MAPPING_GRANULARITY,
730 PAGE_READWRITE,
731 (PMEMORY_AREA*)&Vacb->MemoryArea,
732 0,
733 PAGE_SIZE);
734 MmUnlockAddressSpace(MmGetKernelAddressSpace());
735 if (!NT_SUCCESS(Status))
736 {
737 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
738 return Status;
739 }
740
741 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
742 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
743
744 /* Create a virtual mapping for this memory area */
745 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
746 for (i = 0; i < NumberOfPages; i++)
747 {
748 PFN_NUMBER PageFrameNumber;
749
750 MI_SET_USAGE(MI_USAGE_CACHE);
751 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
752 if (PageFrameNumber == 0)
753 {
754 DPRINT1("Unable to allocate page\n");
755 KeBugCheck(MEMORY_MANAGEMENT);
756 }
757
758 Status = MmCreateVirtualMapping(NULL,
759 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
760 PAGE_READWRITE,
761 &PageFrameNumber,
762 1);
763 if (!NT_SUCCESS(Status))
764 {
765 DPRINT1("Unable to create virtual mapping\n");
766 KeBugCheck(MEMORY_MANAGEMENT);
767 }
768 }
769
770 return STATUS_SUCCESS;
771 }
772
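/* Note (added for clarity): each view is VACB_MAPPING_GRANULARITY bytes, so
 * CcRosMapVacb commits VACB_MAPPING_GRANULARITY / PAGE_SIZE physical pages per
 * view. Assuming the typical 256 KB granularity and 4 KB pages, that is 64
 * resident pages (256 KB of cache memory) for every mapped VACB.
 */
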
773 static
774 NTSTATUS
775 CcRosCreateVacb (
776 PROS_SHARED_CACHE_MAP SharedCacheMap,
777 LONGLONG FileOffset,
778 PROS_VACB *Vacb)
779 {
780 PROS_VACB current;
781 PROS_VACB previous;
782 PLIST_ENTRY current_entry;
783 NTSTATUS Status;
784 KIRQL oldIrql;
785
786 ASSERT(SharedCacheMap);
787
788 DPRINT("CcRosCreateVacb()\n");
789
790 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
791 {
792 *Vacb = NULL;
793 return STATUS_INVALID_PARAMETER;
794 }
795
796 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
797 current->BaseAddress = NULL;
798 current->Valid = FALSE;
799 current->Dirty = FALSE;
800 current->PageOut = FALSE;
801 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
802 current->SharedCacheMap = SharedCacheMap;
803 #if DBG
804 if (SharedCacheMap->Trace)
805 {
806 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
807 }
808 #endif
809 current->MappedCount = 0;
810 current->DirtyVacbListEntry.Flink = NULL;
811 current->DirtyVacbListEntry.Blink = NULL;
812 current->ReferenceCount = 1;
813 current->PinCount = 0;
814 KeInitializeMutex(&current->Mutex, 0);
815 CcRosAcquireVacbLock(current, NULL);
816 KeAcquireGuardedMutex(&ViewLock);
817
818 *Vacb = current;
819 /* There is a window between the call to CcRosLookupVacb
820 * and CcRosCreateVacb. We must check whether a VACB for
821 * this file offset already exists. If it does, we release
822 * our newly created VACB and return the existing one.
823 */
824 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
825 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
826 previous = NULL;
827 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
828 {
829 current = CONTAINING_RECORD(current_entry,
830 ROS_VACB,
831 CacheMapVacbListEntry);
832 if (IsPointInRange(current->FileOffset.QuadPart,
833 VACB_MAPPING_GRANULARITY,
834 FileOffset))
835 {
836 CcRosVacbIncRefCount(current);
837 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
838 #if DBG
839 if (SharedCacheMap->Trace)
840 {
841 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
842 SharedCacheMap,
843 (*Vacb),
844 current);
845 }
846 #endif
847 CcRosReleaseVacbLock(*Vacb);
848 KeReleaseGuardedMutex(&ViewLock);
849 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
850 *Vacb = current;
851 CcRosAcquireVacbLock(current, NULL);
852 return STATUS_SUCCESS;
853 }
854 if (current->FileOffset.QuadPart < FileOffset)
855 {
856 ASSERT(previous == NULL ||
857 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
858 previous = current;
859 }
860 if (current->FileOffset.QuadPart > FileOffset)
861 break;
862 current_entry = current_entry->Flink;
863 }
864 /* There was no existing VACB. */
865 current = *Vacb;
866 if (previous)
867 {
868 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
869 }
870 else
871 {
872 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
873 }
874 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
875 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
876 KeReleaseGuardedMutex(&ViewLock);
877
878 MI_SET_USAGE(MI_USAGE_CACHE);
879 #if MI_TRACE_PFNS
880 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
881 {
882 PWCHAR pos = NULL;
883 ULONG len = 0;
884 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
885 if (pos)
886 {
887 len = wcslen(pos) * sizeof(WCHAR);
888 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
889 }
890 else
891 {
892 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
893 }
894 }
895 #endif
896
897 Status = CcRosMapVacb(current);
898 if (!NT_SUCCESS(Status))
899 {
900 RemoveEntryList(&current->CacheMapVacbListEntry);
901 RemoveEntryList(&current->VacbLruListEntry);
902 CcRosReleaseVacbLock(current);
903 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
904 }
905
906 return Status;
907 }
908
909 NTSTATUS
910 NTAPI
911 CcRosGetVacb (
912 PROS_SHARED_CACHE_MAP SharedCacheMap,
913 LONGLONG FileOffset,
914 PLONGLONG BaseOffset,
915 PVOID* BaseAddress,
916 PBOOLEAN UptoDate,
917 PROS_VACB *Vacb)
918 {
919 PROS_VACB current;
920 NTSTATUS Status;
921
922 ASSERT(SharedCacheMap);
923
924 DPRINT("CcRosGetVacb()\n");
925
926 /*
927 * Look for a VACB already mapping the same data.
928 */
929 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
930 if (current == NULL)
931 {
932 /*
933 * Otherwise create a new VACB.
934 */
935 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
936 if (!NT_SUCCESS(Status))
937 {
938 return Status;
939 }
940 }
941
942 KeAcquireGuardedMutex(&ViewLock);
943
944 /* Move to the tail of the LRU list */
945 RemoveEntryList(&current->VacbLruListEntry);
946 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
947
948 KeReleaseGuardedMutex(&ViewLock);
949
950 /*
951 * Return information about the VACB to the caller.
952 */
953 *UptoDate = current->Valid;
954 *BaseAddress = current->BaseAddress;
955 DPRINT("*BaseAddress %p\n", *BaseAddress);
956 *Vacb = current;
957 *BaseOffset = current->FileOffset.QuadPart;
958 return STATUS_SUCCESS;
959 }
960
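/* Illustrative sketch (comment only, not compiled): how a caller typically uses
 * the values returned by CcRosGetVacb to address a byte at FileOffset. Variable
 * names are placeholders.
 *
 *   LONGLONG BaseOffset;
 *   PVOID BaseAddress;
 *   BOOLEAN UptoDate;
 *   PROS_VACB Vacb;
 *
 *   Status = CcRosGetVacb(SharedCacheMap, FileOffset,
 *                         &BaseOffset, &BaseAddress, &UptoDate, &Vacb);
 *   if (NT_SUCCESS(Status))
 *   {
 *       // BaseOffset is the view-aligned offset the VACB starts at, so the
 *       // byte at FileOffset lives at this address inside the view:
 *       PUCHAR Byte = (PUCHAR)BaseAddress + (FileOffset - BaseOffset);
 *
 *       // ... use the data, then drop the reference and lock taken above
 *       CcRosReleaseVacb(SharedCacheMap, Vacb, UptoDate, FALSE, FALSE);
 *   }
 */
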
961 NTSTATUS
962 NTAPI
963 CcRosRequestVacb (
964 PROS_SHARED_CACHE_MAP SharedCacheMap,
965 LONGLONG FileOffset,
966 PVOID* BaseAddress,
967 PBOOLEAN UptoDate,
968 PROS_VACB *Vacb)
969 /*
970 * FUNCTION: Request a page mapping for a shared cache map
971 */
972 {
973 LONGLONG BaseOffset;
974
975 ASSERT(SharedCacheMap);
976
977 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
978 {
979 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
980 FileOffset, VACB_MAPPING_GRANULARITY);
981 KeBugCheck(CACHE_MANAGER);
982 }
983
984 return CcRosGetVacb(SharedCacheMap,
985 FileOffset,
986 &BaseOffset,
987 BaseAddress,
988 UptoDate,
989 Vacb);
990 }
991
992 static
993 VOID
994 CcFreeCachePage (
995 PVOID Context,
996 MEMORY_AREA* MemoryArea,
997 PVOID Address,
998 PFN_NUMBER Page,
999 SWAPENTRY SwapEntry,
1000 BOOLEAN Dirty)
1001 {
1002 ASSERT(SwapEntry == 0);
1003 if (Page != 0)
1004 {
1005 ASSERT(MmGetReferenceCountPage(Page) == 1);
1006 MmReleasePageMemoryConsumer(MC_CACHE, Page);
1007 }
1008 }
1009
1010 NTSTATUS
1011 CcRosInternalFreeVacb (
1012 PROS_VACB Vacb)
1013 /*
1014 * FUNCTION: Releases a VACB associated with a shared cache map
1015 */
1016 {
1017 DPRINT("Freeing VACB 0x%p\n", Vacb);
1018 #if DBG
1019 if (Vacb->SharedCacheMap->Trace)
1020 {
1021 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
1022 }
1023 #endif
1024
1025 MmLockAddressSpace(MmGetKernelAddressSpace());
1026 MmFreeMemoryArea(MmGetKernelAddressSpace(),
1027 Vacb->MemoryArea,
1028 CcFreeCachePage,
1029 NULL);
1030 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1031
1032 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
1033 return STATUS_SUCCESS;
1034 }
1035
1036 /*
1037 * @implemented
1038 */
1039 VOID
1040 NTAPI
1041 CcFlushCache (
1042 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
1043 IN PLARGE_INTEGER FileOffset OPTIONAL,
1044 IN ULONG Length,
1045 OUT PIO_STATUS_BLOCK IoStatus)
1046 {
1047 PROS_SHARED_CACHE_MAP SharedCacheMap;
1048 LARGE_INTEGER Offset;
1049 LONGLONG RemainingLength;
1050 PROS_VACB current;
1051 NTSTATUS Status;
1052 KIRQL oldIrql;
1053
1054 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1055 SectionObjectPointers, FileOffset, Length);
1056
1057 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1058 SectionObjectPointers, FileOffset, Length, IoStatus);
1059
1060 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1061 {
1062 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1063 ASSERT(SharedCacheMap);
1064 if (FileOffset)
1065 {
1066 Offset = *FileOffset;
1067 RemainingLength = Length;
1068 }
1069 else
1070 {
1071 Offset.QuadPart = 0;
1072 RemainingLength = SharedCacheMap->FileSize.QuadPart;
1073 }
1074
1075 if (IoStatus)
1076 {
1077 IoStatus->Status = STATUS_SUCCESS;
1078 IoStatus->Information = 0;
1079 }
1080
1081 while (RemainingLength > 0)
1082 {
1083 current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
1084 if (current != NULL)
1085 {
1086 if (current->Dirty)
1087 {
1088 Status = CcRosFlushVacb(current);
1089 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1090 {
1091 IoStatus->Status = Status;
1092 }
1093 }
1094
1095 CcRosReleaseVacbLock(current);
1096
1097 KeAcquireGuardedMutex(&ViewLock);
1098 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1099 CcRosVacbDecRefCount(current);
1100 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1101 KeReleaseGuardedMutex(&ViewLock);
1102 }
1103
1104 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
1105 RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
1106 }
1107 }
1108 else
1109 {
1110 if (IoStatus)
1111 {
1112 IoStatus->Status = STATUS_INVALID_PARAMETER;
1113 }
1114 }
1115 }
1116
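/* Illustrative sketch (comment only, not compiled): a typical caller flushing a
 * byte range of a cached file, for instance from an FSD's flush path. The
 * offset and length values are arbitrary.
 *
 *   IO_STATUS_BLOCK IoStatus;
 *   LARGE_INTEGER FlushOffset;
 *
 *   FlushOffset.QuadPart = 0;
 *   CcFlushCache(FileObject->SectionObjectPointer,
 *                &FlushOffset,
 *                4096,
 *                &IoStatus);
 *   if (!NT_SUCCESS(IoStatus.Status))
 *   {
 *       // Handle or propagate the flush failure
 *   }
 */
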
1117 NTSTATUS
1118 NTAPI
1119 CcRosDeleteFileCache (
1120 PFILE_OBJECT FileObject,
1121 PROS_SHARED_CACHE_MAP SharedCacheMap)
1122 /*
1123 * FUNCTION: Releases the shared cache map associated with a file object
1124 */
1125 {
1126 PLIST_ENTRY current_entry;
1127 PROS_VACB current;
1128 LIST_ENTRY FreeList;
1129 KIRQL oldIrql;
1130
1131 ASSERT(SharedCacheMap);
1132
1133 SharedCacheMap->OpenCount++;
1134 KeReleaseGuardedMutex(&ViewLock);
1135
1136 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1137
1138 KeAcquireGuardedMutex(&ViewLock);
1139 SharedCacheMap->OpenCount--;
1140 if (SharedCacheMap->OpenCount == 0)
1141 {
1142 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1143
1144 /*
1145 * Release all VACBs
1146 */
1147 InitializeListHead(&FreeList);
1148 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1149 while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
1150 {
1151 current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
1152 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1153 RemoveEntryList(&current->VacbLruListEntry);
1154 if (current->Dirty)
1155 {
1156 RemoveEntryList(&current->DirtyVacbListEntry);
1157 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
1158 DPRINT1("Freeing dirty VACB\n");
1159 }
1160 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
1161 }
1162 #if DBG
1163 SharedCacheMap->Trace = FALSE;
1164 #endif
1165 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1166
1167 KeReleaseGuardedMutex(&ViewLock);
1168 ObDereferenceObject(SharedCacheMap->FileObject);
1169
1170 while (!IsListEmpty(&FreeList))
1171 {
1172 current_entry = RemoveTailList(&FreeList);
1173 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1174 CcRosInternalFreeVacb(current);
1175 }
1176 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1177 KeAcquireGuardedMutex(&ViewLock);
1178 }
1179 return STATUS_SUCCESS;
1180 }
1181
1182 VOID
1183 NTAPI
1184 CcRosReferenceCache (
1185 PFILE_OBJECT FileObject)
1186 {
1187 PROS_SHARED_CACHE_MAP SharedCacheMap;
1188 KeAcquireGuardedMutex(&ViewLock);
1189 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1190 ASSERT(SharedCacheMap);
1191 ASSERT(SharedCacheMap->OpenCount != 0);
1192 SharedCacheMap->OpenCount++;
1193 KeReleaseGuardedMutex(&ViewLock);
1194 }
1195
1196 VOID
1197 NTAPI
1198 CcRosRemoveIfClosed (
1199 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1200 {
1201 PROS_SHARED_CACHE_MAP SharedCacheMap;
1202 DPRINT("CcRosRemoveIfClosed()\n");
1203 KeAcquireGuardedMutex(&ViewLock);
1204 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1205 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1206 {
1207 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1208 }
1209 KeReleaseGuardedMutex(&ViewLock);
1210 }
1211
1212
1213 VOID
1214 NTAPI
1215 CcRosDereferenceCache (
1216 PFILE_OBJECT FileObject)
1217 {
1218 PROS_SHARED_CACHE_MAP SharedCacheMap;
1219 KeAcquireGuardedMutex(&ViewLock);
1220 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1221 ASSERT(SharedCacheMap);
1222 if (SharedCacheMap->OpenCount > 0)
1223 {
1224 SharedCacheMap->OpenCount--;
1225 if (SharedCacheMap->OpenCount == 0)
1226 {
1227 MmFreeSectionSegments(SharedCacheMap->FileObject);
1228 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1229 }
1230 }
1231 KeReleaseGuardedMutex(&ViewLock);
1232 }
1233
1234 NTSTATUS
1235 NTAPI
1236 CcRosReleaseFileCache (
1237 PFILE_OBJECT FileObject)
1238 /*
1239 * FUNCTION: Called by the file system when a handle to a file object
1240 * has been closed.
1241 */
1242 {
1243 PROS_SHARED_CACHE_MAP SharedCacheMap;
1244
1245 KeAcquireGuardedMutex(&ViewLock);
1246
1247 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1248 {
1249 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1250 if (FileObject->PrivateCacheMap != NULL)
1251 {
1252 FileObject->PrivateCacheMap = NULL;
1253 if (SharedCacheMap->OpenCount > 0)
1254 {
1255 SharedCacheMap->OpenCount--;
1256 if (SharedCacheMap->OpenCount == 0)
1257 {
1258 MmFreeSectionSegments(SharedCacheMap->FileObject);
1259 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1260 }
1261 }
1262 }
1263 }
1264 KeReleaseGuardedMutex(&ViewLock);
1265 return STATUS_SUCCESS;
1266 }
1267
1268 NTSTATUS
1269 NTAPI
1270 CcTryToInitializeFileCache (
1271 PFILE_OBJECT FileObject)
1272 {
1273 PROS_SHARED_CACHE_MAP SharedCacheMap;
1274 NTSTATUS Status;
1275
1276 KeAcquireGuardedMutex(&ViewLock);
1277
1278 ASSERT(FileObject->SectionObjectPointer);
1279 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1280 if (SharedCacheMap == NULL)
1281 {
1282 Status = STATUS_UNSUCCESSFUL;
1283 }
1284 else
1285 {
1286 if (FileObject->PrivateCacheMap == NULL)
1287 {
1288 FileObject->PrivateCacheMap = SharedCacheMap;
1289 SharedCacheMap->OpenCount++;
1290 }
1291 Status = STATUS_SUCCESS;
1292 }
1293 KeReleaseGuardedMutex(&ViewLock);
1294
1295 return Status;
1296 }
1297
1298
1299 NTSTATUS
1300 NTAPI
1301 CcRosInitializeFileCache (
1302 PFILE_OBJECT FileObject,
1303 PCC_FILE_SIZES FileSizes,
1304 BOOLEAN PinAccess,
1305 PCACHE_MANAGER_CALLBACKS CallBacks,
1306 PVOID LazyWriterContext)
1307 /*
1308 * FUNCTION: Initializes a shared cache map for a file object
1309 */
1310 {
1311 PROS_SHARED_CACHE_MAP SharedCacheMap;
1312
1313 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1314 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1315 FileObject, SharedCacheMap);
1316
1317 KeAcquireGuardedMutex(&ViewLock);
1318 if (SharedCacheMap == NULL)
1319 {
1320 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1321 if (SharedCacheMap == NULL)
1322 {
1323 KeReleaseGuardedMutex(&ViewLock);
1324 return STATUS_INSUFFICIENT_RESOURCES;
1325 }
1326 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1327 ObReferenceObjectByPointer(FileObject,
1328 FILE_ALL_ACCESS,
1329 NULL,
1330 KernelMode);
1331 SharedCacheMap->FileObject = FileObject;
1332 SharedCacheMap->Callbacks = CallBacks;
1333 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1334 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1335 SharedCacheMap->FileSize = FileSizes->FileSize;
1336 SharedCacheMap->PinAccess = PinAccess;
1337 SharedCacheMap->DirtyPageThreshold = 0;
1338 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1339 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1340 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1341 }
1342 if (FileObject->PrivateCacheMap == NULL)
1343 {
1344 FileObject->PrivateCacheMap = SharedCacheMap;
1345 SharedCacheMap->OpenCount++;
1346 }
1347 KeReleaseGuardedMutex(&ViewLock);
1348
1349 return STATUS_SUCCESS;
1350 }
1351
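/* Illustrative sketch (comment only, not compiled): how a filesystem driver
 * might set up caching for a file with the routine above. The callback fields
 * are the standard CACHE_MANAGER_CALLBACKS members; MyAcquireForLazyWrite and
 * friends, and the Fcb layout, are placeholders.
 *
 *   CC_FILE_SIZES FileSizes;
 *   static CACHE_MANAGER_CALLBACKS Callbacks =
 *   {
 *       MyAcquireForLazyWrite, MyReleaseFromLazyWrite,
 *       MyAcquireForReadAhead, MyReleaseFromReadAhead
 *   };
 *
 *   FileSizes.AllocationSize = Fcb->AllocationSize;
 *   FileSizes.FileSize = Fcb->FileSize;
 *   FileSizes.ValidDataLength = Fcb->ValidDataLength;
 *
 *   Status = CcRosInitializeFileCache(FileObject,
 *                                     &FileSizes,
 *                                     FALSE,       // PinAccess
 *                                     &Callbacks,
 *                                     Fcb);        // LazyWriterContext
 */
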
1352 /*
1353 * @implemented
1354 */
1355 PFILE_OBJECT
1356 NTAPI
1357 CcGetFileObjectFromSectionPtrs (
1358 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1359 {
1360 PROS_SHARED_CACHE_MAP SharedCacheMap;
1361
1362 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1363
1364 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1365 {
1366 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1367 ASSERT(SharedCacheMap);
1368 return SharedCacheMap->FileObject;
1369 }
1370 return NULL;
1371 }
1372
1373 VOID
1374 NTAPI
1375 CcShutdownLazyWriter (
1376 VOID)
1377 {
1378 /* Simply set the event, lazy writer will stop when it's done */
1379 KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
1380 }
1381
1382 BOOLEAN
1383 INIT_FUNCTION
1384 NTAPI
1385 CcInitView (
1386 VOID)
1387 {
1388 HANDLE LazyWriter;
1389 NTSTATUS Status;
1390 KPRIORITY Priority;
1391 OBJECT_ATTRIBUTES ObjectAttributes;
1392
1393 DPRINT("CcInitView()\n");
1394
1395 InitializeListHead(&DirtyVacbListHead);
1396 InitializeListHead(&VacbLruListHead);
1397 InitializeListHead(&CcDeferredWrites);
1398 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1399 KeInitializeGuardedMutex(&ViewLock);
1400 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1401 NULL,
1402 NULL,
1403 0,
1404 sizeof(INTERNAL_BCB),
1405 TAG_BCB,
1406 20);
1407 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1408 NULL,
1409 NULL,
1410 0,
1411 sizeof(ROS_SHARED_CACHE_MAP),
1412 TAG_SHARED_CACHE_MAP,
1413 20);
1414 ExInitializeNPagedLookasideList(&VacbLookasideList,
1415 NULL,
1416 NULL,
1417 0,
1418 sizeof(ROS_VACB),
1419 TAG_VACB,
1420 20);
1421
1422 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1423
1424 /* Initialize lazy writer events */
1425 KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
1426 KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);
1427
1428 /* Define lazy writer threshold, depending on system type */
1429 switch (MmQuerySystemSize())
1430 {
1431 case MmSmallSystem:
1432 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
1433 break;
1434
1435 case MmMediumSystem:
1436 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
1437 break;
1438
1439 case MmLargeSystem:
1440 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
1441 break;
1442 }
1443
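    /* Worked example (added for clarity): on a large system with, say, 262144
     * physical pages (1 GB with 4 KB pages), the threshold above becomes
     * 262144/8 + 262144/4 = 32768 + 65536 = 98304 dirty pages, i.e. 384 MB. */
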
1444 /* Start the lazy writer thread */
1445 InitializeObjectAttributes(&ObjectAttributes,
1446 NULL,
1447 OBJ_KERNEL_HANDLE,
1448 NULL,
1449 NULL);
1450 Status = PsCreateSystemThread(&LazyWriter,
1451 THREAD_ALL_ACCESS,
1452 &ObjectAttributes,
1453 NULL,
1454 NULL,
1455 CciLazyWriter,
1456 NULL);
1457 if (!NT_SUCCESS(Status))
1458 {
1459 return FALSE;
1460 }
1461
1462 Priority = 27;
1463 Status = NtSetInformationThread(LazyWriter,
1464 ThreadPriority,
1465 &Priority,
1466 sizeof(Priority));
1467 ASSERT(NT_SUCCESS(Status));
1468
1469 /* Handle is not needed */
1470 ObCloseHandle(LazyWriter, KernelMode);
1471
1472 CcInitCacheZeroPage();
1473
1474 return TRUE;
1475 }
1476
1477 /* EOF */