[NTOSKRNL] Implement support for deferred writes in Cc.
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 LIST_ENTRY DirtyVacbListHead;
45 static LIST_ENTRY VacbLruListHead;
46
47 KGUARDED_MUTEX ViewLock;
48
49 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
50 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
51 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
52
53 /* Counters:
54 * - Amount of pages flushed by lazy writer
55 * - Number of times lazy writer ran
56 */
57 ULONG CcLazyWritePages = 0;
58 ULONG CcLazyWriteIos = 0;
59
60 /* Internal vars (MS):
61 * - Threshold above which lazy writer will start action
62 * - Amount of dirty pages
63 * - List for deferred writes
64 * - Spinlock when dealing with the deferred list
65 */
66 ULONG CcDirtyPageThreshold = 0;
67 ULONG CcTotalDirtyPages = 0;
68 LIST_ENTRY CcDeferredWrites;
69 KSPIN_LOCK CcDeferredWriteSpinLock;
70
71 /* Internal vars (ROS):
72 * - Event to notify lazy writer to shutdown
73 * - Event to inform watchers lazy writer is done for this loop
74 */
75 KEVENT iLazyWriterShutdown;
76 KEVENT iLazyWriterNotify;
77
78 #if DBG
79 static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
80 {
81 ++vacb->ReferenceCount;
82 if (vacb->SharedCacheMap->Trace)
83 {
84 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
85 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
86 }
87 }
88 static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
89 {
90 --vacb->ReferenceCount;
91 if (vacb->SharedCacheMap->Trace)
92 {
93 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
94 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
95 }
96 }
97 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
98 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
99 #else
100 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
101 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
102 #endif
103
104 NTSTATUS
105 CcRosInternalFreeVacb(PROS_VACB Vacb);
106
107
108 /* FUNCTIONS *****************************************************************/
109
110 VOID
111 NTAPI
112 CcRosTraceCacheMap (
113 PROS_SHARED_CACHE_MAP SharedCacheMap,
114 BOOLEAN Trace )
115 {
116 #if DBG
117 KIRQL oldirql;
118 PLIST_ENTRY current_entry;
119 PROS_VACB current;
120
121 if (!SharedCacheMap)
122 return;
123
124 SharedCacheMap->Trace = Trace;
125
126 if (Trace)
127 {
128 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
129
130 KeAcquireGuardedMutex(&ViewLock);
131 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
132
133 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
134 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
135 {
136 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
137 current_entry = current_entry->Flink;
138
139 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
140 current, current->ReferenceCount, current->Dirty, current->PageOut );
141 }
142 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
143 KeReleaseGuardedMutex(&ViewLock);
144 }
145 else
146 {
147 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
148 }
149
150 #else
151 UNREFERENCED_PARAMETER(SharedCacheMap);
152 UNREFERENCED_PARAMETER(Trace);
153 #endif
154 }
155
156 NTSTATUS
157 NTAPI
158 CcRosFlushVacb (
159 PROS_VACB Vacb)
160 {
161 NTSTATUS Status;
162 KIRQL oldIrql;
163
164 Status = CcWriteVirtualAddress(Vacb);
165 if (NT_SUCCESS(Status))
166 {
167 KeAcquireGuardedMutex(&ViewLock);
168 KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
169
170 Vacb->Dirty = FALSE;
171 RemoveEntryList(&Vacb->DirtyVacbListEntry);
172 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
173 CcRosVacbDecRefCount(Vacb);
174
175 KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
176 KeReleaseGuardedMutex(&ViewLock);
177 }
178
179 return Status;
180 }
181
182 NTSTATUS
183 NTAPI
184 CcRosFlushDirtyPages (
185 ULONG Target,
186 PULONG Count,
187 BOOLEAN Wait,
188 BOOLEAN CalledFromLazy)
189 {
190 PLIST_ENTRY current_entry;
191 PROS_VACB current;
192 BOOLEAN Locked;
193 NTSTATUS Status;
194 LARGE_INTEGER ZeroTimeout;
195
196 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
197
198 (*Count) = 0;
199 ZeroTimeout.QuadPart = 0;
200
201 KeEnterCriticalRegion();
202 KeAcquireGuardedMutex(&ViewLock);
203
204 current_entry = DirtyVacbListHead.Flink;
205 if (current_entry == &DirtyVacbListHead)
206 {
207 DPRINT("No Dirty pages\n");
208 }
209
210 while ((current_entry != &DirtyVacbListHead) && (Target > 0))
211 {
212 current = CONTAINING_RECORD(current_entry,
213 ROS_VACB,
214 DirtyVacbListEntry);
215 current_entry = current_entry->Flink;
216
217 CcRosVacbIncRefCount(current);
218
219 /* When performing lazy write, don't handle temporary files */
220 if (CalledFromLazy &&
221 BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
222 {
223 CcRosVacbDecRefCount(current);
224 continue;
225 }
226
227 Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
228 current->SharedCacheMap->LazyWriteContext, Wait);
229 if (!Locked)
230 {
231 CcRosVacbDecRefCount(current);
232 continue;
233 }
234
235 Status = CcRosAcquireVacbLock(current,
236 Wait ? NULL : &ZeroTimeout);
237 if (Status != STATUS_SUCCESS)
238 {
239 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
240 current->SharedCacheMap->LazyWriteContext);
241 CcRosVacbDecRefCount(current);
242 continue;
243 }
244
245 ASSERT(current->Dirty);
246
247 /* One reference is added above */
248 if (current->ReferenceCount > 2)
249 {
250 CcRosReleaseVacbLock(current);
251 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
252 current->SharedCacheMap->LazyWriteContext);
253 CcRosVacbDecRefCount(current);
254 continue;
255 }
256
257 KeReleaseGuardedMutex(&ViewLock);
258
259 Status = CcRosFlushVacb(current);
260
261 CcRosReleaseVacbLock(current);
262 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
263 current->SharedCacheMap->LazyWriteContext);
264
265 KeAcquireGuardedMutex(&ViewLock);
266 CcRosVacbDecRefCount(current);
267
268 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
269 (Status != STATUS_MEDIA_WRITE_PROTECTED))
270 {
271 DPRINT1("CC: Failed to flush VACB.\n");
272 }
273 else
274 {
275 ULONG PagesFreed;
276
277 /* How many pages did we free? */
278 PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
279 (*Count) += PagesFreed;
280
281 /* Make sure we don't overflow target! */
282 if (Target < PagesFreed)
283 {
284 /* If we would have, jump to zero directly */
285 Target = 0;
286 }
287 else
288 {
289 Target -= PagesFreed;
290 }
291 }
292
293 current_entry = DirtyVacbListHead.Flink;
294 }
295
296 KeReleaseGuardedMutex(&ViewLock);
297 KeLeaveCriticalRegion();
298
299 DPRINT("CcRosFlushDirtyPages() finished\n");
300 return STATUS_SUCCESS;
301 }
302
303 /* FIXME: Someday this could somewhat implement write-behind/read-ahead */
304 VOID
305 NTAPI
306 CciLazyWriter(PVOID Unused)
307 {
308 LARGE_INTEGER OneSecond;
309
310 OneSecond.QuadPart = (LONGLONG)-1*1000*1000*10;
311
312 while (TRUE)
313 {
314 NTSTATUS Status;
315 PLIST_ENTRY ListEntry;
316 ULONG Target, Count = 0;
317
318 /* One per second or until we have to stop */
319 Status = KeWaitForSingleObject(&iLazyWriterShutdown,
320 Executive,
321 KernelMode,
322 FALSE,
323 &OneSecond);
324
325 /* If we succeeed, we've to stop running! */
326 if (Status == STATUS_SUCCESS)
327 {
328 break;
329 }
330
331 /* We're not sleeping anymore */
332 KeClearEvent(&iLazyWriterNotify);
333
334 /* Our target is one-eighth of the dirty pages */
335 Target = CcTotalDirtyPages / 8;
336 if (Target != 0)
337 {
338 /* Flush! */
339 DPRINT("Lazy writer starting (%d)\n", Target);
340 CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);
341
342 /* And update stats */
343 CcLazyWritePages += Count;
344 ++CcLazyWriteIos;
345 DPRINT("Lazy writer done (%d)\n", Count);
346 }
347
348 /* Inform people waiting on us that we're done */
349 KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
350
351 /* Likely not optimal, but let's handle one deferred write now! */
352 ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
353 if (ListEntry != NULL)
354 {
355 PROS_DEFERRED_WRITE_CONTEXT Context;
356
357 /* Extract the context */
358 Context = CONTAINING_RECORD(ListEntry, ROS_DEFERRED_WRITE_CONTEXT, CcDeferredWritesEntry);
359
360 /* Can we write now? */
361 if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, Context->Retrying))
362 {
363 /* Yes! Do it, and destroy the associated context */
364 Context->PostRoutine(Context->Context1, Context->Context2);
365 ExFreePoolWithTag(Context, 'CcDw');
366 }
367 else
368 {
369 /* Otherwise, requeue it, but in tail, so that it doesn't block others
370 * This is clearly to improve, but given the poor algorithm used now
371 * It's better than nothing!
372 */
373 ExInterlockedInsertTailList(&CcDeferredWrites,
374 &Context->CcDeferredWritesEntry,
375 &CcDeferredWriteSpinLock);
376 }
377 }
378 }
379 }
380
381 NTSTATUS
382 CcRosTrimCache (
383 ULONG Target,
384 ULONG Priority,
385 PULONG NrFreed)
386 /*
387 * FUNCTION: Try to free some memory from the file cache.
388 * ARGUMENTS:
389 * Target - The number of pages to be freed.
390 * Priority - The priority of free (currently unused).
391 * NrFreed - Points to a variable where the number of pages
392 * actually freed is returned.
393 */
394 {
395 PLIST_ENTRY current_entry;
396 PROS_VACB current;
397 ULONG PagesFreed;
398 KIRQL oldIrql;
399 LIST_ENTRY FreeList;
400 PFN_NUMBER Page;
401 ULONG i;
402 BOOLEAN FlushedPages = FALSE;
403
404 DPRINT("CcRosTrimCache(Target %lu)\n", Target);
405
406 InitializeListHead(&FreeList);
407
408 *NrFreed = 0;
409
410 retry:
411 KeAcquireGuardedMutex(&ViewLock);
412
413 current_entry = VacbLruListHead.Flink;
414 while (current_entry != &VacbLruListHead)
415 {
416 current = CONTAINING_RECORD(current_entry,
417 ROS_VACB,
418 VacbLruListEntry);
419 current_entry = current_entry->Flink;
420
421 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
422
423 /* Reference the VACB */
424 CcRosVacbIncRefCount(current);
425
426 /* Check if it's mapped and not dirty */
427 if (current->MappedCount > 0 && !current->Dirty)
428 {
429 /* We have to break these locks because Cc sucks */
430 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
431 KeReleaseGuardedMutex(&ViewLock);
432
433 /* Page out the VACB */
434 for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
435 {
436 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
437
438 MmPageOutPhysicalAddress(Page);
439 }
440
441 /* Reacquire the locks */
442 KeAcquireGuardedMutex(&ViewLock);
443 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
444 }
445
446 /* Dereference the VACB */
447 CcRosVacbDecRefCount(current);
448
449 /* Check if we can free this entry now */
450 if (current->ReferenceCount == 0)
451 {
452 ASSERT(!current->Dirty);
453 ASSERT(!current->MappedCount);
454
455 RemoveEntryList(&current->CacheMapVacbListEntry);
456 RemoveEntryList(&current->VacbLruListEntry);
457 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
458
459 /* Calculate how many pages we freed for Mm */
460 PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
461 Target -= PagesFreed;
462 (*NrFreed) += PagesFreed;
463 }
464
465 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
466 }
467
468 KeReleaseGuardedMutex(&ViewLock);
469
470 /* Try flushing pages if we haven't met our target */
471 if ((Target > 0) && !FlushedPages)
472 {
473 /* Flush dirty pages to disk */
474 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
475 FlushedPages = TRUE;
476
477 /* We can only swap as many pages as we flushed */
478 if (PagesFreed < Target) Target = PagesFreed;
479
480 /* Check if we flushed anything */
481 if (PagesFreed != 0)
482 {
483 /* Try again after flushing dirty pages */
484 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
485 goto retry;
486 }
487 }
488
489 while (!IsListEmpty(&FreeList))
490 {
491 current_entry = RemoveHeadList(&FreeList);
492 current = CONTAINING_RECORD(current_entry,
493 ROS_VACB,
494 CacheMapVacbListEntry);
495 CcRosInternalFreeVacb(current);
496 }
497
498 DPRINT("Evicted %lu cache pages\n", (*NrFreed));
499
500 return STATUS_SUCCESS;
501 }
502
503 NTSTATUS
504 NTAPI
505 CcRosReleaseVacb (
506 PROS_SHARED_CACHE_MAP SharedCacheMap,
507 PROS_VACB Vacb,
508 BOOLEAN Valid,
509 BOOLEAN Dirty,
510 BOOLEAN Mapped)
511 {
512 BOOLEAN WasDirty;
513 KIRQL oldIrql;
514
515 ASSERT(SharedCacheMap);
516
517 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
518 SharedCacheMap, Vacb, Valid);
519
520 KeAcquireGuardedMutex(&ViewLock);
521 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
522
523 Vacb->Valid = Valid;
524
525 WasDirty = Vacb->Dirty;
526 Vacb->Dirty = Vacb->Dirty || Dirty;
527
528 if (!WasDirty && Vacb->Dirty)
529 {
530 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
531 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
532 }
533
534 if (Mapped)
535 {
536 Vacb->MappedCount++;
537 }
538 CcRosVacbDecRefCount(Vacb);
539 if (Mapped && (Vacb->MappedCount == 1))
540 {
541 CcRosVacbIncRefCount(Vacb);
542 }
543 if (!WasDirty && Vacb->Dirty)
544 {
545 CcRosVacbIncRefCount(Vacb);
546 }
547
548 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
549 KeReleaseGuardedMutex(&ViewLock);
550 CcRosReleaseVacbLock(Vacb);
551
552 return STATUS_SUCCESS;
553 }
554
555 /* Returns with VACB Lock Held! */
556 PROS_VACB
557 NTAPI
558 CcRosLookupVacb (
559 PROS_SHARED_CACHE_MAP SharedCacheMap,
560 LONGLONG FileOffset)
561 {
562 PLIST_ENTRY current_entry;
563 PROS_VACB current;
564 KIRQL oldIrql;
565
566 ASSERT(SharedCacheMap);
567
568 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
569 SharedCacheMap, FileOffset);
570
571 KeAcquireGuardedMutex(&ViewLock);
572 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
573
574 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
575 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
576 {
577 current = CONTAINING_RECORD(current_entry,
578 ROS_VACB,
579 CacheMapVacbListEntry);
580 if (IsPointInRange(current->FileOffset.QuadPart,
581 VACB_MAPPING_GRANULARITY,
582 FileOffset))
583 {
584 CcRosVacbIncRefCount(current);
585 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
586 KeReleaseGuardedMutex(&ViewLock);
587 CcRosAcquireVacbLock(current, NULL);
588 return current;
589 }
590 if (current->FileOffset.QuadPart > FileOffset)
591 break;
592 current_entry = current_entry->Flink;
593 }
594
595 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
596 KeReleaseGuardedMutex(&ViewLock);
597
598 return NULL;
599 }
600
601 VOID
602 NTAPI
603 CcRosMarkDirtyVacb (
604 PROS_VACB Vacb)
605 {
606 KIRQL oldIrql;
607 PROS_SHARED_CACHE_MAP SharedCacheMap;
608
609 SharedCacheMap = Vacb->SharedCacheMap;
610
611 KeAcquireGuardedMutex(&ViewLock);
612 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
613
614 if (!Vacb->Dirty)
615 {
616 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
617 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
618 }
619 else
620 {
621 CcRosVacbDecRefCount(Vacb);
622 }
623
624 /* Move to the tail of the LRU list */
625 RemoveEntryList(&Vacb->VacbLruListEntry);
626 InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
627
628 Vacb->Dirty = TRUE;
629
630 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
631 KeReleaseGuardedMutex(&ViewLock);
632 }
633
634 NTSTATUS
635 NTAPI
636 CcRosMarkDirtyFile (
637 PROS_SHARED_CACHE_MAP SharedCacheMap,
638 LONGLONG FileOffset)
639 {
640 PROS_VACB Vacb;
641
642 ASSERT(SharedCacheMap);
643
644 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
645 SharedCacheMap, FileOffset);
646
647 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
648 if (Vacb == NULL)
649 {
650 KeBugCheck(CACHE_MANAGER);
651 }
652
653 CcRosMarkDirtyVacb(Vacb);
654
655
656 CcRosReleaseVacbLock(Vacb);
657
658 return STATUS_SUCCESS;
659 }
660
661 NTSTATUS
662 NTAPI
663 CcRosUnmapVacb (
664 PROS_SHARED_CACHE_MAP SharedCacheMap,
665 LONGLONG FileOffset,
666 BOOLEAN NowDirty)
667 {
668 PROS_VACB Vacb;
669 BOOLEAN WasDirty;
670 KIRQL oldIrql;
671
672 ASSERT(SharedCacheMap);
673
674 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
675 SharedCacheMap, FileOffset, NowDirty);
676
677 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
678 if (Vacb == NULL)
679 {
680 return STATUS_UNSUCCESSFUL;
681 }
682
683 KeAcquireGuardedMutex(&ViewLock);
684 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
685
686 WasDirty = Vacb->Dirty;
687 Vacb->Dirty = Vacb->Dirty || NowDirty;
688
689 Vacb->MappedCount--;
690
691 if (!WasDirty && NowDirty)
692 {
693 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
694 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
695 }
696
697 CcRosVacbDecRefCount(Vacb);
698 if (!WasDirty && NowDirty)
699 {
700 CcRosVacbIncRefCount(Vacb);
701 }
702 if (Vacb->MappedCount == 0)
703 {
704 CcRosVacbDecRefCount(Vacb);
705 }
706
707 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
708 KeReleaseGuardedMutex(&ViewLock);
709 CcRosReleaseVacbLock(Vacb);
710
711 return STATUS_SUCCESS;
712 }
713
714 static
715 NTSTATUS
716 CcRosMapVacb(
717 PROS_VACB Vacb)
718 {
719 ULONG i;
720 NTSTATUS Status;
721 ULONG_PTR NumberOfPages;
722
723 /* Create a memory area. */
724 MmLockAddressSpace(MmGetKernelAddressSpace());
725 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
726 0, // nothing checks for VACB mareas, so set to 0
727 &Vacb->BaseAddress,
728 VACB_MAPPING_GRANULARITY,
729 PAGE_READWRITE,
730 (PMEMORY_AREA*)&Vacb->MemoryArea,
731 0,
732 PAGE_SIZE);
733 MmUnlockAddressSpace(MmGetKernelAddressSpace());
734 if (!NT_SUCCESS(Status))
735 {
736 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
737 return Status;
738 }
739
740 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
741 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
742
743 /* Create a virtual mapping for this memory area */
744 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
745 for (i = 0; i < NumberOfPages; i++)
746 {
747 PFN_NUMBER PageFrameNumber;
748
749 MI_SET_USAGE(MI_USAGE_CACHE);
750 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
751 if (PageFrameNumber == 0)
752 {
753 DPRINT1("Unable to allocate page\n");
754 KeBugCheck(MEMORY_MANAGEMENT);
755 }
756
757 Status = MmCreateVirtualMapping(NULL,
758 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
759 PAGE_READWRITE,
760 &PageFrameNumber,
761 1);
762 if (!NT_SUCCESS(Status))
763 {
764 DPRINT1("Unable to create virtual mapping\n");
765 KeBugCheck(MEMORY_MANAGEMENT);
766 }
767 }
768
769 return STATUS_SUCCESS;
770 }
771
772 static
773 NTSTATUS
774 CcRosCreateVacb (
775 PROS_SHARED_CACHE_MAP SharedCacheMap,
776 LONGLONG FileOffset,
777 PROS_VACB *Vacb)
778 {
779 PROS_VACB current;
780 PROS_VACB previous;
781 PLIST_ENTRY current_entry;
782 NTSTATUS Status;
783 KIRQL oldIrql;
784
785 ASSERT(SharedCacheMap);
786
787 DPRINT("CcRosCreateVacb()\n");
788
789 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
790 {
791 *Vacb = NULL;
792 return STATUS_INVALID_PARAMETER;
793 }
794
795 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
796 current->BaseAddress = NULL;
797 current->Valid = FALSE;
798 current->Dirty = FALSE;
799 current->PageOut = FALSE;
800 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
801 current->SharedCacheMap = SharedCacheMap;
802 #if DBG
803 if (SharedCacheMap->Trace)
804 {
805 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
806 }
807 #endif
808 current->MappedCount = 0;
809 current->DirtyVacbListEntry.Flink = NULL;
810 current->DirtyVacbListEntry.Blink = NULL;
811 current->ReferenceCount = 1;
812 current->PinCount = 0;
813 KeInitializeMutex(&current->Mutex, 0);
814 CcRosAcquireVacbLock(current, NULL);
815 KeAcquireGuardedMutex(&ViewLock);
816
817 *Vacb = current;
818 /* There is window between the call to CcRosLookupVacb
819 * and CcRosCreateVacb. We must check if a VACB for the
820 * file offset exist. If there is a VACB, we release
821 * our newly created VACB and return the existing one.
822 */
823 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
824 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
825 previous = NULL;
826 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
827 {
828 current = CONTAINING_RECORD(current_entry,
829 ROS_VACB,
830 CacheMapVacbListEntry);
831 if (IsPointInRange(current->FileOffset.QuadPart,
832 VACB_MAPPING_GRANULARITY,
833 FileOffset))
834 {
835 CcRosVacbIncRefCount(current);
836 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
837 #if DBG
838 if (SharedCacheMap->Trace)
839 {
840 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
841 SharedCacheMap,
842 (*Vacb),
843 current);
844 }
845 #endif
846 CcRosReleaseVacbLock(*Vacb);
847 KeReleaseGuardedMutex(&ViewLock);
848 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
849 *Vacb = current;
850 CcRosAcquireVacbLock(current, NULL);
851 return STATUS_SUCCESS;
852 }
853 if (current->FileOffset.QuadPart < FileOffset)
854 {
855 ASSERT(previous == NULL ||
856 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
857 previous = current;
858 }
859 if (current->FileOffset.QuadPart > FileOffset)
860 break;
861 current_entry = current_entry->Flink;
862 }
863 /* There was no existing VACB. */
864 current = *Vacb;
865 if (previous)
866 {
867 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
868 }
869 else
870 {
871 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
872 }
873 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
874 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
875 KeReleaseGuardedMutex(&ViewLock);
876
877 MI_SET_USAGE(MI_USAGE_CACHE);
878 #if MI_TRACE_PFNS
879 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
880 {
881 PWCHAR pos = NULL;
882 ULONG len = 0;
883 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
884 if (pos)
885 {
886 len = wcslen(pos) * sizeof(WCHAR);
887 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
888 }
889 else
890 {
891 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
892 }
893 }
894 #endif
895
896 Status = CcRosMapVacb(current);
897 if (!NT_SUCCESS(Status))
898 {
899 RemoveEntryList(&current->CacheMapVacbListEntry);
900 RemoveEntryList(&current->VacbLruListEntry);
901 CcRosReleaseVacbLock(current);
902 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
903 }
904
905 return Status;
906 }
907
908 NTSTATUS
909 NTAPI
910 CcRosGetVacb (
911 PROS_SHARED_CACHE_MAP SharedCacheMap,
912 LONGLONG FileOffset,
913 PLONGLONG BaseOffset,
914 PVOID* BaseAddress,
915 PBOOLEAN UptoDate,
916 PROS_VACB *Vacb)
917 {
918 PROS_VACB current;
919 NTSTATUS Status;
920
921 ASSERT(SharedCacheMap);
922
923 DPRINT("CcRosGetVacb()\n");
924
925 /*
926 * Look for a VACB already mapping the same data.
927 */
928 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
929 if (current == NULL)
930 {
931 /*
932 * Otherwise create a new VACB.
933 */
934 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
935 if (!NT_SUCCESS(Status))
936 {
937 return Status;
938 }
939 }
940
941 KeAcquireGuardedMutex(&ViewLock);
942
943 /* Move to the tail of the LRU list */
944 RemoveEntryList(&current->VacbLruListEntry);
945 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
946
947 KeReleaseGuardedMutex(&ViewLock);
948
949 /*
950 * Return information about the VACB to the caller.
951 */
952 *UptoDate = current->Valid;
953 *BaseAddress = current->BaseAddress;
954 DPRINT("*BaseAddress %p\n", *BaseAddress);
955 *Vacb = current;
956 *BaseOffset = current->FileOffset.QuadPart;
957 return STATUS_SUCCESS;
958 }
959
960 NTSTATUS
961 NTAPI
962 CcRosRequestVacb (
963 PROS_SHARED_CACHE_MAP SharedCacheMap,
964 LONGLONG FileOffset,
965 PVOID* BaseAddress,
966 PBOOLEAN UptoDate,
967 PROS_VACB *Vacb)
968 /*
969 * FUNCTION: Request a page mapping for a shared cache map
970 */
971 {
972 LONGLONG BaseOffset;
973
974 ASSERT(SharedCacheMap);
975
976 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
977 {
978 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
979 FileOffset, VACB_MAPPING_GRANULARITY);
980 KeBugCheck(CACHE_MANAGER);
981 }
982
983 return CcRosGetVacb(SharedCacheMap,
984 FileOffset,
985 &BaseOffset,
986 BaseAddress,
987 UptoDate,
988 Vacb);
989 }
990
991 static
992 VOID
993 CcFreeCachePage (
994 PVOID Context,
995 MEMORY_AREA* MemoryArea,
996 PVOID Address,
997 PFN_NUMBER Page,
998 SWAPENTRY SwapEntry,
999 BOOLEAN Dirty)
1000 {
1001 ASSERT(SwapEntry == 0);
1002 if (Page != 0)
1003 {
1004 ASSERT(MmGetReferenceCountPage(Page) == 1);
1005 MmReleasePageMemoryConsumer(MC_CACHE, Page);
1006 }
1007 }
1008
1009 NTSTATUS
1010 CcRosInternalFreeVacb (
1011 PROS_VACB Vacb)
1012 /*
1013 * FUNCTION: Releases a VACB associated with a shared cache map
1014 */
1015 {
1016 DPRINT("Freeing VACB 0x%p\n", Vacb);
1017 #if DBG
1018 if (Vacb->SharedCacheMap->Trace)
1019 {
1020 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
1021 }
1022 #endif
1023
1024 MmLockAddressSpace(MmGetKernelAddressSpace());
1025 MmFreeMemoryArea(MmGetKernelAddressSpace(),
1026 Vacb->MemoryArea,
1027 CcFreeCachePage,
1028 NULL);
1029 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1030
1031 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
1032 return STATUS_SUCCESS;
1033 }
1034
1035 /*
1036 * @implemented
1037 */
1038 VOID
1039 NTAPI
1040 CcFlushCache (
1041 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
1042 IN PLARGE_INTEGER FileOffset OPTIONAL,
1043 IN ULONG Length,
1044 OUT PIO_STATUS_BLOCK IoStatus)
1045 {
1046 PROS_SHARED_CACHE_MAP SharedCacheMap;
1047 LARGE_INTEGER Offset;
1048 LONGLONG RemainingLength;
1049 PROS_VACB current;
1050 NTSTATUS Status;
1051 KIRQL oldIrql;
1052
1053 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1054 SectionObjectPointers, FileOffset, Length);
1055
1056 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1057 SectionObjectPointers, FileOffset, Length, IoStatus);
1058
1059 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1060 {
1061 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1062 ASSERT(SharedCacheMap);
1063 if (FileOffset)
1064 {
1065 Offset = *FileOffset;
1066 RemainingLength = Length;
1067 }
1068 else
1069 {
1070 Offset.QuadPart = 0;
1071 RemainingLength = SharedCacheMap->FileSize.QuadPart;
1072 }
1073
1074 if (IoStatus)
1075 {
1076 IoStatus->Status = STATUS_SUCCESS;
1077 IoStatus->Information = 0;
1078 }
1079
1080 while (RemainingLength > 0)
1081 {
1082 current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
1083 if (current != NULL)
1084 {
1085 if (current->Dirty)
1086 {
1087 Status = CcRosFlushVacb(current);
1088 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1089 {
1090 IoStatus->Status = Status;
1091 }
1092 }
1093
1094 CcRosReleaseVacbLock(current);
1095
1096 KeAcquireGuardedMutex(&ViewLock);
1097 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1098 CcRosVacbDecRefCount(current);
1099 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1100 KeReleaseGuardedMutex(&ViewLock);
1101 }
1102
1103 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
1104 RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
1105 }
1106 }
1107 else
1108 {
1109 if (IoStatus)
1110 {
1111 IoStatus->Status = STATUS_INVALID_PARAMETER;
1112 }
1113 }
1114 }
1115
1116 NTSTATUS
1117 NTAPI
1118 CcRosDeleteFileCache (
1119 PFILE_OBJECT FileObject,
1120 PROS_SHARED_CACHE_MAP SharedCacheMap)
1121 /*
1122 * FUNCTION: Releases the shared cache map associated with a file object
1123 */
1124 {
1125 PLIST_ENTRY current_entry;
1126 PROS_VACB current;
1127 LIST_ENTRY FreeList;
1128 KIRQL oldIrql;
1129
1130 ASSERT(SharedCacheMap);
1131
1132 SharedCacheMap->OpenCount++;
1133 KeReleaseGuardedMutex(&ViewLock);
1134
1135 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1136
1137 KeAcquireGuardedMutex(&ViewLock);
1138 SharedCacheMap->OpenCount--;
1139 if (SharedCacheMap->OpenCount == 0)
1140 {
1141 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1142
1143 /*
1144 * Release all VACBs
1145 */
1146 InitializeListHead(&FreeList);
1147 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1148 while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
1149 {
1150 current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
1151 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1152 RemoveEntryList(&current->VacbLruListEntry);
1153 if (current->Dirty)
1154 {
1155 RemoveEntryList(&current->DirtyVacbListEntry);
1156 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
1157 DPRINT1("Freeing dirty VACB\n");
1158 }
1159 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
1160 }
1161 #if DBG
1162 SharedCacheMap->Trace = FALSE;
1163 #endif
1164 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1165
1166 KeReleaseGuardedMutex(&ViewLock);
1167 ObDereferenceObject(SharedCacheMap->FileObject);
1168
1169 while (!IsListEmpty(&FreeList))
1170 {
1171 current_entry = RemoveTailList(&FreeList);
1172 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1173 CcRosInternalFreeVacb(current);
1174 }
1175 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1176 KeAcquireGuardedMutex(&ViewLock);
1177 }
1178 return STATUS_SUCCESS;
1179 }
1180
1181 VOID
1182 NTAPI
1183 CcRosReferenceCache (
1184 PFILE_OBJECT FileObject)
1185 {
1186 PROS_SHARED_CACHE_MAP SharedCacheMap;
1187 KeAcquireGuardedMutex(&ViewLock);
1188 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1189 ASSERT(SharedCacheMap);
1190 ASSERT(SharedCacheMap->OpenCount != 0);
1191 SharedCacheMap->OpenCount++;
1192 KeReleaseGuardedMutex(&ViewLock);
1193 }
1194
1195 VOID
1196 NTAPI
1197 CcRosRemoveIfClosed (
1198 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1199 {
1200 PROS_SHARED_CACHE_MAP SharedCacheMap;
1201 DPRINT("CcRosRemoveIfClosed()\n");
1202 KeAcquireGuardedMutex(&ViewLock);
1203 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1204 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1205 {
1206 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1207 }
1208 KeReleaseGuardedMutex(&ViewLock);
1209 }
1210
1211
1212 VOID
1213 NTAPI
1214 CcRosDereferenceCache (
1215 PFILE_OBJECT FileObject)
1216 {
1217 PROS_SHARED_CACHE_MAP SharedCacheMap;
1218 KeAcquireGuardedMutex(&ViewLock);
1219 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1220 ASSERT(SharedCacheMap);
1221 if (SharedCacheMap->OpenCount > 0)
1222 {
1223 SharedCacheMap->OpenCount--;
1224 if (SharedCacheMap->OpenCount == 0)
1225 {
1226 MmFreeSectionSegments(SharedCacheMap->FileObject);
1227 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1228 }
1229 }
1230 KeReleaseGuardedMutex(&ViewLock);
1231 }
1232
1233 NTSTATUS
1234 NTAPI
1235 CcRosReleaseFileCache (
1236 PFILE_OBJECT FileObject)
1237 /*
1238 * FUNCTION: Called by the file system when a handle to a file object
1239 * has been closed.
1240 */
1241 {
1242 PROS_SHARED_CACHE_MAP SharedCacheMap;
1243
1244 KeAcquireGuardedMutex(&ViewLock);
1245
1246 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1247 {
1248 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1249 if (FileObject->PrivateCacheMap != NULL)
1250 {
1251 FileObject->PrivateCacheMap = NULL;
1252 if (SharedCacheMap->OpenCount > 0)
1253 {
1254 SharedCacheMap->OpenCount--;
1255 if (SharedCacheMap->OpenCount == 0)
1256 {
1257 MmFreeSectionSegments(SharedCacheMap->FileObject);
1258 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1259 }
1260 }
1261 }
1262 }
1263 KeReleaseGuardedMutex(&ViewLock);
1264 return STATUS_SUCCESS;
1265 }
1266
1267 NTSTATUS
1268 NTAPI
1269 CcTryToInitializeFileCache (
1270 PFILE_OBJECT FileObject)
1271 {
1272 PROS_SHARED_CACHE_MAP SharedCacheMap;
1273 NTSTATUS Status;
1274
1275 KeAcquireGuardedMutex(&ViewLock);
1276
1277 ASSERT(FileObject->SectionObjectPointer);
1278 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1279 if (SharedCacheMap == NULL)
1280 {
1281 Status = STATUS_UNSUCCESSFUL;
1282 }
1283 else
1284 {
1285 if (FileObject->PrivateCacheMap == NULL)
1286 {
1287 FileObject->PrivateCacheMap = SharedCacheMap;
1288 SharedCacheMap->OpenCount++;
1289 }
1290 Status = STATUS_SUCCESS;
1291 }
1292 KeReleaseGuardedMutex(&ViewLock);
1293
1294 return Status;
1295 }
1296
1297
1298 NTSTATUS
1299 NTAPI
1300 CcRosInitializeFileCache (
1301 PFILE_OBJECT FileObject,
1302 PCC_FILE_SIZES FileSizes,
1303 BOOLEAN PinAccess,
1304 PCACHE_MANAGER_CALLBACKS CallBacks,
1305 PVOID LazyWriterContext)
1306 /*
1307 * FUNCTION: Initializes a shared cache map for a file object
1308 */
1309 {
1310 PROS_SHARED_CACHE_MAP SharedCacheMap;
1311
1312 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1313 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1314 FileObject, SharedCacheMap);
1315
1316 KeAcquireGuardedMutex(&ViewLock);
1317 if (SharedCacheMap == NULL)
1318 {
1319 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1320 if (SharedCacheMap == NULL)
1321 {
1322 KeReleaseGuardedMutex(&ViewLock);
1323 return STATUS_INSUFFICIENT_RESOURCES;
1324 }
1325 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1326 ObReferenceObjectByPointer(FileObject,
1327 FILE_ALL_ACCESS,
1328 NULL,
1329 KernelMode);
1330 SharedCacheMap->FileObject = FileObject;
1331 SharedCacheMap->Callbacks = CallBacks;
1332 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1333 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1334 SharedCacheMap->FileSize = FileSizes->FileSize;
1335 SharedCacheMap->PinAccess = PinAccess;
1336 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1337 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1338 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1339 }
1340 if (FileObject->PrivateCacheMap == NULL)
1341 {
1342 FileObject->PrivateCacheMap = SharedCacheMap;
1343 SharedCacheMap->OpenCount++;
1344 }
1345 KeReleaseGuardedMutex(&ViewLock);
1346
1347 return STATUS_SUCCESS;
1348 }
1349
1350 /*
1351 * @implemented
1352 */
1353 PFILE_OBJECT
1354 NTAPI
1355 CcGetFileObjectFromSectionPtrs (
1356 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1357 {
1358 PROS_SHARED_CACHE_MAP SharedCacheMap;
1359
1360 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1361
1362 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1363 {
1364 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1365 ASSERT(SharedCacheMap);
1366 return SharedCacheMap->FileObject;
1367 }
1368 return NULL;
1369 }
1370
VOID
NTAPI
CcShutdownLazyWriter (
    VOID)
/*
 * FUNCTION: Requests termination of the lazy writer during system
 * shutdown. Only signals the shutdown event; it does not wait for the
 * lazy writer thread to actually exit.
 */
{
    /* Simply set the event, lazy writer will stop when it's done */
    KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
}
1379
BOOLEAN
INIT_FUNCTION
NTAPI
CcInitView (
    VOID)
/*
 * FUNCTION: One-time boot initialization of the cache manager view
 * machinery: global VACB lists, deferred-write list, locks, lookaside
 * lists, lazy writer events/thread and the dirty page threshold.
 * RETURNS: TRUE on success, FALSE if the lazy writer thread could not
 *          be created.
 */
{
    HANDLE LazyWriter;
    NTSTATUS Status;
    OBJECT_ATTRIBUTES ObjectAttributes;

    DPRINT("CcInitView()\n");

    /* Global VACB bookkeeping lists, the deferred-write queue and the
     * locks protecting them */
    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    InitializeListHead(&CcDeferredWrites);
    KeInitializeSpinLock(&CcDeferredWriteSpinLock);
    KeInitializeGuardedMutex(&ViewLock);
    /* Lookaside lists for the three hot-path allocation types:
     * BCBs, shared cache maps and VACBs */
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    /* Register the cache as a memory consumer so Mm can ask it to trim */
    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);

    /* Initialize lazy writer events (must exist before the lazy writer
     * thread is started below) */
    KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
    KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);

    /* Define lazy writer threshold, depending on system type:
     * 1/8, 1/4 or 3/8 of physical memory respectively */
    switch (MmQuerySystemSize())
    {
        case MmSmallSystem:
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
            break;

        case MmMediumSystem:
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
            break;

        case MmLargeSystem:
            CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
            break;
    }

    /* Start the lazy writer thread */
    InitializeObjectAttributes(&ObjectAttributes,
                               NULL,
                               OBJ_KERNEL_HANDLE,
                               NULL,
                               NULL);
    Status = PsCreateSystemThread(&LazyWriter,
                                  THREAD_ALL_ACCESS,
                                  &ObjectAttributes,
                                  NULL,
                                  NULL,
                                  CciLazyWriter,
                                  NULL);
    if (!NT_SUCCESS(Status))
    {
        return FALSE;
    }

    /* Handle is not needed; the thread is only reached via its events */
    ObCloseHandle(LazyWriter, KernelMode);

    CcInitCacheZeroPage();

    return TRUE;
}
1466
1467 /* EOF */