1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17  * dispatch routine is as follows (see the illustrative sketch after this note):
18 *
19  * (1) If caching for the FCB hasn't been initiated, then do so by calling
20 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
27  * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
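/* ILLUSTRATIVE SKETCH *********************************************************
 *
 * A minimal sketch of the dispatch procedure described above, for a
 * hypothetical filesystem read path. The routine names used here
 * (CcInitializeFileCache, CcRequestCachePage, CcReleaseCachePage) are the ones
 * from the note above, and FsdReadFromDisk plus the FCB fields are invented
 * for illustration; treat this as an outline of the intended flow, not code
 * that compiles against the interfaces actually exported by this file.
 *
 *   NTSTATUS
 *   FsdReadRegion(PFCB Fcb, LONGLONG Offset, ULONG Length, PVOID Buffer)
 *   {
 *       PVOID CacheAddress;
 *       BOOLEAN Valid;
 *       NTSTATUS Status;
 *
 *       // (1) Make sure caching has been initiated for this FCB.
 *       if (!Fcb->CacheInitialized)
 *           CcInitializeFileCache(Fcb->FileObject, &Fcb->FileSizes);
 *
 *       // (2) Obtain the cache page covering this 4k region.
 *       CcRequestCachePage(Fcb, Offset, &CacheAddress, &Valid);
 *
 *       // (3) We're reading and the page is not up to date: fetch it from disk.
 *       if (!Valid)
 *       {
 *           Status = FsdReadFromDisk(Fcb, Offset, CacheAddress);
 *           if (!NT_SUCCESS(Status))
 *           {
 *               CcReleaseCachePage(Fcb, Offset, FALSE);   // VALID == FALSE
 *               return Status;
 *           }
 *       }
 *
 *       // (4) Copy the data out of the cache page.
 *       RtlCopyMemory(Buffer, CacheAddress, Length);
 *
 *       // (5) Release the cache page.
 *       CcReleaseCachePage(Fcb, Offset, TRUE);
 *       return STATUS_SUCCESS;
 *   }
 */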
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
45 LIST_ENTRY DirtyVacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
54 /* Counters:
55  * - Number of pages flushed by the lazy writer
56  * - Number of times the lazy writer ran
57  */
58 ULONG CcLazyWritePages = 0;
59 ULONG CcLazyWriteIos = 0;
60
61 /* Internal vars (MS):
62  * - Threshold above which the lazy writer will start flushing
63  * - Number of dirty pages
64  * - List for deferred writes
65  * - Spin lock protecting the deferred write list
66  * - List for "clean" shared cache maps
67  * - One-second idle delay for the lazy writer
68  */
69 ULONG CcDirtyPageThreshold = 0;
70 ULONG CcTotalDirtyPages = 0;
71 LIST_ENTRY CcDeferredWrites;
72 KSPIN_LOCK CcDeferredWriteSpinLock;
73 LIST_ENTRY CcCleanSharedCacheMapList;
74 LARGE_INTEGER CcIdleDelay = RTL_CONSTANT_LARGE_INTEGER((LONGLONG)-1*1000*1000*10);
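/* -1 * 1000 * 1000 * 10 = -10,000,000 ticks of 100 ns: a negative (relative)
 * one-second timeout, as consumed by KeWaitForSingleObject in the lazy writer */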
75
76 /* Internal vars (ROS):
77  * - Event used to notify the lazy writer to shut down
78  * - Event used to inform watchers that the lazy writer is done for this loop
79  * - Lock for CcCleanSharedCacheMapList
80  */
81 KEVENT iLazyWriterShutdown;
82 KEVENT iLazyWriterNotify;
83 KSPIN_LOCK iSharedCacheMapLock;
84
85 #if DBG
86 static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
87 {
88 ++vacb->ReferenceCount;
89 if (vacb->SharedCacheMap->Trace)
90 {
91 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
92 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
93 }
94 }
95 static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
96 {
97 --vacb->ReferenceCount;
98 ASSERT(!(vacb->ReferenceCount == 0 && vacb->Dirty));
99 if (vacb->SharedCacheMap->Trace)
100 {
101 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
102 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
103 }
104 }
105 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
106 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
107 #else
108 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
109 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
110 #endif
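/* The DBG variants above additionally log every reference change when tracing
 * is enabled for the cache map, and assert that a dirty VACB never drops to a
 * reference count of zero; the release builds are plain increments/decrements. */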
111
112 NTSTATUS
113 CcRosInternalFreeVacb(PROS_VACB Vacb);
114
115
116 /* FUNCTIONS *****************************************************************/
117
118 VOID
119 NTAPI
120 CcRosTraceCacheMap (
121 PROS_SHARED_CACHE_MAP SharedCacheMap,
122 BOOLEAN Trace )
123 {
124 #if DBG
125 KIRQL oldirql;
126 PLIST_ENTRY current_entry;
127 PROS_VACB current;
128
129 if (!SharedCacheMap)
130 return;
131
132 SharedCacheMap->Trace = Trace;
133
134 if (Trace)
135 {
136 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
137
138 KeAcquireGuardedMutex(&ViewLock);
139 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
140
141 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
142 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
143 {
144 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
145 current_entry = current_entry->Flink;
146
147 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
148 current, current->ReferenceCount, current->Dirty, current->PageOut );
149 }
150 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
151 KeReleaseGuardedMutex(&ViewLock);
152 }
153 else
154 {
155 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
156 }
157
158 #else
159 UNREFERENCED_PARAMETER(SharedCacheMap);
160 UNREFERENCED_PARAMETER(Trace);
161 #endif
162 }
163
164 NTSTATUS
165 NTAPI
166 CcRosFlushVacb (
167 PROS_VACB Vacb)
168 {
169 NTSTATUS Status;
170 KIRQL oldIrql;
171
172 Status = CcWriteVirtualAddress(Vacb);
173 if (NT_SUCCESS(Status))
174 {
175 KeAcquireGuardedMutex(&ViewLock);
176 KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
177
178 Vacb->Dirty = FALSE;
179 RemoveEntryList(&Vacb->DirtyVacbListEntry);
180 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
181 Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
182 CcRosVacbDecRefCount(Vacb);
183
184 KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
185 KeReleaseGuardedMutex(&ViewLock);
186 }
187
188 return Status;
189 }
190
191 NTSTATUS
192 NTAPI
193 CcRosFlushDirtyPages (
194 ULONG Target,
195 PULONG Count,
196 BOOLEAN Wait,
197 BOOLEAN CalledFromLazy)
198 {
199 PLIST_ENTRY current_entry;
200 PROS_VACB current;
201 BOOLEAN Locked;
202 NTSTATUS Status;
203 LARGE_INTEGER ZeroTimeout;
204
205 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
206
207 (*Count) = 0;
208 ZeroTimeout.QuadPart = 0;
209
210 KeEnterCriticalRegion();
211 KeAcquireGuardedMutex(&ViewLock);
212
213 current_entry = DirtyVacbListHead.Flink;
214 if (current_entry == &DirtyVacbListHead)
215 {
216 DPRINT("No Dirty pages\n");
217 }
218
219 while ((current_entry != &DirtyVacbListHead) && (Target > 0))
220 {
221 current = CONTAINING_RECORD(current_entry,
222 ROS_VACB,
223 DirtyVacbListEntry);
224 current_entry = current_entry->Flink;
225
226 CcRosVacbIncRefCount(current);
227
228 /* When performing lazy write, don't handle temporary files */
229 if (CalledFromLazy &&
230 BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
231 {
232 CcRosVacbDecRefCount(current);
233 continue;
234 }
235
236 Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
237 current->SharedCacheMap->LazyWriteContext, Wait);
238 if (!Locked)
239 {
240 CcRosVacbDecRefCount(current);
241 continue;
242 }
243
244 Status = CcRosAcquireVacbLock(current,
245 Wait ? NULL : &ZeroTimeout);
246 if (Status != STATUS_SUCCESS)
247 {
248 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
249 current->SharedCacheMap->LazyWriteContext);
250 CcRosVacbDecRefCount(current);
251 continue;
252 }
253
254 ASSERT(current->Dirty);
255
256         /* One reference was added above for ourselves, and a dirty VACB holds another;
257            skip VACBs that are referenced beyond that, i.e. still in use elsewhere */
257 if ((current->ReferenceCount > 2 && current->PinCount == 0) ||
258 (current->ReferenceCount > 3 && current->PinCount > 1))
259 {
260 CcRosReleaseVacbLock(current);
261 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
262 current->SharedCacheMap->LazyWriteContext);
263 CcRosVacbDecRefCount(current);
264 continue;
265 }
266
267 KeReleaseGuardedMutex(&ViewLock);
268
269 Status = CcRosFlushVacb(current);
270
271 CcRosReleaseVacbLock(current);
272 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
273 current->SharedCacheMap->LazyWriteContext);
274
275 KeAcquireGuardedMutex(&ViewLock);
276 CcRosVacbDecRefCount(current);
277
278 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
279 (Status != STATUS_MEDIA_WRITE_PROTECTED))
280 {
281 DPRINT1("CC: Failed to flush VACB.\n");
282 }
283 else
284 {
285 ULONG PagesFreed;
286
287 /* How many pages did we free? */
288 PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
289 (*Count) += PagesFreed;
290
291             /* Make sure we don't underflow Target! */
292 if (Target < PagesFreed)
293 {
294 /* If we would have, jump to zero directly */
295 Target = 0;
296 }
297 else
298 {
299 Target -= PagesFreed;
300 }
301 }
302
303 current_entry = DirtyVacbListHead.Flink;
304 }
305
306 KeReleaseGuardedMutex(&ViewLock);
307 KeLeaveCriticalRegion();
308
309 DPRINT("CcRosFlushDirtyPages() finished\n");
310 return STATUS_SUCCESS;
311 }
312
313 /* FIXME: Someday this could somewhat implement write-behind/read-ahead */
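/* Lazy writer loop, in brief: wake up once per second (CcIdleDelay) or exit as
 * soon as iLazyWriterShutdown is signaled; flush roughly one-eighth of the
 * dirty pages through CcRosFlushDirtyPages, update the CcLazyWritePages and
 * CcLazyWriteIos counters, signal iLazyWriterNotify for anyone waiting on a
 * pass to complete, and then service at most one entry from the deferred
 * write list. */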
314 VOID
315 NTAPI
316 CciLazyWriter(PVOID Unused)
317 {
318 while (TRUE)
319 {
320 NTSTATUS Status;
321 PLIST_ENTRY ListEntry;
322 ULONG Target, Count = 0;
323
324         /* Once per second, or until we have to stop */
325 Status = KeWaitForSingleObject(&iLazyWriterShutdown,
326 Executive,
327 KernelMode,
328 FALSE,
329 &CcIdleDelay);
330
331         /* If the wait succeeded, we have to stop running! */
332 if (Status == STATUS_SUCCESS)
333 {
334 break;
335 }
336
337 /* We're not sleeping anymore */
338 KeClearEvent(&iLazyWriterNotify);
339
340 /* Our target is one-eighth of the dirty pages */
341 Target = CcTotalDirtyPages / 8;
342 if (Target != 0)
343 {
344 /* Flush! */
345 DPRINT("Lazy writer starting (%d)\n", Target);
346 CcRosFlushDirtyPages(Target, &Count, FALSE, TRUE);
347
348 /* And update stats */
349 CcLazyWritePages += Count;
350 ++CcLazyWriteIos;
351 DPRINT("Lazy writer done (%d)\n", Count);
352 }
353
354 /* Inform people waiting on us that we're done */
355 KeSetEvent(&iLazyWriterNotify, IO_DISK_INCREMENT, FALSE);
356
357 /* Likely not optimal, but let's handle one deferred write now! */
358 ListEntry = ExInterlockedRemoveHeadList(&CcDeferredWrites, &CcDeferredWriteSpinLock);
359 if (ListEntry != NULL)
360 {
361 PDEFERRED_WRITE Context;
362
363 /* Extract the context */
364 Context = CONTAINING_RECORD(ListEntry, DEFERRED_WRITE, DeferredWriteLinks);
365 ASSERT(Context->NodeTypeCode == NODE_TYPE_DEFERRED_WRITE);
366
367 /* Can we write now? */
368 if (CcCanIWrite(Context->FileObject, Context->BytesToWrite, FALSE, TRUE))
369 {
370 /* Yes! Do it, and destroy the associated context */
371 Context->PostRoutine(Context->Context1, Context->Context2);
372 ExFreePoolWithTag(Context, 'CcDw');
373 }
374 else
375 {
376                 /* Otherwise, requeue it at the tail so that it doesn't block others.
377                  * This clearly needs improving, but given the poor algorithm used for
378                  * now, it's better than nothing!
379                  */
380 ExInterlockedInsertTailList(&CcDeferredWrites,
381 &Context->DeferredWriteLinks,
382 &CcDeferredWriteSpinLock);
383 }
384 }
385 }
386 }
387
388 NTSTATUS
389 CcRosTrimCache (
390 ULONG Target,
391 ULONG Priority,
392 PULONG NrFreed)
393 /*
394 * FUNCTION: Try to free some memory from the file cache.
395 * ARGUMENTS:
396 * Target - The number of pages to be freed.
397 * Priority - The priority of free (currently unused).
398 * NrFreed - Points to a variable where the number of pages
399 * actually freed is returned.
400 */
401 {
402 PLIST_ENTRY current_entry;
403 PROS_VACB current;
404 ULONG PagesFreed;
405 KIRQL oldIrql;
406 LIST_ENTRY FreeList;
407 PFN_NUMBER Page;
408 ULONG i;
409 BOOLEAN FlushedPages = FALSE;
410
411 DPRINT("CcRosTrimCache(Target %lu)\n", Target);
412
413 InitializeListHead(&FreeList);
414
415 *NrFreed = 0;
416
417 retry:
418 KeAcquireGuardedMutex(&ViewLock);
419
420 current_entry = VacbLruListHead.Flink;
421 while (current_entry != &VacbLruListHead)
422 {
423 current = CONTAINING_RECORD(current_entry,
424 ROS_VACB,
425 VacbLruListEntry);
426 current_entry = current_entry->Flink;
427
428 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
429
430 /* Reference the VACB */
431 CcRosVacbIncRefCount(current);
432
433 /* Check if it's mapped and not dirty */
434 if (current->MappedCount > 0 && !current->Dirty)
435 {
436 /* We have to break these locks because Cc sucks */
437 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
438 KeReleaseGuardedMutex(&ViewLock);
439
440 /* Page out the VACB */
441 for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
442 {
443 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
444
445 MmPageOutPhysicalAddress(Page);
446 }
447
448 /* Reacquire the locks */
449 KeAcquireGuardedMutex(&ViewLock);
450 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
451 }
452
453 /* Dereference the VACB */
454 CcRosVacbDecRefCount(current);
455
456 /* Check if we can free this entry now */
457 if (current->ReferenceCount == 0)
458 {
459 ASSERT(!current->Dirty);
460 ASSERT(!current->MappedCount);
461
462 RemoveEntryList(&current->CacheMapVacbListEntry);
463 RemoveEntryList(&current->VacbLruListEntry);
464 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
465
466 /* Calculate how many pages we freed for Mm */
467 PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
468 Target -= PagesFreed;
469 (*NrFreed) += PagesFreed;
470 }
471
472 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
473 }
474
475 KeReleaseGuardedMutex(&ViewLock);
476
477 /* Try flushing pages if we haven't met our target */
478 if ((Target > 0) && !FlushedPages)
479 {
480 /* Flush dirty pages to disk */
481 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
482 FlushedPages = TRUE;
483
484 /* We can only swap as many pages as we flushed */
485 if (PagesFreed < Target) Target = PagesFreed;
486
487 /* Check if we flushed anything */
488 if (PagesFreed != 0)
489 {
490 /* Try again after flushing dirty pages */
491 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
492 goto retry;
493 }
494 }
495
496 while (!IsListEmpty(&FreeList))
497 {
498 current_entry = RemoveHeadList(&FreeList);
499 current = CONTAINING_RECORD(current_entry,
500 ROS_VACB,
501 CacheMapVacbListEntry);
502 CcRosInternalFreeVacb(current);
503 }
504
505 DPRINT("Evicted %lu cache pages\n", (*NrFreed));
506
507 return STATUS_SUCCESS;
508 }
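/* Note: CcRosTrimCache is registered as the MC_CACHE trim routine via
 * MmInitializeMemoryConsumer in CcInitView below, so Mm invokes it when it
 * needs cache pages back under memory pressure. */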
509
510 NTSTATUS
511 NTAPI
512 CcRosReleaseVacb (
513 PROS_SHARED_CACHE_MAP SharedCacheMap,
514 PROS_VACB Vacb,
515 BOOLEAN Valid,
516 BOOLEAN Dirty,
517 BOOLEAN Mapped)
518 {
519 ASSERT(SharedCacheMap);
520
521 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
522 SharedCacheMap, Vacb, Valid);
523
524 Vacb->Valid = Valid;
525
526 if (Dirty && !Vacb->Dirty)
527 {
528 CcRosMarkDirtyVacb(Vacb);
529 }
530
531 if (Mapped)
532 {
533 Vacb->MappedCount++;
534 }
535 CcRosVacbDecRefCount(Vacb);
536 if (Mapped && (Vacb->MappedCount == 1))
537 {
538 CcRosVacbIncRefCount(Vacb);
539 }
540
541 CcRosReleaseVacbLock(Vacb);
542
543 return STATUS_SUCCESS;
544 }
545
546 /* Returns with VACB Lock Held! */
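/* The VACB is also returned with an extra reference. Callers in this file
 * (see CcFlushCache below) release it with a sequence along these lines:
 *
 *     CcRosReleaseVacbLock(Vacb);
 *     KeAcquireGuardedMutex(&ViewLock);
 *     KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
 *     CcRosVacbDecRefCount(Vacb);
 *     KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
 *     KeReleaseGuardedMutex(&ViewLock);
 *
 * This is a restatement of the existing pattern, shown here for reference. */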
547 PROS_VACB
548 NTAPI
549 CcRosLookupVacb (
550 PROS_SHARED_CACHE_MAP SharedCacheMap,
551 LONGLONG FileOffset)
552 {
553 PLIST_ENTRY current_entry;
554 PROS_VACB current;
555 KIRQL oldIrql;
556
557 ASSERT(SharedCacheMap);
558
559 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
560 SharedCacheMap, FileOffset);
561
562 KeAcquireGuardedMutex(&ViewLock);
563 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
564
565 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
566 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
567 {
568 current = CONTAINING_RECORD(current_entry,
569 ROS_VACB,
570 CacheMapVacbListEntry);
571 if (IsPointInRange(current->FileOffset.QuadPart,
572 VACB_MAPPING_GRANULARITY,
573 FileOffset))
574 {
575 CcRosVacbIncRefCount(current);
576 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
577 KeReleaseGuardedMutex(&ViewLock);
578 CcRosAcquireVacbLock(current, NULL);
579 return current;
580 }
581 if (current->FileOffset.QuadPart > FileOffset)
582 break;
583 current_entry = current_entry->Flink;
584 }
585
586 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
587 KeReleaseGuardedMutex(&ViewLock);
588
589 return NULL;
590 }
591
592 VOID
593 NTAPI
594 CcRosMarkDirtyVacb (
595 PROS_VACB Vacb)
596 {
597 KIRQL oldIrql;
598 PROS_SHARED_CACHE_MAP SharedCacheMap;
599
600 SharedCacheMap = Vacb->SharedCacheMap;
601
602 KeAcquireGuardedMutex(&ViewLock);
603 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
604
605 ASSERT(!Vacb->Dirty);
606
607 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
608 CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
609 Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
610 CcRosVacbIncRefCount(Vacb);
611
612 /* Move to the tail of the LRU list */
613 RemoveEntryList(&Vacb->VacbLruListEntry);
614 InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
615
616 Vacb->Dirty = TRUE;
617
618 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
619 KeReleaseGuardedMutex(&ViewLock);
620 }
621
622 NTSTATUS
623 NTAPI
624 CcRosMarkDirtyFile (
625 PROS_SHARED_CACHE_MAP SharedCacheMap,
626 LONGLONG FileOffset)
627 {
628 PROS_VACB Vacb;
629
630 ASSERT(SharedCacheMap);
631
632 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
633 SharedCacheMap, FileOffset);
634
635 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
636 if (Vacb == NULL)
637 {
638 KeBugCheck(CACHE_MANAGER);
639 }
640
641 if (!Vacb->Dirty)
642 {
643 CcRosMarkDirtyVacb(Vacb);
644 }
645
646 CcRosReleaseVacbLock(Vacb);
647
648 return STATUS_SUCCESS;
649 }
650
651 NTSTATUS
652 NTAPI
653 CcRosUnmapVacb (
654 PROS_SHARED_CACHE_MAP SharedCacheMap,
655 LONGLONG FileOffset,
656 BOOLEAN NowDirty)
657 {
658 PROS_VACB Vacb;
659
660 ASSERT(SharedCacheMap);
661
662 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
663 SharedCacheMap, FileOffset, NowDirty);
664
665 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
666 if (Vacb == NULL)
667 {
668 return STATUS_UNSUCCESSFUL;
669 }
670
671 if (NowDirty && !Vacb->Dirty)
672 {
673 CcRosMarkDirtyVacb(Vacb);
674 }
675
676 Vacb->MappedCount--;
677
678 CcRosVacbDecRefCount(Vacb);
679 if (Vacb->MappedCount == 0)
680 {
681 CcRosVacbDecRefCount(Vacb);
682 }
683
684 CcRosReleaseVacbLock(Vacb);
685
686 return STATUS_SUCCESS;
687 }
688
689 static
690 NTSTATUS
691 CcRosMapVacb(
692 PROS_VACB Vacb)
693 {
694 ULONG i;
695 NTSTATUS Status;
696 ULONG_PTR NumberOfPages;
697
698 /* Create a memory area. */
699 MmLockAddressSpace(MmGetKernelAddressSpace());
700 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
701 0, // nothing checks for VACB mareas, so set to 0
702 &Vacb->BaseAddress,
703 VACB_MAPPING_GRANULARITY,
704 PAGE_READWRITE,
705 (PMEMORY_AREA*)&Vacb->MemoryArea,
706 0,
707 PAGE_SIZE);
708 MmUnlockAddressSpace(MmGetKernelAddressSpace());
709 if (!NT_SUCCESS(Status))
710 {
711 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
712 return Status;
713 }
714
715 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
716 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
717
718 /* Create a virtual mapping for this memory area */
719 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
720 for (i = 0; i < NumberOfPages; i++)
721 {
722 PFN_NUMBER PageFrameNumber;
723
724 MI_SET_USAGE(MI_USAGE_CACHE);
725 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
726 if (PageFrameNumber == 0)
727 {
728 DPRINT1("Unable to allocate page\n");
729 KeBugCheck(MEMORY_MANAGEMENT);
730 }
731
732 Status = MmCreateVirtualMapping(NULL,
733 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
734 PAGE_READWRITE,
735 &PageFrameNumber,
736 1);
737 if (!NT_SUCCESS(Status))
738 {
739 DPRINT1("Unable to create virtual mapping\n");
740 KeBugCheck(MEMORY_MANAGEMENT);
741 }
742 }
743
744 return STATUS_SUCCESS;
745 }
746
747 static
748 NTSTATUS
749 CcRosCreateVacb (
750 PROS_SHARED_CACHE_MAP SharedCacheMap,
751 LONGLONG FileOffset,
752 PROS_VACB *Vacb)
753 {
754 PROS_VACB current;
755 PROS_VACB previous;
756 PLIST_ENTRY current_entry;
757 NTSTATUS Status;
758 KIRQL oldIrql;
759
760 ASSERT(SharedCacheMap);
761
762 DPRINT("CcRosCreateVacb()\n");
763
764 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
765 {
766 *Vacb = NULL;
767 return STATUS_INVALID_PARAMETER;
768 }
769
770 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
771 current->BaseAddress = NULL;
772 current->Valid = FALSE;
773 current->Dirty = FALSE;
774 current->PageOut = FALSE;
775 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
776 current->SharedCacheMap = SharedCacheMap;
777 #if DBG
778 if (SharedCacheMap->Trace)
779 {
780 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
781 }
782 #endif
783 current->MappedCount = 0;
784 current->DirtyVacbListEntry.Flink = NULL;
785 current->DirtyVacbListEntry.Blink = NULL;
786 current->ReferenceCount = 1;
787 current->PinCount = 0;
788 KeInitializeMutex(&current->Mutex, 0);
789 CcRosAcquireVacbLock(current, NULL);
790 KeAcquireGuardedMutex(&ViewLock);
791
792 *Vacb = current;
793     /* There is a window between the call to CcRosLookupVacb
794      * and CcRosCreateVacb. We must check whether a VACB for
795      * this file offset already exists. If there is one, we release
796      * our newly created VACB and return the existing one.
797      */
798 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
799 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
800 previous = NULL;
801 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
802 {
803 current = CONTAINING_RECORD(current_entry,
804 ROS_VACB,
805 CacheMapVacbListEntry);
806 if (IsPointInRange(current->FileOffset.QuadPart,
807 VACB_MAPPING_GRANULARITY,
808 FileOffset))
809 {
810 CcRosVacbIncRefCount(current);
811 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
812 #if DBG
813 if (SharedCacheMap->Trace)
814 {
815 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
816 SharedCacheMap,
817 (*Vacb),
818 current);
819 }
820 #endif
821 CcRosReleaseVacbLock(*Vacb);
822 KeReleaseGuardedMutex(&ViewLock);
823 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
824 *Vacb = current;
825 CcRosAcquireVacbLock(current, NULL);
826 return STATUS_SUCCESS;
827 }
828 if (current->FileOffset.QuadPart < FileOffset)
829 {
830 ASSERT(previous == NULL ||
831 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
832 previous = current;
833 }
834 if (current->FileOffset.QuadPart > FileOffset)
835 break;
836 current_entry = current_entry->Flink;
837 }
838 /* There was no existing VACB. */
839 current = *Vacb;
840 if (previous)
841 {
842 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
843 }
844 else
845 {
846 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
847 }
848 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
849 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
850 KeReleaseGuardedMutex(&ViewLock);
851
852 MI_SET_USAGE(MI_USAGE_CACHE);
853 #if MI_TRACE_PFNS
854 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
855 {
856 PWCHAR pos;
857 ULONG len = 0;
858 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
859 if (pos)
860 {
861 len = wcslen(pos) * sizeof(WCHAR);
862 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
863 }
864 else
865 {
866 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
867 }
868 }
869 #endif
870
871 Status = CcRosMapVacb(current);
872 if (!NT_SUCCESS(Status))
873 {
874 RemoveEntryList(&current->CacheMapVacbListEntry);
875 RemoveEntryList(&current->VacbLruListEntry);
876 CcRosReleaseVacbLock(current);
877 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
878 }
879
880 return Status;
881 }
882
883 NTSTATUS
884 NTAPI
885 CcRosGetVacb (
886 PROS_SHARED_CACHE_MAP SharedCacheMap,
887 LONGLONG FileOffset,
888 PLONGLONG BaseOffset,
889 PVOID* BaseAddress,
890 PBOOLEAN UptoDate,
891 PROS_VACB *Vacb)
892 {
893 PROS_VACB current;
894 NTSTATUS Status;
895
896 ASSERT(SharedCacheMap);
897
898 DPRINT("CcRosGetVacb()\n");
899
900 /*
901 * Look for a VACB already mapping the same data.
902 */
903 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
904 if (current == NULL)
905 {
906 /*
907 * Otherwise create a new VACB.
908 */
909 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
910 if (!NT_SUCCESS(Status))
911 {
912 return Status;
913 }
914 }
915
916 KeAcquireGuardedMutex(&ViewLock);
917
918 /* Move to the tail of the LRU list */
919 RemoveEntryList(&current->VacbLruListEntry);
920 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
921
922 KeReleaseGuardedMutex(&ViewLock);
923
924 /*
925 * Return information about the VACB to the caller.
926 */
927 *UptoDate = current->Valid;
928 *BaseAddress = current->BaseAddress;
929 DPRINT("*BaseAddress %p\n", *BaseAddress);
930 *Vacb = current;
931 *BaseOffset = current->FileOffset.QuadPart;
932 return STATUS_SUCCESS;
933 }
934
935 NTSTATUS
936 NTAPI
937 CcRosRequestVacb (
938 PROS_SHARED_CACHE_MAP SharedCacheMap,
939 LONGLONG FileOffset,
940 PVOID* BaseAddress,
941 PBOOLEAN UptoDate,
942 PROS_VACB *Vacb)
943 /*
944 * FUNCTION: Request a page mapping for a shared cache map
945 */
946 {
947 LONGLONG BaseOffset;
948
949 ASSERT(SharedCacheMap);
950
951 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
952 {
953 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
954 FileOffset, VACB_MAPPING_GRANULARITY);
955 KeBugCheck(CACHE_MANAGER);
956 }
957
958 return CcRosGetVacb(SharedCacheMap,
959 FileOffset,
960 &BaseOffset,
961 BaseAddress,
962 UptoDate,
963 Vacb);
964 }
965
966 static
967 VOID
968 CcFreeCachePage (
969 PVOID Context,
970 MEMORY_AREA* MemoryArea,
971 PVOID Address,
972 PFN_NUMBER Page,
973 SWAPENTRY SwapEntry,
974 BOOLEAN Dirty)
975 {
976 ASSERT(SwapEntry == 0);
977 if (Page != 0)
978 {
979 ASSERT(MmGetReferenceCountPage(Page) == 1);
980 MmReleasePageMemoryConsumer(MC_CACHE, Page);
981 }
982 }
983
984 NTSTATUS
985 CcRosInternalFreeVacb (
986 PROS_VACB Vacb)
987 /*
988 * FUNCTION: Releases a VACB associated with a shared cache map
989 */
990 {
991 DPRINT("Freeing VACB 0x%p\n", Vacb);
992 #if DBG
993 if (Vacb->SharedCacheMap->Trace)
994 {
995 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
996 }
997 #endif
998
999 MmLockAddressSpace(MmGetKernelAddressSpace());
1000 MmFreeMemoryArea(MmGetKernelAddressSpace(),
1001 Vacb->MemoryArea,
1002 CcFreeCachePage,
1003 NULL);
1004 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1005
1006 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
1007 return STATUS_SUCCESS;
1008 }
1009
1010 /*
1011 * @implemented
1012 */
1013 VOID
1014 NTAPI
1015 CcFlushCache (
1016 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
1017 IN PLARGE_INTEGER FileOffset OPTIONAL,
1018 IN ULONG Length,
1019 OUT PIO_STATUS_BLOCK IoStatus)
1020 {
1021 PROS_SHARED_CACHE_MAP SharedCacheMap;
1022 LARGE_INTEGER Offset;
1023 LONGLONG RemainingLength;
1024 PROS_VACB current;
1025 NTSTATUS Status;
1026 KIRQL oldIrql;
1027
1028 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
1029 SectionObjectPointers, FileOffset, Length);
1030
1031 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
1032 SectionObjectPointers, FileOffset, Length, IoStatus);
1033
1034 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1035 {
1036 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1037 ASSERT(SharedCacheMap);
1038 if (FileOffset)
1039 {
1040 Offset = *FileOffset;
1041 RemainingLength = Length;
1042 }
1043 else
1044 {
1045 Offset.QuadPart = 0;
1046 RemainingLength = SharedCacheMap->FileSize.QuadPart;
1047 }
1048
1049 if (IoStatus)
1050 {
1051 IoStatus->Status = STATUS_SUCCESS;
1052 IoStatus->Information = 0;
1053 }
1054
1055 while (RemainingLength > 0)
1056 {
1057 current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
1058 if (current != NULL)
1059 {
1060 if (current->Dirty)
1061 {
1062 Status = CcRosFlushVacb(current);
1063 if (!NT_SUCCESS(Status) && IoStatus != NULL)
1064 {
1065 IoStatus->Status = Status;
1066 }
1067 }
1068
1069 CcRosReleaseVacbLock(current);
1070
1071 KeAcquireGuardedMutex(&ViewLock);
1072 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1073 CcRosVacbDecRefCount(current);
1074 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1075 KeReleaseGuardedMutex(&ViewLock);
1076 }
1077
1078 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
1079 RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
1080 }
1081 }
1082 else
1083 {
1084 if (IoStatus)
1085 {
1086 IoStatus->Status = STATUS_INVALID_PARAMETER;
1087 }
1088 }
1089 }
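/* Example (hypothetical filesystem caller): flush the first 64 KB of a cached
 * file and inspect the result. This merely exercises the CcFlushCache
 * signature above; the surrounding FSD context is assumed.
 *
 *     IO_STATUS_BLOCK IoStatus;
 *     LARGE_INTEGER FlushOffset;
 *
 *     FlushOffset.QuadPart = 0;
 *     CcFlushCache(FileObject->SectionObjectPointer, &FlushOffset, 0x10000, &IoStatus);
 *     if (!NT_SUCCESS(IoStatus.Status))
 *     {
 *         // The flush of some VACB in the range failed; IoStatus.Status holds
 *         // the last failure code recorded by the loop above.
 *     }
 */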
1090
1091 NTSTATUS
1092 NTAPI
1093 CcRosDeleteFileCache (
1094 PFILE_OBJECT FileObject,
1095 PROS_SHARED_CACHE_MAP SharedCacheMap)
1096 /*
1097 * FUNCTION: Releases the shared cache map associated with a file object
1098 */
1099 {
1100 PLIST_ENTRY current_entry;
1101 PROS_VACB current;
1102 LIST_ENTRY FreeList;
1103 KIRQL oldIrql;
1104
1105 ASSERT(SharedCacheMap);
1106
1107 SharedCacheMap->OpenCount++;
1108 KeReleaseGuardedMutex(&ViewLock);
1109
1110 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
1111
1112 KeAcquireGuardedMutex(&ViewLock);
1113 SharedCacheMap->OpenCount--;
1114 if (SharedCacheMap->OpenCount == 0)
1115 {
1116 KIRQL OldIrql;
1117
1118 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1119
1120 /*
1121 * Release all VACBs
1122 */
1123 InitializeListHead(&FreeList);
1124 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1125 while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
1126 {
1127 current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
1128 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1129
1130 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1131 CcRosAcquireVacbLock(current, NULL);
1132 RemoveEntryList(&current->VacbLruListEntry);
1133 if (current->Dirty)
1134 {
1135 RemoveEntryList(&current->DirtyVacbListEntry);
1136 CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
1137 current->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
1138 DPRINT1("Freeing dirty VACB\n");
1139 }
1140 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
1141 CcRosReleaseVacbLock(current);
1142
1143 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1144 }
1145 #if DBG
1146 SharedCacheMap->Trace = FALSE;
1147 #endif
1148 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1149
1150 KeReleaseGuardedMutex(&ViewLock);
1151 ObDereferenceObject(SharedCacheMap->FileObject);
1152
1153 while (!IsListEmpty(&FreeList))
1154 {
1155 current_entry = RemoveTailList(&FreeList);
1156 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1157 CcRosInternalFreeVacb(current);
1158 }
1159
1160 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1161 RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
1162 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1163
1164 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1165 KeAcquireGuardedMutex(&ViewLock);
1166 }
1167 return STATUS_SUCCESS;
1168 }
1169
1170 VOID
1171 NTAPI
1172 CcRosReferenceCache (
1173 PFILE_OBJECT FileObject)
1174 {
1175 PROS_SHARED_CACHE_MAP SharedCacheMap;
1176 KeAcquireGuardedMutex(&ViewLock);
1177 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1178 ASSERT(SharedCacheMap);
1179 ASSERT(SharedCacheMap->OpenCount != 0);
1180 SharedCacheMap->OpenCount++;
1181 KeReleaseGuardedMutex(&ViewLock);
1182 }
1183
1184 VOID
1185 NTAPI
1186 CcRosRemoveIfClosed (
1187 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1188 {
1189 PROS_SHARED_CACHE_MAP SharedCacheMap;
1190 DPRINT("CcRosRemoveIfClosed()\n");
1191 KeAcquireGuardedMutex(&ViewLock);
1192 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1193 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1194 {
1195 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1196 }
1197 KeReleaseGuardedMutex(&ViewLock);
1198 }
1199
1200
1201 VOID
1202 NTAPI
1203 CcRosDereferenceCache (
1204 PFILE_OBJECT FileObject)
1205 {
1206 PROS_SHARED_CACHE_MAP SharedCacheMap;
1207 KeAcquireGuardedMutex(&ViewLock);
1208 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1209 ASSERT(SharedCacheMap);
1210 if (SharedCacheMap->OpenCount > 0)
1211 {
1212 SharedCacheMap->OpenCount--;
1213 if (SharedCacheMap->OpenCount == 0)
1214 {
1215 MmFreeSectionSegments(SharedCacheMap->FileObject);
1216 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1217 }
1218 }
1219 KeReleaseGuardedMutex(&ViewLock);
1220 }
1221
1222 NTSTATUS
1223 NTAPI
1224 CcRosReleaseFileCache (
1225 PFILE_OBJECT FileObject)
1226 /*
1227 * FUNCTION: Called by the file system when a handle to a file object
1228 * has been closed.
1229 */
1230 {
1231 PROS_SHARED_CACHE_MAP SharedCacheMap;
1232
1233 KeAcquireGuardedMutex(&ViewLock);
1234
1235 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1236 {
1237 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1238 if (FileObject->PrivateCacheMap != NULL)
1239 {
1240 FileObject->PrivateCacheMap = NULL;
1241 if (SharedCacheMap->OpenCount > 0)
1242 {
1243 SharedCacheMap->OpenCount--;
1244 if (SharedCacheMap->OpenCount == 0)
1245 {
1246 MmFreeSectionSegments(SharedCacheMap->FileObject);
1247 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1248 }
1249 }
1250 }
1251 }
1252 KeReleaseGuardedMutex(&ViewLock);
1253 return STATUS_SUCCESS;
1254 }
1255
1256 NTSTATUS
1257 NTAPI
1258 CcTryToInitializeFileCache (
1259 PFILE_OBJECT FileObject)
1260 {
1261 PROS_SHARED_CACHE_MAP SharedCacheMap;
1262 NTSTATUS Status;
1263
1264 KeAcquireGuardedMutex(&ViewLock);
1265
1266 ASSERT(FileObject->SectionObjectPointer);
1267 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1268 if (SharedCacheMap == NULL)
1269 {
1270 Status = STATUS_UNSUCCESSFUL;
1271 }
1272 else
1273 {
1274 if (FileObject->PrivateCacheMap == NULL)
1275 {
1276 FileObject->PrivateCacheMap = SharedCacheMap;
1277 SharedCacheMap->OpenCount++;
1278 }
1279 Status = STATUS_SUCCESS;
1280 }
1281 KeReleaseGuardedMutex(&ViewLock);
1282
1283 return Status;
1284 }
1285
1286
1287 NTSTATUS
1288 NTAPI
1289 CcRosInitializeFileCache (
1290 PFILE_OBJECT FileObject,
1291 PCC_FILE_SIZES FileSizes,
1292 BOOLEAN PinAccess,
1293 PCACHE_MANAGER_CALLBACKS CallBacks,
1294 PVOID LazyWriterContext)
1295 /*
1296 * FUNCTION: Initializes a shared cache map for a file object
1297 */
1298 {
1299 PROS_SHARED_CACHE_MAP SharedCacheMap;
1300
1301 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1302 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1303 FileObject, SharedCacheMap);
1304
1305 KeAcquireGuardedMutex(&ViewLock);
1306 if (SharedCacheMap == NULL)
1307 {
1308 KIRQL OldIrql;
1309
1310 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1311 if (SharedCacheMap == NULL)
1312 {
1313 KeReleaseGuardedMutex(&ViewLock);
1314 return STATUS_INSUFFICIENT_RESOURCES;
1315 }
1316 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1317 ObReferenceObjectByPointer(FileObject,
1318 FILE_ALL_ACCESS,
1319 NULL,
1320 KernelMode);
1321 SharedCacheMap->FileObject = FileObject;
1322 SharedCacheMap->Callbacks = CallBacks;
1323 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1324 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1325 SharedCacheMap->FileSize = FileSizes->FileSize;
1326 SharedCacheMap->PinAccess = PinAccess;
1327 SharedCacheMap->DirtyPageThreshold = 0;
1328 SharedCacheMap->DirtyPages = 0;
1329 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1330 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1331 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1332
1333 KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
1334 InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
1335 KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
1336 }
1337 if (FileObject->PrivateCacheMap == NULL)
1338 {
1339 FileObject->PrivateCacheMap = SharedCacheMap;
1340 SharedCacheMap->OpenCount++;
1341 }
1342 KeReleaseGuardedMutex(&ViewLock);
1343
1344 return STATUS_SUCCESS;
1345 }
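/* A minimal, hypothetical sketch of the filesystem side of this initialization
 * (normally reached through the public CcInitializeCacheMap path rather than
 * by calling this routine directly). The Fsd* callback routines and the FCB
 * fields are assumptions for illustration only.
 *
 *     CC_FILE_SIZES FileSizes;
 *     CACHE_MANAGER_CALLBACKS Callbacks;
 *
 *     FileSizes.AllocationSize = Fcb->AllocationSize;
 *     FileSizes.FileSize = Fcb->FileSize;
 *     FileSizes.ValidDataLength = Fcb->ValidDataLength;
 *
 *     Callbacks.AcquireForLazyWrite = FsdAcquireForLazyWrite;
 *     Callbacks.ReleaseFromLazyWrite = FsdReleaseFromLazyWrite;
 *     Callbacks.AcquireForReadAhead = FsdAcquireForReadAhead;
 *     Callbacks.ReleaseFromReadAhead = FsdReleaseFromReadAhead;
 *
 *     CcRosInitializeFileCache(FileObject,
 *                              &FileSizes,
 *                              FALSE,        // PinAccess
 *                              &Callbacks,
 *                              Fcb);         // LazyWriterContext
 */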
1346
1347 /*
1348 * @implemented
1349 */
1350 PFILE_OBJECT
1351 NTAPI
1352 CcGetFileObjectFromSectionPtrs (
1353 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1354 {
1355 PROS_SHARED_CACHE_MAP SharedCacheMap;
1356
1357 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1358
1359 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1360 {
1361 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1362 ASSERT(SharedCacheMap);
1363 return SharedCacheMap->FileObject;
1364 }
1365 return NULL;
1366 }
1367
1368 VOID
1369 NTAPI
1370 CcShutdownLazyWriter (
1371 VOID)
1372 {
1373 /* Simply set the event, lazy writer will stop when it's done */
1374 KeSetEvent(&iLazyWriterShutdown, IO_DISK_INCREMENT, FALSE);
1375 }
1376
1377 BOOLEAN
1378 INIT_FUNCTION
1379 NTAPI
1380 CcInitView (
1381 VOID)
1382 {
1383 HANDLE LazyWriter;
1384 NTSTATUS Status;
1385 KPRIORITY Priority;
1386 OBJECT_ATTRIBUTES ObjectAttributes;
1387
1388 DPRINT("CcInitView()\n");
1389
1390 InitializeListHead(&DirtyVacbListHead);
1391 InitializeListHead(&VacbLruListHead);
1392 InitializeListHead(&CcDeferredWrites);
1393 InitializeListHead(&CcCleanSharedCacheMapList);
1394 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1395 KeInitializeSpinLock(&iSharedCacheMapLock);
1396 KeInitializeGuardedMutex(&ViewLock);
1397 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1398 NULL,
1399 NULL,
1400 0,
1401 sizeof(INTERNAL_BCB),
1402 TAG_BCB,
1403 20);
1404 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1405 NULL,
1406 NULL,
1407 0,
1408 sizeof(ROS_SHARED_CACHE_MAP),
1409 TAG_SHARED_CACHE_MAP,
1410 20);
1411 ExInitializeNPagedLookasideList(&VacbLookasideList,
1412 NULL,
1413 NULL,
1414 0,
1415 sizeof(ROS_VACB),
1416 TAG_VACB,
1417 20);
1418
1419 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1420
1421 /* Initialize lazy writer events */
1422 KeInitializeEvent(&iLazyWriterShutdown, SynchronizationEvent, FALSE);
1423 KeInitializeEvent(&iLazyWriterNotify, NotificationEvent, FALSE);
1424
425     /* Define the lazy writer threshold, depending on system type:
426        1/8, 1/4 or 3/8 of the physical pages */
1426 switch (MmQuerySystemSize())
1427 {
1428 case MmSmallSystem:
1429 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8;
1430 break;
1431
1432 case MmMediumSystem:
1433 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4;
1434 break;
1435
1436 case MmLargeSystem:
1437 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8 + MmNumberOfPhysicalPages / 4;
1438 break;
1439 }
1440
1441 /* Start the lazy writer thread */
1442 InitializeObjectAttributes(&ObjectAttributes,
1443 NULL,
1444 OBJ_KERNEL_HANDLE,
1445 NULL,
1446 NULL);
1447 Status = PsCreateSystemThread(&LazyWriter,
1448 THREAD_ALL_ACCESS,
1449 &ObjectAttributes,
1450 NULL,
1451 NULL,
1452 CciLazyWriter,
1453 NULL);
1454 if (!NT_SUCCESS(Status))
1455 {
1456 return FALSE;
1457 }
1458
1459 Priority = 27;
1460 Status = NtSetInformationThread(LazyWriter,
1461 ThreadPriority,
1462 &Priority,
1463 sizeof(Priority));
1464 ASSERT(NT_SUCCESS(Status));
1465
1466 /* Handle is not needed */
1467 ObCloseHandle(LazyWriter, KernelMode);
1468
1469 CcInitCacheZeroPage();
1470
1471 return TRUE;
1472 }
1473
1474 #if DBG && defined(KDBG)
1475 BOOLEAN
1476 ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
1477 {
1478 PLIST_ENTRY ListEntry;
1479 UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");
1480
1481 KdbpPrint(" Usage Summary (in kb)\n");
1482 KdbpPrint("Shared\t\tValid\tDirty\tName\n");
1483     /* No need to acquire the spin lock here, we're running inside the kernel debugger */
1484 for (ListEntry = CcCleanSharedCacheMapList.Flink;
1485 ListEntry != &CcCleanSharedCacheMapList;
1486 ListEntry = ListEntry->Flink)
1487 {
1488 PLIST_ENTRY Vacbs;
1489 ULONG Valid = 0, Dirty = 0;
1490 PROS_SHARED_CACHE_MAP SharedCacheMap;
1491 PUNICODE_STRING FileName;
1492
1493 SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);
1494
1495 /* Dirty size */
1496 Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;
1497
1498         /* First, count the valid data over all the associated VACBs */
1499 for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
1500 Vacbs != &SharedCacheMap->CacheMapVacbListHead;
1501 Vacbs = Vacbs->Flink)
1502 {
1503 PROS_VACB Vacb;
1504
1505 Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
1506 if (Vacb->Valid)
1507 {
1508 Valid += VACB_MAPPING_GRANULARITY / 1024;
1509 }
1510 }
1511
1512 /* Setup name */
1513 if (SharedCacheMap->FileObject != NULL &&
1514 SharedCacheMap->FileObject->FileName.Length != 0)
1515 {
1516 FileName = &SharedCacheMap->FileObject->FileName;
1517 }
1518 else
1519 {
1520 FileName = &NoName;
1521 }
1522
1523 /* And print */
1524 KdbpPrint("%p\t%d\t%d\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
1525 }
1526
1527 return TRUE;
1528 }
1529 #endif
1530
1531 /* EOF */