[NTOSKRNL] Bring an initial (and not perfect ;-)) implementation of read ahead to...
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 * Pierre Schweitzer (pierre@reactos.org)
9 */
10
11 /* NOTES **********************************************************************
12 *
13 * This is not the NT implementation of a file cache nor anything much like
14 * it.
15 *
16 * The general procedure for a filesystem to implement a read or write
17 * dispatch routine is as follows
18 *
19 * (1) If caching for the FCB hasn't been initiated then so do by calling
20 * CcInitializeFileCache.
21 *
22 * (2) For each 4k region which is being read or written obtain a cache page
23 * by calling CcRequestCachePage.
24 *
25 * (3) If either the page is being read or not completely written, and it is
26 * not up to date then read its data from the underlying medium. If the read
27 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
28 *
29 * (4) Copy the data into or out of the page as necessary.
30 *
31 * (5) Release the cache page
32 */
33 /* INCLUDES ******************************************************************/
34
35 #include <ntoskrnl.h>
36 #define NDEBUG
37 #include <debug.h>
38
39 #if defined (ALLOC_PRAGMA)
40 #pragma alloc_text(INIT, CcInitView)
41 #endif
42
43 /* GLOBALS *******************************************************************/
44
/* All dirty VACBs, protected by ViewLock. */
LIST_ENTRY DirtyVacbListHead;
/* LRU order of all VACBs (head = oldest), protected by ViewLock. */
static LIST_ENTRY VacbLruListHead;

/* Global lock guarding the VACB lists above and shared cache map open counts. */
KGUARDED_MUTEX ViewLock;

/* Lookaside lists for the cache manager's fixed-size allocations. */
NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;

/* Internal vars (MS):
 * - Threshold above which lazy writer will start action
 * - Amount of dirty pages
 * - List for deferred writes
 * - Spinlock when dealing with the deferred list
 * - List for "clean" shared cache maps
 */
ULONG CcDirtyPageThreshold = 0;
ULONG CcTotalDirtyPages = 0;
LIST_ENTRY CcDeferredWrites;
KSPIN_LOCK CcDeferredWriteSpinLock;
LIST_ENTRY CcCleanSharedCacheMapList;

/* Internal vars (ROS):
 * - Lock for the CcCleanSharedCacheMapList list
 */
KSPIN_LOCK iSharedCacheMapLock;
71
#if DBG
/* Debug build: VACB reference counting with optional per-cache-map tracing
 * (enabled via CcRosTraceCacheMap). The call site is recorded so leaked
 * references can be attributed to a file/line. */
static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
{
    ++vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
    /* Never drop below zero, and a dirty VACB must always keep a reference
     * (taken in CcRosMarkDirtyVacb, released in CcRosUnmarkDirtyVacb). */
    ASSERT(vacb->ReferenceCount != 0);
    --vacb->ReferenceCount;
    ASSERT(!(vacb->ReferenceCount == 0 && vacb->Dirty));
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
/* Free build: bare, untracked increments/decrements. */
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
99
100 NTSTATUS
101 CcRosInternalFreeVacb(PROS_VACB Vacb);
102
103
104 /* FUNCTIONS *****************************************************************/
105
/* Enable or disable debug tracing for a shared cache map (DBG builds only).
 * When enabling, dumps every VACB currently attached to the map.
 * Lock order: ViewLock before the per-map CacheMapLock. */
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        /* Walk and dump every VACB of this cache map. */
        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
151
152 NTSTATUS
153 NTAPI
154 CcRosFlushVacb (
155 PROS_VACB Vacb)
156 {
157 NTSTATUS Status;
158
159 Status = CcWriteVirtualAddress(Vacb);
160 if (NT_SUCCESS(Status))
161 {
162 CcRosUnmarkDirtyVacb(Vacb, TRUE);
163 }
164
165 return Status;
166 }
167
/* Flush up to Target pages worth of dirty VACBs back to disk.
 *
 * Target         - number of pages we try to write out.
 * Count          - receives the number of pages actually flushed.
 * Wait           - whether lock acquisitions may block.
 * CalledFromLazy - TRUE when invoked by the lazy writer (skips temporary files).
 *
 * Walks DirtyVacbListHead under ViewLock, dropping the lock around the
 * actual write. Always returns STATUS_SUCCESS; individual flush failures
 * are only logged. */
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait,
    BOOLEAN CalledFromLazy)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;
    ZeroTimeout.QuadPart = 0;

    /* Stay in a critical region so normal kernel APCs can't interleave
     * while we repeatedly drop and retake ViewLock. */
    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Pin the VACB so it can't go away while we drop locks below. */
        CcRosVacbIncRefCount(current);

        /* When performing lazy write, don't handle temporary files */
        if (CalledFromLazy &&
            BooleanFlagOn(current->SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE))
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Let the file system serialize against its own lazy-write state. */
        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Take the VACB mutex; with Wait == FALSE use a zero timeout so we
         * skip busy VACBs instead of blocking. */
        Status = CcRosAcquireVacbLock(current,
                                      Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above */
        if (current->ReferenceCount > 2)
        {
            /* Someone else is still using this VACB; don't flush under them. */
            CcRosReleaseVacbLock(current);
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Drop ViewLock for the (potentially blocking) disk write. */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        CcRosReleaseVacbLock(current);
        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            ULONG PagesFreed;

            /* How many pages did we free? */
            PagesFreed = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            (*Count) += PagesFreed;

            /* Make sure we don't overflow target! */
            if (Target < PagesFreed)
            {
                /* If we would have, jump to zero directly */
                Target = 0;
            }
            else
            {
                Target -= PagesFreed;
            }
        }

        /* The dirty list may have changed while ViewLock was dropped;
         * restart from the head. */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
288
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Walk the LRU list from oldest to newest, collecting unreferenced
     * VACBs onto FreeList for deferred freeing. */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Unlink from both the cache map and LRU lists; the actual
             * teardown happens below, outside all locks. */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Free the collected VACBs now that no locks are held. */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        CcRosInternalFreeVacb(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
410
/* Release a VACB previously obtained via CcRosGetVacb/CcRosRequestVacb.
 * Updates validity, marks dirty if requested, and adjusts the reference
 * count for a new mapping. Always drops the caller's VACB lock. */
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    Vacb->Valid = Valid;

    if (Dirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    if (Mapped)
    {
        Vacb->MappedCount++;
    }
    /* Drop the caller's lookup reference... */
    CcRosVacbDecRefCount(Vacb);
    /* ...but the first mapping pins the VACB with its own reference
     * (released again in CcRosUnmapVacb when MappedCount hits zero). */
    if (Mapped && (Vacb->MappedCount == 1))
    {
        CcRosVacbIncRefCount(Vacb);
    }

    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}
446
/* Returns with VACB Lock Held! */
/* Find the VACB of SharedCacheMap covering FileOffset, or NULL if none.
 * On success the VACB is referenced and its mutex is held; the caller
 * must release both. The per-map list is kept sorted by file offset,
 * which allows the early break below. */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    /* Lock order: ViewLock first, then the per-map spinlock. */
    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Reference before dropping the locks so the VACB can't vanish
             * while we block on its mutex. */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            CcRosAcquireVacbLock(current, NULL);
            return current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}
492
/* Mark a clean VACB dirty: link it into the global dirty list, account its
 * pages, take the dirty reference, refresh its LRU position, and kick the
 * lazy writer if it is not already scanning. The caller must ensure the
 * VACB is not already dirty (asserted below). */
VOID
NTAPI
CcRosMarkDirtyVacb (
    PROS_VACB Vacb)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    ASSERT(!Vacb->Dirty);

    InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* A dirty VACB holds an extra reference so it cannot be freed before
     * being flushed (dropped in CcRosUnmarkDirtyVacb). */
    CcRosVacbIncRefCount(Vacb);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    /* Schedule a lazy writer run now that we have a dirty VACB */
    oldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
    if (!LazyWriter.ScanActive)
    {
        CcScheduleLazyWriteScan(FALSE);
    }
    KeReleaseQueuedSpinLock(LockQueueMasterLock, oldIrql);
}
530
/* Undo CcRosMarkDirtyVacb: unlink from the dirty list, adjust the page
 * accounting, and release the dirty reference.
 * LockViews selects whether this function takes ViewLock/CacheMapLock
 * itself (TRUE) or relies on the caller already holding them (FALSE). */
VOID
NTAPI
CcRosUnmarkDirtyVacb (
    PROS_VACB Vacb,
    BOOLEAN LockViews)
{
    KIRQL oldIrql;
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = Vacb->SharedCacheMap;

    if (LockViews)
    {
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    }

    ASSERT(Vacb->Dirty);

    Vacb->Dirty = FALSE;

    RemoveEntryList(&Vacb->DirtyVacbListEntry);
    CcTotalDirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    Vacb->SharedCacheMap->DirtyPages -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    /* Drop the reference taken when the VACB was marked dirty. */
    CcRosVacbDecRefCount(Vacb);

    if (LockViews)
    {
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }
}
563
564 NTSTATUS
565 NTAPI
566 CcRosMarkDirtyFile (
567 PROS_SHARED_CACHE_MAP SharedCacheMap,
568 LONGLONG FileOffset)
569 {
570 PROS_VACB Vacb;
571
572 ASSERT(SharedCacheMap);
573
574 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
575 SharedCacheMap, FileOffset);
576
577 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
578 if (Vacb == NULL)
579 {
580 KeBugCheck(CACHE_MANAGER);
581 }
582
583 if (!Vacb->Dirty)
584 {
585 CcRosMarkDirtyVacb(Vacb);
586 }
587
588 CcRosReleaseVacbLock(Vacb);
589
590 return STATUS_SUCCESS;
591 }
592
/* Drop one mapping of the VACB covering FileOffset.
 * NowDirty marks the view dirty before unmapping. When the last mapping
 * goes away, the mapping reference taken in CcRosReleaseVacb is released
 * as well. Returns STATUS_UNSUCCESSFUL if no VACB covers the offset. */
NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    BOOLEAN NowDirty)
{
    PROS_VACB Vacb;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    /* Lookup returns the VACB referenced and locked. */
    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    if (NowDirty && !Vacb->Dirty)
    {
        CcRosMarkDirtyVacb(Vacb);
    }

    ASSERT(Vacb->MappedCount != 0);
    Vacb->MappedCount--;

    /* Drop the lookup reference... */
    CcRosVacbDecRefCount(Vacb);
    /* ...and the per-mapping reference once the last mapping is gone. */
    if (Vacb->MappedCount == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    CcRosReleaseVacbLock(Vacb);

    return STATUS_SUCCESS;
}
631
632 static
633 NTSTATUS
634 CcRosMapVacb(
635 PROS_VACB Vacb)
636 {
637 ULONG i;
638 NTSTATUS Status;
639 ULONG_PTR NumberOfPages;
640
641 /* Create a memory area. */
642 MmLockAddressSpace(MmGetKernelAddressSpace());
643 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
644 0, // nothing checks for VACB mareas, so set to 0
645 &Vacb->BaseAddress,
646 VACB_MAPPING_GRANULARITY,
647 PAGE_READWRITE,
648 (PMEMORY_AREA*)&Vacb->MemoryArea,
649 0,
650 PAGE_SIZE);
651 MmUnlockAddressSpace(MmGetKernelAddressSpace());
652 if (!NT_SUCCESS(Status))
653 {
654 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
655 return Status;
656 }
657
658 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
659 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
660
661 /* Create a virtual mapping for this memory area */
662 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
663 for (i = 0; i < NumberOfPages; i++)
664 {
665 PFN_NUMBER PageFrameNumber;
666
667 MI_SET_USAGE(MI_USAGE_CACHE);
668 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
669 if (PageFrameNumber == 0)
670 {
671 DPRINT1("Unable to allocate page\n");
672 KeBugCheck(MEMORY_MANAGEMENT);
673 }
674
675 Status = MmCreateVirtualMapping(NULL,
676 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
677 PAGE_READWRITE,
678 &PageFrameNumber,
679 1);
680 if (!NT_SUCCESS(Status))
681 {
682 DPRINT1("Unable to create virtual mapping\n");
683 KeBugCheck(MEMORY_MANAGEMENT);
684 }
685 }
686
687 return STATUS_SUCCESS;
688 }
689
/* Allocate, insert and map a new VACB for SharedCacheMap covering
 * FileOffset (rounded down to view granularity).
 * Returns the new VACB referenced (ReferenceCount = 1) and locked; if a
 * concurrent creator won the race, returns the existing VACB instead.
 * NOTE(review): the lookaside allocation below is not checked for NULL —
 * presumably relies on non-paged lookaside behavior; verify.
 */
static
NTSTATUS
CcRosCreateVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    PROS_VACB previous;
    PLIST_ENTRY current_entry;
    NTSTATUS Status;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosCreateVacb()\n");

    /* Never create views beyond the section. */
    if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
    {
        *Vacb = NULL;
        return STATUS_INVALID_PARAMETER;
    }

    current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
    current->BaseAddress = NULL;
    current->Valid = FALSE;
    current->Dirty = FALSE;
    current->PageOut = FALSE;
    current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
    current->SharedCacheMap = SharedCacheMap;
#if DBG
    if (SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
    }
#endif
    current->MappedCount = 0;
    current->DirtyVacbListEntry.Flink = NULL;
    current->DirtyVacbListEntry.Blink = NULL;
    current->ReferenceCount = 1;
    current->PinCount = 0;
    KeInitializeMutex(&current->Mutex, 0);
    /* Lock the new VACB before publishing it anywhere. */
    CcRosAcquireVacbLock(current, NULL);
    KeAcquireGuardedMutex(&ViewLock);

    *Vacb = current;
    /* There is window between the call to CcRosLookupVacb
     * and CcRosCreateVacb. We must check if a VACB for the
     * file offset exist. If there is a VACB, we release
     * our newly created VACB and return the existing one.
     */
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    previous = NULL;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Lost the race: hand out the existing VACB instead. */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
#if DBG
            if (SharedCacheMap->Trace)
            {
                DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
                        SharedCacheMap,
                        (*Vacb),
                        current);
            }
#endif
            CcRosReleaseVacbLock(*Vacb);
            KeReleaseGuardedMutex(&ViewLock);
            ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
            *Vacb = current;
            CcRosAcquireVacbLock(current, NULL);
            return STATUS_SUCCESS;
        }
        /* Track the insertion point: last entry with a smaller offset. */
        if (current->FileOffset.QuadPart < FileOffset)
        {
            ASSERT(previous == NULL ||
                   previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
            previous = current;
        }
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }
    /* There was no existing VACB. */
    current = *Vacb;
    if (previous)
    {
        InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
    }
    else
    {
        InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
    }
    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
    KeReleaseGuardedMutex(&ViewLock);

    MI_SET_USAGE(MI_USAGE_CACHE);
#if MI_TRACE_PFNS
    /* PFN tracing builds: tag allocations with the file's base name. */
    if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
    {
        PWCHAR pos;
        ULONG len = 0;
        pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
        if (pos)
        {
            len = wcslen(pos) * sizeof(WCHAR);
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
        }
        else
        {
            snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
        }
    }
#endif

    Status = CcRosMapVacb(current);
    if (!NT_SUCCESS(Status))
    {
        /* NOTE(review): these list removals run without ViewLock or the
         * CacheMapLock held — looks racy against concurrent lookups;
         * confirm against the lock discipline used elsewhere. */
        RemoveEntryList(&current->CacheMapVacbListEntry);
        RemoveEntryList(&current->VacbLruListEntry);
        CcRosReleaseVacbLock(current);
        ExFreeToNPagedLookasideList(&VacbLookasideList, current);
    }

    return Status;
}
825
826 NTSTATUS
827 NTAPI
828 CcRosGetVacb (
829 PROS_SHARED_CACHE_MAP SharedCacheMap,
830 LONGLONG FileOffset,
831 PLONGLONG BaseOffset,
832 PVOID* BaseAddress,
833 PBOOLEAN UptoDate,
834 PROS_VACB *Vacb)
835 {
836 PROS_VACB current;
837 NTSTATUS Status;
838
839 ASSERT(SharedCacheMap);
840
841 DPRINT("CcRosGetVacb()\n");
842
843 /*
844 * Look for a VACB already mapping the same data.
845 */
846 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
847 if (current == NULL)
848 {
849 /*
850 * Otherwise create a new VACB.
851 */
852 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
853 if (!NT_SUCCESS(Status))
854 {
855 return Status;
856 }
857 }
858
859 KeAcquireGuardedMutex(&ViewLock);
860
861 /* Move to the tail of the LRU list */
862 RemoveEntryList(&current->VacbLruListEntry);
863 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
864
865 KeReleaseGuardedMutex(&ViewLock);
866
867 /*
868 * Return information about the VACB to the caller.
869 */
870 *UptoDate = current->Valid;
871 *BaseAddress = current->BaseAddress;
872 DPRINT("*BaseAddress %p\n", *BaseAddress);
873 *Vacb = current;
874 *BaseOffset = current->FileOffset.QuadPart;
875 return STATUS_SUCCESS;
876 }
877
878 NTSTATUS
879 NTAPI
880 CcRosRequestVacb (
881 PROS_SHARED_CACHE_MAP SharedCacheMap,
882 LONGLONG FileOffset,
883 PVOID* BaseAddress,
884 PBOOLEAN UptoDate,
885 PROS_VACB *Vacb)
886 /*
887 * FUNCTION: Request a page mapping for a shared cache map
888 */
889 {
890 LONGLONG BaseOffset;
891
892 ASSERT(SharedCacheMap);
893
894 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
895 {
896 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
897 FileOffset, VACB_MAPPING_GRANULARITY);
898 KeBugCheck(CACHE_MANAGER);
899 }
900
901 return CcRosGetVacb(SharedCacheMap,
902 FileOffset,
903 &BaseOffset,
904 BaseAddress,
905 UptoDate,
906 Vacb);
907 }
908
909 static
910 VOID
911 CcFreeCachePage (
912 PVOID Context,
913 MEMORY_AREA* MemoryArea,
914 PVOID Address,
915 PFN_NUMBER Page,
916 SWAPENTRY SwapEntry,
917 BOOLEAN Dirty)
918 {
919 ASSERT(SwapEntry == 0);
920 if (Page != 0)
921 {
922 ASSERT(MmGetReferenceCountPage(Page) == 1);
923 MmReleasePageMemoryConsumer(MC_CACHE, Page);
924 }
925 }
926
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 * The VACB must already be unlinked from all lists; its memory area and
 * backing pages are torn down (pages returned via CcFreeCachePage) and
 * the structure is returned to its lookaside list.
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
952
/*
 * @implemented
 */
/* Flush the dirty cached data of a file to disk.
 * FileOffset/Length bound the range; with FileOffset == NULL the whole
 * cached file size is flushed. IoStatus (optional) receives the first
 * flush failure, or STATUS_INVALID_PARAMETER when no shared cache map
 * exists for the section. */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;
    KIRQL oldIrql;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
        SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            /* No offset given: flush everything the cache knows about. */
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one view (VACB_MAPPING_GRANULARITY) at a time. */
        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                CcRosReleaseVacbLock(current);

                /* Drop the reference taken by the lookup, under the locks
                 * the refcount is normally manipulated with. */
                KeAcquireGuardedMutex(&ViewLock);
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosVacbDecRefCount(current);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                KeReleaseGuardedMutex(&ViewLock);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1033
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 * Called with ViewLock held; the lock is dropped and reacquired around
 * the flush and around freeing, and is held again on return.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    /* Temporarily bump OpenCount so a concurrent close can't delete the
     * map while we flush with ViewLock released. */
    SharedCacheMap->OpenCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->OpenCount--;
    if (SharedCacheMap->OpenCount == 0)
    {
        KIRQL OldIrql;

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            /* Drop the spinlock: acquiring the VACB mutex may block. */
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosAcquireVacbLock(current, NULL);
            RemoveEntryList(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* Still dirty after the flush above: force-clean it so it
                 * can be freed (the data is lost; warn about it). */
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosUnmarkDirtyVacb(current, FALSE);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                DPRINT1("Freeing dirty VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
            CcRosReleaseVacbLock(current);

            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        /* Free the collected VACBs outside all locks. */
        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosInternalFreeVacb(current);
        }

        /* Unlink the map from the global clean-map list and free it. */
        KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
        RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
        KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);

        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1112
1113 VOID
1114 NTAPI
1115 CcRosReferenceCache (
1116 PFILE_OBJECT FileObject)
1117 {
1118 PROS_SHARED_CACHE_MAP SharedCacheMap;
1119 KeAcquireGuardedMutex(&ViewLock);
1120 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1121 ASSERT(SharedCacheMap);
1122 ASSERT(SharedCacheMap->OpenCount != 0);
1123 SharedCacheMap->OpenCount++;
1124 KeReleaseGuardedMutex(&ViewLock);
1125 }
1126
1127 VOID
1128 NTAPI
1129 CcRosRemoveIfClosed (
1130 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1131 {
1132 PROS_SHARED_CACHE_MAP SharedCacheMap;
1133 DPRINT("CcRosRemoveIfClosed()\n");
1134 KeAcquireGuardedMutex(&ViewLock);
1135 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1136 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1137 {
1138 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1139 }
1140 KeReleaseGuardedMutex(&ViewLock);
1141 }
1142
1143
1144 VOID
1145 NTAPI
1146 CcRosDereferenceCache (
1147 PFILE_OBJECT FileObject)
1148 {
1149 PROS_SHARED_CACHE_MAP SharedCacheMap;
1150 KeAcquireGuardedMutex(&ViewLock);
1151 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1152 ASSERT(SharedCacheMap);
1153 if (SharedCacheMap->OpenCount > 0)
1154 {
1155 SharedCacheMap->OpenCount--;
1156 if (SharedCacheMap->OpenCount == 0)
1157 {
1158 MmFreeSectionSegments(SharedCacheMap->FileObject);
1159 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1160 }
1161 }
1162 KeReleaseGuardedMutex(&ViewLock);
1163 }
1164
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 * Destroys the per-handle private cache map and drops one open reference
 * on the shared cache map, deleting it when it was the last one.
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
        if (FileObject->PrivateCacheMap != NULL)
        {
            KIRQL OldIrql;
            PPRIVATE_CACHE_MAP PrivateMap;

            /* Closing the handle, so kill the private cache map */
            PrivateMap = FileObject->PrivateCacheMap;

            /* Remove it from the file */
            KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
            RemoveEntryList(&PrivateMap->PrivateLinks);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

            /* And free it.
             * Before you even try to remove it from FO, always
             * lock the master lock, to be sure not to race
             * with a potential read ahead ongoing!
             */
            OldIrql = KeAcquireQueuedSpinLock(LockQueueMasterLock);
            FileObject->PrivateCacheMap = NULL;
            KeReleaseQueuedSpinLock(LockQueueMasterLock, OldIrql);
            ExFreePoolWithTag(PrivateMap, TAG_PRIVATE_CACHE_MAP);

            if (SharedCacheMap->OpenCount > 0)
            {
                SharedCacheMap->OpenCount--;
                if (SharedCacheMap->OpenCount == 0)
                {
                    /* Last open handle: tear down section segments and the
                     * shared cache map itself. */
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}
1218
NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 * Allocates the per-file shared cache map on first call, and a per-handle
 * private cache map for this file object if it doesn't have one yet.
 * Returns STATUS_SUCCESS, or STATUS_INSUFFICIENT_RESOURCES on allocation
 * failure (rolling back the shared map if it was created by this call).
 */
{
    KIRQL OldIrql;
    BOOLEAN Allocated;          /* TRUE if *this* call created the shared map
                                 * (drives rollback on later failure) */
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    Allocated = FALSE;
    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        /* First handle to cache this file: build the shared map */
        Allocated = TRUE;
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        /* Keep the file object alive as long as the cache map exists */
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->NodeTypeCode = NODE_TYPE_SHARED_MAP;
        SharedCacheMap->NodeByteSize = sizeof(*SharedCacheMap);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->PinAccess = PinAccess;
        SharedCacheMap->DirtyPageThreshold = 0;
        SharedCacheMap->DirtyPages = 0;
        InitializeListHead(&SharedCacheMap->PrivateList);
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;

        /* Publish the new map on the global clean list */
        KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
        InsertTailList(&CcCleanSharedCacheMapList, &SharedCacheMap->SharedCacheMapLinks);
        KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);
    }
    if (FileObject->PrivateCacheMap == NULL)
    {
        PPRIVATE_CACHE_MAP PrivateMap;

        /* Allocate the private cache map for this handle */
        PrivateMap = ExAllocatePoolWithTag(NonPagedPool, sizeof(PRIVATE_CACHE_MAP), TAG_PRIVATE_CACHE_MAP);
        if (PrivateMap == NULL)
        {
            /* If we also allocated the shared cache map for this file, kill it */
            if (Allocated)
            {
                KeAcquireSpinLock(&iSharedCacheMapLock, &OldIrql);
                RemoveEntryList(&SharedCacheMap->SharedCacheMapLinks);
                KeReleaseSpinLock(&iSharedCacheMapLock, OldIrql);

                FileObject->SectionObjectPointer->SharedCacheMap = NULL;
                /* Balance the ObReferenceObjectByPointer taken above */
                ObDereferenceObject(FileObject);
                ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
            }

            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        /* Initialize it */
        RtlZeroMemory(PrivateMap, sizeof(PRIVATE_CACHE_MAP));
        PrivateMap->NodeTypeCode = NODE_TYPE_PRIVATE_MAP;
        /* Default read-ahead granularity: one page */
        PrivateMap->ReadAheadMask = PAGE_SIZE - 1;
        PrivateMap->FileObject = FileObject;
        KeInitializeSpinLock(&PrivateMap->ReadAheadSpinLock);

        /* Link it to the file */
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &OldIrql);
        InsertTailList(&SharedCacheMap->PrivateList, &PrivateMap->PrivateLinks);
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, OldIrql);

        FileObject->PrivateCacheMap = PrivateMap;
        /* Each handle with a private map holds one reference on the shared map */
        SharedCacheMap->OpenCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}
1317
1318 /*
1319 * @implemented
1320 */
1321 PFILE_OBJECT
1322 NTAPI
1323 CcGetFileObjectFromSectionPtrs (
1324 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1325 {
1326 PROS_SHARED_CACHE_MAP SharedCacheMap;
1327
1328 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1329
1330 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1331 {
1332 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1333 ASSERT(SharedCacheMap);
1334 return SharedCacheMap->FileObject;
1335 }
1336 return NULL;
1337 }
1338
1339 VOID
1340 INIT_FUNCTION
1341 NTAPI
1342 CcInitView (
1343 VOID)
1344 {
1345 DPRINT("CcInitView()\n");
1346
1347 InitializeListHead(&DirtyVacbListHead);
1348 InitializeListHead(&VacbLruListHead);
1349 InitializeListHead(&CcDeferredWrites);
1350 InitializeListHead(&CcCleanSharedCacheMapList);
1351 KeInitializeSpinLock(&CcDeferredWriteSpinLock);
1352 KeInitializeSpinLock(&iSharedCacheMapLock);
1353 KeInitializeGuardedMutex(&ViewLock);
1354 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1355 NULL,
1356 NULL,
1357 0,
1358 sizeof(INTERNAL_BCB),
1359 TAG_BCB,
1360 20);
1361 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1362 NULL,
1363 NULL,
1364 0,
1365 sizeof(ROS_SHARED_CACHE_MAP),
1366 TAG_SHARED_CACHE_MAP,
1367 20);
1368 ExInitializeNPagedLookasideList(&VacbLookasideList,
1369 NULL,
1370 NULL,
1371 0,
1372 sizeof(ROS_VACB),
1373 TAG_VACB,
1374 20);
1375
1376 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1377
1378 CcInitCacheZeroPage();
1379 }
1380
#if DBG && defined(KDBG)
/* KDBG extension: dump per-file cache usage (valid/dirty sizes in kb).
 * Argc/Argv are part of the KDBG command callback signature and are
 * not used by this command. Always returns TRUE. */
BOOLEAN
ExpKdbgExtFileCache(ULONG Argc, PCHAR Argv[])
{
    PLIST_ENTRY ListEntry;
    UNICODE_STRING NoName = RTL_CONSTANT_STRING(L"No name for File");

    UNREFERENCED_PARAMETER(Argc);
    UNREFERENCED_PARAMETER(Argv);

    KdbpPrint(" Usage Summary (in kb)\n");
    KdbpPrint("Shared\t\tValid\tDirty\tName\n");
    /* No need to lock the spin lock here, we're in DBG */
    for (ListEntry = CcCleanSharedCacheMapList.Flink;
         ListEntry != &CcCleanSharedCacheMapList;
         ListEntry = ListEntry->Flink)
    {
        PLIST_ENTRY Vacbs;
        ULONG Valid = 0, Dirty = 0;
        PROS_SHARED_CACHE_MAP SharedCacheMap;
        PUNICODE_STRING FileName;

        SharedCacheMap = CONTAINING_RECORD(ListEntry, ROS_SHARED_CACHE_MAP, SharedCacheMapLinks);

        /* Dirty size */
        Dirty = (SharedCacheMap->DirtyPages * PAGE_SIZE) / 1024;

        /* First, count for all the associated VACB */
        for (Vacbs = SharedCacheMap->CacheMapVacbListHead.Flink;
             Vacbs != &SharedCacheMap->CacheMapVacbListHead;
             Vacbs = Vacbs->Flink)
        {
            PROS_VACB Vacb;

            Vacb = CONTAINING_RECORD(Vacbs, ROS_VACB, CacheMapVacbListEntry);
            if (Vacb->Valid)
            {
                Valid += VACB_MAPPING_GRANULARITY / 1024;
            }
        }

        /* Setup name */
        if (SharedCacheMap->FileObject != NULL &&
            SharedCacheMap->FileObject->FileName.Length != 0)
        {
            FileName = &SharedCacheMap->FileObject->FileName;
        }
        else
        {
            FileName = &NoName;
        }

        /* And print. Valid and Dirty are ULONG (unsigned long), so use
         * %lu — the previous %d mismatched the argument type, which is
         * undefined behavior per the printf contract. */
        KdbpPrint("%p\t%lu\t%lu\t%wZ\n", SharedCacheMap, Valid, Dirty, FileName);
    }

    return TRUE;
}
#endif
1437
1438 /* EOF */