1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16  * dispatch routine is as follows:
17 *
18  * (1) If caching for the FCB hasn't been initiated, then do so by calling
19  * CcInitializeFileCache.
20 *
21  * (2) For each 4k region that is being read or written, obtain a cache page
22  * by calling CcRequestCachePage.
23 *
24  * (3) If the page is either being read or not being completely overwritten,
25  * and it is not up to date, then read its data from the underlying medium.
26  * If the read fails, then call CcReleaseCachePage with VALID as FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30  * (5) Release the cache page (a sketch of this procedure follows these notes).
31 */
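/*
 * A minimal sketch of the procedure above, for illustration only (not part
 * of the build). The hypothetical ExampleCachedRead copies one cached region
 * out of a file, assuming the region fits within a single view, and uses the
 * ROS-specific helpers CcRosRequestVacb/CcRosReleaseVacb defined later in
 * this file in place of the names used in the notes. The actual read from
 * the underlying medium is left as a placeholder.
 */
#if 0
static NTSTATUS
ExampleCachedRead(
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    ULONG Length,
    PVOID Buffer)
{
    NTSTATUS Status;
    PROS_VACB Vacb;
    PVOID BaseAddress;
    BOOLEAN UptoDate;
    /* Views are aligned to VACB_MAPPING_GRANULARITY, so round down first */
    LONGLONG ViewOffset = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);

    /* (2) Obtain a view covering the region */
    Status = CcRosRequestVacb(SharedCacheMap, ViewOffset,
                              &BaseAddress, &UptoDate, &Vacb);
    if (!NT_SUCCESS(Status))
        return Status;

    /* (3) If the view is not up to date, read it from the medium here; on
     *     failure, release the view with Valid == FALSE and return the error */
    if (!UptoDate)
    {
        /* ... fill BaseAddress from the underlying device ... */
    }

    /* (4) Copy the data out of the view */
    RtlCopyMemory(Buffer,
                  (PUCHAR)BaseAddress + (FileOffset - ViewOffset),
                  Length);

    /* (5) Release the view: now valid, not dirtied, not newly mapped */
    return CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
}
#endif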
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 static LIST_ENTRY DirtyVacbListHead;
45 static LIST_ENTRY VacbLruListHead;
46 ULONG DirtyPageCount = 0;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
54 #if DBG
55 static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
56 {
57 ++vacb->ReferenceCount;
58 if (vacb->SharedCacheMap->Trace)
59 {
60 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
61 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
62 }
63 }
64 static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
65 {
66 --vacb->ReferenceCount;
67 if (vacb->SharedCacheMap->Trace)
68 {
69 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
70 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
71 }
72 }
73 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
74 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
75 #else
76 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
77 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
78 #endif
79
80 NTSTATUS
81 CcRosInternalFreeVacb(PROS_VACB Vacb);
82
83
84 /* FUNCTIONS *****************************************************************/
85
86 VOID
87 NTAPI
88 CcRosTraceCacheMap (
89 PROS_SHARED_CACHE_MAP SharedCacheMap,
90 BOOLEAN Trace )
91 {
92 #if DBG
93 KIRQL oldirql;
94 PLIST_ENTRY current_entry;
95 PROS_VACB current;
96
97 if (!SharedCacheMap)
98 return;
99
100 SharedCacheMap->Trace = Trace;
101
102 if (Trace)
103 {
104 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
105
106 KeAcquireGuardedMutex(&ViewLock);
107 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
108
109 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
110 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
111 {
112 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
113 current_entry = current_entry->Flink;
114
115 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
116 current, current->ReferenceCount, current->Dirty, current->PageOut );
117 }
118 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
119 KeReleaseGuardedMutex(&ViewLock);
120 }
121 else
122 {
123 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
124 }
125
126 #else
127 UNREFERENCED_PARAMETER(SharedCacheMap);
128 UNREFERENCED_PARAMETER(Trace);
129 #endif
130 }
131
132 NTSTATUS
133 NTAPI
134 CcRosFlushVacb (
135 PROS_VACB Vacb)
136 {
137 NTSTATUS Status;
138 KIRQL oldIrql;
139
140 Status = CcWriteVirtualAddress(Vacb);
141 if (NT_SUCCESS(Status))
142 {
143 KeAcquireGuardedMutex(&ViewLock);
144 KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
145
146 Vacb->Dirty = FALSE;
147 RemoveEntryList(&Vacb->DirtyVacbListEntry);
148 DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
149 CcRosVacbDecRefCount(Vacb);
150
151 KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
152 KeReleaseGuardedMutex(&ViewLock);
153 }
154
155 return Status;
156 }
157
158 NTSTATUS
159 NTAPI
160 CcRosFlushDirtyPages (
161 ULONG Target,
162 PULONG Count,
163 BOOLEAN Wait)
164 {
165 PLIST_ENTRY current_entry;
166 PROS_VACB current;
167 BOOLEAN Locked;
168 NTSTATUS Status;
169 LARGE_INTEGER ZeroTimeout;
170
171 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
172
173 (*Count) = 0;
174 ZeroTimeout.QuadPart = 0;
175
176 KeEnterCriticalRegion();
177 KeAcquireGuardedMutex(&ViewLock);
178
179 current_entry = DirtyVacbListHead.Flink;
180 if (current_entry == &DirtyVacbListHead)
181 {
182 DPRINT("No Dirty pages\n");
183 }
184
185 while ((current_entry != &DirtyVacbListHead) && (Target > 0))
186 {
187 current = CONTAINING_RECORD(current_entry,
188 ROS_VACB,
189 DirtyVacbListEntry);
190 current_entry = current_entry->Flink;
191
192 CcRosVacbIncRefCount(current);
193
194 Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
195 current->SharedCacheMap->LazyWriteContext, Wait);
196 if (!Locked)
197 {
198 CcRosVacbDecRefCount(current);
199 continue;
200 }
201
202 Status = CcRosAcquireVacbLock(current,
203 Wait ? NULL : &ZeroTimeout);
204 if (Status != STATUS_SUCCESS)
205 {
206 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
207 current->SharedCacheMap->LazyWriteContext);
208 CcRosVacbDecRefCount(current);
209 continue;
210 }
211
212 ASSERT(current->Dirty);
213
214             /* One reference was added above, and a dirty VACB always holds another */
215 if (current->ReferenceCount > 2)
216 {
217 CcRosReleaseVacbLock(current);
218 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
219 current->SharedCacheMap->LazyWriteContext);
220 CcRosVacbDecRefCount(current);
221 continue;
222 }
223
224 KeReleaseGuardedMutex(&ViewLock);
225
226 Status = CcRosFlushVacb(current);
227
228 CcRosReleaseVacbLock(current);
229 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
230 current->SharedCacheMap->LazyWriteContext);
231
232 KeAcquireGuardedMutex(&ViewLock);
233 CcRosVacbDecRefCount(current);
234
235 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
236 (Status != STATUS_MEDIA_WRITE_PROTECTED))
237 {
238 DPRINT1("CC: Failed to flush VACB.\n");
239 }
240 else
241 {
242 (*Count) += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
243 Target -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
244 }
245
246 current_entry = DirtyVacbListHead.Flink;
247 }
248
249 KeReleaseGuardedMutex(&ViewLock);
250 KeLeaveCriticalRegion();
251
252 DPRINT("CcRosFlushDirtyPages() finished\n");
253 return STATUS_SUCCESS;
254 }
255
256 NTSTATUS
257 CcRosTrimCache (
258 ULONG Target,
259 ULONG Priority,
260 PULONG NrFreed)
261 /*
262 * FUNCTION: Try to free some memory from the file cache.
263 * ARGUMENTS:
264 * Target - The number of pages to be freed.
265  * Priority - The priority of the free operation (currently unused).
266 * NrFreed - Points to a variable where the number of pages
267 * actually freed is returned.
268 */
269 {
270 PLIST_ENTRY current_entry;
271 PROS_VACB current;
272 ULONG PagesFreed;
273 KIRQL oldIrql;
274 LIST_ENTRY FreeList;
275 PFN_NUMBER Page;
276 ULONG i;
277 BOOLEAN FlushedPages = FALSE;
278
279 DPRINT("CcRosTrimCache(Target %lu)\n", Target);
280
281 InitializeListHead(&FreeList);
282
283 *NrFreed = 0;
284
285 retry:
286 KeAcquireGuardedMutex(&ViewLock);
287
288 current_entry = VacbLruListHead.Flink;
289 while (current_entry != &VacbLruListHead)
290 {
291 current = CONTAINING_RECORD(current_entry,
292 ROS_VACB,
293 VacbLruListEntry);
294 current_entry = current_entry->Flink;
295
296 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
297
298 /* Reference the VACB */
299 CcRosVacbIncRefCount(current);
300
301 /* Check if it's mapped and not dirty */
302 if (current->MappedCount > 0 && !current->Dirty)
303 {
304 /* We have to break these locks because Cc sucks */
305 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
306 KeReleaseGuardedMutex(&ViewLock);
307
308 /* Page out the VACB */
309 for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
310 {
311 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
312
313 MmPageOutPhysicalAddress(Page);
314 }
315
316 /* Reacquire the locks */
317 KeAcquireGuardedMutex(&ViewLock);
318 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
319 }
320
321 /* Dereference the VACB */
322 CcRosVacbDecRefCount(current);
323
324 /* Check if we can free this entry now */
325 if (current->ReferenceCount == 0)
326 {
327 ASSERT(!current->Dirty);
328 ASSERT(!current->MappedCount);
329
330 RemoveEntryList(&current->CacheMapVacbListEntry);
331 RemoveEntryList(&current->VacbLruListEntry);
332 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
333
334 /* Calculate how many pages we freed for Mm */
335 PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
336 Target -= PagesFreed;
337 (*NrFreed) += PagesFreed;
338 }
339
340 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
341 }
342
343 KeReleaseGuardedMutex(&ViewLock);
344
345 /* Try flushing pages if we haven't met our target */
346 if ((Target > 0) && !FlushedPages)
347 {
348 /* Flush dirty pages to disk */
349 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
350 FlushedPages = TRUE;
351
352 /* We can only swap as many pages as we flushed */
353 if (PagesFreed < Target) Target = PagesFreed;
354
355 /* Check if we flushed anything */
356 if (PagesFreed != 0)
357 {
358 /* Try again after flushing dirty pages */
359 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
360 goto retry;
361 }
362 }
363
364 while (!IsListEmpty(&FreeList))
365 {
366 current_entry = RemoveHeadList(&FreeList);
367 current = CONTAINING_RECORD(current_entry,
368 ROS_VACB,
369 CacheMapVacbListEntry);
370 CcRosInternalFreeVacb(current);
371 }
372
373 DPRINT("Evicted %lu cache pages\n", (*NrFreed));
374
375 return STATUS_SUCCESS;
376 }
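/*
 * Illustrative use of CcRosTrimCache above (a sketch): the memory manager
 * normally invokes it as the registered MC_CACHE consumer (see
 * MmInitializeMemoryConsumer in CcInitView below) when it needs pages back.
 */
#if 0
    ULONG Freed;
    CcRosTrimCache(64, 0, &Freed);  /* try to give 64 pages back to Mm */
    DPRINT("Cache returned %lu pages\n", Freed);
#endif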
377
378 NTSTATUS
379 NTAPI
380 CcRosReleaseVacb (
381 PROS_SHARED_CACHE_MAP SharedCacheMap,
382 PROS_VACB Vacb,
383 BOOLEAN Valid,
384 BOOLEAN Dirty,
385 BOOLEAN Mapped)
386 {
387 BOOLEAN WasDirty;
388 KIRQL oldIrql;
389
390 ASSERT(SharedCacheMap);
391
392 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
393 SharedCacheMap, Vacb, Valid);
394
395 KeAcquireGuardedMutex(&ViewLock);
396 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
397
398 Vacb->Valid = Valid;
399
400 WasDirty = Vacb->Dirty;
401 Vacb->Dirty = Vacb->Dirty || Dirty;
402
403 if (!WasDirty && Vacb->Dirty)
404 {
405 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
406 DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
407 }
408
409 if (Mapped)
410 {
411 Vacb->MappedCount++;
412 }
413 CcRosVacbDecRefCount(Vacb);
414 if (Mapped && (Vacb->MappedCount == 1))
415 {
416 CcRosVacbIncRefCount(Vacb);
417 }
418 if (!WasDirty && Vacb->Dirty)
419 {
420 CcRosVacbIncRefCount(Vacb);
421 }
422
423 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
424 KeReleaseGuardedMutex(&ViewLock);
425 CcRosReleaseVacbLock(Vacb);
426
427 return STATUS_SUCCESS;
428 }
429
430 /* Returns with VACB Lock Held! */
431 PROS_VACB
432 NTAPI
433 CcRosLookupVacb (
434 PROS_SHARED_CACHE_MAP SharedCacheMap,
435 LONGLONG FileOffset)
436 {
437 PLIST_ENTRY current_entry;
438 PROS_VACB current;
439 KIRQL oldIrql;
440
441 ASSERT(SharedCacheMap);
442
443 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
444 SharedCacheMap, FileOffset);
445
446 KeAcquireGuardedMutex(&ViewLock);
447 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
448
449 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
450 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
451 {
452 current = CONTAINING_RECORD(current_entry,
453 ROS_VACB,
454 CacheMapVacbListEntry);
455 if (IsPointInRange(current->FileOffset.QuadPart,
456 VACB_MAPPING_GRANULARITY,
457 FileOffset))
458 {
459 CcRosVacbIncRefCount(current);
460 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
461 KeReleaseGuardedMutex(&ViewLock);
462 CcRosAcquireVacbLock(current, NULL);
463 return current;
464 }
465 if (current->FileOffset.QuadPart > FileOffset)
466 break;
467 current_entry = current_entry->Flink;
468 }
469
470 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
471 KeReleaseGuardedMutex(&ViewLock);
472
473 return NULL;
474 }
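/*
 * Typical use of CcRosLookupVacb above (a sketch, given a SharedCacheMap and
 * a FileOffset within the file): the caller receives the VACB referenced and
 * with its lock held, and must drop both when it is done, as CcFlushCache
 * below does.
 */
#if 0
    PROS_VACB Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb != NULL)
    {
        /* ... use Vacb->BaseAddress ... */
        CcRosReleaseVacbLock(Vacb);
        /* drop the reference under ViewLock and the cache map lock,
         * as the callers in this file do */
    }
#endif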
475
476 NTSTATUS
477 NTAPI
478 CcRosMarkDirtyVacb (
479 PROS_SHARED_CACHE_MAP SharedCacheMap,
480 LONGLONG FileOffset)
481 {
482 PROS_VACB Vacb;
483 KIRQL oldIrql;
484
485 ASSERT(SharedCacheMap);
486
487 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
488 SharedCacheMap, FileOffset);
489
490 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
491 if (Vacb == NULL)
492 {
493 KeBugCheck(CACHE_MANAGER);
494 }
495
496 KeAcquireGuardedMutex(&ViewLock);
497 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
498
499 if (!Vacb->Dirty)
500 {
501 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
502 DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
503 }
504 else
505 {
506 CcRosVacbDecRefCount(Vacb);
507 }
508
509 /* Move to the tail of the LRU list */
510 RemoveEntryList(&Vacb->VacbLruListEntry);
511 InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
512
513 Vacb->Dirty = TRUE;
514
515 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
516 KeReleaseGuardedMutex(&ViewLock);
517 CcRosReleaseVacbLock(Vacb);
518
519 return STATUS_SUCCESS;
520 }
521
522 NTSTATUS
523 NTAPI
524 CcRosUnmapVacb (
525 PROS_SHARED_CACHE_MAP SharedCacheMap,
526 LONGLONG FileOffset,
527 BOOLEAN NowDirty)
528 {
529 PROS_VACB Vacb;
530 BOOLEAN WasDirty;
531 KIRQL oldIrql;
532
533 ASSERT(SharedCacheMap);
534
535 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
536 SharedCacheMap, FileOffset, NowDirty);
537
538 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
539 if (Vacb == NULL)
540 {
541 return STATUS_UNSUCCESSFUL;
542 }
543
544 KeAcquireGuardedMutex(&ViewLock);
545 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
546
547 WasDirty = Vacb->Dirty;
548 Vacb->Dirty = Vacb->Dirty || NowDirty;
549
550 Vacb->MappedCount--;
551
552 if (!WasDirty && NowDirty)
553 {
554 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
555 DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
556 }
557
558 CcRosVacbDecRefCount(Vacb);
559 if (!WasDirty && NowDirty)
560 {
561 CcRosVacbIncRefCount(Vacb);
562 }
563 if (Vacb->MappedCount == 0)
564 {
565 CcRosVacbDecRefCount(Vacb);
566 }
567
568 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
569 KeReleaseGuardedMutex(&ViewLock);
570 CcRosReleaseVacbLock(Vacb);
571
572 return STATUS_SUCCESS;
573 }
574
575 static
576 NTSTATUS
577 CcRosMapVacb(
578 PROS_VACB Vacb)
579 {
580 ULONG i;
581 NTSTATUS Status;
582 ULONG_PTR NumberOfPages;
583
584 /* Create a memory area. */
585 MmLockAddressSpace(MmGetKernelAddressSpace());
586 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
587 0, // nothing checks for VACB mareas, so set to 0
588 &Vacb->BaseAddress,
589 VACB_MAPPING_GRANULARITY,
590 PAGE_READWRITE,
591 (PMEMORY_AREA*)&Vacb->MemoryArea,
592 0,
593 PAGE_SIZE);
594 MmUnlockAddressSpace(MmGetKernelAddressSpace());
595 if (!NT_SUCCESS(Status))
596 {
597 KeBugCheck(CACHE_MANAGER);
598 }
599
600 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
601 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
602
603 /* Create a virtual mapping for this memory area */
604 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
605 for (i = 0; i < NumberOfPages; i++)
606 {
607 PFN_NUMBER PageFrameNumber;
608
609 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
610 if (PageFrameNumber == 0)
611 {
612 DPRINT1("Unable to allocate page\n");
613 KeBugCheck(MEMORY_MANAGEMENT);
614 }
615
616 Status = MmCreateVirtualMapping(NULL,
617 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
618 PAGE_READWRITE,
619 &PageFrameNumber,
620 1);
621 if (!NT_SUCCESS(Status))
622 {
623 DPRINT1("Unable to create virtual mapping\n");
624 KeBugCheck(MEMORY_MANAGEMENT);
625 }
626 }
627
628 return STATUS_SUCCESS;
629 }
630
631 static
632 NTSTATUS
633 CcRosCreateVacb (
634 PROS_SHARED_CACHE_MAP SharedCacheMap,
635 LONGLONG FileOffset,
636 PROS_VACB *Vacb)
637 {
638 PROS_VACB current;
639 PROS_VACB previous;
640 PLIST_ENTRY current_entry;
641 NTSTATUS Status;
642 KIRQL oldIrql;
643
644 ASSERT(SharedCacheMap);
645
646 DPRINT("CcRosCreateVacb()\n");
647
648 if (FileOffset >= SharedCacheMap->FileSize.QuadPart)
649 {
650 *Vacb = NULL;
651 return STATUS_INVALID_PARAMETER;
652 }
653
654 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
655 current->BaseAddress = NULL;
656 current->Valid = FALSE;
657 current->Dirty = FALSE;
658 current->PageOut = FALSE;
659 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
660 current->SharedCacheMap = SharedCacheMap;
661 #if DBG
662 if (SharedCacheMap->Trace)
663 {
664 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
665 }
666 #endif
667 current->MappedCount = 0;
668 current->DirtyVacbListEntry.Flink = NULL;
669 current->DirtyVacbListEntry.Blink = NULL;
670 current->ReferenceCount = 1;
671 current->PinCount = 0;
672 KeInitializeMutex(&current->Mutex, 0);
673 CcRosAcquireVacbLock(current, NULL);
674 KeAcquireGuardedMutex(&ViewLock);
675
676 *Vacb = current;
677     /* There is a window between the call to CcRosLookupVacb
678      * and CcRosCreateVacb. We must check whether a VACB for the
679      * file offset already exists. If there is one, we release
680      * our newly created VACB and return the existing one.
681 */
682 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
683 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
684 previous = NULL;
685 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
686 {
687 current = CONTAINING_RECORD(current_entry,
688 ROS_VACB,
689 CacheMapVacbListEntry);
690 if (IsPointInRange(current->FileOffset.QuadPart,
691 VACB_MAPPING_GRANULARITY,
692 FileOffset))
693 {
694 CcRosVacbIncRefCount(current);
695 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
696 #if DBG
697 if (SharedCacheMap->Trace)
698 {
699 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
700 SharedCacheMap,
701 (*Vacb),
702 current);
703 }
704 #endif
705 CcRosReleaseVacbLock(*Vacb);
706 KeReleaseGuardedMutex(&ViewLock);
707 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
708 *Vacb = current;
709 CcRosAcquireVacbLock(current, NULL);
710 return STATUS_SUCCESS;
711 }
712 if (current->FileOffset.QuadPart < FileOffset)
713 {
714 ASSERT(previous == NULL ||
715 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
716 previous = current;
717 }
718 if (current->FileOffset.QuadPart > FileOffset)
719 break;
720 current_entry = current_entry->Flink;
721 }
722 /* There was no existing VACB. */
723 current = *Vacb;
724 if (previous)
725 {
726 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
727 }
728 else
729 {
730 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
731 }
732 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
733 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
734 KeReleaseGuardedMutex(&ViewLock);
735
736 MI_SET_USAGE(MI_USAGE_CACHE);
737 #if MI_TRACE_PFNS
738 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
739 {
740 PWCHAR pos = NULL;
741 ULONG len = 0;
742 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
743         len = pos ? wcslen(pos) * sizeof(WCHAR) : 0; /* pos may be NULL */
744         if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
745 }
746 #endif
747
748 Status = CcRosMapVacb(current);
749
750 return Status;
751 }
752
753 NTSTATUS
754 NTAPI
755 CcRosGetVacb (
756 PROS_SHARED_CACHE_MAP SharedCacheMap,
757 LONGLONG FileOffset,
758 PLONGLONG BaseOffset,
759 PVOID* BaseAddress,
760 PBOOLEAN UptoDate,
761 PROS_VACB *Vacb)
762 {
763 PROS_VACB current;
764 NTSTATUS Status;
765
766 ASSERT(SharedCacheMap);
767
768 DPRINT("CcRosGetVacb()\n");
769
770 /*
771 * Look for a VACB already mapping the same data.
772 */
773 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
774 if (current == NULL)
775 {
776 /*
777 * Otherwise create a new VACB.
778 */
779 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
780 if (!NT_SUCCESS(Status))
781 {
782 return Status;
783 }
784 }
785
786 KeAcquireGuardedMutex(&ViewLock);
787
788 /* Move to the tail of the LRU list */
789 RemoveEntryList(&current->VacbLruListEntry);
790 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
791
792 KeReleaseGuardedMutex(&ViewLock);
793
794 /*
795 * Return information about the VACB to the caller.
796 */
797 *UptoDate = current->Valid;
798 *BaseAddress = current->BaseAddress;
799 DPRINT("*BaseAddress %p\n", *BaseAddress);
800 *Vacb = current;
801 *BaseOffset = current->FileOffset.QuadPart;
802 return STATUS_SUCCESS;
803 }
804
805 NTSTATUS
806 NTAPI
807 CcRosRequestVacb (
808 PROS_SHARED_CACHE_MAP SharedCacheMap,
809 LONGLONG FileOffset,
810 PVOID* BaseAddress,
811 PBOOLEAN UptoDate,
812 PROS_VACB *Vacb)
813 /*
814 * FUNCTION: Request a page mapping for a shared cache map
815 */
816 {
817 LONGLONG BaseOffset;
818
819 ASSERT(SharedCacheMap);
820
821 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
822 {
823 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
824 FileOffset, VACB_MAPPING_GRANULARITY);
825 KeBugCheck(CACHE_MANAGER);
826 }
827
828 return CcRosGetVacb(SharedCacheMap,
829 FileOffset,
830 &BaseOffset,
831 BaseAddress,
832 UptoDate,
833 Vacb);
834 }
835
836 static
837 VOID
838 CcFreeCachePage (
839 PVOID Context,
840 MEMORY_AREA* MemoryArea,
841 PVOID Address,
842 PFN_NUMBER Page,
843 SWAPENTRY SwapEntry,
844 BOOLEAN Dirty)
845 {
846 ASSERT(SwapEntry == 0);
847 if (Page != 0)
848 {
849 ASSERT(MmGetReferenceCountPage(Page) == 1);
850 MmReleasePageMemoryConsumer(MC_CACHE, Page);
851 }
852 }
853
854 NTSTATUS
855 CcRosInternalFreeVacb (
856 PROS_VACB Vacb)
857 /*
858 * FUNCTION: Releases a VACB associated with a shared cache map
859 */
860 {
861 DPRINT("Freeing VACB 0x%p\n", Vacb);
862 #if DBG
863 if (Vacb->SharedCacheMap->Trace)
864 {
865 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
866 }
867 #endif
868
869 MmLockAddressSpace(MmGetKernelAddressSpace());
870 MmFreeMemoryArea(MmGetKernelAddressSpace(),
871 Vacb->MemoryArea,
872 CcFreeCachePage,
873 NULL);
874 MmUnlockAddressSpace(MmGetKernelAddressSpace());
875
876 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
877 return STATUS_SUCCESS;
878 }
879
880 /*
881 * @implemented
882 */
883 VOID
884 NTAPI
885 CcFlushCache (
886 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
887 IN PLARGE_INTEGER FileOffset OPTIONAL,
888 IN ULONG Length,
889 OUT PIO_STATUS_BLOCK IoStatus)
890 {
891 PROS_SHARED_CACHE_MAP SharedCacheMap;
892 LARGE_INTEGER Offset;
893 LONGLONG RemainingLength;
894 PROS_VACB current;
895 NTSTATUS Status;
896 KIRQL oldIrql;
897
898 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
899 SectionObjectPointers, FileOffset, Length);
900
901 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
902 SectionObjectPointers, FileOffset, Length, IoStatus);
903
904 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
905 {
906 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
907 ASSERT(SharedCacheMap);
908 if (FileOffset)
909 {
910 Offset = *FileOffset;
911 RemainingLength = Length;
912 }
913 else
914 {
915 Offset.QuadPart = 0;
916 RemainingLength = SharedCacheMap->FileSize.QuadPart;
917 }
918
919 if (IoStatus)
920 {
921 IoStatus->Status = STATUS_SUCCESS;
922 IoStatus->Information = 0;
923 }
924
925 while (RemainingLength > 0)
926 {
927 current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
928 if (current != NULL)
929 {
930 if (current->Dirty)
931 {
932 Status = CcRosFlushVacb(current);
933 if (!NT_SUCCESS(Status) && IoStatus != NULL)
934 {
935 IoStatus->Status = Status;
936 }
937 }
938
939 CcRosReleaseVacbLock(current);
940
941 KeAcquireGuardedMutex(&ViewLock);
942 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
943 CcRosVacbDecRefCount(current);
944 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
945 KeReleaseGuardedMutex(&ViewLock);
946 }
947
948 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
949 RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
950 }
951 }
952 else
953 {
954 if (IoStatus)
955 {
956 IoStatus->Status = STATUS_INVALID_PARAMETER;
957 }
958 }
959 }
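/*
 * Illustrative caller of CcFlushCache (a sketch): flush the first 64KB of a
 * cached file and check the result, given a FileObject whose cache has been
 * initialized.
 */
#if 0
    IO_STATUS_BLOCK IoStatus;
    LARGE_INTEGER Offset;

    Offset.QuadPart = 0;
    CcFlushCache(FileObject->SectionObjectPointer, &Offset, 0x10000, &IoStatus);
    if (!NT_SUCCESS(IoStatus.Status))
        DPRINT1("Flush failed: 0x%lx\n", IoStatus.Status);
#endif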
960
961 NTSTATUS
962 NTAPI
963 CcRosDeleteFileCache (
964 PFILE_OBJECT FileObject,
965 PROS_SHARED_CACHE_MAP SharedCacheMap)
966 /*
967  * FUNCTION: Releases the shared cache map associated with a file object (called with ViewLock held)
968 */
969 {
970 PLIST_ENTRY current_entry;
971 PROS_VACB current;
972 LIST_ENTRY FreeList;
973 KIRQL oldIrql;
974
975 ASSERT(SharedCacheMap);
976
977 SharedCacheMap->RefCount++;
978 KeReleaseGuardedMutex(&ViewLock);
979
980 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
981
982 KeAcquireGuardedMutex(&ViewLock);
983 SharedCacheMap->RefCount--;
984 if (SharedCacheMap->RefCount == 0)
985 {
986 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
987
988 /*
989 * Release all VACBs
990 */
991 InitializeListHead(&FreeList);
992 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
993 while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
994 {
995 current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
996 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
997 RemoveEntryList(&current->VacbLruListEntry);
998 if (current->Dirty)
999 {
1000 RemoveEntryList(&current->DirtyVacbListEntry);
1001 DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
1002 DPRINT1("Freeing dirty VACB\n");
1003 }
1004 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
1005 }
1006 #if DBG
1007 SharedCacheMap->Trace = FALSE;
1008 #endif
1009 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1010
1011 KeReleaseGuardedMutex(&ViewLock);
1012 ObDereferenceObject(SharedCacheMap->FileObject);
1013
1014 while (!IsListEmpty(&FreeList))
1015 {
1016 current_entry = RemoveTailList(&FreeList);
1017 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1018 CcRosInternalFreeVacb(current);
1019 }
1020 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1021 KeAcquireGuardedMutex(&ViewLock);
1022 }
1023 return STATUS_SUCCESS;
1024 }
1025
1026 VOID
1027 NTAPI
1028 CcRosReferenceCache (
1029 PFILE_OBJECT FileObject)
1030 {
1031 PROS_SHARED_CACHE_MAP SharedCacheMap;
1032 KeAcquireGuardedMutex(&ViewLock);
1033 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1034 ASSERT(SharedCacheMap);
1035 ASSERT(SharedCacheMap->RefCount != 0);
1036 SharedCacheMap->RefCount++;
1037 KeReleaseGuardedMutex(&ViewLock);
1038 }
1039
1040 VOID
1041 NTAPI
1042 CcRosRemoveIfClosed (
1043 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1044 {
1045 PROS_SHARED_CACHE_MAP SharedCacheMap;
1046 DPRINT("CcRosRemoveIfClosed()\n");
1047 KeAcquireGuardedMutex(&ViewLock);
1048 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1049 if (SharedCacheMap && SharedCacheMap->RefCount == 0)
1050 {
1051 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1052 }
1053 KeReleaseGuardedMutex(&ViewLock);
1054 }
1055
1056
1057 VOID
1058 NTAPI
1059 CcRosDereferenceCache (
1060 PFILE_OBJECT FileObject)
1061 {
1062 PROS_SHARED_CACHE_MAP SharedCacheMap;
1063 KeAcquireGuardedMutex(&ViewLock);
1064 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1065 ASSERT(SharedCacheMap);
1066 if (SharedCacheMap->RefCount > 0)
1067 {
1068 SharedCacheMap->RefCount--;
1069 if (SharedCacheMap->RefCount == 0)
1070 {
1071 MmFreeSectionSegments(SharedCacheMap->FileObject);
1072 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1073 }
1074 }
1075 KeReleaseGuardedMutex(&ViewLock);
1076 }
1077
1078 NTSTATUS
1079 NTAPI
1080 CcRosReleaseFileCache (
1081 PFILE_OBJECT FileObject)
1082 /*
1083 * FUNCTION: Called by the file system when a handle to a file object
1084 * has been closed.
1085 */
1086 {
1087 PROS_SHARED_CACHE_MAP SharedCacheMap;
1088
1089 KeAcquireGuardedMutex(&ViewLock);
1090
1091 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1092 {
1093 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1094 if (FileObject->PrivateCacheMap != NULL)
1095 {
1096 FileObject->PrivateCacheMap = NULL;
1097 if (SharedCacheMap->RefCount > 0)
1098 {
1099 SharedCacheMap->RefCount--;
1100 if (SharedCacheMap->RefCount == 0)
1101 {
1102 MmFreeSectionSegments(SharedCacheMap->FileObject);
1103 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1104 }
1105 }
1106 }
1107 }
1108 KeReleaseGuardedMutex(&ViewLock);
1109 return STATUS_SUCCESS;
1110 }
1111
1112 NTSTATUS
1113 NTAPI
1114 CcTryToInitializeFileCache (
1115 PFILE_OBJECT FileObject)
1116 {
1117 PROS_SHARED_CACHE_MAP SharedCacheMap;
1118 NTSTATUS Status;
1119
1120 KeAcquireGuardedMutex(&ViewLock);
1121
1122 ASSERT(FileObject->SectionObjectPointer);
1123 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1124 if (SharedCacheMap == NULL)
1125 {
1126 Status = STATUS_UNSUCCESSFUL;
1127 }
1128 else
1129 {
1130 if (FileObject->PrivateCacheMap == NULL)
1131 {
1132 FileObject->PrivateCacheMap = SharedCacheMap;
1133 SharedCacheMap->RefCount++;
1134 }
1135 Status = STATUS_SUCCESS;
1136 }
1137 KeReleaseGuardedMutex(&ViewLock);
1138
1139 return Status;
1140 }
1141
1142
1143 NTSTATUS
1144 NTAPI
1145 CcRosInitializeFileCache (
1146 PFILE_OBJECT FileObject,
1147 PCC_FILE_SIZES FileSizes,
1148 BOOLEAN PinAccess,
1149 PCACHE_MANAGER_CALLBACKS CallBacks,
1150 PVOID LazyWriterContext)
1151 /*
1152 * FUNCTION: Initializes a shared cache map for a file object
1153 */
1154 {
1155 PROS_SHARED_CACHE_MAP SharedCacheMap;
1156
1157 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1158 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1159 FileObject, SharedCacheMap);
1160
1161 KeAcquireGuardedMutex(&ViewLock);
1162 if (SharedCacheMap == NULL)
1163 {
1164 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1165 if (SharedCacheMap == NULL)
1166 {
1167 KeReleaseGuardedMutex(&ViewLock);
1168 return STATUS_INSUFFICIENT_RESOURCES;
1169 }
1170 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1171 ObReferenceObjectByPointer(FileObject,
1172 FILE_ALL_ACCESS,
1173 NULL,
1174 KernelMode);
1175 SharedCacheMap->FileObject = FileObject;
1176 SharedCacheMap->Callbacks = CallBacks;
1177 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1178 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1179 SharedCacheMap->FileSize = FileSizes->FileSize;
1180 SharedCacheMap->PinAccess = PinAccess;
1181 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1182 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1183 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1184 }
1185 if (FileObject->PrivateCacheMap == NULL)
1186 {
1187 FileObject->PrivateCacheMap = SharedCacheMap;
1188 SharedCacheMap->RefCount++;
1189 }
1190 KeReleaseGuardedMutex(&ViewLock);
1191
1192 return STATUS_SUCCESS;
1193 }
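/*
 * Hypothetical caller-side setup for CcRosInitializeFileCache above (a sketch
 * only), assuming an Fcb with the usual common FCB header sizes and a
 * statically initialized CACHE_MANAGER_CALLBACKS table named
 * FsdCacheManagerCallbacks:
 */
#if 0
    CC_FILE_SIZES FileSizes;

    FileSizes.AllocationSize = Fcb->Header.AllocationSize;
    FileSizes.FileSize = Fcb->Header.FileSize;
    FileSizes.ValidDataLength = Fcb->Header.ValidDataLength;
    CcRosInitializeFileCache(FileObject,
                             &FileSizes,
                             FALSE,                      /* no pin access */
                             &FsdCacheManagerCallbacks,
                             Fcb);                       /* lazy-write context */
#endif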
1194
1195 /*
1196 * @implemented
1197 */
1198 PFILE_OBJECT
1199 NTAPI
1200 CcGetFileObjectFromSectionPtrs (
1201 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1202 {
1203 PROS_SHARED_CACHE_MAP SharedCacheMap;
1204
1205 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1206
1207 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1208 {
1209 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1210 ASSERT(SharedCacheMap);
1211 return SharedCacheMap->FileObject;
1212 }
1213 return NULL;
1214 }
1215
1216 VOID
1217 INIT_FUNCTION
1218 NTAPI
1219 CcInitView (
1220 VOID)
1221 {
1222 DPRINT("CcInitView()\n");
1223
1224 InitializeListHead(&DirtyVacbListHead);
1225 InitializeListHead(&VacbLruListHead);
1226 KeInitializeGuardedMutex(&ViewLock);
1227 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1228 NULL,
1229 NULL,
1230 0,
1231 sizeof(INTERNAL_BCB),
1232 TAG_BCB,
1233 20);
1234 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1235 NULL,
1236 NULL,
1237 0,
1238 sizeof(ROS_SHARED_CACHE_MAP),
1239 TAG_SHARED_CACHE_MAP,
1240 20);
1241 ExInitializeNPagedLookasideList(&VacbLookasideList,
1242 NULL,
1243 NULL,
1244 0,
1245 sizeof(ROS_VACB),
1246 TAG_VACB,
1247 20);
1248
1249 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1250
1251 CcInitCacheZeroPage();
1252 }
1253
1254 /* EOF */