[NTOS:CC]
[reactos.git] / reactos / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
18 * (1) If caching for the FCB hasn't been initiated then so do by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
26 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 static LIST_ENTRY DirtyVacbListHead;
45 static LIST_ENTRY VacbListHead;
46 static LIST_ENTRY VacbLruListHead;
47 ULONG DirtyPageCount = 0;
48
49 KGUARDED_MUTEX ViewLock;
50
51 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
52 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
53 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
54
#if DBG
/* Checked builds: wrap VACB reference counting so every change can be logged
 * with its call site when tracing has been enabled for the owning shared
 * cache map via CcRosTraceCacheMap(). */
static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
{
    ++vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
    --vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
/* The macros capture __FILE__/__LINE__ at the point of use. */
#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
/* Free builds: plain, untraced increment/decrement.
 * NOTE(review): not interlocked — callers are expected to hold the
 * appropriate locks; confirm before relying on this from new call sites. */
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
80
81 NTSTATUS
82 CcRosInternalFreeVacb(PROS_VACB Vacb);
83
84
85 /* FUNCTIONS *****************************************************************/
86
/*
 * FUNCTION: Enable or disable VACB refcount tracing for one shared cache map.
 * On checked builds, enabling also dumps the map's current VACB list.
 * On free builds this is a no-op.
 */
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        /* Lock order: global ViewLock first, then the per-map spinlock. */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        /* Walk every VACB currently attached to this cache map. */
        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
132
/*
 * FUNCTION: Write one VACB's data back to the underlying medium.
 * On success the VACB is marked clean, removed from the global dirty list,
 * and the reference the dirty list held on it is dropped.
 * Caller is expected to hold the VACB mutex.
 */
NTSTATUS
NTAPI
CcRosFlushVacb (
    PROS_VACB Vacb)
{
    NTSTATUS Status;
    KIRQL oldIrql;

    /* Perform the I/O first, without any cache-manager locks held. */
    Status = CcWriteVirtualAddress(Vacb);
    if (NT_SUCCESS(Status))
    {
        /* Lock order: ViewLock first, then the per-map spinlock. */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);

        /* The view is clean again: unlink it from the dirty list, adjust
         * the global dirty-page counter, and release the dirty-list
         * reference. */
        Vacb->Dirty = FALSE;
        RemoveEntryList(&Vacb->DirtyVacbListEntry);
        DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        CcRosVacbDecRefCount(Vacb);

        KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }

    return Status;
}
158
/*
 * FUNCTION: Write back up to Target pages' worth of dirty VACBs.
 * ARGUMENTS:
 *     Target - Number of pages the caller would like flushed.
 *     Count  - Receives the number of pages actually flushed.
 *     Wait   - If TRUE, block for filesystem locks and VACB mutexes;
 *              if FALSE, skip any view that cannot be acquired immediately.
 */
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;
    ZeroTimeout.QuadPart = 0;

    /* Stay in a critical region across the scan so normal kernel APCs cannot
     * interrupt us while filesystem resources are held. */
    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Pin the VACB while we work on it. */
        CcRosVacbIncRefCount(current);

        /* Ask the filesystem whether we may lazy-write now; with
         * Wait == FALSE this must not block. */
        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Take the VACB mutex; with Wait == FALSE a zero timeout means a
         * busy view is simply skipped. */
        Status = KeWaitForSingleObject(&current->Mutex,
                                       Executive,
                                       KernelMode,
                                       FALSE,
                                       Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above */
        if (current->ReferenceCount > 2)
        {
            /* Besides ours and the dirty list's reference someone else is
             * still using this view; leave it for a later pass. */
            KeReleaseMutex(&current->Mutex, FALSE);
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Drop ViewLock around the actual I/O. */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        KeReleaseMutex(&current->Mutex, FALSE);
        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            (*Count) += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            Target -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        }

        /* The dirty list may have changed while ViewLock was dropped, so
         * restart the scan from the head.
         * NOTE(review): if the head VACB repeatedly fails to flush while
         * remaining dirty, this loop can spin on it — confirm callers
         * tolerate that. */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
258
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    /* VACBs whose refcount drops to zero are collected here and destroyed
     * after all locks are released. */
    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Scan from the least-recently-used end of the LRU list. */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks.
             * NOTE(review): current_entry was captured before the locks were
             * dropped and may now point at an unlinked entry — confirm the
             * LRU list cannot change here, or the walk can follow stale
             * links. */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Unlink from every global and per-map list; the entry is
             * destroyed after the locks are released. */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Now tear down the collected VACBs, with no locks held. */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        CcRosInternalFreeVacb(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
381
/*
 * FUNCTION: Release a VACB previously obtained with CcRosGetVacb or
 * CcRosRequestVacb.
 * ARGUMENTS:
 *     Valid  - TRUE if the view's contents are now up to date.
 *     Dirty  - TRUE if the caller wrote into the view.
 *     Mapped - TRUE if the caller created a new mapping of the view.
 * Drops the caller's reference and the VACB mutex.
 */
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    Vacb->Valid = Valid;

    /* Dirty is sticky: once set it stays set until the view is flushed. */
    WasDirty = Vacb->Dirty;
    Vacb->Dirty = Vacb->Dirty || Dirty;

    /* A freshly dirtied view joins the global dirty list. */
    if (!WasDirty && Vacb->Dirty)
    {
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    if (Mapped)
    {
        Vacb->MappedCount++;
    }
    /* Drop the caller's reference... */
    CcRosVacbDecRefCount(Vacb);
    /* ...but keep one on behalf of the first mapping... */
    if (Mapped && (Vacb->MappedCount == 1))
    {
        CcRosVacbIncRefCount(Vacb);
    }
    /* ...and one on behalf of the dirty list when the view just became
     * dirty. */
    if (!WasDirty && Vacb->Dirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&Vacb->Mutex, FALSE);

    return STATUS_SUCCESS;
}
433
/*
 * FUNCTION: Find the VACB covering FileOffset in a shared cache map.
 * Returns with the VACB mutex held and one reference taken, or NULL if no
 * view covers the offset. The per-map list is kept sorted by FileOffset,
 * which allows the early exit below.
 */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    ULONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %lu)\n",
           SharedCacheMap, FileOffset);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset, VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Pin the view, drop the list locks, then wait for exclusive
             * access to it. The reference keeps it alive across the wait. */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            KeWaitForSingleObject(&current->Mutex,
                                  Executive,
                                  KernelMode,
                                  FALSE,
                                  NULL);
            return current;
        }
        /* List is sorted: once past the target offset there is no match. */
        if (current->FileOffset > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}
482
/*
 * FUNCTION: Mark the VACB covering FileOffset as dirty.
 * The offset must already be cached — bugchecks otherwise. The reference
 * taken by the lookup is either kept as the dirty list's reference (when the
 * view just became dirty) or dropped again (when it already was).
 */
NTSTATUS
NTAPI
CcRosMarkDirtyVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    ULONG FileOffset)
{
    PROS_VACB Vacb;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %lu)\n",
           SharedCacheMap, FileOffset);

    /* Lookup returns with a reference held and the VACB mutex acquired. */
    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    if (!Vacb->Dirty)
    {
        /* Newly dirty: add to the dirty list; the lookup reference becomes
         * the dirty list's reference. */
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }
    else
    {
        /* Already dirty: the dirty list holds its reference, drop ours. */
        CcRosVacbDecRefCount(Vacb);
    }

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&Vacb->Mutex, FALSE);

    return STATUS_SUCCESS;
}
528
/*
 * FUNCTION: Remove one mapping from the VACB covering FileOffset, optionally
 * marking it dirty (NowDirty). Fails if the offset is not cached.
 */
NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    ULONG FileOffset,
    BOOLEAN NowDirty)
{
    PROS_VACB Vacb;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %lu, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    /* Lookup returns with a reference held and the VACB mutex acquired. */
    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    /* Dirty is sticky: once dirty, stays dirty until flushed. */
    WasDirty = Vacb->Dirty;
    Vacb->Dirty = Vacb->Dirty || NowDirty;

    Vacb->MappedCount--;

    /* A freshly dirtied view joins the global dirty list. */
    if (!WasDirty && NowDirty)
    {
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    /* Drop the lookup reference... */
    CcRosVacbDecRefCount(Vacb);
    /* ...take one for the dirty list if the view just became dirty... */
    if (!WasDirty && NowDirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }
    /* ...and drop the mapping reference when the last mapping is gone. */
    if (Vacb->MappedCount == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&Vacb->Mutex, FALSE);

    return STATUS_SUCCESS;
}
581
582 static
583 NTSTATUS
584 CcRosCreateVacb (
585 PROS_SHARED_CACHE_MAP SharedCacheMap,
586 ULONG FileOffset,
587 PROS_VACB *Vacb)
588 {
589 PROS_VACB current;
590 PROS_VACB previous;
591 PLIST_ENTRY current_entry;
592 NTSTATUS Status;
593 KIRQL oldIrql;
594
595 ASSERT(SharedCacheMap);
596
597 DPRINT("CcRosCreateVacb()\n");
598
599 if (FileOffset >= SharedCacheMap->FileSize.u.LowPart)
600 {
601 *Vacb = NULL;
602 return STATUS_INVALID_PARAMETER;
603 }
604
605 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
606 current->Valid = FALSE;
607 current->Dirty = FALSE;
608 current->PageOut = FALSE;
609 current->FileOffset = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
610 current->SharedCacheMap = SharedCacheMap;
611 #if DBG
612 if (SharedCacheMap->Trace)
613 {
614 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
615 }
616 #endif
617 current->MappedCount = 0;
618 current->DirtyVacbListEntry.Flink = NULL;
619 current->DirtyVacbListEntry.Blink = NULL;
620 current->ReferenceCount = 1;
621 KeInitializeMutex(&current->Mutex, 0);
622 KeWaitForSingleObject(&current->Mutex,
623 Executive,
624 KernelMode,
625 FALSE,
626 NULL);
627 KeAcquireGuardedMutex(&ViewLock);
628
629 *Vacb = current;
630 /* There is window between the call to CcRosLookupVacb
631 * and CcRosCreateVacb. We must check if a VACB for the
632 * file offset exist. If there is a VACB, we release
633 * our newly created VACB and return the existing one.
634 */
635 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
636 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
637 previous = NULL;
638 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
639 {
640 current = CONTAINING_RECORD(current_entry,
641 ROS_VACB,
642 CacheMapVacbListEntry);
643 if (IsPointInRange(current->FileOffset, VACB_MAPPING_GRANULARITY,
644 FileOffset))
645 {
646 CcRosVacbIncRefCount(current);
647 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
648 #if DBG
649 if (SharedCacheMap->Trace)
650 {
651 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
652 SharedCacheMap,
653 (*Vacb),
654 current);
655 }
656 #endif
657 KeReleaseMutex(&(*Vacb)->Mutex, FALSE);
658 KeReleaseGuardedMutex(&ViewLock);
659 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
660 *Vacb = current;
661 KeWaitForSingleObject(&current->Mutex,
662 Executive,
663 KernelMode,
664 FALSE,
665 NULL);
666 return STATUS_SUCCESS;
667 }
668 if (current->FileOffset < FileOffset)
669 {
670 ASSERT(previous == NULL ||
671 previous->FileOffset < current->FileOffset);
672 previous = current;
673 }
674 if (current->FileOffset > FileOffset)
675 break;
676 current_entry = current_entry->Flink;
677 }
678 /* There was no existing VACB. */
679 current = *Vacb;
680 if (previous)
681 {
682 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
683 }
684 else
685 {
686 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
687 }
688 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
689 InsertTailList(&VacbListHead, &current->VacbListEntry);
690 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
691 KeReleaseGuardedMutex(&ViewLock);
692
693 MmLockAddressSpace(MmGetKernelAddressSpace());
694 current->BaseAddress = NULL;
695 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
696 0, // nothing checks for VACB mareas, so set to 0
697 &current->BaseAddress,
698 VACB_MAPPING_GRANULARITY,
699 PAGE_READWRITE,
700 (PMEMORY_AREA*)&current->MemoryArea,
701 FALSE,
702 0,
703 PAGE_SIZE);
704 MmUnlockAddressSpace(MmGetKernelAddressSpace());
705 if (!NT_SUCCESS(Status))
706 {
707 KeBugCheck(CACHE_MANAGER);
708 }
709
710 /* Create a virtual mapping for this memory area */
711 MI_SET_USAGE(MI_USAGE_CACHE);
712 #if MI_TRACE_PFNS
713 PWCHAR pos = NULL;
714 ULONG len = 0;
715 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
716 {
717 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
718 len = wcslen(pos) * sizeof(WCHAR);
719 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
720 }
721 #endif
722
723 MmMapMemoryArea(current->BaseAddress, VACB_MAPPING_GRANULARITY,
724 MC_CACHE, PAGE_READWRITE);
725
726 return STATUS_SUCCESS;
727 }
728
/*
 * FUNCTION: Build a linked chain of VACBs covering [FileOffset,
 * FileOffset + Length), looking up existing views and creating missing ones.
 * The chain is linked through NextInChain and terminated with NULL; the
 * first element is returned in *Vacb.
 */
NTSTATUS
NTAPI
CcRosGetVacbChain (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    ULONG FileOffset,
    ULONG Length,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    ULONG i;
    PROS_VACB *VacbList;
    PROS_VACB Previous = NULL;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacbChain()\n");

    Length = ROUND_UP(Length, VACB_MAPPING_GRANULARITY);

    /* NOTE(review): _alloca grows the kernel stack by one pointer per view;
     * a large Length could overflow the stack — confirm callers bound it. */
    VacbList = _alloca(sizeof(PROS_VACB) *
                       (Length / VACB_MAPPING_GRANULARITY));

    /*
     * Look for a VACB already mapping the same data.
     */
    for (i = 0; i < (Length / VACB_MAPPING_GRANULARITY); i++)
    {
        ULONG CurrentOffset = FileOffset + (i * VACB_MAPPING_GRANULARITY);
        current = CcRosLookupVacb(SharedCacheMap, CurrentOffset);
        if (current != NULL)
        {
            KeAcquireGuardedMutex(&ViewLock);

            /* Move to tail of LRU list */
            RemoveEntryList(&current->VacbLruListEntry);
            InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

            KeReleaseGuardedMutex(&ViewLock);

            VacbList[i] = current;
        }
        else
        {
            /* NOTE(review): the status of CcRosCreateVacb is ignored; on
             * failure current is set to NULL and the chain-linking loop
             * below would dereference it — confirm failure is impossible
             * for callers' offsets, or propagate the status. */
            CcRosCreateVacb(SharedCacheMap, CurrentOffset, &current);
            VacbList[i] = current;
        }
    }

    /* Thread the collected views into a NULL-terminated chain. */
    for (i = 0; i < Length / VACB_MAPPING_GRANULARITY; i++)
    {
        if (i == 0)
        {
            *Vacb = VacbList[i];
            Previous = VacbList[i];
        }
        else
        {
            Previous->NextInChain = VacbList[i];
            Previous = VacbList[i];
        }
    }
    ASSERT(Previous);
    Previous->NextInChain = NULL;

    return STATUS_SUCCESS;
}
795
796 NTSTATUS
797 NTAPI
798 CcRosGetVacb (
799 PROS_SHARED_CACHE_MAP SharedCacheMap,
800 ULONG FileOffset,
801 PULONG BaseOffset,
802 PVOID* BaseAddress,
803 PBOOLEAN UptoDate,
804 PROS_VACB *Vacb)
805 {
806 PROS_VACB current;
807 NTSTATUS Status;
808
809 ASSERT(SharedCacheMap);
810
811 DPRINT("CcRosGetVacb()\n");
812
813 /*
814 * Look for a VACB already mapping the same data.
815 */
816 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
817 if (current == NULL)
818 {
819 /*
820 * Otherwise create a new VACB.
821 */
822 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
823 if (!NT_SUCCESS(Status))
824 {
825 return Status;
826 }
827 }
828
829 KeAcquireGuardedMutex(&ViewLock);
830
831 /* Move to the tail of the LRU list */
832 RemoveEntryList(&current->VacbLruListEntry);
833 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
834
835 KeReleaseGuardedMutex(&ViewLock);
836
837 /*
838 * Return information about the VACB to the caller.
839 */
840 *UptoDate = current->Valid;
841 *BaseAddress = current->BaseAddress;
842 DPRINT("*BaseAddress %p\n", *BaseAddress);
843 *Vacb = current;
844 *BaseOffset = current->FileOffset;
845 return STATUS_SUCCESS;
846 }
847
848 NTSTATUS
849 NTAPI
850 CcRosRequestVacb (
851 PROS_SHARED_CACHE_MAP SharedCacheMap,
852 ULONG FileOffset,
853 PVOID* BaseAddress,
854 PBOOLEAN UptoDate,
855 PROS_VACB *Vacb)
856 /*
857 * FUNCTION: Request a page mapping for a shared cache map
858 */
859 {
860 ULONG BaseOffset;
861
862 ASSERT(SharedCacheMap);
863
864 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
865 {
866 DPRINT1("Bad fileoffset %x should be multiple of %x",
867 FileOffset, VACB_MAPPING_GRANULARITY);
868 KeBugCheck(CACHE_MANAGER);
869 }
870
871 return CcRosGetVacb(SharedCacheMap,
872 FileOffset,
873 &BaseOffset,
874 BaseAddress,
875 UptoDate,
876 Vacb);
877 }
878
879 static
880 VOID
881 CcFreeCachePage (
882 PVOID Context,
883 MEMORY_AREA* MemoryArea,
884 PVOID Address,
885 PFN_NUMBER Page,
886 SWAPENTRY SwapEntry,
887 BOOLEAN Dirty)
888 {
889 ASSERT(SwapEntry == 0);
890 if (Page != 0)
891 {
892 ASSERT(MmGetReferenceCountPage(Page) == 1);
893 MmReleasePageMemoryConsumer(MC_CACHE, Page);
894 }
895 }
896
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 * The VACB must already be unlinked from every list; its backing pages are
 * released via the CcFreeCachePage callback before the structure itself is
 * returned to the lookaside list.
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    /* Tear down the view's memory area; CcFreeCachePage returns each
     * backing page to the MC_CACHE consumer. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
922
923 /*
924 * @implemented
925 */
926 VOID
927 NTAPI
928 CcFlushCache (
929 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
930 IN PLARGE_INTEGER FileOffset OPTIONAL,
931 IN ULONG Length,
932 OUT PIO_STATUS_BLOCK IoStatus)
933 {
934 PROS_SHARED_CACHE_MAP SharedCacheMap;
935 LARGE_INTEGER Offset;
936 PROS_VACB current;
937 NTSTATUS Status;
938 KIRQL oldIrql;
939
940 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
941 SectionObjectPointers, FileOffset, Length, IoStatus);
942
943 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
944 {
945 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
946 ASSERT(SharedCacheMap);
947 if (FileOffset)
948 {
949 Offset = *FileOffset;
950 }
951 else
952 {
953 Offset.QuadPart = (LONGLONG)0;
954 Length = SharedCacheMap->FileSize.u.LowPart;
955 }
956
957 if (IoStatus)
958 {
959 IoStatus->Status = STATUS_SUCCESS;
960 IoStatus->Information = 0;
961 }
962
963 while (Length > 0)
964 {
965 current = CcRosLookupVacb(SharedCacheMap, Offset.u.LowPart);
966 if (current != NULL)
967 {
968 if (current->Dirty)
969 {
970 Status = CcRosFlushVacb(current);
971 if (!NT_SUCCESS(Status) && IoStatus != NULL)
972 {
973 IoStatus->Status = Status;
974 }
975 }
976 KeReleaseMutex(&current->Mutex, FALSE);
977
978 KeAcquireGuardedMutex(&ViewLock);
979 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
980 CcRosVacbDecRefCount(current);
981 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
982 KeReleaseGuardedMutex(&ViewLock);
983 }
984
985 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
986 if (Length > VACB_MAPPING_GRANULARITY)
987 {
988 Length -= VACB_MAPPING_GRANULARITY;
989 }
990 else
991 {
992 Length = 0;
993 }
994 }
995 }
996 else
997 {
998 if (IoStatus)
999 {
1000 IoStatus->Status = STATUS_INVALID_PARAMETER;
1001 }
1002 }
1003 }
1004
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 * PRECONDITION: Must be entered with ViewLock held; it is released and
 * re-acquired internally (around the flush and the final teardown) and is
 * held again on return.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    /* Pin the map so it cannot vanish while ViewLock is dropped for the
     * flush below. */
    SharedCacheMap->RefCount++;
    KeReleaseGuardedMutex(&ViewLock);

    /* Write any remaining dirty views back to disk first. */
    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->RefCount--;
    if (SharedCacheMap->RefCount == 0)
    {
        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* Should not normally happen after the flush above. */
                RemoveEntryList(&current->DirtyVacbListEntry);
                DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
                DPRINT1("Freeing dirty VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        /* Destroy the collected VACBs with no cache-manager locks held. */
        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosInternalFreeVacb(current);
        }
        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1070
1071 VOID
1072 NTAPI
1073 CcRosReferenceCache (
1074 PFILE_OBJECT FileObject)
1075 {
1076 PROS_SHARED_CACHE_MAP SharedCacheMap;
1077 KeAcquireGuardedMutex(&ViewLock);
1078 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1079 ASSERT(SharedCacheMap);
1080 ASSERT(SharedCacheMap->RefCount != 0);
1081 SharedCacheMap->RefCount++;
1082 KeReleaseGuardedMutex(&ViewLock);
1083 }
1084
/*
 * FUNCTION: Tear down the shared cache map behind a section pointer if it is
 * no longer referenced (RefCount == 0); no-op otherwise.
 */
VOID
NTAPI
CcRosRemoveIfClosed (
    PSECTION_OBJECT_POINTERS SectionObjectPointer)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    DPRINT("CcRosRemoveIfClosed()\n");
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap && SharedCacheMap->RefCount == 0)
    {
        /* CcRosDeleteFileCache expects ViewLock held; it temporarily bumps
         * RefCount and drops/retakes the lock internally. */
        CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
    }
    KeReleaseGuardedMutex(&ViewLock);
}
1100
1101
1102 VOID
1103 NTAPI
1104 CcRosDereferenceCache (
1105 PFILE_OBJECT FileObject)
1106 {
1107 PROS_SHARED_CACHE_MAP SharedCacheMap;
1108 KeAcquireGuardedMutex(&ViewLock);
1109 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1110 ASSERT(SharedCacheMap);
1111 if (SharedCacheMap->RefCount > 0)
1112 {
1113 SharedCacheMap->RefCount--;
1114 if (SharedCacheMap->RefCount == 0)
1115 {
1116 MmFreeSectionSegments(SharedCacheMap->FileObject);
1117 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1118 }
1119 }
1120 KeReleaseGuardedMutex(&ViewLock);
1121 }
1122
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 * Drops the reference held via the file object's private cache map and
 * tears the shared cache map down when that was the last reference.
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
        if (FileObject->PrivateCacheMap != NULL)
        {
            /* This file object no longer participates in caching. */
            FileObject->PrivateCacheMap = NULL;
            if (SharedCacheMap->RefCount > 0)
            {
                SharedCacheMap->RefCount--;
                if (SharedCacheMap->RefCount == 0)
                {
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    /* Expects ViewLock held; drops and retakes it. */
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}
1156
1157 NTSTATUS
1158 NTAPI
1159 CcTryToInitializeFileCache (
1160 PFILE_OBJECT FileObject)
1161 {
1162 PROS_SHARED_CACHE_MAP SharedCacheMap;
1163 NTSTATUS Status;
1164
1165 KeAcquireGuardedMutex(&ViewLock);
1166
1167 ASSERT(FileObject->SectionObjectPointer);
1168 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1169 if (SharedCacheMap == NULL)
1170 {
1171 Status = STATUS_UNSUCCESSFUL;
1172 }
1173 else
1174 {
1175 if (FileObject->PrivateCacheMap == NULL)
1176 {
1177 FileObject->PrivateCacheMap = SharedCacheMap;
1178 SharedCacheMap->RefCount++;
1179 }
1180 Status = STATUS_SUCCESS;
1181 }
1182 KeReleaseGuardedMutex(&ViewLock);
1183
1184 return Status;
1185 }
1186
1187
1188 NTSTATUS
1189 NTAPI
1190 CcRosInitializeFileCache (
1191 PFILE_OBJECT FileObject,
1192 PCACHE_MANAGER_CALLBACKS CallBacks,
1193 PVOID LazyWriterContext)
1194 /*
1195 * FUNCTION: Initializes a shared cache map for a file object
1196 */
1197 {
1198 PROS_SHARED_CACHE_MAP SharedCacheMap;
1199
1200 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1201 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1202 FileObject, SharedCacheMap);
1203
1204 KeAcquireGuardedMutex(&ViewLock);
1205 if (SharedCacheMap == NULL)
1206 {
1207 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1208 if (SharedCacheMap == NULL)
1209 {
1210 KeReleaseGuardedMutex(&ViewLock);
1211 return STATUS_UNSUCCESSFUL;
1212 }
1213 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1214 ObReferenceObjectByPointer(FileObject,
1215 FILE_ALL_ACCESS,
1216 NULL,
1217 KernelMode);
1218 SharedCacheMap->FileObject = FileObject;
1219 SharedCacheMap->Callbacks = CallBacks;
1220 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1221 if (FileObject->FsContext)
1222 {
1223 SharedCacheMap->AllocationSize =
1224 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1225 SharedCacheMap->FileSize =
1226 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1227 }
1228 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1229 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1230 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1231 }
1232 if (FileObject->PrivateCacheMap == NULL)
1233 {
1234 FileObject->PrivateCacheMap = SharedCacheMap;
1235 SharedCacheMap->RefCount++;
1236 }
1237 KeReleaseGuardedMutex(&ViewLock);
1238
1239 return STATUS_SUCCESS;
1240 }
1241
1242 /*
1243 * @implemented
1244 */
1245 PFILE_OBJECT
1246 NTAPI
1247 CcGetFileObjectFromSectionPtrs (
1248 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1249 {
1250 PROS_SHARED_CACHE_MAP SharedCacheMap;
1251 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1252 {
1253 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1254 ASSERT(SharedCacheMap);
1255 return SharedCacheMap->FileObject;
1256 }
1257 return NULL;
1258 }
1259
/*
 * FUNCTION: One-time initialization of the cache-manager view machinery:
 * global lists, the ViewLock, and the nonpaged lookaside lists for BCBs,
 * shared cache maps and VACBs. Registers the cache as an Mm memory
 * consumer (MC_CACHE) with CcRosTrimCache as its trim callback.
 */
VOID
INIT_FUNCTION
NTAPI
CcInitView (
    VOID)
{
    DPRINT("CcInitView()\n");

    InitializeListHead(&VacbListHead);
    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    KeInitializeGuardedMutex(&ViewLock);
    ExInitializeNPagedLookasideList (&iBcbLookasideList,
                                     NULL,
                                     NULL,
                                     0,
                                     sizeof(INTERNAL_BCB),
                                     TAG_IBCB,
                                     20);
    ExInitializeNPagedLookasideList (&SharedCacheMapLookasideList,
                                     NULL,
                                     NULL,
                                     0,
                                     sizeof(ROS_SHARED_CACHE_MAP),
                                     TAG_BCB,
                                     20);
    ExInitializeNPagedLookasideList (&VacbLookasideList,
                                     NULL,
                                     NULL,
                                     0,
                                     sizeof(ROS_VACB),
                                     TAG_CSEG,
                                     20);

    /* Let Mm call CcRosTrimCache when cache memory must be reclaimed. */
    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);

    CcInitCacheZeroPage();

}
1299
1300 /* EOF */