/* Sync to trunk revision 63857.
 * [reactos.git] / ntoskrnl / cc / view.c */
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
18 * (1) If caching for the FCB hasn't been initiated then so do by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
26 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 static LIST_ENTRY DirtyVacbListHead;
45 static LIST_ENTRY VacbLruListHead;
46 ULONG DirtyPageCount = 0;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
#if DBG
/* Traced variant of the reference-count increment: bumps the VACB's
 * ReferenceCount and, when tracing is enabled on its shared cache map,
 * logs the new state together with the call site. */
static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
{
    ++vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
/* Traced variant of the reference-count decrement; mirror image of the
 * increment helper above. */
static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
    --vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
/* On checked builds route the ref-count macros through the traced helpers
 * so __FILE__/__LINE__ of the caller are captured. */
#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
/* Free builds: plain increments/decrements, no tracing overhead.
 * NOTE(review): these are not interlocked — callers appear to rely on
 * ViewLock/CacheMapLock for serialization; confirm at each call site. */
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
79
80 NTSTATUS
81 CcRosInternalFreeVacb(PROS_VACB Vacb);
82
83
84 /* FUNCTIONS *****************************************************************/
85
/* Enable or disable per-cache-map tracing (checked builds only).
 * When enabling, dumps the current VACB list of the map under
 * ViewLock + CacheMapLock so the snapshot is consistent.
 * On free builds this is a no-op. */
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        /* Lock order used throughout this file: ViewLock first,
         * then the per-map spin lock. */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
131
132 NTSTATUS
133 NTAPI
134 CcRosFlushVacb (
135 PROS_VACB Vacb)
136 {
137 NTSTATUS Status;
138 KIRQL oldIrql;
139
140 Status = CcWriteVirtualAddress(Vacb);
141 if (NT_SUCCESS(Status))
142 {
143 KeAcquireGuardedMutex(&ViewLock);
144 KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
145
146 Vacb->Dirty = FALSE;
147 RemoveEntryList(&Vacb->DirtyVacbListEntry);
148 DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
149 CcRosVacbDecRefCount(Vacb);
150
151 KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
152 KeReleaseGuardedMutex(&ViewLock);
153 }
154
155 return Status;
156 }
157
/* Walk the global dirty-VACB list and write VACBs back until Target
 * pages have been flushed or the list is exhausted.
 * Target  - number of pages the caller wants written out.
 * Count   - receives the number of pages actually flushed.
 * Wait    - if TRUE, block on the per-FS lazy-write lock and the VACB
 *           mutex; if FALSE, skip any VACB that cannot be acquired
 *           immediately (zero timeout).
 * Always returns STATUS_SUCCESS; individual flush failures are only
 * logged. */
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;
    ZeroTimeout.QuadPart = 0;

    /* Stay in a critical region so normal kernel APCs cannot fire while
     * we repeatedly drop and retake ViewLock below. */
    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Pin the VACB so it cannot be freed while we juggle locks. */
        CcRosVacbIncRefCount(current);

        /* Let the owning filesystem veto/serialize the lazy write. */
        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Acquire the per-VACB mutex; with Wait == FALSE use a zero
         * timeout so a busy VACB is simply skipped. */
        Status = KeWaitForSingleObject(&current->Mutex,
                                       Executive,
                                       KernelMode,
                                       FALSE,
                                       Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above */
        /* > 2 means: besides our pin and the dirty-list reference someone
         * else is actively using this VACB — leave it alone for now. */
        if (current->ReferenceCount > 2)
        {
            KeReleaseMutex(&current->Mutex, FALSE);
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Drop ViewLock across the actual I/O. */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        KeReleaseMutex(&current->Mutex, FALSE);
        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            (*Count) += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            Target -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        }

        /* The dirty list may have changed while ViewLock was dropped:
         * restart the scan from the head. */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
257
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;   /* ensures we only retry once after flushing */

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Scan from the front of the LRU list, i.e. least recently used first. */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            /* NOTE(review): once ViewLock is dropped, current_entry (saved
             * above) may be unlinked by another thread — confirm the LRU
             * list cannot change here, or the walk can touch freed memory. */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Unlink from both lists; reuse CacheMapVacbListEntry to
             * queue the VACB for freeing once all locks are dropped. */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Now actually release the VACBs collected above — done outside the
     * locks because freeing touches the kernel address space. */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        CcRosInternalFreeVacb(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
379
/* Release a VACB previously obtained via CcRosGetVacb/CcRosRequestVacb.
 * Valid  - whether the caller brought the view up to date.
 * Dirty  - whether the caller modified the view (sticky: once dirty,
 *          stays dirty until flushed).
 * Mapped - whether the caller added a user mapping of the view.
 * Updates the dirty list/page count and the reference count, then drops
 * the VACB mutex that the lookup path acquired. */
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    Vacb->Valid = Valid;

    WasDirty = Vacb->Dirty;
    Vacb->Dirty = Vacb->Dirty || Dirty;

    /* Newly dirtied: enter the global dirty list and account its pages. */
    if (!WasDirty && Vacb->Dirty)
    {
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    if (Mapped)
    {
        Vacb->MappedCount++;
    }
    /* Drop the caller's lookup reference... */
    CcRosVacbDecRefCount(Vacb);
    /* ...but the first mapping keeps one reference alive on behalf of
     * the mapping itself (released again in CcRosUnmapVacb)... */
    if (Mapped && (Vacb->MappedCount == 1))
    {
        CcRosVacbIncRefCount(Vacb);
    }
    /* ...and a transition to dirty keeps one alive on behalf of the
     * dirty list (released by CcRosFlushVacb). */
    if (!WasDirty && Vacb->Dirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&Vacb->Mutex, FALSE);

    return STATUS_SUCCESS;
}
431
432 /* Returns with VACB Lock Held! */
433 PROS_VACB
434 NTAPI
435 CcRosLookupVacb (
436 PROS_SHARED_CACHE_MAP SharedCacheMap,
437 ULONG FileOffset)
438 {
439 PLIST_ENTRY current_entry;
440 PROS_VACB current;
441 KIRQL oldIrql;
442
443 ASSERT(SharedCacheMap);
444
445 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %lu)\n",
446 SharedCacheMap, FileOffset);
447
448 KeAcquireGuardedMutex(&ViewLock);
449 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
450
451 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
452 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
453 {
454 current = CONTAINING_RECORD(current_entry,
455 ROS_VACB,
456 CacheMapVacbListEntry);
457 if (IsPointInRange(current->FileOffset.QuadPart,
458 VACB_MAPPING_GRANULARITY,
459 FileOffset))
460 {
461 CcRosVacbIncRefCount(current);
462 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
463 KeReleaseGuardedMutex(&ViewLock);
464 KeWaitForSingleObject(&current->Mutex,
465 Executive,
466 KernelMode,
467 FALSE,
468 NULL);
469 return current;
470 }
471 if (current->FileOffset.QuadPart > FileOffset)
472 break;
473 current_entry = current_entry->Flink;
474 }
475
476 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
477 KeReleaseGuardedMutex(&ViewLock);
478
479 return NULL;
480 }
481
/* Mark the VACB covering FileOffset as dirty and move it to the tail of
 * the LRU list (most recently used). Bugchecks if no VACB covers the
 * offset — callers must only pass offsets that are cached. */
NTSTATUS
NTAPI
CcRosMarkDirtyVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    ULONG FileOffset)
{
    PROS_VACB Vacb;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %lu)\n",
           SharedCacheMap, FileOffset);

    /* Lookup returns the VACB referenced and with its mutex held. */
    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    if (!Vacb->Dirty)
    {
        /* Transition to dirty: enter the dirty list. The lookup
         * reference is kept and becomes the dirty-list reference. */
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }
    else
    {
        /* Already dirty: the dirty list holds its reference, so drop
         * the one taken by the lookup. */
        CcRosVacbDecRefCount(Vacb);
    }

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&Vacb->Mutex, FALSE);

    return STATUS_SUCCESS;
}
527
/* Undo one mapping of the VACB covering FileOffset, optionally marking
 * it dirty (NowDirty). Drops the mapping reference when the last
 * mapping goes away. Returns STATUS_UNSUCCESSFUL if the offset is not
 * cached. */
NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    ULONG FileOffset,
    BOOLEAN NowDirty)
{
    PROS_VACB Vacb;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %lu, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    /* Lookup returns the VACB referenced and with its mutex held. */
    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    WasDirty = Vacb->Dirty;
    Vacb->Dirty = Vacb->Dirty || NowDirty;

    Vacb->MappedCount--;

    /* Newly dirtied: enter the dirty list and account its pages. */
    if (!WasDirty && NowDirty)
    {
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    /* Drop the lookup reference... */
    CcRosVacbDecRefCount(Vacb);
    /* ...keep one on behalf of the dirty list if we just dirtied it... */
    if (!WasDirty && NowDirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }
    /* ...and release the mapping reference when the last mapping died. */
    if (Vacb->MappedCount == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&Vacb->Mutex, FALSE);

    return STATUS_SUCCESS;
}
580
581 static
582 NTSTATUS
583 CcRosCreateVacb (
584 PROS_SHARED_CACHE_MAP SharedCacheMap,
585 ULONG FileOffset,
586 PROS_VACB *Vacb)
587 {
588 PROS_VACB current;
589 PROS_VACB previous;
590 PLIST_ENTRY current_entry;
591 NTSTATUS Status;
592 KIRQL oldIrql;
593
594 ASSERT(SharedCacheMap);
595
596 DPRINT("CcRosCreateVacb()\n");
597
598 if (FileOffset >= SharedCacheMap->FileSize.u.LowPart)
599 {
600 *Vacb = NULL;
601 return STATUS_INVALID_PARAMETER;
602 }
603
604 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
605 current->Valid = FALSE;
606 current->Dirty = FALSE;
607 current->PageOut = FALSE;
608 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
609 current->SharedCacheMap = SharedCacheMap;
610 #if DBG
611 if (SharedCacheMap->Trace)
612 {
613 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
614 }
615 #endif
616 current->MappedCount = 0;
617 current->DirtyVacbListEntry.Flink = NULL;
618 current->DirtyVacbListEntry.Blink = NULL;
619 current->ReferenceCount = 1;
620 KeInitializeMutex(&current->Mutex, 0);
621 KeWaitForSingleObject(&current->Mutex,
622 Executive,
623 KernelMode,
624 FALSE,
625 NULL);
626 KeAcquireGuardedMutex(&ViewLock);
627
628 *Vacb = current;
629 /* There is window between the call to CcRosLookupVacb
630 * and CcRosCreateVacb. We must check if a VACB for the
631 * file offset exist. If there is a VACB, we release
632 * our newly created VACB and return the existing one.
633 */
634 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
635 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
636 previous = NULL;
637 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
638 {
639 current = CONTAINING_RECORD(current_entry,
640 ROS_VACB,
641 CacheMapVacbListEntry);
642 if (IsPointInRange(current->FileOffset.QuadPart,
643 VACB_MAPPING_GRANULARITY,
644 FileOffset))
645 {
646 CcRosVacbIncRefCount(current);
647 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
648 #if DBG
649 if (SharedCacheMap->Trace)
650 {
651 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
652 SharedCacheMap,
653 (*Vacb),
654 current);
655 }
656 #endif
657 KeReleaseMutex(&(*Vacb)->Mutex, FALSE);
658 KeReleaseGuardedMutex(&ViewLock);
659 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
660 *Vacb = current;
661 KeWaitForSingleObject(&current->Mutex,
662 Executive,
663 KernelMode,
664 FALSE,
665 NULL);
666 return STATUS_SUCCESS;
667 }
668 if (current->FileOffset.QuadPart < FileOffset)
669 {
670 ASSERT(previous == NULL ||
671 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
672 previous = current;
673 }
674 if (current->FileOffset.QuadPart > FileOffset)
675 break;
676 current_entry = current_entry->Flink;
677 }
678 /* There was no existing VACB. */
679 current = *Vacb;
680 if (previous)
681 {
682 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
683 }
684 else
685 {
686 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
687 }
688 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
689 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
690 KeReleaseGuardedMutex(&ViewLock);
691
692 MmLockAddressSpace(MmGetKernelAddressSpace());
693 current->BaseAddress = NULL;
694 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
695 0, // nothing checks for VACB mareas, so set to 0
696 &current->BaseAddress,
697 VACB_MAPPING_GRANULARITY,
698 PAGE_READWRITE,
699 (PMEMORY_AREA*)&current->MemoryArea,
700 FALSE,
701 0,
702 PAGE_SIZE);
703 MmUnlockAddressSpace(MmGetKernelAddressSpace());
704 if (!NT_SUCCESS(Status))
705 {
706 KeBugCheck(CACHE_MANAGER);
707 }
708
709 /* Create a virtual mapping for this memory area */
710 MI_SET_USAGE(MI_USAGE_CACHE);
711 #if MI_TRACE_PFNS
712 PWCHAR pos = NULL;
713 ULONG len = 0;
714 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
715 {
716 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
717 len = wcslen(pos) * sizeof(WCHAR);
718 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
719 }
720 #endif
721
722 MmMapMemoryArea(current->BaseAddress, VACB_MAPPING_GRANULARITY,
723 MC_CACHE, PAGE_READWRITE);
724
725 return STATUS_SUCCESS;
726 }
727
/* Build a singly linked chain (via NextInChain) of VACBs covering
 * [FileOffset, FileOffset + Length), looking up existing views and
 * creating missing ones. The first VACB of the chain is returned in
 * *Vacb; the chain is NULL-terminated. Every VACB in the chain is
 * returned referenced and with its mutex held (from lookup/create). */
NTSTATUS
NTAPI
CcRosGetVacbChain (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    ULONG FileOffset,
    ULONG Length,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    ULONG i;
    PROS_VACB *VacbList;
    PROS_VACB Previous = NULL;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacbChain()\n");

    Length = ROUND_UP(Length, VACB_MAPPING_GRANULARITY);

    /* NOTE(review): _alloca grows with Length — a large request could
     * exhaust the kernel stack; confirm callers bound Length. */
    VacbList = _alloca(sizeof(PROS_VACB) *
                       (Length / VACB_MAPPING_GRANULARITY));

    /*
     * Look for a VACB already mapping the same data.
     */
    for (i = 0; i < (Length / VACB_MAPPING_GRANULARITY); i++)
    {
        ULONG CurrentOffset = FileOffset + (i * VACB_MAPPING_GRANULARITY);
        current = CcRosLookupVacb(SharedCacheMap, CurrentOffset);
        if (current != NULL)
        {
            KeAcquireGuardedMutex(&ViewLock);

            /* Move to tail of LRU list */
            RemoveEntryList(&current->VacbLruListEntry);
            InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

            KeReleaseGuardedMutex(&ViewLock);

            VacbList[i] = current;
        }
        else
        {
            /* NOTE(review): the status of CcRosCreateVacb is ignored; on
             * failure current appears to be NULL and the chain-building
             * loop below would dereference it — confirm and handle. */
            CcRosCreateVacb(SharedCacheMap, CurrentOffset, &current);
            VacbList[i] = current;
        }
    }

    /* Thread the collected VACBs together in file order. */
    for (i = 0; i < Length / VACB_MAPPING_GRANULARITY; i++)
    {
        if (i == 0)
        {
            *Vacb = VacbList[i];
            Previous = VacbList[i];
        }
        else
        {
            Previous->NextInChain = VacbList[i];
            Previous = VacbList[i];
        }
    }
    ASSERT(Previous);
    Previous->NextInChain = NULL;

    return STATUS_SUCCESS;
}
794
795 NTSTATUS
796 NTAPI
797 CcRosGetVacb (
798 PROS_SHARED_CACHE_MAP SharedCacheMap,
799 ULONG FileOffset,
800 PULONGLONG BaseOffset,
801 PVOID* BaseAddress,
802 PBOOLEAN UptoDate,
803 PROS_VACB *Vacb)
804 {
805 PROS_VACB current;
806 NTSTATUS Status;
807
808 ASSERT(SharedCacheMap);
809
810 DPRINT("CcRosGetVacb()\n");
811
812 /*
813 * Look for a VACB already mapping the same data.
814 */
815 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
816 if (current == NULL)
817 {
818 /*
819 * Otherwise create a new VACB.
820 */
821 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
822 if (!NT_SUCCESS(Status))
823 {
824 return Status;
825 }
826 }
827
828 KeAcquireGuardedMutex(&ViewLock);
829
830 /* Move to the tail of the LRU list */
831 RemoveEntryList(&current->VacbLruListEntry);
832 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
833
834 KeReleaseGuardedMutex(&ViewLock);
835
836 /*
837 * Return information about the VACB to the caller.
838 */
839 *UptoDate = current->Valid;
840 *BaseAddress = current->BaseAddress;
841 DPRINT("*BaseAddress %p\n", *BaseAddress);
842 *Vacb = current;
843 *BaseOffset = current->FileOffset.QuadPart;
844 return STATUS_SUCCESS;
845 }
846
847 NTSTATUS
848 NTAPI
849 CcRosRequestVacb (
850 PROS_SHARED_CACHE_MAP SharedCacheMap,
851 ULONG FileOffset,
852 PVOID* BaseAddress,
853 PBOOLEAN UptoDate,
854 PROS_VACB *Vacb)
855 /*
856 * FUNCTION: Request a page mapping for a shared cache map
857 */
858 {
859 ULONGLONG BaseOffset;
860
861 ASSERT(SharedCacheMap);
862
863 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
864 {
865 DPRINT1("Bad fileoffset %x should be multiple of %x",
866 FileOffset, VACB_MAPPING_GRANULARITY);
867 KeBugCheck(CACHE_MANAGER);
868 }
869
870 return CcRosGetVacb(SharedCacheMap,
871 FileOffset,
872 &BaseOffset,
873 BaseAddress,
874 UptoDate,
875 Vacb);
876 }
877
878 static
879 VOID
880 CcFreeCachePage (
881 PVOID Context,
882 MEMORY_AREA* MemoryArea,
883 PVOID Address,
884 PFN_NUMBER Page,
885 SWAPENTRY SwapEntry,
886 BOOLEAN Dirty)
887 {
888 ASSERT(SwapEntry == 0);
889 if (Page != 0)
890 {
891 ASSERT(MmGetReferenceCountPage(Page) == 1);
892 MmReleasePageMemoryConsumer(MC_CACHE, Page);
893 }
894 }
895
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 * Tears down the view's kernel memory area (freeing its physical pages
 * through CcFreeCachePage) and returns the VACB to its lookaside list.
 * The VACB must already be unlinked from all lists by the caller.
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
921
922 /*
923 * @implemented
924 */
925 VOID
926 NTAPI
927 CcFlushCache (
928 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
929 IN PLARGE_INTEGER FileOffset OPTIONAL,
930 IN ULONG Length,
931 OUT PIO_STATUS_BLOCK IoStatus)
932 {
933 PROS_SHARED_CACHE_MAP SharedCacheMap;
934 LARGE_INTEGER Offset;
935 PROS_VACB current;
936 NTSTATUS Status;
937 KIRQL oldIrql;
938
939 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
940 SectionObjectPointers, FileOffset, Length, IoStatus);
941
942 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
943 {
944 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
945 ASSERT(SharedCacheMap);
946 if (FileOffset)
947 {
948 Offset = *FileOffset;
949 }
950 else
951 {
952 Offset.QuadPart = (LONGLONG)0;
953 Length = SharedCacheMap->FileSize.u.LowPart;
954 }
955
956 if (IoStatus)
957 {
958 IoStatus->Status = STATUS_SUCCESS;
959 IoStatus->Information = 0;
960 }
961
962 while (Length > 0)
963 {
964 current = CcRosLookupVacb(SharedCacheMap, Offset.u.LowPart);
965 if (current != NULL)
966 {
967 if (current->Dirty)
968 {
969 Status = CcRosFlushVacb(current);
970 if (!NT_SUCCESS(Status) && IoStatus != NULL)
971 {
972 IoStatus->Status = Status;
973 }
974 }
975 KeReleaseMutex(&current->Mutex, FALSE);
976
977 KeAcquireGuardedMutex(&ViewLock);
978 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
979 CcRosVacbDecRefCount(current);
980 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
981 KeReleaseGuardedMutex(&ViewLock);
982 }
983
984 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
985 if (Length > VACB_MAPPING_GRANULARITY)
986 {
987 Length -= VACB_MAPPING_GRANULARITY;
988 }
989 else
990 {
991 Length = 0;
992 }
993 }
994 }
995 else
996 {
997 if (IoStatus)
998 {
999 IoStatus->Status = STATUS_INVALID_PARAMETER;
1000 }
1001 }
1002 }
1003
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 * NOTE: Must be entered with ViewLock held; the lock is temporarily
 * released around the flush and the final free, but is held again on
 * return (see callers CcRosRemoveIfClosed / CcRosDereferenceCache).
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    /* Temporary self-reference keeps the map alive across the unlocked
     * flush below. */
    SharedCacheMap->RefCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->RefCount--;
    /* Only proceed if nobody re-referenced the map while unlocked. */
    if (SharedCacheMap->RefCount == 0)
    {
        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* Should not normally happen after the flush above. */
                RemoveEntryList(&current->DirtyVacbListEntry);
                DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
                DPRINT1("Freeing dirty VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        /* Free outside the locks: the map is already unreachable. */
        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosInternalFreeVacb(current);
        }
        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1068
1069 VOID
1070 NTAPI
1071 CcRosReferenceCache (
1072 PFILE_OBJECT FileObject)
1073 {
1074 PROS_SHARED_CACHE_MAP SharedCacheMap;
1075 KeAcquireGuardedMutex(&ViewLock);
1076 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1077 ASSERT(SharedCacheMap);
1078 ASSERT(SharedCacheMap->RefCount != 0);
1079 SharedCacheMap->RefCount++;
1080 KeReleaseGuardedMutex(&ViewLock);
1081 }
1082
1083 VOID
1084 NTAPI
1085 CcRosRemoveIfClosed (
1086 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1087 {
1088 PROS_SHARED_CACHE_MAP SharedCacheMap;
1089 DPRINT("CcRosRemoveIfClosed()\n");
1090 KeAcquireGuardedMutex(&ViewLock);
1091 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1092 if (SharedCacheMap && SharedCacheMap->RefCount == 0)
1093 {
1094 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1095 }
1096 KeReleaseGuardedMutex(&ViewLock);
1097 }
1098
1099
1100 VOID
1101 NTAPI
1102 CcRosDereferenceCache (
1103 PFILE_OBJECT FileObject)
1104 {
1105 PROS_SHARED_CACHE_MAP SharedCacheMap;
1106 KeAcquireGuardedMutex(&ViewLock);
1107 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1108 ASSERT(SharedCacheMap);
1109 if (SharedCacheMap->RefCount > 0)
1110 {
1111 SharedCacheMap->RefCount--;
1112 if (SharedCacheMap->RefCount == 0)
1113 {
1114 MmFreeSectionSegments(SharedCacheMap->FileObject);
1115 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1116 }
1117 }
1118 KeReleaseGuardedMutex(&ViewLock);
1119 }
1120
1121 NTSTATUS
1122 NTAPI
1123 CcRosReleaseFileCache (
1124 PFILE_OBJECT FileObject)
1125 /*
1126 * FUNCTION: Called by the file system when a handle to a file object
1127 * has been closed.
1128 */
1129 {
1130 PROS_SHARED_CACHE_MAP SharedCacheMap;
1131
1132 KeAcquireGuardedMutex(&ViewLock);
1133
1134 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1135 {
1136 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1137 if (FileObject->PrivateCacheMap != NULL)
1138 {
1139 FileObject->PrivateCacheMap = NULL;
1140 if (SharedCacheMap->RefCount > 0)
1141 {
1142 SharedCacheMap->RefCount--;
1143 if (SharedCacheMap->RefCount == 0)
1144 {
1145 MmFreeSectionSegments(SharedCacheMap->FileObject);
1146 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1147 }
1148 }
1149 }
1150 }
1151 KeReleaseGuardedMutex(&ViewLock);
1152 return STATUS_SUCCESS;
1153 }
1154
1155 NTSTATUS
1156 NTAPI
1157 CcTryToInitializeFileCache (
1158 PFILE_OBJECT FileObject)
1159 {
1160 PROS_SHARED_CACHE_MAP SharedCacheMap;
1161 NTSTATUS Status;
1162
1163 KeAcquireGuardedMutex(&ViewLock);
1164
1165 ASSERT(FileObject->SectionObjectPointer);
1166 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1167 if (SharedCacheMap == NULL)
1168 {
1169 Status = STATUS_UNSUCCESSFUL;
1170 }
1171 else
1172 {
1173 if (FileObject->PrivateCacheMap == NULL)
1174 {
1175 FileObject->PrivateCacheMap = SharedCacheMap;
1176 SharedCacheMap->RefCount++;
1177 }
1178 Status = STATUS_SUCCESS;
1179 }
1180 KeReleaseGuardedMutex(&ViewLock);
1181
1182 return Status;
1183 }
1184
1185
NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 * Creates the shared cache map on first use (referencing the file
 * object and seeding sizes from the FCB common header if available)
 * and attaches the file object's private map, taking one reference.
 * Idempotent for file objects that are already attached.
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_UNSUCCESSFUL;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        /* Keep the file object alive as long as the cache map exists;
         * released again in CcRosDeleteFileCache. */
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        /* Seed section/file sizes from the FS's common FCB header. */
        if (FileObject->FsContext)
        {
            SharedCacheMap->SectionSize =
                ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
            SharedCacheMap->FileSize =
                ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
        }
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
    }
    if (FileObject->PrivateCacheMap == NULL)
    {
        FileObject->PrivateCacheMap = SharedCacheMap;
        SharedCacheMap->RefCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}
1239
1240 /*
1241 * @implemented
1242 */
1243 PFILE_OBJECT
1244 NTAPI
1245 CcGetFileObjectFromSectionPtrs (
1246 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1247 {
1248 PROS_SHARED_CACHE_MAP SharedCacheMap;
1249 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1250 {
1251 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1252 ASSERT(SharedCacheMap);
1253 return SharedCacheMap->FileObject;
1254 }
1255 return NULL;
1256 }
1257
/* One-time cache-manager initialization (INIT section): sets up the
 * global dirty/LRU lists, the ViewLock, the lookaside lists for BCBs,
 * shared cache maps and VACBs, registers the cache trim callback with
 * Mm and initializes the zero-page support. */
VOID
INIT_FUNCTION
NTAPI
CcInitView (
    VOID)
{
    DPRINT("CcInitView()\n");

    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    KeInitializeGuardedMutex(&ViewLock);
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    /* Mm calls CcRosTrimCache when it needs cache pages back. */
    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);

    CcInitCacheZeroPage();
}
1295
1296 /* EOF */