Sync with trunk r63174.
[reactos.git] / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
/* Global list of dirty VACBs, protected by ViewLock */
static LIST_ENTRY DirtyVacbListHead;
/* Global list of every VACB in the system */
static LIST_ENTRY VacbListHead;
/* VACBs in least-recently-used order; LRU entries at the head */
static LIST_ENTRY VacbLruListHead;
/* Number of pages currently covered by dirty VACBs */
ULONG DirtyPageCount = 0;

/* Protects the global VACB lists and shared cache map reference counts */
KGUARDED_MUTEX ViewLock;

/* Lookaside lists for the cache manager's fixed-size allocations */
NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;
54
#if DBG
/* Debug build: reference-count helpers that also log the call site when
 * per-cache-map tracing (SharedCacheMap->Trace) is enabled. */
static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
{
    ++vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
    --vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
/* Route the shorthand macros through the tracing helpers */
#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
/* Free build: plain increment/decrement, no tracing */
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
80
81 NTSTATUS
82 CcRosInternalFreeVacb(PROS_VACB Vacb);
83
84
85 /* FUNCTIONS *****************************************************************/
86
87 VOID
88 NTAPI
89 CcRosTraceCacheMap (
90 PROS_SHARED_CACHE_MAP SharedCacheMap,
91 BOOLEAN Trace )
92 {
93 #if DBG
94 KIRQL oldirql;
95 PLIST_ENTRY current_entry;
96 PROS_VACB current;
97
98 if (!SharedCacheMap)
99 return;
100
101 SharedCacheMap->Trace = Trace;
102
103 if (Trace)
104 {
105 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
106
107 KeAcquireGuardedMutex(&ViewLock);
108 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
109
110 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
111 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
112 {
113 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
114 current_entry = current_entry->Flink;
115
116 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
117 current, current->ReferenceCount, current->Dirty, current->PageOut );
118 }
119 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
120 KeReleaseGuardedMutex(&ViewLock);
121 }
122 else
123 {
124 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
125 }
126
127 #else
128 UNREFERENCED_PARAMETER(SharedCacheMap);
129 UNREFERENCED_PARAMETER(Trace);
130 #endif
131 }
132
NTSTATUS
NTAPI
CcRosFlushVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Write a dirty VACB back to its backing file and, on success,
 * clear its dirty state and bookkeeping.
 * RETURNS: Status of the underlying CcWriteVirtualAddress call.
 */
{
    NTSTATUS Status;
    KIRQL oldIrql;

    Status = CcWriteVirtualAddress(Vacb);
    if (NT_SUCCESS(Status))
    {
        /* Lock order: ViewLock first, then the per-map spin lock */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);

        /* No longer dirty: unlink from the global dirty list, fix up the
         * dirty page counter, and drop the reference that the dirty state
         * was holding on this VACB. */
        Vacb->Dirty = FALSE;
        RemoveEntryList(&Vacb->DirtyVacbListEntry);
        DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        CcRosVacbDecRefCount(Vacb);

        KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }

    return Status;
}
158
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait)
/*
 * FUNCTION: Walk the global dirty VACB list and write entries back to disk
 * until roughly Target pages have been flushed.
 * ARGUMENTS:
 *     Target - Number of pages to try to flush.
 *     Count - Receives the number of pages actually flushed.
 *     Wait - If TRUE, block on the lazy-write lock and the VACB mutex;
 *            if FALSE, skip VACBs that cannot be acquired immediately.
 * RETURNS: Always STATUS_SUCCESS; individual flush failures are logged.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;
    ZeroTimeout.QuadPart = 0;

    /* Block APC delivery while cache locks are held */
    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Pin the VACB while we try to flush it */
        CcRosVacbIncRefCount(current);

        /* Let the FS take its lazy-write lock; skip this VACB on failure */
        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Acquire the VACB mutex; with Wait == FALSE a zero timeout makes
         * a busy VACB get skipped instead of blocking */
        Status = KeWaitForSingleObject(&current->Mutex,
                                       Executive,
                                       KernelMode,
                                       FALSE,
                                       Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above */
        if (current->ReferenceCount > 2)
        {
            /* Someone else is still using this VACB; leave it for later */
            KeReleaseMutex(&current->Mutex, FALSE);
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Drop ViewLock across the actual disk write */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        KeReleaseMutex(&current->Mutex, FALSE);
        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            (*Count) += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            Target -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        }

        /* The dirty list may have changed while ViewLock was dropped,
         * so restart the scan from the head */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
258
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *     Target - The number of pages to be freed.
 *     Priority - The priority of free (currently unused).
 *     NrFreed - Points to a variable where the number of pages
 *               actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Scan in LRU order: least recently used VACBs sit at the list head */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks.
             * NOTE(review): current_entry was captured before the locks were
             * dropped, so the LRU list may have changed under us - confirm
             * this cannot follow a stale link. */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Unlink from every list and queue for freeing outside locks */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Free the queued VACBs now that no locks are held */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        CcRosInternalFreeVacb(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
381
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
/*
 * FUNCTION: Release a VACB obtained through CcRosLookupVacb/CcRosGetVacb,
 * recording its new valid/dirty/mapped state, dropping the caller's
 * reference, and releasing the VACB mutex the caller was holding.
 */
{
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    Vacb->Valid = Valid;

    /* Dirty state is sticky here: it is only cleared by a flush */
    WasDirty = Vacb->Dirty;
    Vacb->Dirty = Vacb->Dirty || Dirty;

    if (!WasDirty && Vacb->Dirty)
    {
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    if (Mapped)
    {
        Vacb->MappedCount++;
    }
    /* Drop the caller's reference ... */
    CcRosVacbDecRefCount(Vacb);
    /* ... but keep one for the first mapping ... */
    if (Mapped && (Vacb->MappedCount == 1))
    {
        CcRosVacbIncRefCount(Vacb);
    }
    /* ... and one for a clean-to-dirty transition */
    if (!WasDirty && Vacb->Dirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&Vacb->Mutex, FALSE);

    return STATUS_SUCCESS;
}
433
/* Returns with VACB Lock Held! */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    ULONG FileOffset)
/*
 * FUNCTION: Find the VACB whose view covers FileOffset.
 * RETURNS: The matching VACB with one reference added and its mutex
 * acquired, or NULL if no view covers the offset.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %lu)\n",
           SharedCacheMap, FileOffset);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Found: pin it first, then take its mutex after dropping the
             * locks (a mutex wait is not allowed while holding them) */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            KeWaitForSingleObject(&current->Mutex,
                                  Executive,
                                  KernelMode,
                                  FALSE,
                                  NULL);
            return current;
        }
        /* The per-map list is sorted by file offset, so we can stop early */
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}
483
NTSTATUS
NTAPI
CcRosMarkDirtyVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    ULONG FileOffset)
/*
 * FUNCTION: Mark the VACB covering FileOffset as dirty, putting it on the
 * global dirty list if it was clean before.
 * NOTE: Bugchecks if no VACB covers FileOffset.
 */
{
    PROS_VACB Vacb;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %lu)\n",
           SharedCacheMap, FileOffset);

    /* The lookup returns with a reference taken and the VACB mutex held */
    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    if (!Vacb->Dirty)
    {
        /* Newly dirty: enqueue on the dirty list; the lookup's reference
         * is kept and becomes the reference held by the dirty state */
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }
    else
    {
        /* Already dirty: drop the reference taken by the lookup */
        CcRosVacbDecRefCount(Vacb);
    }

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&Vacb->Mutex, FALSE);

    return STATUS_SUCCESS;
}
529
NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    ULONG FileOffset,
    BOOLEAN NowDirty)
/*
 * FUNCTION: Drop one mapping on the VACB covering FileOffset, optionally
 * marking the VACB dirty.
 * RETURNS: STATUS_UNSUCCESSFUL if no VACB covers FileOffset.
 */
{
    PROS_VACB Vacb;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %lu, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    /* The lookup returns with a reference taken and the VACB mutex held */
    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    WasDirty = Vacb->Dirty;
    Vacb->Dirty = Vacb->Dirty || NowDirty;

    Vacb->MappedCount--;

    if (!WasDirty && NowDirty)
    {
        /* Clean-to-dirty transition: enqueue on the global dirty list */
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    /* Drop the reference taken by the lookup ... */
    CcRosVacbDecRefCount(Vacb);
    /* ... keep one for the new dirty state ... */
    if (!WasDirty && NowDirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }
    /* ... and release the mapping's reference when the last mapping goes */
    if (Vacb->MappedCount == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&Vacb->Mutex, FALSE);

    return STATUS_SUCCESS;
}
582
583 static
584 NTSTATUS
585 CcRosCreateVacb (
586 PROS_SHARED_CACHE_MAP SharedCacheMap,
587 ULONG FileOffset,
588 PROS_VACB *Vacb)
589 {
590 PROS_VACB current;
591 PROS_VACB previous;
592 PLIST_ENTRY current_entry;
593 NTSTATUS Status;
594 KIRQL oldIrql;
595
596 ASSERT(SharedCacheMap);
597
598 DPRINT("CcRosCreateVacb()\n");
599
600 if (FileOffset >= SharedCacheMap->FileSize.u.LowPart)
601 {
602 *Vacb = NULL;
603 return STATUS_INVALID_PARAMETER;
604 }
605
606 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
607 current->Valid = FALSE;
608 current->Dirty = FALSE;
609 current->PageOut = FALSE;
610 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
611 current->SharedCacheMap = SharedCacheMap;
612 #if DBG
613 if (SharedCacheMap->Trace)
614 {
615 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
616 }
617 #endif
618 current->MappedCount = 0;
619 current->DirtyVacbListEntry.Flink = NULL;
620 current->DirtyVacbListEntry.Blink = NULL;
621 current->ReferenceCount = 1;
622 KeInitializeMutex(&current->Mutex, 0);
623 KeWaitForSingleObject(&current->Mutex,
624 Executive,
625 KernelMode,
626 FALSE,
627 NULL);
628 KeAcquireGuardedMutex(&ViewLock);
629
630 *Vacb = current;
631 /* There is window between the call to CcRosLookupVacb
632 * and CcRosCreateVacb. We must check if a VACB for the
633 * file offset exist. If there is a VACB, we release
634 * our newly created VACB and return the existing one.
635 */
636 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
637 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
638 previous = NULL;
639 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
640 {
641 current = CONTAINING_RECORD(current_entry,
642 ROS_VACB,
643 CacheMapVacbListEntry);
644 if (IsPointInRange(current->FileOffset.QuadPart,
645 VACB_MAPPING_GRANULARITY,
646 FileOffset))
647 {
648 CcRosVacbIncRefCount(current);
649 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
650 #if DBG
651 if (SharedCacheMap->Trace)
652 {
653 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
654 SharedCacheMap,
655 (*Vacb),
656 current);
657 }
658 #endif
659 KeReleaseMutex(&(*Vacb)->Mutex, FALSE);
660 KeReleaseGuardedMutex(&ViewLock);
661 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
662 *Vacb = current;
663 KeWaitForSingleObject(&current->Mutex,
664 Executive,
665 KernelMode,
666 FALSE,
667 NULL);
668 return STATUS_SUCCESS;
669 }
670 if (current->FileOffset.QuadPart < FileOffset)
671 {
672 ASSERT(previous == NULL ||
673 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
674 previous = current;
675 }
676 if (current->FileOffset.QuadPart > FileOffset)
677 break;
678 current_entry = current_entry->Flink;
679 }
680 /* There was no existing VACB. */
681 current = *Vacb;
682 if (previous)
683 {
684 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
685 }
686 else
687 {
688 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
689 }
690 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
691 InsertTailList(&VacbListHead, &current->VacbListEntry);
692 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
693 KeReleaseGuardedMutex(&ViewLock);
694
695 MmLockAddressSpace(MmGetKernelAddressSpace());
696 current->BaseAddress = NULL;
697 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
698 0, // nothing checks for VACB mareas, so set to 0
699 &current->BaseAddress,
700 VACB_MAPPING_GRANULARITY,
701 PAGE_READWRITE,
702 (PMEMORY_AREA*)&current->MemoryArea,
703 FALSE,
704 0,
705 PAGE_SIZE);
706 MmUnlockAddressSpace(MmGetKernelAddressSpace());
707 if (!NT_SUCCESS(Status))
708 {
709 KeBugCheck(CACHE_MANAGER);
710 }
711
712 /* Create a virtual mapping for this memory area */
713 MI_SET_USAGE(MI_USAGE_CACHE);
714 #if MI_TRACE_PFNS
715 PWCHAR pos = NULL;
716 ULONG len = 0;
717 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
718 {
719 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
720 len = wcslen(pos) * sizeof(WCHAR);
721 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
722 }
723 #endif
724
725 MmMapMemoryArea(current->BaseAddress, VACB_MAPPING_GRANULARITY,
726 MC_CACHE, PAGE_READWRITE);
727
728 return STATUS_SUCCESS;
729 }
730
NTSTATUS
NTAPI
CcRosGetVacbChain (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    ULONG FileOffset,
    ULONG Length,
    PROS_VACB *Vacb)
/*
 * FUNCTION: Collect the VACBs covering [FileOffset, FileOffset + Length)
 * and link them together through NextInChain, returning the first one.
 * Each VACB is obtained via lookup (existing) or creation (new).
 */
{
    PROS_VACB current;
    ULONG i;
    PROS_VACB *VacbList;
    PROS_VACB Previous = NULL;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacbChain()\n");

    Length = ROUND_UP(Length, VACB_MAPPING_GRANULARITY);

    /* Stack-allocated scratch array of one pointer per view.
     * NOTE(review): _alloca can exhaust the kernel stack for large Length
     * values - confirm callers bound Length. */
    VacbList = _alloca(sizeof(PROS_VACB) *
                       (Length / VACB_MAPPING_GRANULARITY));

    /*
     * Look for a VACB already mapping the same data.
     */
    for (i = 0; i < (Length / VACB_MAPPING_GRANULARITY); i++)
    {
        ULONG CurrentOffset = FileOffset + (i * VACB_MAPPING_GRANULARITY);
        current = CcRosLookupVacb(SharedCacheMap, CurrentOffset);
        if (current != NULL)
        {
            KeAcquireGuardedMutex(&ViewLock);

            /* Move to tail of LRU list */
            RemoveEntryList(&current->VacbLruListEntry);
            InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

            KeReleaseGuardedMutex(&ViewLock);

            VacbList[i] = current;
        }
        else
        {
            /* NOTE(review): the return status of CcRosCreateVacb is ignored;
             * on failure `current` may be NULL and the chain loop below
             * would dereference it - confirm and handle the error. */
            CcRosCreateVacb(SharedCacheMap, CurrentOffset, &current);
            VacbList[i] = current;
        }
    }

    /* Thread the collected VACBs into a singly linked chain */
    for (i = 0; i < Length / VACB_MAPPING_GRANULARITY; i++)
    {
        if (i == 0)
        {
            *Vacb = VacbList[i];
            Previous = VacbList[i];
        }
        else
        {
            Previous->NextInChain = VacbList[i];
            Previous = VacbList[i];
        }
    }
    ASSERT(Previous);
    Previous->NextInChain = NULL;

    return STATUS_SUCCESS;
}
797
798 NTSTATUS
799 NTAPI
800 CcRosGetVacb (
801 PROS_SHARED_CACHE_MAP SharedCacheMap,
802 ULONG FileOffset,
803 PULONGLONG BaseOffset,
804 PVOID* BaseAddress,
805 PBOOLEAN UptoDate,
806 PROS_VACB *Vacb)
807 {
808 PROS_VACB current;
809 NTSTATUS Status;
810
811 ASSERT(SharedCacheMap);
812
813 DPRINT("CcRosGetVacb()\n");
814
815 /*
816 * Look for a VACB already mapping the same data.
817 */
818 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
819 if (current == NULL)
820 {
821 /*
822 * Otherwise create a new VACB.
823 */
824 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
825 if (!NT_SUCCESS(Status))
826 {
827 return Status;
828 }
829 }
830
831 KeAcquireGuardedMutex(&ViewLock);
832
833 /* Move to the tail of the LRU list */
834 RemoveEntryList(&current->VacbLruListEntry);
835 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
836
837 KeReleaseGuardedMutex(&ViewLock);
838
839 /*
840 * Return information about the VACB to the caller.
841 */
842 *UptoDate = current->Valid;
843 *BaseAddress = current->BaseAddress;
844 DPRINT("*BaseAddress %p\n", *BaseAddress);
845 *Vacb = current;
846 *BaseOffset = current->FileOffset.QuadPart;
847 return STATUS_SUCCESS;
848 }
849
850 NTSTATUS
851 NTAPI
852 CcRosRequestVacb (
853 PROS_SHARED_CACHE_MAP SharedCacheMap,
854 ULONG FileOffset,
855 PVOID* BaseAddress,
856 PBOOLEAN UptoDate,
857 PROS_VACB *Vacb)
858 /*
859 * FUNCTION: Request a page mapping for a shared cache map
860 */
861 {
862 ULONGLONG BaseOffset;
863
864 ASSERT(SharedCacheMap);
865
866 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
867 {
868 DPRINT1("Bad fileoffset %x should be multiple of %x",
869 FileOffset, VACB_MAPPING_GRANULARITY);
870 KeBugCheck(CACHE_MANAGER);
871 }
872
873 return CcRosGetVacb(SharedCacheMap,
874 FileOffset,
875 &BaseOffset,
876 BaseAddress,
877 UptoDate,
878 Vacb);
879 }
880
881 static
882 VOID
883 CcFreeCachePage (
884 PVOID Context,
885 MEMORY_AREA* MemoryArea,
886 PVOID Address,
887 PFN_NUMBER Page,
888 SWAPENTRY SwapEntry,
889 BOOLEAN Dirty)
890 {
891 ASSERT(SwapEntry == 0);
892 if (Page != 0)
893 {
894 ASSERT(MmGetReferenceCountPage(Page) == 1);
895 MmReleasePageMemoryConsumer(MC_CACHE, Page);
896 }
897 }
898
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 * NOTE: The caller must already have unlinked the VACB from all lists.
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    /* Unmap the view; CcFreeCachePage returns each backing page */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    /* Return the VACB structure itself to its lookaside list */
    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
924
/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    PROS_VACB current;
    NTSTATUS Status;
    KIRQL oldIrql;

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
        }
        else
        {
            /* No offset given: flush the whole file */
            Offset.QuadPart = (LONGLONG)0;
            Length = SharedCacheMap->FileSize.u.LowPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one view at a time.
         * NOTE(review): the lookup uses Offset.u.LowPart only, so offsets
         * beyond 4 GB are truncated, and an Offset not aligned to
         * VACB_MAPPING_GRANULARITY still advances by a full granularity
         * per iteration - confirm callers always pass aligned offsets. */
        while (Length > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.u.LowPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        /* Remember the last failure but keep flushing */
                        IoStatus->Status = Status;
                    }
                }
                /* Release the mutex and the reference the lookup took */
                KeReleaseMutex(&current->Mutex, FALSE);

                KeAcquireGuardedMutex(&ViewLock);
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosVacbDecRefCount(current);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                KeReleaseGuardedMutex(&ViewLock);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            if (Length > VACB_MAPPING_GRANULARITY)
            {
                Length -= VACB_MAPPING_GRANULARITY;
            }
            else
            {
                Length = 0;
            }
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1006
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 * NOTE: Called with ViewLock held; the lock is temporarily released
 * around the flush and the final frees, and is held again on return.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    /* Keep the map alive while ViewLock is dropped for the flush */
    SharedCacheMap->RefCount++;
    KeReleaseGuardedMutex(&ViewLock);

    /* Write back any dirty data before tearing the map down */
    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->RefCount--;
    if (SharedCacheMap->RefCount == 0)
    {
        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            /* Unlink from every global list before queueing for free */
            RemoveEntryList(&current->VacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                RemoveEntryList(&current->DirtyVacbListEntry);
                DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
                DPRINT1("Freeing dirty VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        /* Free the VACBs and the map itself outside of ViewLock */
        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosInternalFreeVacb(current);
        }
        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1072
1073 VOID
1074 NTAPI
1075 CcRosReferenceCache (
1076 PFILE_OBJECT FileObject)
1077 {
1078 PROS_SHARED_CACHE_MAP SharedCacheMap;
1079 KeAcquireGuardedMutex(&ViewLock);
1080 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1081 ASSERT(SharedCacheMap);
1082 ASSERT(SharedCacheMap->RefCount != 0);
1083 SharedCacheMap->RefCount++;
1084 KeReleaseGuardedMutex(&ViewLock);
1085 }
1086
1087 VOID
1088 NTAPI
1089 CcRosRemoveIfClosed (
1090 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1091 {
1092 PROS_SHARED_CACHE_MAP SharedCacheMap;
1093 DPRINT("CcRosRemoveIfClosed()\n");
1094 KeAcquireGuardedMutex(&ViewLock);
1095 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1096 if (SharedCacheMap && SharedCacheMap->RefCount == 0)
1097 {
1098 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1099 }
1100 KeReleaseGuardedMutex(&ViewLock);
1101 }
1102
1103
1104 VOID
1105 NTAPI
1106 CcRosDereferenceCache (
1107 PFILE_OBJECT FileObject)
1108 {
1109 PROS_SHARED_CACHE_MAP SharedCacheMap;
1110 KeAcquireGuardedMutex(&ViewLock);
1111 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1112 ASSERT(SharedCacheMap);
1113 if (SharedCacheMap->RefCount > 0)
1114 {
1115 SharedCacheMap->RefCount--;
1116 if (SharedCacheMap->RefCount == 0)
1117 {
1118 MmFreeSectionSegments(SharedCacheMap->FileObject);
1119 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1120 }
1121 }
1122 KeReleaseGuardedMutex(&ViewLock);
1123 }
1124
1125 NTSTATUS
1126 NTAPI
1127 CcRosReleaseFileCache (
1128 PFILE_OBJECT FileObject)
1129 /*
1130 * FUNCTION: Called by the file system when a handle to a file object
1131 * has been closed.
1132 */
1133 {
1134 PROS_SHARED_CACHE_MAP SharedCacheMap;
1135
1136 KeAcquireGuardedMutex(&ViewLock);
1137
1138 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1139 {
1140 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1141 if (FileObject->PrivateCacheMap != NULL)
1142 {
1143 FileObject->PrivateCacheMap = NULL;
1144 if (SharedCacheMap->RefCount > 0)
1145 {
1146 SharedCacheMap->RefCount--;
1147 if (SharedCacheMap->RefCount == 0)
1148 {
1149 MmFreeSectionSegments(SharedCacheMap->FileObject);
1150 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1151 }
1152 }
1153 }
1154 }
1155 KeReleaseGuardedMutex(&ViewLock);
1156 return STATUS_SUCCESS;
1157 }
1158
1159 NTSTATUS
1160 NTAPI
1161 CcTryToInitializeFileCache (
1162 PFILE_OBJECT FileObject)
1163 {
1164 PROS_SHARED_CACHE_MAP SharedCacheMap;
1165 NTSTATUS Status;
1166
1167 KeAcquireGuardedMutex(&ViewLock);
1168
1169 ASSERT(FileObject->SectionObjectPointer);
1170 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1171 if (SharedCacheMap == NULL)
1172 {
1173 Status = STATUS_UNSUCCESSFUL;
1174 }
1175 else
1176 {
1177 if (FileObject->PrivateCacheMap == NULL)
1178 {
1179 FileObject->PrivateCacheMap = SharedCacheMap;
1180 SharedCacheMap->RefCount++;
1181 }
1182 Status = STATUS_SUCCESS;
1183 }
1184 KeReleaseGuardedMutex(&ViewLock);
1185
1186 return Status;
1187 }
1188
1189
NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        /* First caller for this file: allocate and set up the shared map */
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_UNSUCCESSFUL;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        /* Keep the file object alive as long as the cache map exists */
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        if (FileObject->FsContext)
        {
            /* Seed sizes from the file system's common FCB header */
            SharedCacheMap->SectionSize =
                ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
            SharedCacheMap->FileSize =
                ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
        }
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
    }
    /* Attach a private map (and a reference) if this file object has none */
    if (FileObject->PrivateCacheMap == NULL)
    {
        FileObject->PrivateCacheMap = SharedCacheMap;
        SharedCacheMap->RefCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}
1243
1244 /*
1245 * @implemented
1246 */
1247 PFILE_OBJECT
1248 NTAPI
1249 CcGetFileObjectFromSectionPtrs (
1250 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1251 {
1252 PROS_SHARED_CACHE_MAP SharedCacheMap;
1253 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1254 {
1255 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1256 ASSERT(SharedCacheMap);
1257 return SharedCacheMap->FileObject;
1258 }
1259 return NULL;
1260 }
1261
VOID
INIT_FUNCTION
NTAPI
CcInitView (
    VOID)
/*
 * FUNCTION: One-time cache manager initialization: sets up the global VACB
 * lists, the ViewLock, the lookaside lists, registers the cache as a
 * memory consumer, and initializes zero-page support.
 */
{
    DPRINT("CcInitView()\n");

    InitializeListHead(&VacbListHead);
    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    KeInitializeGuardedMutex(&ViewLock);
    /* Fixed-size allocators for BCBs, shared cache maps and VACBs */
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    /* Let Mm call CcRosTrimCache when cache memory must be reclaimed */
    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);

    CcInitCacheZeroPage();
}
1300
1301 /* EOF */