[NTOSKRNL]
[reactos.git] / reactos / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
 * (3) If either the page is being read or not completely written, and it is
 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 static LIST_ENTRY DirtyVacbListHead;
45 static LIST_ENTRY VacbLruListHead;
46 ULONG DirtyPageCount = 0;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
#if DBG
/* Debug build: reference counting helpers that, when per-map tracing is
 * enabled (see CcRosTraceCacheMap), log every increment/decrement together
 * with the call site so reference leaks can be tracked down. */
static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
{
    ++vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
    --vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
/* Free build: plain, untraced increments/decrements */
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
79
80 NTSTATUS
81 CcRosInternalFreeVacb(PROS_VACB Vacb);
82
83
84 /* FUNCTIONS *****************************************************************/
85
/*
 * Enable or disable debug tracing for a shared cache map. When enabling,
 * also dump the current state of every VACB attached to the map.
 * No-op in free (non-DBG) builds.
 */
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        /* Lock order used throughout this file: ViewLock first, then the
         * per-map spinlock */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
131
/*
 * Write the given VACB's contents back to the underlying medium. On
 * success, clear its dirty state: remove it from the global dirty list,
 * adjust the global dirty page counter and drop the extra reference that
 * was held for being dirty. On failure the dirty state is left untouched.
 */
NTSTATUS
NTAPI
CcRosFlushVacb (
    PROS_VACB Vacb)
{
    NTSTATUS Status;
    KIRQL oldIrql;

    Status = CcWriteVirtualAddress(Vacb);
    if (NT_SUCCESS(Status))
    {
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);

        Vacb->Dirty = FALSE;
        RemoveEntryList(&Vacb->DirtyVacbListEntry);
        DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        /* Drop the reference that was taken when the VACB became dirty */
        CcRosVacbDecRefCount(Vacb);

        KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }

    return Status;
}
157
/*
 * Walk the global dirty VACB list and write dirty views back to disk
 * until either the list is exhausted or Target pages have been flushed.
 *
 * Target - maximum number of pages to flush.
 * Count  - receives the number of pages actually flushed.
 * Wait   - if TRUE, block on the lazy-write callback and the VACB mutex;
 *          if FALSE, skip any VACB that cannot be acquired immediately.
 */
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;
    ZeroTimeout.QuadPart = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        CcRosVacbIncRefCount(current);

        /* Ask the FSD for permission to lazy-write this view */
        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* With Wait == FALSE a zero timeout is used so that busy views
         * are skipped instead of blocking the caller */
        Status = KeWaitForSingleObject(&current->Mutex,
                                       Executive,
                                       KernelMode,
                                       FALSE,
                                       Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* Skip VACBs referenced elsewhere: one reference was added above
         * and one is held for the dirty state, so > 2 means other users */
        if (current->ReferenceCount > 2)
        {
            KeReleaseMutex(&current->Mutex, FALSE);
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Drop ViewLock for the duration of the (possibly blocking) write */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        KeReleaseMutex(&current->Mutex, FALSE);
        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            (*Count) += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            Target -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        }

        /* The dirty list may have changed while ViewLock was dropped;
         * restart the walk from the head */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
258
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;   /* only one flush-and-retry pass is made */

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Walk the LRU list front-to-back, i.e. least recently used first */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now: no references left means
         * the view is neither in use, nor mapped, nor dirty */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            /* Defer the actual teardown until all locks are dropped */
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Now tear down the views collected above, outside of all locks */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        CcRosInternalFreeVacb(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
380
/*
 * Release a VACB previously obtained from CcRosLookupVacb/CcRosGetVacb.
 *
 * Valid  - whether the view's contents are now up to date.
 * Dirty  - whether the caller modified the view (dirtiness is sticky).
 * Mapped - whether the caller added a user mapping of the view.
 *
 * The dirty state and the first mapping each keep an extra reference so
 * that such a VACB cannot be trimmed. Releases the VACB mutex unless the
 * VACB is pinned.
 */
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    Vacb->Valid = Valid;

    WasDirty = Vacb->Dirty;
    Vacb->Dirty = Vacb->Dirty || Dirty;

    /* Newly dirtied views go on the global dirty list */
    if (!WasDirty && Vacb->Dirty)
    {
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    if (Mapped)
    {
        Vacb->MappedCount++;
    }
    /* Drop the caller's reference ... */
    CcRosVacbDecRefCount(Vacb);
    /* ... but keep one for the first mapping ... */
    if (Mapped && (Vacb->MappedCount == 1))
    {
        CcRosVacbIncRefCount(Vacb);
    }
    /* ... and one for the transition to the dirty state */
    if (!WasDirty && Vacb->Dirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    /* Pinned VACBs keep their mutex held until unpinned */
    if (InterlockedCompareExchange(&Vacb->PinCount, 0, 0) == 0)
    {
        KeReleaseMutex(&Vacb->Mutex, FALSE);
    }

    return STATUS_SUCCESS;
}
435
/*
 * Find the VACB of SharedCacheMap covering FileOffset, if any. On success
 * the VACB is referenced and, unless it is pinned, its mutex is acquired
 * before returning. Returns NULL when no view covers the offset.
 *
 * Returns with VACB Lock Held!
 */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Reference it before dropping the locks so it cannot be freed,
             * then wait for its mutex (unless pinned, in which case the
             * mutex is already held on the caller's behalf) */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            if (InterlockedCompareExchange(&current->PinCount, 0, 0) == 0)
            {
                KeWaitForSingleObject(&current->Mutex,
                                      Executive,
                                      KernelMode,
                                      FALSE,
                                      NULL);
            }
            return current;
        }
        /* The list is kept sorted by file offset, so we can stop early */
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}
488
/*
 * Mark the VACB covering FileOffset dirty, adding it to the global dirty
 * list if it was clean, and move it to the tail of the LRU list.
 * Bugchecks if no VACB covers the offset.
 */
NTSTATUS
NTAPI
CcRosMarkDirtyVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PROS_VACB Vacb;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    if (!Vacb->Dirty)
    {
        /* The reference taken by the lookup above is kept: it becomes the
         * reference held for the dirty state */
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }
    else
    {
        /* Already dirty (and already referenced for it): drop the lookup
         * reference */
        CcRosVacbDecRefCount(Vacb);
    }

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&Vacb->Mutex, FALSE);

    return STATUS_SUCCESS;
}
534
/*
 * Remove one user mapping from the VACB covering FileOffset, optionally
 * marking it dirty (NowDirty). Returns STATUS_UNSUCCESSFUL when no VACB
 * covers the offset.
 */
NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    BOOLEAN NowDirty)
{
    PROS_VACB Vacb;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    WasDirty = Vacb->Dirty;
    Vacb->Dirty = Vacb->Dirty || NowDirty;

    Vacb->MappedCount--;

    /* Newly dirtied views go on the global dirty list */
    if (!WasDirty && NowDirty)
    {
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    /* Drop the lookup reference ... */
    CcRosVacbDecRefCount(Vacb);
    /* ... keep one for the transition to the dirty state ... */
    if (!WasDirty && NowDirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }
    /* ... and drop the one held for being mapped once the last mapping
     * is gone */
    if (Vacb->MappedCount == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&Vacb->Mutex, FALSE);

    return STATUS_SUCCESS;
}
587
588 static
589 NTSTATUS
590 CcRosMapVacb(
591 PROS_VACB Vacb)
592 {
593 ULONG i;
594 NTSTATUS Status;
595 ULONG_PTR NumberOfPages;
596
597 /* Create a memory area. */
598 MmLockAddressSpace(MmGetKernelAddressSpace());
599 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
600 0, // nothing checks for VACB mareas, so set to 0
601 &Vacb->BaseAddress,
602 VACB_MAPPING_GRANULARITY,
603 PAGE_READWRITE,
604 (PMEMORY_AREA*)&Vacb->MemoryArea,
605 0,
606 PAGE_SIZE);
607 MmUnlockAddressSpace(MmGetKernelAddressSpace());
608 if (!NT_SUCCESS(Status))
609 {
610 KeBugCheck(CACHE_MANAGER);
611 }
612
613 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
614 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
615
616 /* Create a virtual mapping for this memory area */
617 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
618 for (i = 0; i < NumberOfPages; i++)
619 {
620 PFN_NUMBER PageFrameNumber;
621
622 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
623 if (PageFrameNumber == 0)
624 {
625 DPRINT1("Unable to allocate page\n");
626 KeBugCheck(MEMORY_MANAGEMENT);
627 }
628
629 Status = MmCreateVirtualMapping(NULL,
630 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
631 PAGE_READWRITE,
632 &PageFrameNumber,
633 1);
634 if (!NT_SUCCESS(Status))
635 {
636 DPRINT1("Unable to create virtual mapping\n");
637 KeBugCheck(MEMORY_MANAGEMENT);
638 }
639 }
640
641 return STATUS_SUCCESS;
642 }
643
644 static
645 NTSTATUS
646 CcRosCreateVacb (
647 PROS_SHARED_CACHE_MAP SharedCacheMap,
648 LONGLONG FileOffset,
649 PROS_VACB *Vacb)
650 {
651 PROS_VACB current;
652 PROS_VACB previous;
653 PLIST_ENTRY current_entry;
654 NTSTATUS Status;
655 KIRQL oldIrql;
656
657 ASSERT(SharedCacheMap);
658
659 DPRINT("CcRosCreateVacb()\n");
660
661 if (FileOffset >= SharedCacheMap->FileSize.QuadPart)
662 {
663 *Vacb = NULL;
664 return STATUS_INVALID_PARAMETER;
665 }
666
667 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
668 current->BaseAddress = NULL;
669 current->Valid = FALSE;
670 current->Dirty = FALSE;
671 current->PageOut = FALSE;
672 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
673 current->SharedCacheMap = SharedCacheMap;
674 #if DBG
675 if (SharedCacheMap->Trace)
676 {
677 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
678 }
679 #endif
680 current->MappedCount = 0;
681 current->DirtyVacbListEntry.Flink = NULL;
682 current->DirtyVacbListEntry.Blink = NULL;
683 current->ReferenceCount = 1;
684 current->PinCount = 0;
685 KeInitializeMutex(&current->Mutex, 0);
686 KeWaitForSingleObject(&current->Mutex,
687 Executive,
688 KernelMode,
689 FALSE,
690 NULL);
691 KeAcquireGuardedMutex(&ViewLock);
692
693 *Vacb = current;
694 /* There is window between the call to CcRosLookupVacb
695 * and CcRosCreateVacb. We must check if a VACB for the
696 * file offset exist. If there is a VACB, we release
697 * our newly created VACB and return the existing one.
698 */
699 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
700 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
701 previous = NULL;
702 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
703 {
704 current = CONTAINING_RECORD(current_entry,
705 ROS_VACB,
706 CacheMapVacbListEntry);
707 if (IsPointInRange(current->FileOffset.QuadPart,
708 VACB_MAPPING_GRANULARITY,
709 FileOffset))
710 {
711 CcRosVacbIncRefCount(current);
712 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
713 #if DBG
714 if (SharedCacheMap->Trace)
715 {
716 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
717 SharedCacheMap,
718 (*Vacb),
719 current);
720 }
721 #endif
722 KeReleaseMutex(&(*Vacb)->Mutex, FALSE);
723 KeReleaseGuardedMutex(&ViewLock);
724 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
725 *Vacb = current;
726 if (InterlockedCompareExchange(&current->PinCount, 0, 0) == 0)
727 {
728 KeWaitForSingleObject(&current->Mutex,
729 Executive,
730 KernelMode,
731 FALSE,
732 NULL);
733 }
734 return STATUS_SUCCESS;
735 }
736 if (current->FileOffset.QuadPart < FileOffset)
737 {
738 ASSERT(previous == NULL ||
739 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
740 previous = current;
741 }
742 if (current->FileOffset.QuadPart > FileOffset)
743 break;
744 current_entry = current_entry->Flink;
745 }
746 /* There was no existing VACB. */
747 current = *Vacb;
748 if (previous)
749 {
750 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
751 }
752 else
753 {
754 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
755 }
756 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
757 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
758 KeReleaseGuardedMutex(&ViewLock);
759
760 MI_SET_USAGE(MI_USAGE_CACHE);
761 #if MI_TRACE_PFNS
762 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
763 {
764 PWCHAR pos = NULL;
765 ULONG len = 0;
766 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
767 len = wcslen(pos) * sizeof(WCHAR);
768 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
769 }
770 #endif
771
772 Status = CcRosMapVacb(current);
773
774 return Status;
775 }
776
/*
 * Return the VACB covering FileOffset, creating (and mapping) one if
 * necessary, and move it to the tail of the LRU list. On success the
 * VACB is referenced and its mutex held; the caller releases both via
 * CcRosReleaseVacb.
 *
 * BaseOffset  - receives the view-aligned file offset of the VACB.
 * BaseAddress - receives the kernel virtual address of the view.
 * UptoDate    - receives whether the view's contents are valid.
 * Vacb        - receives the VACB itself.
 */
NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PLONGLONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the VACB to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *Vacb = current;
    *BaseOffset = current->FileOffset.QuadPart;
    return STATUS_SUCCESS;
}
828
829 NTSTATUS
830 NTAPI
831 CcRosRequestVacb (
832 PROS_SHARED_CACHE_MAP SharedCacheMap,
833 LONGLONG FileOffset,
834 PVOID* BaseAddress,
835 PBOOLEAN UptoDate,
836 PROS_VACB *Vacb)
837 /*
838 * FUNCTION: Request a page mapping for a shared cache map
839 */
840 {
841 LONGLONG BaseOffset;
842
843 ASSERT(SharedCacheMap);
844
845 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
846 {
847 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
848 FileOffset, VACB_MAPPING_GRANULARITY);
849 KeBugCheck(CACHE_MANAGER);
850 }
851
852 return CcRosGetVacb(SharedCacheMap,
853 FileOffset,
854 &BaseOffset,
855 BaseAddress,
856 UptoDate,
857 Vacb);
858 }
859
/*
 * Per-page callback passed to MmFreeMemoryArea when a VACB's memory area
 * is torn down: returns each resident page to the cache memory consumer.
 * Cache pages are never swapped, hence the SwapEntry assertion.
 */
static
VOID
CcFreeCachePage (
    PVOID Context,
    MEMORY_AREA* MemoryArea,
    PVOID Address,
    PFN_NUMBER Page,
    SWAPENTRY SwapEntry,
    BOOLEAN Dirty)
{
    ASSERT(SwapEntry == 0);
    if (Page != 0)
    {
        /* The cache must hold the only remaining reference at this point */
        ASSERT(MmGetReferenceCountPage(Page) == 1);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }
}
877
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map.
 * Frees the view's memory area (returning its pages via CcFreeCachePage)
 * and then the VACB structure itself. The VACB must already be unlinked
 * from all lists.
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
903
904 /*
905 * @implemented
906 */
/*
 * Flush the dirty cached views of a file back to disk.
 *
 * FileOffset - start of the range to flush; NULL means the whole file.
 * Length     - length of the range (ignored when FileOffset is NULL).
 * IoStatus   - optional; receives the status of the last failed flush,
 *              or STATUS_SUCCESS when everything was written.
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;
    KIRQL oldIrql;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
        SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            /* No range given: flush the whole file */
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one view at a time */
        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }

                /* Release the mutex acquired by the lookup (kept if pinned) */
                if (InterlockedCompareExchange(&current->PinCount, 0, 0) == 0)
                {
                    KeReleaseMutex(&current->Mutex, FALSE);
                }

                /* Drop the lookup reference */
                KeAcquireGuardedMutex(&ViewLock);
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosVacbDecRefCount(current);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                KeReleaseGuardedMutex(&ViewLock);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
987
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object.
 * Must be called with ViewLock held; the lock is temporarily dropped
 * around the flush and the VACB teardown, and is held again on return.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    /* Keep the map alive while ViewLock is dropped for the flush */
    SharedCacheMap->RefCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->RefCount--;
    /* Only tear down if nobody re-referenced the map in the meantime */
    if (SharedCacheMap->RefCount == 0)
    {
        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                RemoveEntryList(&current->DirtyVacbListEntry);
                DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
                DPRINT1("Freeing dirty VACB\n");
            }
            /* Collect on a local list; the actual free happens without locks */
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosInternalFreeVacb(current);
        }
        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        /* Re-acquire ViewLock so the caller gets it back held */
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1052
1053 VOID
1054 NTAPI
1055 CcRosReferenceCache (
1056 PFILE_OBJECT FileObject)
1057 {
1058 PROS_SHARED_CACHE_MAP SharedCacheMap;
1059 KeAcquireGuardedMutex(&ViewLock);
1060 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1061 ASSERT(SharedCacheMap);
1062 ASSERT(SharedCacheMap->RefCount != 0);
1063 SharedCacheMap->RefCount++;
1064 KeReleaseGuardedMutex(&ViewLock);
1065 }
1066
1067 VOID
1068 NTAPI
1069 CcRosRemoveIfClosed (
1070 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1071 {
1072 PROS_SHARED_CACHE_MAP SharedCacheMap;
1073 DPRINT("CcRosRemoveIfClosed()\n");
1074 KeAcquireGuardedMutex(&ViewLock);
1075 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1076 if (SharedCacheMap && SharedCacheMap->RefCount == 0)
1077 {
1078 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1079 }
1080 KeReleaseGuardedMutex(&ViewLock);
1081 }
1082
1083
1084 VOID
1085 NTAPI
1086 CcRosDereferenceCache (
1087 PFILE_OBJECT FileObject)
1088 {
1089 PROS_SHARED_CACHE_MAP SharedCacheMap;
1090 KeAcquireGuardedMutex(&ViewLock);
1091 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1092 ASSERT(SharedCacheMap);
1093 if (SharedCacheMap->RefCount > 0)
1094 {
1095 SharedCacheMap->RefCount--;
1096 if (SharedCacheMap->RefCount == 0)
1097 {
1098 MmFreeSectionSegments(SharedCacheMap->FileObject);
1099 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1100 }
1101 }
1102 KeReleaseGuardedMutex(&ViewLock);
1103 }
1104
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed. Unlinks the private cache map and drops the
 * reference it held on the shared cache map, deleting the shared map
 * when that was the last reference.
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
        if (FileObject->PrivateCacheMap != NULL)
        {
            FileObject->PrivateCacheMap = NULL;
            if (SharedCacheMap->RefCount > 0)
            {
                SharedCacheMap->RefCount--;
                if (SharedCacheMap->RefCount == 0)
                {
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}
1138
1139 NTSTATUS
1140 NTAPI
1141 CcTryToInitializeFileCache (
1142 PFILE_OBJECT FileObject)
1143 {
1144 PROS_SHARED_CACHE_MAP SharedCacheMap;
1145 NTSTATUS Status;
1146
1147 KeAcquireGuardedMutex(&ViewLock);
1148
1149 ASSERT(FileObject->SectionObjectPointer);
1150 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1151 if (SharedCacheMap == NULL)
1152 {
1153 Status = STATUS_UNSUCCESSFUL;
1154 }
1155 else
1156 {
1157 if (FileObject->PrivateCacheMap == NULL)
1158 {
1159 FileObject->PrivateCacheMap = SharedCacheMap;
1160 SharedCacheMap->RefCount++;
1161 }
1162 Status = STATUS_SUCCESS;
1163 }
1164 KeReleaseGuardedMutex(&ViewLock);
1165
1166 return Status;
1167 }
1168
1169
NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object.
 * Creates the shared cache map if the file does not have one yet, then
 * links the file object's private cache map to it, taking a reference.
 * Idempotent for file objects that are already set up.
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        /* The cache map keeps the file object alive until it is deleted
         * (released in CcRosDeleteFileCache) */
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->PinAccess = PinAccess;
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
    }
    if (FileObject->PrivateCacheMap == NULL)
    {
        /* Link this file object to the shared map and reference it */
        FileObject->PrivateCacheMap = SharedCacheMap;
        SharedCacheMap->RefCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}
1221
1222 /*
1223 * @implemented
1224 */
1225 PFILE_OBJECT
1226 NTAPI
1227 CcGetFileObjectFromSectionPtrs (
1228 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1229 {
1230 PROS_SHARED_CACHE_MAP SharedCacheMap;
1231
1232 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1233
1234 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1235 {
1236 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1237 ASSERT(SharedCacheMap);
1238 return SharedCacheMap->FileObject;
1239 }
1240 return NULL;
1241 }
1242
/*
 * One-time cache manager initialization: sets up the global VACB lists
 * and lock, the lookaside lists for BCBs, shared cache maps and VACBs,
 * registers the cache as a memory consumer with Mm, and initializes the
 * zero-page support.
 */
VOID
INIT_FUNCTION
NTAPI
CcInitView (
    VOID)
{
    DPRINT("CcInitView()\n");

    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    KeInitializeGuardedMutex(&ViewLock);
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    /* Let Mm call CcRosTrimCache when cache memory must be reclaimed */
    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);

    CcInitCacheZeroPage();
}
1280
1281 /* EOF */