[NTOSKRNL]
[reactos.git] / reactos / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
/* VACBs with modified data awaiting write-back; manipulated under ViewLock. */
static LIST_ENTRY DirtyVacbListHead;
/* All VACBs in least-recently-used order; trimming walks from the head. */
static LIST_ENTRY VacbLruListHead;
/* Pages currently backing dirty VACBs (each VACB accounts
 * VACB_MAPPING_GRANULARITY / PAGE_SIZE pages). */
ULONG DirtyPageCount = 0;

/* Global lock guarding the lists above and shared cache map state. */
KGUARDED_MUTEX ViewLock;

/* Lookaside lists for the cache manager's fixed-size allocations. */
NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
#if DBG
/* Debug build: adjust a VACB's reference count and, when tracing is enabled
 * on the owning shared cache map (see CcRosTraceCacheMap), log the call site
 * and the resulting state. */
static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
{
    ++vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
    --vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
/* Free build: plain increment/decrement without any tracing overhead. */
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
79
80 NTSTATUS
81 CcRosInternalFreeVacb(PROS_VACB Vacb);
82
83
84 /* FUNCTIONS *****************************************************************/
85
/*
 * Enable or disable reference-count tracing for every VACB belonging to the
 * given shared cache map.  When enabling, the current state of each VACB is
 * dumped.  No-op on free builds.
 */
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        /* Lock order used throughout this file: ViewLock first,
         * then the per-map spin lock. */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        /* Dump every VACB currently linked into this cache map. */
        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
131
/*
 * Write a VACB's contents back to the underlying medium.  On success the
 * VACB is removed from the dirty list, the dirty page counter is adjusted
 * and the reference held on behalf of the dirty list is dropped.
 *
 * Returns the status of CcWriteVirtualAddress; on failure the VACB stays
 * dirty and keeps its dirty-list reference.
 */
NTSTATUS
NTAPI
CcRosFlushVacb (
    PROS_VACB Vacb)
{
    NTSTATUS Status;
    KIRQL oldIrql;

    Status = CcWriteVirtualAddress(Vacb);
    if (NT_SUCCESS(Status))
    {
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);

        Vacb->Dirty = FALSE;
        RemoveEntryList(&Vacb->DirtyVacbListEntry);
        DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        /* Drop the reference that was taken when the VACB became dirty
         * (see CcRosReleaseVacb / CcRosMarkDirtyVacb). */
        CcRosVacbDecRefCount(Vacb);

        KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }

    return Status;
}
157
/*
 * Walk the global dirty-VACB list and write VACBs back to disk until either
 * Target pages have been flushed or no flushable VACB remains.
 *
 * Target - number of dirty pages the caller wants written out.
 * Count  - receives the number of pages actually flushed.
 * Wait   - forwarded to the lazy-write callback and the VACB resource
 *          acquisition; if FALSE, VACBs that cannot be locked immediately
 *          are skipped instead of waited for.
 *
 * Always returns STATUS_SUCCESS; individual flush failures are only logged.
 */
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;

    /* Disable normal kernel APCs so the resource acquisitions below are
     * not interrupted; paired with KeLeaveCriticalRegion at the end. */
    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Pin the VACB so it cannot be freed while we drop ViewLock. */
        CcRosVacbIncRefCount(current);

        /* Let the file system synchronize with its own lazy-write state. */
        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        Locked = ExAcquireResourceExclusiveLite(&current->Lock, Wait);
        if (!Locked)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above; another is held by the dirty list.
         * More than two means someone else is using the VACB - skip it. */
        if (current->ReferenceCount > 2)
        {
            ExReleaseResourceLite(&current->Lock);
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Drop ViewLock for the actual I/O; our reference keeps the VACB
         * alive in the meantime. */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        ExReleaseResourceLite(&current->Lock);
        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            (*Count) += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            Target -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        }

        /* The dirty list may have changed while ViewLock was released,
         * so restart the walk from the head. */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
252
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;  /* only one flush-and-retry pass is made */

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Walk the LRU list from the least recently used end. */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            /* NOTE(review): current_entry was captured before the locks were
             * dropped; the LRU list may have changed meanwhile, so the
             * continuation of the walk can follow a stale link - verify. */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
        }

        /* Dereference the VACB */
        CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Unlink from both lists and batch the actual free for after
             * the locks are released. */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Now actually release the batched VACBs without any lock held. */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        CcRosInternalFreeVacb(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
374
/*
 * Release a VACB previously obtained through CcRosGetVacb/CcRosRequestVacb.
 *
 * Valid  - whether the VACB contents are now up to date.
 * Dirty  - TRUE if the caller modified the contents (sticky: once dirty,
 *          a VACB stays dirty until flushed).
 * Mapped - TRUE if the caller created a new user mapping of this VACB.
 *
 * Reference accounting: the caller's lookup reference is dropped; an extra
 * reference is kept while the VACB is on the dirty list, and another while
 * it is mapped (taken on the first mapping only).
 */
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    Vacb->Valid = Valid;

    WasDirty = Vacb->Dirty;
    Vacb->Dirty = Vacb->Dirty || Dirty;

    if (!WasDirty && Vacb->Dirty)
    {
        /* Clean-to-dirty transition: queue for the lazy writer. */
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    if (Mapped)
    {
        Vacb->MappedCount++;
    }
    /* Drop the caller's reference... */
    CcRosVacbDecRefCount(Vacb);
    /* ...but pin the VACB again for its first mapping... */
    if (Mapped && (Vacb->MappedCount == 1))
    {
        CcRosVacbIncRefCount(Vacb);
    }
    /* ...and once more while it sits on the dirty list. */
    if (!WasDirty && Vacb->Dirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    ExReleaseResourceLite(&Vacb->Lock);

    return STATUS_SUCCESS;
}
426
/* Returns with VACB Lock Held! */
/*
 * Find the VACB of SharedCacheMap covering FileOffset.  On success the VACB
 * is returned referenced and with its resource acquired exclusively; returns
 * NULL when no VACB covers the offset.
 */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Reference the VACB before dropping the locks so it cannot
             * be freed while we wait for its resource. */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            ExAcquireResourceExclusiveLite(&current->Lock, TRUE);
            return current;
        }
        /* The per-map list is kept sorted by file offset (see
         * CcRosCreateVacb), so we can stop once we have passed it. */
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}
472
/*
 * Mark the VACB covering FileOffset dirty, queueing it on the global dirty
 * list if it was clean, and move it to the most-recently-used end of the
 * LRU list.  Bugchecks if no VACB covers the offset.
 */
NTSTATUS
NTAPI
CcRosMarkDirtyVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PROS_VACB Vacb;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    if (!Vacb->Dirty)
    {
        /* Newly dirty: the reference taken by the lookup above is kept
         * on behalf of the dirty list. */
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }
    else
    {
        /* Already dirty: drop the lookup reference again. */
        CcRosVacbDecRefCount(Vacb);
    }

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    ExReleaseResourceLite(&Vacb->Lock);

    return STATUS_SUCCESS;
}
518
/*
 * Drop one user mapping of the VACB covering FileOffset, optionally marking
 * it dirty.  Returns STATUS_UNSUCCESSFUL when no VACB covers the offset.
 *
 * Reference accounting mirrors CcRosReleaseVacb: the lookup reference is
 * dropped, a reference is added on a clean-to-dirty transition for the
 * dirty list, and the mapping reference is dropped when the last mapping
 * goes away.
 */
NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    BOOLEAN NowDirty)
{
    PROS_VACB Vacb;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    WasDirty = Vacb->Dirty;
    Vacb->Dirty = Vacb->Dirty || NowDirty;

    Vacb->MappedCount--;

    if (!WasDirty && NowDirty)
    {
        /* Clean-to-dirty transition: queue for the lazy writer. */
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    /* Drop the lookup reference... */
    CcRosVacbDecRefCount(Vacb);
    /* ...keep one for the dirty list if it just became dirty... */
    if (!WasDirty && NowDirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }
    /* ...and release the mapping reference with the last mapping. */
    if (Vacb->MappedCount == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    ExReleaseResourceLite(&Vacb->Lock);

    return STATUS_SUCCESS;
}
571
572 static
573 NTSTATUS
574 CcRosMapVacb(
575 PROS_VACB Vacb)
576 {
577 ULONG i;
578 NTSTATUS Status;
579 ULONG_PTR NumberOfPages;
580
581 /* Create a memory area. */
582 MmLockAddressSpace(MmGetKernelAddressSpace());
583 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
584 0, // nothing checks for VACB mareas, so set to 0
585 &Vacb->BaseAddress,
586 VACB_MAPPING_GRANULARITY,
587 PAGE_READWRITE,
588 (PMEMORY_AREA*)&Vacb->MemoryArea,
589 0,
590 PAGE_SIZE);
591 MmUnlockAddressSpace(MmGetKernelAddressSpace());
592 if (!NT_SUCCESS(Status))
593 {
594 KeBugCheck(CACHE_MANAGER);
595 }
596
597 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
598 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
599
600 /* Create a virtual mapping for this memory area */
601 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
602 for (i = 0; i < NumberOfPages; i++)
603 {
604 PFN_NUMBER PageFrameNumber;
605
606 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
607 if (PageFrameNumber == 0)
608 {
609 DPRINT1("Unable to allocate page\n");
610 KeBugCheck(MEMORY_MANAGEMENT);
611 }
612
613 Status = MmCreateVirtualMapping(NULL,
614 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
615 PAGE_READWRITE,
616 &PageFrameNumber,
617 1);
618 if (!NT_SUCCESS(Status))
619 {
620 DPRINT1("Unable to create virtual mapping\n");
621 KeBugCheck(MEMORY_MANAGEMENT);
622 }
623 }
624
625 return STATUS_SUCCESS;
626 }
627
628 static
629 NTSTATUS
630 CcRosCreateVacb (
631 PROS_SHARED_CACHE_MAP SharedCacheMap,
632 LONGLONG FileOffset,
633 PROS_VACB *Vacb)
634 {
635 PROS_VACB current;
636 PROS_VACB previous;
637 PLIST_ENTRY current_entry;
638 NTSTATUS Status;
639 KIRQL oldIrql;
640
641 ASSERT(SharedCacheMap);
642
643 DPRINT("CcRosCreateVacb()\n");
644
645 if (FileOffset >= SharedCacheMap->FileSize.QuadPart)
646 {
647 *Vacb = NULL;
648 return STATUS_INVALID_PARAMETER;
649 }
650
651 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
652 current->BaseAddress = NULL;
653 current->Valid = FALSE;
654 current->Dirty = FALSE;
655 current->PageOut = FALSE;
656 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
657 current->SharedCacheMap = SharedCacheMap;
658 #if DBG
659 if (SharedCacheMap->Trace)
660 {
661 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
662 }
663 #endif
664 current->MappedCount = 0;
665 current->DirtyVacbListEntry.Flink = NULL;
666 current->DirtyVacbListEntry.Blink = NULL;
667 current->ReferenceCount = 1;
668 ExInitializeResourceLite(&current->Lock);
669 ExAcquireResourceExclusiveLite(&current->Lock, TRUE);
670 KeAcquireGuardedMutex(&ViewLock);
671
672 *Vacb = current;
673 /* There is window between the call to CcRosLookupVacb
674 * and CcRosCreateVacb. We must check if a VACB for the
675 * file offset exist. If there is a VACB, we release
676 * our newly created VACB and return the existing one.
677 */
678 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
679 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
680 previous = NULL;
681 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
682 {
683 current = CONTAINING_RECORD(current_entry,
684 ROS_VACB,
685 CacheMapVacbListEntry);
686 if (IsPointInRange(current->FileOffset.QuadPart,
687 VACB_MAPPING_GRANULARITY,
688 FileOffset))
689 {
690 CcRosVacbIncRefCount(current);
691 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
692 #if DBG
693 if (SharedCacheMap->Trace)
694 {
695 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
696 SharedCacheMap,
697 (*Vacb),
698 current);
699 }
700 #endif
701 ExReleaseResourceLite(&(*Vacb)->Lock);
702 KeReleaseGuardedMutex(&ViewLock);
703 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
704 *Vacb = current;
705 ExAcquireResourceExclusiveLite(&current->Lock, TRUE);
706 return STATUS_SUCCESS;
707 }
708 if (current->FileOffset.QuadPart < FileOffset)
709 {
710 ASSERT(previous == NULL ||
711 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
712 previous = current;
713 }
714 if (current->FileOffset.QuadPart > FileOffset)
715 break;
716 current_entry = current_entry->Flink;
717 }
718 /* There was no existing VACB. */
719 current = *Vacb;
720 if (previous)
721 {
722 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
723 }
724 else
725 {
726 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
727 }
728 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
729 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
730 KeReleaseGuardedMutex(&ViewLock);
731
732 MI_SET_USAGE(MI_USAGE_CACHE);
733 #if MI_TRACE_PFNS
734 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
735 {
736 PWCHAR pos = NULL;
737 ULONG len = 0;
738 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
739 len = wcslen(pos) * sizeof(WCHAR);
740 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
741 }
742 #endif
743
744 Status = CcRosMapVacb(current);
745
746 return Status;
747 }
748
/*
 * Return a referenced, exclusively locked VACB covering FileOffset, creating
 * and mapping a new one if none exists yet.  The VACB is also promoted to
 * the most-recently-used end of the LRU list.
 *
 * BaseOffset  - receives the granularity-aligned start offset of the VACB.
 * BaseAddress - receives the kernel virtual address of the view.
 * UptoDate    - receives whether the view contents are valid.
 * Vacb        - receives the VACB itself.
 */
NTSTATUS
NTAPI
CcRosGetVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    PLONGLONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PROS_VACB *Vacb)
{
    PROS_VACB current;
    NTSTATUS Status;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosGetVacb()\n");

    /*
     * Look for a VACB already mapping the same data.
     */
    current = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new VACB.
         */
        Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the VACB to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *Vacb = current;
    *BaseOffset = current->FileOffset.QuadPart;
    return STATUS_SUCCESS;
}
800
801 NTSTATUS
802 NTAPI
803 CcRosRequestVacb (
804 PROS_SHARED_CACHE_MAP SharedCacheMap,
805 LONGLONG FileOffset,
806 PVOID* BaseAddress,
807 PBOOLEAN UptoDate,
808 PROS_VACB *Vacb)
809 /*
810 * FUNCTION: Request a page mapping for a shared cache map
811 */
812 {
813 LONGLONG BaseOffset;
814
815 ASSERT(SharedCacheMap);
816
817 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
818 {
819 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
820 FileOffset, VACB_MAPPING_GRANULARITY);
821 KeBugCheck(CACHE_MANAGER);
822 }
823
824 return CcRosGetVacb(SharedCacheMap,
825 FileOffset,
826 &BaseOffset,
827 BaseAddress,
828 UptoDate,
829 Vacb);
830 }
831
832 static
833 VOID
834 CcFreeCachePage (
835 PVOID Context,
836 MEMORY_AREA* MemoryArea,
837 PVOID Address,
838 PFN_NUMBER Page,
839 SWAPENTRY SwapEntry,
840 BOOLEAN Dirty)
841 {
842 ASSERT(SwapEntry == 0);
843 if (Page != 0)
844 {
845 ASSERT(MmGetReferenceCountPage(Page) == 1);
846 MmReleasePageMemoryConsumer(MC_CACHE, Page);
847 }
848 }
849
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 *
 * Tears down the view's memory area (returning its pages through
 * CcFreeCachePage), deletes the VACB's resource and returns the VACB
 * structure to its lookaside list.  The caller must have already unlinked
 * the VACB from all lists and dropped all references.
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());
    ExDeleteResourceLite(&Vacb->Lock);

    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
876
877 /*
878 * @implemented
879 */
/*
 * Flush the dirty VACBs of a cached file to disk.
 *
 * FileOffset/Length select the byte range to flush; when FileOffset is NULL
 * the whole file (per the shared cache map's file size) is flushed.
 * IoStatus, when supplied, receives the status of the last failing flush or
 * STATUS_SUCCESS; STATUS_INVALID_PARAMETER when the section has no shared
 * cache map.  Note that IoStatus->Information is left at 0.
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;
    KIRQL oldIrql;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
            SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            /* No offset given: flush the entire file. */
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one VACB-sized window at a time. */
        while (RemainingLength > 0)
        {
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }
                ExReleaseResourceLite(&current->Lock);

                /* Drop the reference taken by the lookup. */
                KeAcquireGuardedMutex(&ViewLock);
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosVacbDecRefCount(current);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                KeReleaseGuardedMutex(&ViewLock);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
956
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 *
 * Must be called with ViewLock held; it is temporarily released around the
 * flush and the VACB teardown, and is held again on return.  The temporary
 * RefCount increment keeps the map alive while ViewLock is dropped.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    /* Pin the map, then drop ViewLock so the flush can take it itself. */
    SharedCacheMap->RefCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->RefCount--;
    if (SharedCacheMap->RefCount == 0)
    {
        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* Still dirty after the flush above - its data is lost. */
                RemoveEntryList(&current->DirtyVacbListEntry);
                DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
                DPRINT1("Freeing dirty VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject(SharedCacheMap->FileObject);

        /* Free the batched VACBs without any lock held. */
        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosInternalFreeVacb(current);
        }
        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        /* Re-take ViewLock so we return in the state we were entered in. */
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1021
/*
 * Take an additional reference on the shared cache map of a file object
 * that is already cached (RefCount must be non-zero).
 */
VOID
NTAPI
CcRosReferenceCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    ASSERT(SharedCacheMap->RefCount != 0);
    SharedCacheMap->RefCount++;
    KeReleaseGuardedMutex(&ViewLock);
}
1035
/*
 * Tear down the shared cache map of a section if no references to it
 * remain; no-op when the file is still cached or was never cached.
 */
VOID
NTAPI
CcRosRemoveIfClosed (
    PSECTION_OBJECT_POINTERS SectionObjectPointer)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    DPRINT("CcRosRemoveIfClosed()\n");
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap && SharedCacheMap->RefCount == 0)
    {
        /* CcRosDeleteFileCache expects ViewLock held and returns with it held. */
        CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
    }
    KeReleaseGuardedMutex(&ViewLock);
}
1051
1052
/*
 * Drop one reference on the shared cache map of a file object; when the
 * last reference goes away, free the file's section segments and tear the
 * cache map down.
 */
VOID
NTAPI
CcRosDereferenceCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(SharedCacheMap);
    if (SharedCacheMap->RefCount > 0)
    {
        SharedCacheMap->RefCount--;
        if (SharedCacheMap->RefCount == 0)
        {
            MmFreeSectionSegments(SharedCacheMap->FileObject);
            /* Called with ViewLock held; returns with it held. */
            CcRosDeleteFileCache(FileObject, SharedCacheMap);
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
}
1073
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 *
 * Clears the file object's private cache map and drops the reference it
 * held on the shared cache map, tearing the cache down when that was the
 * last reference.  No-op if the file object was not cached.
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    KeAcquireGuardedMutex(&ViewLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
        if (FileObject->PrivateCacheMap != NULL)
        {
            FileObject->PrivateCacheMap = NULL;
            if (SharedCacheMap->RefCount > 0)
            {
                SharedCacheMap->RefCount--;
                if (SharedCacheMap->RefCount == 0)
                {
                    MmFreeSectionSegments(SharedCacheMap->FileObject);
                    /* Called with ViewLock held; returns with it held. */
                    CcRosDeleteFileCache(FileObject, SharedCacheMap);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}
1107
/*
 * Attach a file object to an already existing shared cache map by setting
 * up its private cache map and referencing the shared map.  Fails with
 * STATUS_UNSUCCESSFUL when the file has no shared cache map yet (use
 * CcRosInitializeFileCache to create one).
 */
NTSTATUS
NTAPI
CcTryToInitializeFileCache (
    PFILE_OBJECT FileObject)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    NTSTATUS Status;

    KeAcquireGuardedMutex(&ViewLock);

    ASSERT(FileObject->SectionObjectPointer);
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    if (SharedCacheMap == NULL)
    {
        Status = STATUS_UNSUCCESSFUL;
    }
    else
    {
        /* Only take a reference the first time this file object attaches. */
        if (FileObject->PrivateCacheMap == NULL)
        {
            FileObject->PrivateCacheMap = SharedCacheMap;
            SharedCacheMap->RefCount++;
        }
        Status = STATUS_SUCCESS;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return Status;
}
1137
1138
NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    PCC_FILE_SIZES FileSizes,
    BOOLEAN PinAccess,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a shared cache map for a file object
 *
 * Creates the shared cache map on first use (referencing the file object
 * and recording the caller's sizes and lazy-write callbacks) and attaches
 * this file object to it via its private cache map.  Safe to call for a
 * file that is already cached.
 */
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
           FileObject, SharedCacheMap);

    KeAcquireGuardedMutex(&ViewLock);
    if (SharedCacheMap == NULL)
    {
        SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
        if (SharedCacheMap == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
        /* The cache map keeps the file object alive until teardown
         * (dereferenced in CcRosDeleteFileCache). */
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        SharedCacheMap->FileObject = FileObject;
        SharedCacheMap->Callbacks = CallBacks;
        SharedCacheMap->LazyWriteContext = LazyWriterContext;
        SharedCacheMap->SectionSize = FileSizes->AllocationSize;
        SharedCacheMap->FileSize = FileSizes->FileSize;
        SharedCacheMap->PinAccess = PinAccess;
        KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
        InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
        FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
    }
    /* Attach this file object if it is not attached yet. */
    if (FileObject->PrivateCacheMap == NULL)
    {
        FileObject->PrivateCacheMap = SharedCacheMap;
        SharedCacheMap->RefCount++;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return STATUS_SUCCESS;
}
1190
1191 /*
1192 * @implemented
1193 */
1194 PFILE_OBJECT
1195 NTAPI
1196 CcGetFileObjectFromSectionPtrs (
1197 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1198 {
1199 PROS_SHARED_CACHE_MAP SharedCacheMap;
1200
1201 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1202
1203 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1204 {
1205 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1206 ASSERT(SharedCacheMap);
1207 return SharedCacheMap->FileObject;
1208 }
1209 return NULL;
1210 }
1211
/*
 * One-time cache manager initialization: set up the global lists and lock,
 * the lookaside lists for BCBs, shared cache maps and VACBs, register the
 * cache as an Mm memory consumer, and initialize the zero-page support.
 */
VOID
INIT_FUNCTION
NTAPI
CcInitView (
    VOID)
{
    DPRINT("CcInitView()\n");

    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    KeInitializeGuardedMutex(&ViewLock);
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    /* Let Mm call CcRosTrimCache when it needs cache pages back. */
    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);

    CcInitCacheZeroPage();
}
1249
1250 /* EOF */