[NTOSKRNL] Implement CcIsThereDirtyData()
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows:
17 *
18 * (1) If caching for the FCB hasn't been initiated, then do so by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written, obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If the page is being read or not completely written, and it is
25 * not up to date, then read its data from the underlying medium. If the read
26 * fails, then call CcReleaseCachePage with VALID as FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page. (A sketch of these steps follows these notes.)
31 */
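
/* EXAMPLE *********************************************************************
 *
 * An illustrative sketch (not taken from any ReactOS filesystem) of the read
 * path described in the notes above, expressed with the VACB helpers
 * implemented later in this file; CcRosRequestVacb and CcRosReleaseVacb play
 * the role of CcRequestCachePage and CcReleaseCachePage. FsdReadVacbFromDisk
 * and the aligned-offset assumption are hypothetical simplifications.
 *
 * NTSTATUS
 * FsdCachedRead(
 *     PFILE_OBJECT FileObject,
 *     LONGLONG FileOffset,        // assumed VACB-aligned for brevity
 *     ULONG Length,               // assumed to fit within one view
 *     PVOID Buffer)
 * {
 *     PROS_SHARED_CACHE_MAP SharedCacheMap;
 *     PROS_VACB Vacb;
 *     PVOID BaseAddress;
 *     BOOLEAN UptoDate;
 *     NTSTATUS Status;
 *
 *     SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
 *
 *     // (2) Obtain the view covering this file offset.
 *     Status = CcRosRequestVacb(SharedCacheMap, FileOffset,
 *                               &BaseAddress, &UptoDate, &Vacb);
 *     if (!NT_SUCCESS(Status))
 *         return Status;
 *
 *     // (3) If the view is not up to date, read it from the medium.
 *     if (!UptoDate)
 *     {
 *         Status = FsdReadVacbFromDisk(FileObject, FileOffset, BaseAddress);
 *         if (!NT_SUCCESS(Status))
 *         {
 *             // Release with Valid == FALSE and fail the request.
 *             CcRosReleaseVacb(SharedCacheMap, Vacb, FALSE, FALSE, FALSE);
 *             return Status;
 *         }
 *     }
 *
 *     // (4) Copy the cached data into the caller's buffer.
 *     RtlCopyMemory(Buffer, BaseAddress, Length);
 *
 *     // (5) Release the view: it is now valid, was not dirtied, not mapped.
 *     return CcRosReleaseVacb(SharedCacheMap, Vacb, TRUE, FALSE, FALSE);
 * }
 */
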
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 LIST_ENTRY DirtyVacbListHead;
45 static LIST_ENTRY VacbLruListHead;
46 ULONG DirtyPageCount = 0;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
54 #if DBG
55 static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
56 {
57 ++vacb->ReferenceCount;
58 if (vacb->SharedCacheMap->Trace)
59 {
60 DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
61 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
62 }
63 }
64 static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
65 {
66 --vacb->ReferenceCount;
67 if (vacb->SharedCacheMap->Trace)
68 {
69 DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
70 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
71 }
72 }
73 #define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
74 #define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
75 #else
76 #define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
77 #define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
78 #endif
79
80 NTSTATUS
81 CcRosInternalFreeVacb(PROS_VACB Vacb);
82
83
84 /* FUNCTIONS *****************************************************************/
85
86 VOID
87 NTAPI
88 CcRosTraceCacheMap (
89 PROS_SHARED_CACHE_MAP SharedCacheMap,
90 BOOLEAN Trace )
91 {
92 #if DBG
93 KIRQL oldirql;
94 PLIST_ENTRY current_entry;
95 PROS_VACB current;
96
97 if (!SharedCacheMap)
98 return;
99
100 SharedCacheMap->Trace = Trace;
101
102 if (Trace)
103 {
104 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
105
106 KeAcquireGuardedMutex(&ViewLock);
107 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);
108
109 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
110 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
111 {
112 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
113 current_entry = current_entry->Flink;
114
115 DPRINT1(" VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
116 current, current->ReferenceCount, current->Dirty, current->PageOut );
117 }
118 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
119 KeReleaseGuardedMutex(&ViewLock);
120 }
121 else
122 {
123 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
124 }
125
126 #else
127 UNREFERENCED_PARAMETER(SharedCacheMap);
128 UNREFERENCED_PARAMETER(Trace);
129 #endif
130 }
131
132 NTSTATUS
133 NTAPI
134 CcRosFlushVacb (
135 PROS_VACB Vacb)
136 {
137 NTSTATUS Status;
138 KIRQL oldIrql;
139
140 Status = CcWriteVirtualAddress(Vacb);
141 if (NT_SUCCESS(Status))
142 {
143 KeAcquireGuardedMutex(&ViewLock);
144 KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
145
146 Vacb->Dirty = FALSE;
147 RemoveEntryList(&Vacb->DirtyVacbListEntry);
148 DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
149 CcRosVacbDecRefCount(Vacb);
150
151 KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
152 KeReleaseGuardedMutex(&ViewLock);
153 }
154
155 return Status;
156 }
157
158 NTSTATUS
159 NTAPI
160 CcRosFlushDirtyPages (
161 ULONG Target,
162 PULONG Count,
163 BOOLEAN Wait)
164 {
165 PLIST_ENTRY current_entry;
166 PROS_VACB current;
167 BOOLEAN Locked;
168 NTSTATUS Status;
169 LARGE_INTEGER ZeroTimeout;
170
171 DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);
172
173 (*Count) = 0;
174 ZeroTimeout.QuadPart = 0;
175
176 KeEnterCriticalRegion();
177 KeAcquireGuardedMutex(&ViewLock);
178
179 current_entry = DirtyVacbListHead.Flink;
180 if (current_entry == &DirtyVacbListHead)
181 {
182 DPRINT("No Dirty pages\n");
183 }
184
185 while ((current_entry != &DirtyVacbListHead) && (Target > 0))
186 {
187 current = CONTAINING_RECORD(current_entry,
188 ROS_VACB,
189 DirtyVacbListEntry);
190 current_entry = current_entry->Flink;
191
192 CcRosVacbIncRefCount(current);
193
194 Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
195 current->SharedCacheMap->LazyWriteContext, Wait);
196 if (!Locked)
197 {
198 CcRosVacbDecRefCount(current);
199 continue;
200 }
201
202 Status = CcRosAcquireVacbLock(current,
203 Wait ? NULL : &ZeroTimeout);
204 if (Status != STATUS_SUCCESS)
205 {
206 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
207 current->SharedCacheMap->LazyWriteContext);
208 CcRosVacbDecRefCount(current);
209 continue;
210 }
211
212 ASSERT(current->Dirty);
213
214 /* Skip VACBs still referenced elsewhere: one reference was taken above and a dirty VACB holds one itself */
215 if (current->ReferenceCount > 2)
216 {
217 CcRosReleaseVacbLock(current);
218 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
219 current->SharedCacheMap->LazyWriteContext);
220 CcRosVacbDecRefCount(current);
221 continue;
222 }
223
224 KeReleaseGuardedMutex(&ViewLock);
225
226 Status = CcRosFlushVacb(current);
227
228 CcRosReleaseVacbLock(current);
229 current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
230 current->SharedCacheMap->LazyWriteContext);
231
232 KeAcquireGuardedMutex(&ViewLock);
233 CcRosVacbDecRefCount(current);
234
235 if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
236 (Status != STATUS_MEDIA_WRITE_PROTECTED))
237 {
238 DPRINT1("CC: Failed to flush VACB.\n");
239 }
240 else
241 {
242 (*Count) += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
243 Target -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
244 }
245
246 current_entry = DirtyVacbListHead.Flink;
247 }
248
249 KeReleaseGuardedMutex(&ViewLock);
250 KeLeaveCriticalRegion();
251
252 DPRINT("CcRosFlushDirtyPages() finished\n");
253 return STATUS_SUCCESS;
254 }
255
256 NTSTATUS
257 CcRosTrimCache (
258 ULONG Target,
259 ULONG Priority,
260 PULONG NrFreed)
261 /*
262 * FUNCTION: Try to free some memory from the file cache.
263 * ARGUMENTS:
264 * Target - The number of pages to be freed.
265 * Priority - The priority of the free operation (currently unused).
266 * NrFreed - Points to a variable where the number of pages
267 * actually freed is returned.
268 */
269 {
270 PLIST_ENTRY current_entry;
271 PROS_VACB current;
272 ULONG PagesFreed;
273 KIRQL oldIrql;
274 LIST_ENTRY FreeList;
275 PFN_NUMBER Page;
276 ULONG i;
277 BOOLEAN FlushedPages = FALSE;
278
279 DPRINT("CcRosTrimCache(Target %lu)\n", Target);
280
281 InitializeListHead(&FreeList);
282
283 *NrFreed = 0;
284
285 retry:
286 KeAcquireGuardedMutex(&ViewLock);
287
288 current_entry = VacbLruListHead.Flink;
289 while (current_entry != &VacbLruListHead)
290 {
291 current = CONTAINING_RECORD(current_entry,
292 ROS_VACB,
293 VacbLruListEntry);
294 current_entry = current_entry->Flink;
295
296 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
297
298 /* Reference the VACB */
299 CcRosVacbIncRefCount(current);
300
301 /* Check if it's mapped and not dirty */
302 if (current->MappedCount > 0 && !current->Dirty)
303 {
304 /* We have to break these locks because Cc sucks */
305 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
306 KeReleaseGuardedMutex(&ViewLock);
307
308 /* Page out the VACB */
309 for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
310 {
311 Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
312
313 MmPageOutPhysicalAddress(Page);
314 }
315
316 /* Reacquire the locks */
317 KeAcquireGuardedMutex(&ViewLock);
318 KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);
319 }
320
321 /* Dereference the VACB */
322 CcRosVacbDecRefCount(current);
323
324 /* Check if we can free this entry now */
325 if (current->ReferenceCount == 0)
326 {
327 ASSERT(!current->Dirty);
328 ASSERT(!current->MappedCount);
329
330 RemoveEntryList(&current->CacheMapVacbListEntry);
331 RemoveEntryList(&current->VacbLruListEntry);
332 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
333
334 /* Calculate how many pages we freed for Mm */
335 PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
336 Target -= PagesFreed;
337 (*NrFreed) += PagesFreed;
338 }
339
340 KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
341 }
342
343 KeReleaseGuardedMutex(&ViewLock);
344
345 /* Try flushing pages if we haven't met our target */
346 if ((Target > 0) && !FlushedPages)
347 {
348 /* Flush dirty pages to disk */
349 CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
350 FlushedPages = TRUE;
351
352 /* We can only swap as many pages as we flushed */
353 if (PagesFreed < Target) Target = PagesFreed;
354
355 /* Check if we flushed anything */
356 if (PagesFreed != 0)
357 {
358 /* Try again after flushing dirty pages */
359 DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
360 goto retry;
361 }
362 }
363
364 while (!IsListEmpty(&FreeList))
365 {
366 current_entry = RemoveHeadList(&FreeList);
367 current = CONTAINING_RECORD(current_entry,
368 ROS_VACB,
369 CacheMapVacbListEntry);
370 CcRosInternalFreeVacb(current);
371 }
372
373 DPRINT("Evicted %lu cache pages\n", (*NrFreed));
374
375 return STATUS_SUCCESS;
376 }
377
378 NTSTATUS
379 NTAPI
380 CcRosReleaseVacb (
381 PROS_SHARED_CACHE_MAP SharedCacheMap,
382 PROS_VACB Vacb,
383 BOOLEAN Valid,
384 BOOLEAN Dirty,
385 BOOLEAN Mapped)
386 {
387 BOOLEAN WasDirty;
388 KIRQL oldIrql;
389
390 ASSERT(SharedCacheMap);
391
392 DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
393 SharedCacheMap, Vacb, Valid);
394
395 KeAcquireGuardedMutex(&ViewLock);
396 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
397
398 Vacb->Valid = Valid;
399
400 WasDirty = Vacb->Dirty;
401 Vacb->Dirty = Vacb->Dirty || Dirty;
402
403 if (!WasDirty && Vacb->Dirty)
404 {
405 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
406 DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
407 }
408
409 if (Mapped)
410 {
411 Vacb->MappedCount++;
412 }
413 CcRosVacbDecRefCount(Vacb);
414 if (Mapped && (Vacb->MappedCount == 1))
415 {
416 CcRosVacbIncRefCount(Vacb);
417 }
418 if (!WasDirty && Vacb->Dirty)
419 {
420 CcRosVacbIncRefCount(Vacb);
421 }
422
423 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
424 KeReleaseGuardedMutex(&ViewLock);
425 CcRosReleaseVacbLock(Vacb);
426
427 return STATUS_SUCCESS;
428 }
429
430 /* Returns with VACB Lock Held! */
431 PROS_VACB
432 NTAPI
433 CcRosLookupVacb (
434 PROS_SHARED_CACHE_MAP SharedCacheMap,
435 LONGLONG FileOffset)
436 {
437 PLIST_ENTRY current_entry;
438 PROS_VACB current;
439 KIRQL oldIrql;
440
441 ASSERT(SharedCacheMap);
442
443 DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
444 SharedCacheMap, FileOffset);
445
446 KeAcquireGuardedMutex(&ViewLock);
447 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
448
449 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
450 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
451 {
452 current = CONTAINING_RECORD(current_entry,
453 ROS_VACB,
454 CacheMapVacbListEntry);
455 if (IsPointInRange(current->FileOffset.QuadPart,
456 VACB_MAPPING_GRANULARITY,
457 FileOffset))
458 {
459 CcRosVacbIncRefCount(current);
460 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
461 KeReleaseGuardedMutex(&ViewLock);
462 CcRosAcquireVacbLock(current, NULL);
463 return current;
464 }
465 if (current->FileOffset.QuadPart > FileOffset)
466 break;
467 current_entry = current_entry->Flink;
468 }
469
470 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
471 KeReleaseGuardedMutex(&ViewLock);
472
473 return NULL;
474 }
475
476 NTSTATUS
477 NTAPI
478 CcRosMarkDirtyVacb (
479 PROS_SHARED_CACHE_MAP SharedCacheMap,
480 LONGLONG FileOffset)
481 {
482 PROS_VACB Vacb;
483 KIRQL oldIrql;
484
485 ASSERT(SharedCacheMap);
486
487 DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
488 SharedCacheMap, FileOffset);
489
490 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
491 if (Vacb == NULL)
492 {
493 KeBugCheck(CACHE_MANAGER);
494 }
495
496 KeAcquireGuardedMutex(&ViewLock);
497 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
498
499 if (!Vacb->Dirty)
500 {
501 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
502 DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
503 }
504 else
505 {
506 CcRosVacbDecRefCount(Vacb);
507 }
508
509 /* Move to the tail of the LRU list */
510 RemoveEntryList(&Vacb->VacbLruListEntry);
511 InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);
512
513 Vacb->Dirty = TRUE;
514
515 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
516 KeReleaseGuardedMutex(&ViewLock);
517 CcRosReleaseVacbLock(Vacb);
518
519 return STATUS_SUCCESS;
520 }
521
522 NTSTATUS
523 NTAPI
524 CcRosUnmapVacb (
525 PROS_SHARED_CACHE_MAP SharedCacheMap,
526 LONGLONG FileOffset,
527 BOOLEAN NowDirty)
528 {
529 PROS_VACB Vacb;
530 BOOLEAN WasDirty;
531 KIRQL oldIrql;
532
533 ASSERT(SharedCacheMap);
534
535 DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
536 SharedCacheMap, FileOffset, NowDirty);
537
538 Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
539 if (Vacb == NULL)
540 {
541 return STATUS_UNSUCCESSFUL;
542 }
543
544 KeAcquireGuardedMutex(&ViewLock);
545 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
546
547 WasDirty = Vacb->Dirty;
548 Vacb->Dirty = Vacb->Dirty || NowDirty;
549
550 Vacb->MappedCount--;
551
552 if (!WasDirty && NowDirty)
553 {
554 InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
555 DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
556 }
557
558 CcRosVacbDecRefCount(Vacb);
559 if (!WasDirty && NowDirty)
560 {
561 CcRosVacbIncRefCount(Vacb);
562 }
563 if (Vacb->MappedCount == 0)
564 {
565 CcRosVacbDecRefCount(Vacb);
566 }
567
568 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
569 KeReleaseGuardedMutex(&ViewLock);
570 CcRosReleaseVacbLock(Vacb);
571
572 return STATUS_SUCCESS;
573 }
574
575 static
576 NTSTATUS
577 CcRosMapVacb(
578 PROS_VACB Vacb)
579 {
580 ULONG i;
581 NTSTATUS Status;
582 ULONG_PTR NumberOfPages;
583
584 /* Create a memory area. */
585 MmLockAddressSpace(MmGetKernelAddressSpace());
586 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
587 0, // nothing checks for VACB mareas, so set to 0
588 &Vacb->BaseAddress,
589 VACB_MAPPING_GRANULARITY,
590 PAGE_READWRITE,
591 (PMEMORY_AREA*)&Vacb->MemoryArea,
592 0,
593 PAGE_SIZE);
594 MmUnlockAddressSpace(MmGetKernelAddressSpace());
595 if (!NT_SUCCESS(Status))
596 {
597 DPRINT1("MmCreateMemoryArea failed with %lx for VACB %p\n", Status, Vacb);
598 return Status;
599 }
600
601 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
602 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
603
604 /* Create a virtual mapping for this memory area */
605 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
606 for (i = 0; i < NumberOfPages; i++)
607 {
608 PFN_NUMBER PageFrameNumber;
609
610 MI_SET_USAGE(MI_USAGE_CACHE);
611 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
612 if (PageFrameNumber == 0)
613 {
614 DPRINT1("Unable to allocate page\n");
615 KeBugCheck(MEMORY_MANAGEMENT);
616 }
617
618 Status = MmCreateVirtualMapping(NULL,
619 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
620 PAGE_READWRITE,
621 &PageFrameNumber,
622 1);
623 if (!NT_SUCCESS(Status))
624 {
625 DPRINT1("Unable to create virtual mapping\n");
626 KeBugCheck(MEMORY_MANAGEMENT);
627 }
628 }
629
630 return STATUS_SUCCESS;
631 }
632
633 static
634 NTSTATUS
635 CcRosCreateVacb (
636 PROS_SHARED_CACHE_MAP SharedCacheMap,
637 LONGLONG FileOffset,
638 PROS_VACB *Vacb)
639 {
640 PROS_VACB current;
641 PROS_VACB previous;
642 PLIST_ENTRY current_entry;
643 NTSTATUS Status;
644 KIRQL oldIrql;
645
646 ASSERT(SharedCacheMap);
647
648 DPRINT("CcRosCreateVacb()\n");
649
650 if (FileOffset >= SharedCacheMap->SectionSize.QuadPart)
651 {
652 *Vacb = NULL;
653 return STATUS_INVALID_PARAMETER;
654 }
655
656 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
657 current->BaseAddress = NULL;
658 current->Valid = FALSE;
659 current->Dirty = FALSE;
660 current->PageOut = FALSE;
661 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
662 current->SharedCacheMap = SharedCacheMap;
663 #if DBG
664 if (SharedCacheMap->Trace)
665 {
666 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
667 }
668 #endif
669 current->MappedCount = 0;
670 current->DirtyVacbListEntry.Flink = NULL;
671 current->DirtyVacbListEntry.Blink = NULL;
672 current->ReferenceCount = 1;
673 current->PinCount = 0;
674 KeInitializeMutex(&current->Mutex, 0);
675 CcRosAcquireVacbLock(current, NULL);
676 KeAcquireGuardedMutex(&ViewLock);
677
678 *Vacb = current;
679 /* There is a window between the call to CcRosLookupVacb
680 * and CcRosCreateVacb. We must check whether a VACB for this
681 * file offset already exists. If one does, we release
682 * our newly created VACB and return the existing one.
683 */
684 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
685 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
686 previous = NULL;
687 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
688 {
689 current = CONTAINING_RECORD(current_entry,
690 ROS_VACB,
691 CacheMapVacbListEntry);
692 if (IsPointInRange(current->FileOffset.QuadPart,
693 VACB_MAPPING_GRANULARITY,
694 FileOffset))
695 {
696 CcRosVacbIncRefCount(current);
697 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
698 #if DBG
699 if (SharedCacheMap->Trace)
700 {
701 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
702 SharedCacheMap,
703 (*Vacb),
704 current);
705 }
706 #endif
707 CcRosReleaseVacbLock(*Vacb);
708 KeReleaseGuardedMutex(&ViewLock);
709 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
710 *Vacb = current;
711 CcRosAcquireVacbLock(current, NULL);
712 return STATUS_SUCCESS;
713 }
714 if (current->FileOffset.QuadPart < FileOffset)
715 {
716 ASSERT(previous == NULL ||
717 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
718 previous = current;
719 }
720 if (current->FileOffset.QuadPart > FileOffset)
721 break;
722 current_entry = current_entry->Flink;
723 }
724 /* There was no existing VACB. */
725 current = *Vacb;
726 if (previous)
727 {
728 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
729 }
730 else
731 {
732 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
733 }
734 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
735 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
736 KeReleaseGuardedMutex(&ViewLock);
737
738 MI_SET_USAGE(MI_USAGE_CACHE);
739 #if MI_TRACE_PFNS
740 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
741 {
742 PWCHAR pos = NULL;
743 ULONG len = 0;
744 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
745 if (pos)
746 {
747 len = wcslen(pos) * sizeof(WCHAR);
748 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
749 }
750 else
751 {
752 snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%wZ", &SharedCacheMap->FileObject->FileName);
753 }
754 }
755 #endif
756
757 Status = CcRosMapVacb(current);
758 if (!NT_SUCCESS(Status))
759 {
760 RemoveEntryList(&current->CacheMapVacbListEntry);
761 RemoveEntryList(&current->VacbLruListEntry);
762 CcRosReleaseVacbLock(current);
763 ExFreeToNPagedLookasideList(&VacbLookasideList, current);
764 }
765
766 return Status;
767 }
768
769 NTSTATUS
770 NTAPI
771 CcRosGetVacb (
772 PROS_SHARED_CACHE_MAP SharedCacheMap,
773 LONGLONG FileOffset,
774 PLONGLONG BaseOffset,
775 PVOID* BaseAddress,
776 PBOOLEAN UptoDate,
777 PROS_VACB *Vacb)
778 {
779 PROS_VACB current;
780 NTSTATUS Status;
781
782 ASSERT(SharedCacheMap);
783
784 DPRINT("CcRosGetVacb()\n");
785
786 /*
787 * Look for a VACB already mapping the same data.
788 */
789 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
790 if (current == NULL)
791 {
792 /*
793 * Otherwise create a new VACB.
794 */
795 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
796 if (!NT_SUCCESS(Status))
797 {
798 return Status;
799 }
800 }
801
802 KeAcquireGuardedMutex(&ViewLock);
803
804 /* Move to the tail of the LRU list */
805 RemoveEntryList(&current->VacbLruListEntry);
806 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
807
808 KeReleaseGuardedMutex(&ViewLock);
809
810 /*
811 * Return information about the VACB to the caller.
812 */
813 *UptoDate = current->Valid;
814 *BaseAddress = current->BaseAddress;
815 DPRINT("*BaseAddress %p\n", *BaseAddress);
816 *Vacb = current;
817 *BaseOffset = current->FileOffset.QuadPart;
818 return STATUS_SUCCESS;
819 }
820
821 NTSTATUS
822 NTAPI
823 CcRosRequestVacb (
824 PROS_SHARED_CACHE_MAP SharedCacheMap,
825 LONGLONG FileOffset,
826 PVOID* BaseAddress,
827 PBOOLEAN UptoDate,
828 PROS_VACB *Vacb)
829 /*
830 * FUNCTION: Request a page mapping for a shared cache map
831 */
832 {
833 LONGLONG BaseOffset;
834
835 ASSERT(SharedCacheMap);
836
837 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
838 {
839 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
840 FileOffset, VACB_MAPPING_GRANULARITY);
841 KeBugCheck(CACHE_MANAGER);
842 }
843
844 return CcRosGetVacb(SharedCacheMap,
845 FileOffset,
846 &BaseOffset,
847 BaseAddress,
848 UptoDate,
849 Vacb);
850 }
851
852 static
853 VOID
854 CcFreeCachePage (
855 PVOID Context,
856 MEMORY_AREA* MemoryArea,
857 PVOID Address,
858 PFN_NUMBER Page,
859 SWAPENTRY SwapEntry,
860 BOOLEAN Dirty)
861 {
862 ASSERT(SwapEntry == 0);
863 if (Page != 0)
864 {
865 ASSERT(MmGetReferenceCountPage(Page) == 1);
866 MmReleasePageMemoryConsumer(MC_CACHE, Page);
867 }
868 }
869
870 NTSTATUS
871 CcRosInternalFreeVacb (
872 PROS_VACB Vacb)
873 /*
874 * FUNCTION: Releases a VACB associated with a shared cache map
875 */
876 {
877 DPRINT("Freeing VACB 0x%p\n", Vacb);
878 #if DBG
879 if (Vacb->SharedCacheMap->Trace)
880 {
881 DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
882 }
883 #endif
884
885 MmLockAddressSpace(MmGetKernelAddressSpace());
886 MmFreeMemoryArea(MmGetKernelAddressSpace(),
887 Vacb->MemoryArea,
888 CcFreeCachePage,
889 NULL);
890 MmUnlockAddressSpace(MmGetKernelAddressSpace());
891
892 ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
893 return STATUS_SUCCESS;
894 }
895
896 /*
897 * @implemented
898 */
899 VOID
900 NTAPI
901 CcFlushCache (
902 IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
903 IN PLARGE_INTEGER FileOffset OPTIONAL,
904 IN ULONG Length,
905 OUT PIO_STATUS_BLOCK IoStatus)
906 {
907 PROS_SHARED_CACHE_MAP SharedCacheMap;
908 LARGE_INTEGER Offset;
909 LONGLONG RemainingLength;
910 PROS_VACB current;
911 NTSTATUS Status;
912 KIRQL oldIrql;
913
914 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
915 SectionObjectPointers, FileOffset, Length);
916
917 DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
918 SectionObjectPointers, FileOffset, Length, IoStatus);
919
920 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
921 {
922 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
923 ASSERT(SharedCacheMap);
924 if (FileOffset)
925 {
926 Offset = *FileOffset;
927 RemainingLength = Length;
928 }
929 else
930 {
931 Offset.QuadPart = 0;
932 RemainingLength = SharedCacheMap->FileSize.QuadPart;
933 }
934
935 if (IoStatus)
936 {
937 IoStatus->Status = STATUS_SUCCESS;
938 IoStatus->Information = 0;
939 }
940
941 while (RemainingLength > 0)
942 {
943 current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
944 if (current != NULL)
945 {
946 if (current->Dirty)
947 {
948 Status = CcRosFlushVacb(current);
949 if (!NT_SUCCESS(Status) && IoStatus != NULL)
950 {
951 IoStatus->Status = Status;
952 }
953 }
954
955 CcRosReleaseVacbLock(current);
956
957 KeAcquireGuardedMutex(&ViewLock);
958 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
959 CcRosVacbDecRefCount(current);
960 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
961 KeReleaseGuardedMutex(&ViewLock);
962 }
963
964 Offset.QuadPart += VACB_MAPPING_GRANULARITY;
965 RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
966 }
967 }
968 else
969 {
970 if (IoStatus)
971 {
972 IoStatus->Status = STATUS_INVALID_PARAMETER;
973 }
974 }
975 }
976
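/* Example (illustrative only): a filesystem flushing a range it has just
 * modified. Fcb, WriteOffset and WriteLength are hypothetical placeholders;
 * only the CcFlushCache call reflects the routine implemented above.
 *
 * IO_STATUS_BLOCK IoStatus;
 * LARGE_INTEGER FlushOffset;
 *
 * FlushOffset.QuadPart = WriteOffset;
 * CcFlushCache(Fcb->SectionObjectPointers, &FlushOffset, WriteLength, &IoStatus);
 * if (!NT_SUCCESS(IoStatus.Status))
 * {
 *     // Propagate the flush failure to the caller.
 *     return IoStatus.Status;
 * }
 */
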
977 NTSTATUS
978 NTAPI
979 CcRosDeleteFileCache (
980 PFILE_OBJECT FileObject,
981 PROS_SHARED_CACHE_MAP SharedCacheMap)
982 /*
983 * FUNCTION: Releases the shared cache map associated with a file object
984 */
985 {
986 PLIST_ENTRY current_entry;
987 PROS_VACB current;
988 LIST_ENTRY FreeList;
989 KIRQL oldIrql;
990
991 ASSERT(SharedCacheMap);
992
993 SharedCacheMap->OpenCount++;
994 KeReleaseGuardedMutex(&ViewLock);
995
996 CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);
997
998 KeAcquireGuardedMutex(&ViewLock);
999 SharedCacheMap->OpenCount--;
1000 if (SharedCacheMap->OpenCount == 0)
1001 {
1002 FileObject->SectionObjectPointer->SharedCacheMap = NULL;
1003
1004 /*
1005 * Release all VACBs
1006 */
1007 InitializeListHead(&FreeList);
1008 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
1009 while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
1010 {
1011 current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
1012 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1013 RemoveEntryList(&current->VacbLruListEntry);
1014 if (current->Dirty)
1015 {
1016 RemoveEntryList(&current->DirtyVacbListEntry);
1017 DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
1018 DPRINT1("Freeing dirty VACB\n");
1019 }
1020 InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
1021 }
1022 #if DBG
1023 SharedCacheMap->Trace = FALSE;
1024 #endif
1025 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
1026
1027 KeReleaseGuardedMutex(&ViewLock);
1028 ObDereferenceObject(SharedCacheMap->FileObject);
1029
1030 while (!IsListEmpty(&FreeList))
1031 {
1032 current_entry = RemoveTailList(&FreeList);
1033 current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
1034 CcRosInternalFreeVacb(current);
1035 }
1036 ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
1037 KeAcquireGuardedMutex(&ViewLock);
1038 }
1039 return STATUS_SUCCESS;
1040 }
1041
1042 VOID
1043 NTAPI
1044 CcRosReferenceCache (
1045 PFILE_OBJECT FileObject)
1046 {
1047 PROS_SHARED_CACHE_MAP SharedCacheMap;
1048 KeAcquireGuardedMutex(&ViewLock);
1049 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1050 ASSERT(SharedCacheMap);
1051 ASSERT(SharedCacheMap->OpenCount != 0);
1052 SharedCacheMap->OpenCount++;
1053 KeReleaseGuardedMutex(&ViewLock);
1054 }
1055
1056 VOID
1057 NTAPI
1058 CcRosRemoveIfClosed (
1059 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1060 {
1061 PROS_SHARED_CACHE_MAP SharedCacheMap;
1062 DPRINT("CcRosRemoveIfClosed()\n");
1063 KeAcquireGuardedMutex(&ViewLock);
1064 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1065 if (SharedCacheMap && SharedCacheMap->OpenCount == 0)
1066 {
1067 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1068 }
1069 KeReleaseGuardedMutex(&ViewLock);
1070 }
1071
1072
1073 VOID
1074 NTAPI
1075 CcRosDereferenceCache (
1076 PFILE_OBJECT FileObject)
1077 {
1078 PROS_SHARED_CACHE_MAP SharedCacheMap;
1079 KeAcquireGuardedMutex(&ViewLock);
1080 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1081 ASSERT(SharedCacheMap);
1082 if (SharedCacheMap->OpenCount > 0)
1083 {
1084 SharedCacheMap->OpenCount--;
1085 if (SharedCacheMap->OpenCount == 0)
1086 {
1087 MmFreeSectionSegments(SharedCacheMap->FileObject);
1088 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1089 }
1090 }
1091 KeReleaseGuardedMutex(&ViewLock);
1092 }
1093
1094 NTSTATUS
1095 NTAPI
1096 CcRosReleaseFileCache (
1097 PFILE_OBJECT FileObject)
1098 /*
1099 * FUNCTION: Called by the file system when a handle to a file object
1100 * has been closed.
1101 */
1102 {
1103 PROS_SHARED_CACHE_MAP SharedCacheMap;
1104
1105 KeAcquireGuardedMutex(&ViewLock);
1106
1107 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1108 {
1109 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1110 if (FileObject->PrivateCacheMap != NULL)
1111 {
1112 FileObject->PrivateCacheMap = NULL;
1113 if (SharedCacheMap->OpenCount > 0)
1114 {
1115 SharedCacheMap->OpenCount--;
1116 if (SharedCacheMap->OpenCount == 0)
1117 {
1118 MmFreeSectionSegments(SharedCacheMap->FileObject);
1119 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1120 }
1121 }
1122 }
1123 }
1124 KeReleaseGuardedMutex(&ViewLock);
1125 return STATUS_SUCCESS;
1126 }
1127
1128 NTSTATUS
1129 NTAPI
1130 CcTryToInitializeFileCache (
1131 PFILE_OBJECT FileObject)
1132 {
1133 PROS_SHARED_CACHE_MAP SharedCacheMap;
1134 NTSTATUS Status;
1135
1136 KeAcquireGuardedMutex(&ViewLock);
1137
1138 ASSERT(FileObject->SectionObjectPointer);
1139 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1140 if (SharedCacheMap == NULL)
1141 {
1142 Status = STATUS_UNSUCCESSFUL;
1143 }
1144 else
1145 {
1146 if (FileObject->PrivateCacheMap == NULL)
1147 {
1148 FileObject->PrivateCacheMap = SharedCacheMap;
1149 SharedCacheMap->OpenCount++;
1150 }
1151 Status = STATUS_SUCCESS;
1152 }
1153 KeReleaseGuardedMutex(&ViewLock);
1154
1155 return Status;
1156 }
1157
1158
1159 NTSTATUS
1160 NTAPI
1161 CcRosInitializeFileCache (
1162 PFILE_OBJECT FileObject,
1163 PCC_FILE_SIZES FileSizes,
1164 BOOLEAN PinAccess,
1165 PCACHE_MANAGER_CALLBACKS CallBacks,
1166 PVOID LazyWriterContext)
1167 /*
1168 * FUNCTION: Initializes a shared cache map for a file object
1169 */
1170 {
1171 PROS_SHARED_CACHE_MAP SharedCacheMap;
1172
1173 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1174 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1175 FileObject, SharedCacheMap);
1176
1177 KeAcquireGuardedMutex(&ViewLock);
1178 if (SharedCacheMap == NULL)
1179 {
1180 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1181 if (SharedCacheMap == NULL)
1182 {
1183 KeReleaseGuardedMutex(&ViewLock);
1184 return STATUS_INSUFFICIENT_RESOURCES;
1185 }
1186 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1187 ObReferenceObjectByPointer(FileObject,
1188 FILE_ALL_ACCESS,
1189 NULL,
1190 KernelMode);
1191 SharedCacheMap->FileObject = FileObject;
1192 SharedCacheMap->Callbacks = CallBacks;
1193 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1194 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1195 SharedCacheMap->FileSize = FileSizes->FileSize;
1196 SharedCacheMap->PinAccess = PinAccess;
1197 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1198 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1199 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1200 }
1201 if (FileObject->PrivateCacheMap == NULL)
1202 {
1203 FileObject->PrivateCacheMap = SharedCacheMap;
1204 SharedCacheMap->OpenCount++;
1205 }
1206 KeReleaseGuardedMutex(&ViewLock);
1207
1208 return STATUS_SUCCESS;
1209 }
1210
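/* Example (illustrative only): how a filesystem might set up caching for a
 * file. Filesystems normally reach the routine above through the public
 * CcInitializeCacheMap entry point; the Fsd* callback routines and the Fcb
 * layout below are hypothetical placeholders.
 *
 * CACHE_MANAGER_CALLBACKS FsdCallbacks;
 * CC_FILE_SIZES FileSizes;
 *
 * FsdCallbacks.AcquireForLazyWrite = FsdAcquireForLazyWrite;
 * FsdCallbacks.ReleaseFromLazyWrite = FsdReleaseFromLazyWrite;
 * FsdCallbacks.AcquireForReadAhead = FsdAcquireForReadAhead;
 * FsdCallbacks.ReleaseFromReadAhead = FsdReleaseFromReadAhead;
 *
 * FileSizes.AllocationSize = Fcb->AllocationSize;
 * FileSizes.FileSize = Fcb->FileSize;
 * FileSizes.ValidDataLength = Fcb->ValidDataLength;
 *
 * CcRosInitializeFileCache(FileObject,
 *                          &FileSizes,
 *                          FALSE,            // PinAccess
 *                          &FsdCallbacks,
 *                          Fcb);             // lazy writer context
 */
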
1211 /*
1212 * @implemented
1213 */
1214 PFILE_OBJECT
1215 NTAPI
1216 CcGetFileObjectFromSectionPtrs (
1217 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1218 {
1219 PROS_SHARED_CACHE_MAP SharedCacheMap;
1220
1221 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1222
1223 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1224 {
1225 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1226 ASSERT(SharedCacheMap);
1227 return SharedCacheMap->FileObject;
1228 }
1229 return NULL;
1230 }
1231
1232 VOID
1233 INIT_FUNCTION
1234 NTAPI
1235 CcInitView (
1236 VOID)
1237 {
1238 DPRINT("CcInitView()\n");
1239
1240 InitializeListHead(&DirtyVacbListHead);
1241 InitializeListHead(&VacbLruListHead);
1242 KeInitializeGuardedMutex(&ViewLock);
1243 ExInitializeNPagedLookasideList(&iBcbLookasideList,
1244 NULL,
1245 NULL,
1246 0,
1247 sizeof(INTERNAL_BCB),
1248 TAG_BCB,
1249 20);
1250 ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
1251 NULL,
1252 NULL,
1253 0,
1254 sizeof(ROS_SHARED_CACHE_MAP),
1255 TAG_SHARED_CACHE_MAP,
1256 20);
1257 ExInitializeNPagedLookasideList(&VacbLookasideList,
1258 NULL,
1259 NULL,
1260 0,
1261 sizeof(ROS_VACB),
1262 TAG_VACB,
1263 20);
1264
1265 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1266
1267 CcInitCacheZeroPage();
1268 }
1269
1270 /* EOF */