fb8e8f187c2583754beee335fe13e5232e7cd074
[reactos.git] / reactos / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
18 * (1) If caching for the FCB hasn't been initiated then do so by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
26 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 static LIST_ENTRY DirtyVacbListHead;
45 static LIST_ENTRY VacbLruListHead;
46 ULONG DirtyPageCount = 0;
47
48 KGUARDED_MUTEX ViewLock;
49
50 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
51 static NPAGED_LOOKASIDE_LIST SharedCacheMapLookasideList;
52 static NPAGED_LOOKASIDE_LIST VacbLookasideList;
53
#if DBG
/* Debug builds: reference-count helpers that optionally log every change.
 * When SharedCacheMap->Trace is set (see CcRosTraceCacheMap), each
 * increment/decrement is printed together with the originating call site
 * so reference-count leaks can be tracked down. */
static void CcRosVacbIncRefCount_(PROS_VACB vacb, const char* file, int line)
{
    ++vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
static void CcRosVacbDecRefCount_(PROS_VACB vacb, const char* file, int line)
{
    --vacb->ReferenceCount;
    if (vacb->SharedCacheMap->Trace)
    {
        DbgPrint("(%s:%i) VACB %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, vacb, vacb->ReferenceCount, vacb->Dirty, vacb->PageOut);
    }
}
#define CcRosVacbIncRefCount(vacb) CcRosVacbIncRefCount_(vacb,__FILE__,__LINE__)
#define CcRosVacbDecRefCount(vacb) CcRosVacbDecRefCount_(vacb,__FILE__,__LINE__)
#else
/* Free builds: plain, unlogged counter updates. */
#define CcRosVacbIncRefCount(vacb) (++((vacb)->ReferenceCount))
#define CcRosVacbDecRefCount(vacb) (--((vacb)->ReferenceCount))
#endif
79
80 NTSTATUS
81 CcRosInternalFreeVacb(PROS_VACB Vacb);
82
83
84 /* FUNCTIONS *****************************************************************/
85
/* Toggle per-cache-map tracing (debug builds only). Enabling also dumps
 * the current VACB list of the map; on free builds this is a no-op. */
VOID
NTAPI
CcRosTraceCacheMap (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PROS_VACB current;

    if (!SharedCacheMap)
        return;

    SharedCacheMap->Trace = Trace;

    if (Trace)
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);

        /* Both locks are needed to walk the map's VACB list safely. */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldirql);

        current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
        while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
        {
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  VACB 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", SharedCacheMap);
    }

#else
    UNREFERENCED_PARAMETER(SharedCacheMap);
    UNREFERENCED_PARAMETER(Trace);
#endif
}
131
132 NTSTATUS
133 NTAPI
134 CcRosFlushVacb (
135 PROS_VACB Vacb)
136 {
137 NTSTATUS Status;
138 KIRQL oldIrql;
139
140 Status = CcWriteVirtualAddress(Vacb);
141 if (NT_SUCCESS(Status))
142 {
143 KeAcquireGuardedMutex(&ViewLock);
144 KeAcquireSpinLock(&Vacb->SharedCacheMap->CacheMapLock, &oldIrql);
145
146 Vacb->Dirty = FALSE;
147 RemoveEntryList(&Vacb->DirtyVacbListEntry);
148 DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
149 CcRosVacbDecRefCount(Vacb);
150
151 KeReleaseSpinLock(&Vacb->SharedCacheMap->CacheMapLock, oldIrql);
152 KeReleaseGuardedMutex(&ViewLock);
153 }
154
155 return Status;
156 }
157
/* Write back up to Target pages' worth of dirty VACBs; *Count receives the
 * number of pages actually flushed. With Wait == FALSE, any lock that is
 * not immediately available causes the VACB to be skipped rather than
 * blocking the caller. */
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;
    ZeroTimeout.QuadPart = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtyVacbListHead.Flink;
    if (current_entry == &DirtyVacbListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtyVacbListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    DirtyVacbListEntry);
        current_entry = current_entry->Flink;

        /* Pin the VACB so it cannot be freed while locks are dropped below. */
        CcRosVacbIncRefCount(current);

        /* Let the filesystem synchronize via its lazy-write callback. */
        Locked = current->SharedCacheMap->Callbacks->AcquireForLazyWrite(
                     current->SharedCacheMap->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Take the VACB mutex; with Wait == FALSE a zero timeout is used so
         * a busy VACB is skipped instead of waited on. */
        Status = KeWaitForSingleObject(&current->Mutex,
                                       Executive,
                                       KernelMode,
                                       FALSE,
                                       Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above; the dirty state itself holds another
         * (see CcRosReleaseVacb), so a count above 2 means somebody else is
         * still using the view — skip it for now. */
        if (current->ReferenceCount > 2)
        {
            KeReleaseMutex(&current->Mutex, FALSE);
            current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
                current->SharedCacheMap->LazyWriteContext);
            CcRosVacbDecRefCount(current);
            continue;
        }

        /* Drop ViewLock around the (potentially blocking) write-back. */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushVacb(current);

        KeReleaseMutex(&current->Mutex, FALSE);
        current->SharedCacheMap->Callbacks->ReleaseFromLazyWrite(
            current->SharedCacheMap->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosVacbDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE) &&
            (Status != STATUS_MEDIA_WRITE_PROTECTED))
        {
            DPRINT1("CC: Failed to flush VACB.\n");
        }
        else
        {
            (*Count) += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            Target -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        }

        /* The dirty list may have changed while ViewLock was released, so
         * restart from the head instead of trusting the saved successor. */
        current_entry = DirtyVacbListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
258
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Walk in LRU order so the least-recently-used views are evicted first. */
    current_entry = VacbLruListHead.Flink;
    while (current_entry != &VacbLruListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    VacbLruListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

        /* Reference the VACB */
        CcRosVacbIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the VACB */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->SharedCacheMap->CacheMapLock, &oldIrql);

            /* NOTE(review): current_entry was captured before the locks were
             * dropped above; the LRU list may have changed meanwhile, so the
             * saved successor could be stale — verify. */
        }

        /* Dereference the VACB */
        CcRosVacbDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Unlink from the cache-map and LRU lists; the actual teardown
             * happens below, after all locks are released. */
            RemoveEntryList(&current->CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesFreed = min(VACB_MAPPING_GRANULARITY / PAGE_SIZE, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->SharedCacheMap->CacheMapLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Free the unlinked VACBs now that no locks are held. */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        CcRosInternalFreeVacb(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
380
/* Drop the caller's reference on a VACB previously obtained through
 * CcRosGetVacb/CcRosLookupVacb, updating valid/dirty/mapped bookkeeping.
 * Releases the VACB mutex before returning. */
NTSTATUS
NTAPI
CcRosReleaseVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    PROS_VACB Vacb,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosReleaseVacb(SharedCacheMap 0x%p, Vacb 0x%p, Valid %u)\n",
           SharedCacheMap, Vacb, Valid);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    Vacb->Valid = Valid;

    WasDirty = Vacb->Dirty;
    Vacb->Dirty = Vacb->Dirty || Dirty;

    /* A clean -> dirty transition puts the VACB on the global dirty list. */
    if (!WasDirty && Vacb->Dirty)
    {
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    if (Mapped)
    {
        Vacb->MappedCount++;
    }
    /* Drop the caller's reference... */
    CcRosVacbDecRefCount(Vacb);
    /* ...but keep one on behalf of the first mapping... */
    if (Mapped && (Vacb->MappedCount == 1))
    {
        CcRosVacbIncRefCount(Vacb);
    }
    /* ...and one on behalf of the dirty state, so the VACB cannot be
     * trimmed before it is written back. */
    if (!WasDirty && Vacb->Dirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&Vacb->Mutex, FALSE);

    return STATUS_SUCCESS;
}
432
/* Returns with VACB Lock Held! */
/* Search SharedCacheMap's offset-sorted VACB list for the view covering
 * FileOffset. On a hit the VACB is referenced and its mutex is acquired
 * before it is returned; on a miss NULL is returned. */
PROS_VACB
NTAPI
CcRosLookupVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosLookupVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
    while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
    {
        current = CONTAINING_RECORD(current_entry,
                                    ROS_VACB,
                                    CacheMapVacbListEntry);
        if (IsPointInRange(current->FileOffset.QuadPart,
                           VACB_MAPPING_GRANULARITY,
                           FileOffset))
        {
            /* Reference the view before dropping the locks, then block on
             * its mutex with no locks held. */
            CcRosVacbIncRefCount(current);
            KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);
            KeWaitForSingleObject(&current->Mutex,
                                  Executive,
                                  KernelMode,
                                  FALSE,
                                  NULL);
            return current;
        }
        /* The list is sorted by offset: no match past this point. */
        if (current->FileOffset.QuadPart > FileOffset)
            break;
        current_entry = current_entry->Flink;
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);

    return NULL;
}
482
/* Mark the VACB covering FileOffset dirty. The view must already exist
 * (bugchecks otherwise). Releases the VACB mutex before returning. */
NTSTATUS
NTAPI
CcRosMarkDirtyVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset)
{
    PROS_VACB Vacb;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosMarkDirtyVacb(SharedCacheMap 0x%p, FileOffset %I64u)\n",
           SharedCacheMap, FileOffset);

    /* Lookup takes a reference and the VACB mutex. */
    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    if (!Vacb->Dirty)
    {
        /* Clean -> dirty: enqueue on the dirty list. The reference taken by
         * the lookup is kept to back the dirty state. */
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }
    else
    {
        /* Already dirty: the dirty state holds a reference, drop ours. */
        CcRosVacbDecRefCount(Vacb);
    }

    /* Move to the tail of the LRU list */
    RemoveEntryList(&Vacb->VacbLruListEntry);
    InsertTailList(&VacbLruListHead, &Vacb->VacbLruListEntry);

    Vacb->Dirty = TRUE;

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&Vacb->Mutex, FALSE);

    return STATUS_SUCCESS;
}
528
/* Drop one mapping of the VACB covering FileOffset, optionally marking it
 * dirty. Fails with STATUS_UNSUCCESSFUL when no view covers the offset. */
NTSTATUS
NTAPI
CcRosUnmapVacb (
    PROS_SHARED_CACHE_MAP SharedCacheMap,
    LONGLONG FileOffset,
    BOOLEAN NowDirty)
{
    PROS_VACB Vacb;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    DPRINT("CcRosUnmapVacb(SharedCacheMap 0x%p, FileOffset %I64u, NowDirty %u)\n",
           SharedCacheMap, FileOffset, NowDirty);

    /* Lookup takes a reference and the VACB mutex. */
    Vacb = CcRosLookupVacb(SharedCacheMap, FileOffset);
    if (Vacb == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);

    WasDirty = Vacb->Dirty;
    Vacb->Dirty = Vacb->Dirty || NowDirty;

    Vacb->MappedCount--;

    /* A clean -> dirty transition puts the VACB on the global dirty list. */
    if (!WasDirty && NowDirty)
    {
        InsertTailList(&DirtyVacbListHead, &Vacb->DirtyVacbListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    /* Drop the lookup reference... */
    CcRosVacbDecRefCount(Vacb);
    /* ...keep one for the new dirty state... */
    if (!WasDirty && NowDirty)
    {
        CcRosVacbIncRefCount(Vacb);
    }
    /* ...and release the reference held by the last mapping. */
    if (Vacb->MappedCount == 0)
    {
        CcRosVacbDecRefCount(Vacb);
    }

    KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&Vacb->Mutex, FALSE);

    return STATUS_SUCCESS;
}
581
582 static
583 NTSTATUS
584 CcRosMapVacb(
585 PROS_VACB Vacb)
586 {
587 ULONG i;
588 NTSTATUS Status;
589 ULONG_PTR NumberOfPages;
590
591 /* Create a memory area. */
592 MmLockAddressSpace(MmGetKernelAddressSpace());
593 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
594 0, // nothing checks for VACB mareas, so set to 0
595 &Vacb->BaseAddress,
596 VACB_MAPPING_GRANULARITY,
597 PAGE_READWRITE,
598 (PMEMORY_AREA*)&Vacb->MemoryArea,
599 0,
600 PAGE_SIZE);
601 MmUnlockAddressSpace(MmGetKernelAddressSpace());
602 if (!NT_SUCCESS(Status))
603 {
604 KeBugCheck(CACHE_MANAGER);
605 }
606
607 ASSERT(((ULONG_PTR)Vacb->BaseAddress % PAGE_SIZE) == 0);
608 ASSERT((ULONG_PTR)Vacb->BaseAddress > (ULONG_PTR)MmSystemRangeStart);
609
610 /* Create a virtual mapping for this memory area */
611 NumberOfPages = BYTES_TO_PAGES(VACB_MAPPING_GRANULARITY);
612 for (i = 0; i < NumberOfPages; i++)
613 {
614 PFN_NUMBER PageFrameNumber;
615
616 Status = MmRequestPageMemoryConsumer(MC_CACHE, TRUE, &PageFrameNumber);
617 if (PageFrameNumber == 0)
618 {
619 DPRINT1("Unable to allocate page\n");
620 KeBugCheck(MEMORY_MANAGEMENT);
621 }
622
623 Status = MmCreateVirtualMapping(NULL,
624 (PVOID)((ULONG_PTR)Vacb->BaseAddress + (i * PAGE_SIZE)),
625 PAGE_READWRITE,
626 &PageFrameNumber,
627 1);
628 if (!NT_SUCCESS(Status))
629 {
630 DPRINT1("Unable to create virtual mapping\n");
631 KeBugCheck(MEMORY_MANAGEMENT);
632 }
633 }
634
635 return STATUS_SUCCESS;
636 }
637
638 static
639 NTSTATUS
640 CcRosCreateVacb (
641 PROS_SHARED_CACHE_MAP SharedCacheMap,
642 LONGLONG FileOffset,
643 PROS_VACB *Vacb)
644 {
645 PROS_VACB current;
646 PROS_VACB previous;
647 PLIST_ENTRY current_entry;
648 NTSTATUS Status;
649 KIRQL oldIrql;
650
651 ASSERT(SharedCacheMap);
652
653 DPRINT("CcRosCreateVacb()\n");
654
655 if (FileOffset >= SharedCacheMap->FileSize.QuadPart)
656 {
657 *Vacb = NULL;
658 return STATUS_INVALID_PARAMETER;
659 }
660
661 current = ExAllocateFromNPagedLookasideList(&VacbLookasideList);
662 current->BaseAddress = NULL;
663 current->Valid = FALSE;
664 current->Dirty = FALSE;
665 current->PageOut = FALSE;
666 current->FileOffset.QuadPart = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
667 current->SharedCacheMap = SharedCacheMap;
668 #if DBG
669 if (SharedCacheMap->Trace)
670 {
671 DPRINT1("CacheMap 0x%p: new VACB: 0x%p\n", SharedCacheMap, current);
672 }
673 #endif
674 current->MappedCount = 0;
675 current->DirtyVacbListEntry.Flink = NULL;
676 current->DirtyVacbListEntry.Blink = NULL;
677 current->ReferenceCount = 1;
678 KeInitializeMutex(&current->Mutex, 0);
679 KeWaitForSingleObject(&current->Mutex,
680 Executive,
681 KernelMode,
682 FALSE,
683 NULL);
684 KeAcquireGuardedMutex(&ViewLock);
685
686 *Vacb = current;
687 /* There is window between the call to CcRosLookupVacb
688 * and CcRosCreateVacb. We must check if a VACB for the
689 * file offset exist. If there is a VACB, we release
690 * our newly created VACB and return the existing one.
691 */
692 KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
693 current_entry = SharedCacheMap->CacheMapVacbListHead.Flink;
694 previous = NULL;
695 while (current_entry != &SharedCacheMap->CacheMapVacbListHead)
696 {
697 current = CONTAINING_RECORD(current_entry,
698 ROS_VACB,
699 CacheMapVacbListEntry);
700 if (IsPointInRange(current->FileOffset.QuadPart,
701 VACB_MAPPING_GRANULARITY,
702 FileOffset))
703 {
704 CcRosVacbIncRefCount(current);
705 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
706 #if DBG
707 if (SharedCacheMap->Trace)
708 {
709 DPRINT1("CacheMap 0x%p: deleting newly created VACB 0x%p ( found existing one 0x%p )\n",
710 SharedCacheMap,
711 (*Vacb),
712 current);
713 }
714 #endif
715 KeReleaseMutex(&(*Vacb)->Mutex, FALSE);
716 KeReleaseGuardedMutex(&ViewLock);
717 ExFreeToNPagedLookasideList(&VacbLookasideList, *Vacb);
718 *Vacb = current;
719 KeWaitForSingleObject(&current->Mutex,
720 Executive,
721 KernelMode,
722 FALSE,
723 NULL);
724 return STATUS_SUCCESS;
725 }
726 if (current->FileOffset.QuadPart < FileOffset)
727 {
728 ASSERT(previous == NULL ||
729 previous->FileOffset.QuadPart < current->FileOffset.QuadPart);
730 previous = current;
731 }
732 if (current->FileOffset.QuadPart > FileOffset)
733 break;
734 current_entry = current_entry->Flink;
735 }
736 /* There was no existing VACB. */
737 current = *Vacb;
738 if (previous)
739 {
740 InsertHeadList(&previous->CacheMapVacbListEntry, &current->CacheMapVacbListEntry);
741 }
742 else
743 {
744 InsertHeadList(&SharedCacheMap->CacheMapVacbListHead, &current->CacheMapVacbListEntry);
745 }
746 KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
747 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
748 KeReleaseGuardedMutex(&ViewLock);
749
750 MI_SET_USAGE(MI_USAGE_CACHE);
751 #if MI_TRACE_PFNS
752 if ((SharedCacheMap->FileObject) && (SharedCacheMap->FileObject->FileName.Buffer))
753 {
754 PWCHAR pos = NULL;
755 ULONG len = 0;
756 pos = wcsrchr(SharedCacheMap->FileObject->FileName.Buffer, '\\');
757 len = wcslen(pos) * sizeof(WCHAR);
758 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
759 }
760 #endif
761
762 Status = CcRosMapVacb(current);
763
764 return Status;
765 }
766
767 NTSTATUS
768 NTAPI
769 CcRosGetVacb (
770 PROS_SHARED_CACHE_MAP SharedCacheMap,
771 LONGLONG FileOffset,
772 PLONGLONG BaseOffset,
773 PVOID* BaseAddress,
774 PBOOLEAN UptoDate,
775 PROS_VACB *Vacb)
776 {
777 PROS_VACB current;
778 NTSTATUS Status;
779
780 ASSERT(SharedCacheMap);
781
782 DPRINT("CcRosGetVacb()\n");
783
784 /*
785 * Look for a VACB already mapping the same data.
786 */
787 current = CcRosLookupVacb(SharedCacheMap, FileOffset);
788 if (current == NULL)
789 {
790 /*
791 * Otherwise create a new VACB.
792 */
793 Status = CcRosCreateVacb(SharedCacheMap, FileOffset, &current);
794 if (!NT_SUCCESS(Status))
795 {
796 return Status;
797 }
798 }
799
800 KeAcquireGuardedMutex(&ViewLock);
801
802 /* Move to the tail of the LRU list */
803 RemoveEntryList(&current->VacbLruListEntry);
804 InsertTailList(&VacbLruListHead, &current->VacbLruListEntry);
805
806 KeReleaseGuardedMutex(&ViewLock);
807
808 /*
809 * Return information about the VACB to the caller.
810 */
811 *UptoDate = current->Valid;
812 *BaseAddress = current->BaseAddress;
813 DPRINT("*BaseAddress %p\n", *BaseAddress);
814 *Vacb = current;
815 *BaseOffset = current->FileOffset.QuadPart;
816 return STATUS_SUCCESS;
817 }
818
819 NTSTATUS
820 NTAPI
821 CcRosRequestVacb (
822 PROS_SHARED_CACHE_MAP SharedCacheMap,
823 LONGLONG FileOffset,
824 PVOID* BaseAddress,
825 PBOOLEAN UptoDate,
826 PROS_VACB *Vacb)
827 /*
828 * FUNCTION: Request a page mapping for a shared cache map
829 */
830 {
831 LONGLONG BaseOffset;
832
833 ASSERT(SharedCacheMap);
834
835 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
836 {
837 DPRINT1("Bad fileoffset %I64x should be multiple of %x",
838 FileOffset, VACB_MAPPING_GRANULARITY);
839 KeBugCheck(CACHE_MANAGER);
840 }
841
842 return CcRosGetVacb(SharedCacheMap,
843 FileOffset,
844 &BaseOffset,
845 BaseAddress,
846 UptoDate,
847 Vacb);
848 }
849
850 static
851 VOID
852 CcFreeCachePage (
853 PVOID Context,
854 MEMORY_AREA* MemoryArea,
855 PVOID Address,
856 PFN_NUMBER Page,
857 SWAPENTRY SwapEntry,
858 BOOLEAN Dirty)
859 {
860 ASSERT(SwapEntry == 0);
861 if (Page != 0)
862 {
863 ASSERT(MmGetReferenceCountPage(Page) == 1);
864 MmReleasePageMemoryConsumer(MC_CACHE, Page);
865 }
866 }
867
NTSTATUS
CcRosInternalFreeVacb (
    PROS_VACB Vacb)
/*
 * FUNCTION: Releases a VACB associated with a shared cache map
 *
 * The VACB must already be unlinked from all lists; no locks are taken
 * here beyond the kernel address space lock.
 */
{
    DPRINT("Freeing VACB 0x%p\n", Vacb);
#if DBG
    if (Vacb->SharedCacheMap->Trace)
    {
        DPRINT1("CacheMap 0x%p: deleting VACB: 0x%p\n", Vacb->SharedCacheMap, Vacb);
    }
#endif

    /* Tear down the kernel-space view; CcFreeCachePage returns each
     * resident page to the cache memory consumer. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     Vacb->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    /* The control structure itself goes back to the lookaside list. */
    ExFreeToNPagedLookasideList(&VacbLookasideList, Vacb);
    return STATUS_SUCCESS;
}
893
894 /*
895 * @implemented
896 */
/* Flush the dirty views of a cached file back to disk. With FileOffset
 * NULL the whole file (up to FileSize) is flushed; otherwise the range
 * [FileOffset, FileOffset + Length). IoStatus (optional) receives
 * STATUS_SUCCESS or the most recent flush failure. */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PROS_SHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER Offset;
    LONGLONG RemainingLength;
    PROS_VACB current;
    NTSTATUS Status;
    KIRQL oldIrql;

    CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p FileOffset=%p Length=%lu\n",
            SectionObjectPointers, FileOffset, Length);

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        SharedCacheMap = SectionObjectPointers->SharedCacheMap;
        ASSERT(SharedCacheMap);
        /* No explicit offset means "flush everything". */
        if (FileOffset)
        {
            Offset = *FileOffset;
            RemainingLength = Length;
        }
        else
        {
            Offset.QuadPart = 0;
            RemainingLength = SharedCacheMap->FileSize.QuadPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one view (VACB_MAPPING_GRANULARITY) at a time. */
        while (RemainingLength > 0)
        {
            /* Lookup references the VACB and takes its mutex. */
            current = CcRosLookupVacb(SharedCacheMap, Offset.QuadPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushVacb(current);
                    /* Record the most recent failure for the caller. */
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }
                KeReleaseMutex(&current->Mutex, FALSE);

                /* Drop the reference taken by the lookup. */
                KeAcquireGuardedMutex(&ViewLock);
                KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
                CcRosVacbDecRefCount(current);
                KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);
                KeReleaseGuardedMutex(&ViewLock);
            }

            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            RemainingLength -= min(RemainingLength, VACB_MAPPING_GRANULARITY);
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
973
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PROS_SHARED_CACHE_MAP SharedCacheMap)
/*
 * FUNCTION: Releases the shared cache map associated with a file object
 *
 * NOTE: Entered with ViewLock held; the lock is temporarily released
 * around the flush and the final VACB teardown, and is held again on
 * return.
 */
{
    PLIST_ENTRY current_entry;
    PROS_VACB current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(SharedCacheMap);

    /* Guard the map with an extra reference while ViewLock is dropped for
     * the flush below. */
    SharedCacheMap->RefCount++;
    KeReleaseGuardedMutex(&ViewLock);

    /* Write back any dirty views before tearing the cache down. */
    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    SharedCacheMap->RefCount--;
    if (SharedCacheMap->RefCount == 0)
    {
        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all VACBs
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&SharedCacheMap->CacheMapLock, &oldIrql);
        while (!IsListEmpty(&SharedCacheMap->CacheMapVacbListHead))
        {
            current_entry = RemoveTailList(&SharedCacheMap->CacheMapVacbListHead);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            RemoveEntryList(&current->VacbLruListEntry);
            if (current->Dirty)
            {
                /* Still dirty despite the flush above. */
                RemoveEntryList(&current->DirtyVacbListEntry);
                DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
                DPRINT1("Freeing dirty VACB\n");
            }
            InsertHeadList(&FreeList, &current->CacheMapVacbListEntry);
        }
#if DBG
        SharedCacheMap->Trace = FALSE;
#endif
        KeReleaseSpinLock(&SharedCacheMap->CacheMapLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        /* Drop the file object reference taken in CcRosInitializeFileCache. */
        ObDereferenceObject(SharedCacheMap->FileObject);

        /* Free the unlinked VACBs with no locks held. */
        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, ROS_VACB, CacheMapVacbListEntry);
            CcRosInternalFreeVacb(current);
        }
        ExFreeToNPagedLookasideList(&SharedCacheMapLookasideList, SharedCacheMap);
        /* Reacquire ViewLock: callers expect it to be held on return. */
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1038
1039 VOID
1040 NTAPI
1041 CcRosReferenceCache (
1042 PFILE_OBJECT FileObject)
1043 {
1044 PROS_SHARED_CACHE_MAP SharedCacheMap;
1045 KeAcquireGuardedMutex(&ViewLock);
1046 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1047 ASSERT(SharedCacheMap);
1048 ASSERT(SharedCacheMap->RefCount != 0);
1049 SharedCacheMap->RefCount++;
1050 KeReleaseGuardedMutex(&ViewLock);
1051 }
1052
1053 VOID
1054 NTAPI
1055 CcRosRemoveIfClosed (
1056 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1057 {
1058 PROS_SHARED_CACHE_MAP SharedCacheMap;
1059 DPRINT("CcRosRemoveIfClosed()\n");
1060 KeAcquireGuardedMutex(&ViewLock);
1061 SharedCacheMap = SectionObjectPointer->SharedCacheMap;
1062 if (SharedCacheMap && SharedCacheMap->RefCount == 0)
1063 {
1064 CcRosDeleteFileCache(SharedCacheMap->FileObject, SharedCacheMap);
1065 }
1066 KeReleaseGuardedMutex(&ViewLock);
1067 }
1068
1069
1070 VOID
1071 NTAPI
1072 CcRosDereferenceCache (
1073 PFILE_OBJECT FileObject)
1074 {
1075 PROS_SHARED_CACHE_MAP SharedCacheMap;
1076 KeAcquireGuardedMutex(&ViewLock);
1077 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1078 ASSERT(SharedCacheMap);
1079 if (SharedCacheMap->RefCount > 0)
1080 {
1081 SharedCacheMap->RefCount--;
1082 if (SharedCacheMap->RefCount == 0)
1083 {
1084 MmFreeSectionSegments(SharedCacheMap->FileObject);
1085 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1086 }
1087 }
1088 KeReleaseGuardedMutex(&ViewLock);
1089 }
1090
1091 NTSTATUS
1092 NTAPI
1093 CcRosReleaseFileCache (
1094 PFILE_OBJECT FileObject)
1095 /*
1096 * FUNCTION: Called by the file system when a handle to a file object
1097 * has been closed.
1098 */
1099 {
1100 PROS_SHARED_CACHE_MAP SharedCacheMap;
1101
1102 KeAcquireGuardedMutex(&ViewLock);
1103
1104 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1105 {
1106 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1107 if (FileObject->PrivateCacheMap != NULL)
1108 {
1109 FileObject->PrivateCacheMap = NULL;
1110 if (SharedCacheMap->RefCount > 0)
1111 {
1112 SharedCacheMap->RefCount--;
1113 if (SharedCacheMap->RefCount == 0)
1114 {
1115 MmFreeSectionSegments(SharedCacheMap->FileObject);
1116 CcRosDeleteFileCache(FileObject, SharedCacheMap);
1117 }
1118 }
1119 }
1120 }
1121 KeReleaseGuardedMutex(&ViewLock);
1122 return STATUS_SUCCESS;
1123 }
1124
1125 NTSTATUS
1126 NTAPI
1127 CcTryToInitializeFileCache (
1128 PFILE_OBJECT FileObject)
1129 {
1130 PROS_SHARED_CACHE_MAP SharedCacheMap;
1131 NTSTATUS Status;
1132
1133 KeAcquireGuardedMutex(&ViewLock);
1134
1135 ASSERT(FileObject->SectionObjectPointer);
1136 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1137 if (SharedCacheMap == NULL)
1138 {
1139 Status = STATUS_UNSUCCESSFUL;
1140 }
1141 else
1142 {
1143 if (FileObject->PrivateCacheMap == NULL)
1144 {
1145 FileObject->PrivateCacheMap = SharedCacheMap;
1146 SharedCacheMap->RefCount++;
1147 }
1148 Status = STATUS_SUCCESS;
1149 }
1150 KeReleaseGuardedMutex(&ViewLock);
1151
1152 return Status;
1153 }
1154
1155
1156 NTSTATUS
1157 NTAPI
1158 CcRosInitializeFileCache (
1159 PFILE_OBJECT FileObject,
1160 PCC_FILE_SIZES FileSizes,
1161 PCACHE_MANAGER_CALLBACKS CallBacks,
1162 PVOID LazyWriterContext)
1163 /*
1164 * FUNCTION: Initializes a shared cache map for a file object
1165 */
1166 {
1167 PROS_SHARED_CACHE_MAP SharedCacheMap;
1168
1169 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
1170 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, SharedCacheMap 0x%p)\n",
1171 FileObject, SharedCacheMap);
1172
1173 KeAcquireGuardedMutex(&ViewLock);
1174 if (SharedCacheMap == NULL)
1175 {
1176 SharedCacheMap = ExAllocateFromNPagedLookasideList(&SharedCacheMapLookasideList);
1177 if (SharedCacheMap == NULL)
1178 {
1179 KeReleaseGuardedMutex(&ViewLock);
1180 return STATUS_INSUFFICIENT_RESOURCES;
1181 }
1182 RtlZeroMemory(SharedCacheMap, sizeof(*SharedCacheMap));
1183 ObReferenceObjectByPointer(FileObject,
1184 FILE_ALL_ACCESS,
1185 NULL,
1186 KernelMode);
1187 SharedCacheMap->FileObject = FileObject;
1188 SharedCacheMap->Callbacks = CallBacks;
1189 SharedCacheMap->LazyWriteContext = LazyWriterContext;
1190 SharedCacheMap->SectionSize = FileSizes->AllocationSize;
1191 SharedCacheMap->FileSize = FileSizes->FileSize;
1192 KeInitializeSpinLock(&SharedCacheMap->CacheMapLock);
1193 InitializeListHead(&SharedCacheMap->CacheMapVacbListHead);
1194 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap;
1195 }
1196 if (FileObject->PrivateCacheMap == NULL)
1197 {
1198 FileObject->PrivateCacheMap = SharedCacheMap;
1199 SharedCacheMap->RefCount++;
1200 }
1201 KeReleaseGuardedMutex(&ViewLock);
1202
1203 return STATUS_SUCCESS;
1204 }
1205
1206 /*
1207 * @implemented
1208 */
1209 PFILE_OBJECT
1210 NTAPI
1211 CcGetFileObjectFromSectionPtrs (
1212 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1213 {
1214 PROS_SHARED_CACHE_MAP SharedCacheMap;
1215
1216 CCTRACE(CC_API_DEBUG, "SectionObjectPointers=%p\n", SectionObjectPointers);
1217
1218 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1219 {
1220 SharedCacheMap = SectionObjectPointers->SharedCacheMap;
1221 ASSERT(SharedCacheMap);
1222 return SharedCacheMap->FileObject;
1223 }
1224 return NULL;
1225 }
1226
/* One-time cache manager initialization (INIT section): sets up the
 * global dirty and LRU lists, the global view lock, and the lookaside
 * lists used to allocate BCBs, shared cache maps and VACBs; then registers
 * the cache as a memory consumer and starts the zero-page support. */
VOID
INIT_FUNCTION
NTAPI
CcInitView (
    VOID)
{
    DPRINT("CcInitView()\n");

    InitializeListHead(&DirtyVacbListHead);
    InitializeListHead(&VacbLruListHead);
    KeInitializeGuardedMutex(&ViewLock);
    ExInitializeNPagedLookasideList(&iBcbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(INTERNAL_BCB),
                                    TAG_BCB,
                                    20);
    ExInitializeNPagedLookasideList(&SharedCacheMapLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_SHARED_CACHE_MAP),
                                    TAG_SHARED_CACHE_MAP,
                                    20);
    ExInitializeNPagedLookasideList(&VacbLookasideList,
                                    NULL,
                                    NULL,
                                    0,
                                    sizeof(ROS_VACB),
                                    TAG_VACB,
                                    20);

    /* Let Mm ask the cache to shrink (CcRosTrimCache) under pressure. */
    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);

    CcInitCacheZeroPage();
}
1264
1265 /* EOF */