7c4196978ab07111c799a5f3e7f5676d3cb68d29
[reactos.git] / reactos / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
 * (1) If caching for the FCB hasn't been initiated then do so by calling
 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 static LIST_ENTRY DirtySegmentListHead;
45 static LIST_ENTRY CacheSegmentListHead;
46 static LIST_ENTRY CacheSegmentLRUListHead;
47 static LIST_ENTRY ClosedListHead;
48 ULONG DirtyPageCount = 0;
49
50 KGUARDED_MUTEX ViewLock;
51
52 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
53 static NPAGED_LOOKASIDE_LIST BcbLookasideList;
54 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
55
56 #if DBG
57 static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
58 {
59 ++cs->ReferenceCount;
60 if ( cs->Bcb->Trace )
61 {
62 DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
63 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
64 }
65 }
66 static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
67 {
68 --cs->ReferenceCount;
69 if ( cs->Bcb->Trace )
70 {
71 DbgPrint("(%s:%i) CacheSegment %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
72 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
73 }
74 }
75 #define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
76 #define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
77 #else
78 #define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
79 #define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
80 #endif
81
82 NTSTATUS
83 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
84
85
86 /* FUNCTIONS *****************************************************************/
87
/*
 * Enable or disable debug tracing for a shared cache map (BCB).
 *
 * Bcb   - cache map to (un)trace; the call is a no-op when NULL.
 * Trace - TRUE to enable tracing, FALSE to disable it.
 *
 * When enabling, the current state of every cache segment attached to the
 * BCB is dumped under ViewLock + BcbLock. On non-DBG builds the body is
 * compiled out except for self-assignments that silence unused-parameter
 * warnings.
 */
VOID
NTAPI
CcRosTraceCacheMap (
    PBCB Bcb,
    BOOLEAN Trace )
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;

    /* Nothing to trace without a cache map */
    if ( !Bcb )
        return;

    Bcb->Trace = Trace;

    if ( Trace )
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );

        /* Lock order used throughout this file: ViewLock first,
         * then the per-BCB spinlock */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);

        /* Walk the BCB's segment list and dump each segment's state */
        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (current_entry != &Bcb->BcbSegmentListHead)
        {
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            current_entry = current_entry->Flink;

            DPRINT1("  CacheSegment 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
    }

#else
    /* Reference the parameters so non-DBG builds don't warn */
    Bcb = Bcb;
    Trace = Trace;
#endif
}
133
/*
 * Write one cache segment back to its backing file.
 *
 * On a successful write the segment is marked clean: it is unlinked from
 * the dirty-segment list, the global DirtyPageCount is reduced by one
 * segment's worth of pages, and the reference the dirty state held on the
 * segment is dropped. On failure the segment is left dirty.
 *
 * Returns the status of the underlying WriteCacheSegment call.
 */
NTSTATUS
NTAPI
CcRosFlushCacheSegment (
    PCACHE_SEGMENT CacheSegment)
{
    NTSTATUS Status;
    KIRQL oldIrql;

    Status = WriteCacheSegment(CacheSegment);
    if (NT_SUCCESS(Status))
    {
        /* Lock order: ViewLock before the per-BCB spinlock */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);

        CacheSegment->Dirty = FALSE;
        RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
        DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
        /* Drop the reference that was taken when the segment became dirty */
        CcRosCacheSegmentDecRefCount(CacheSegment);

        KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }

    return Status;
}
159
/*
 * Flush up to Target pages' worth of dirty cache segments to disk.
 *
 * Target - number of pages the caller wants written back.
 * Count  - receives the number of pages actually flushed.
 * Wait   - TRUE to block on the lazy-write lock and segment mutex;
 *          FALSE to skip segments that cannot be acquired immediately.
 *
 * Segments are taken from the head of DirtySegmentListHead. For each one we
 * take a reference, acquire the filesystem's lazy-write lock and the segment
 * mutex, then drop ViewLock across the actual write. Segments whose
 * reference count shows another active user are skipped. Always returns
 * STATUS_SUCCESS; individual flush failures are only logged.
 */
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait)
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;
    /* Zero timeout makes the mutex wait below a try-acquire when !Wait */
    ZeroTimeout.QuadPart = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtySegmentListHead.Flink;
    if (current_entry == &DirtySegmentListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtySegmentListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    DirtySegmentListEntry);
        current_entry = current_entry->Flink;

        /* Pin the segment while we negotiate the other locks */
        CcRosCacheSegmentIncRefCount(current);

        Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
                     current->Bcb->LazyWriteContext, Wait);
        if (!Locked)
        {
            /* Filesystem refused the lazy-write lock; skip this segment */
            CcRosCacheSegmentDecRefCount(current);
            continue;
        }

        Status = KeWaitForSingleObject(&current->Mutex,
                                       Executive,
                                       KernelMode,
                                       FALSE,
                                       Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            /* Could not get the segment mutex (timeout in !Wait mode) */
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);
            CcRosCacheSegmentDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above */
        if (current->ReferenceCount > 2)
        {
            /* Someone else is still using this segment; leave it alone */
            KeReleaseMutex(&current->Mutex, FALSE);
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);
            CcRosCacheSegmentDecRefCount(current);
            continue;
        }

        PagesPerSegment = VACB_MAPPING_GRANULARITY / PAGE_SIZE;

        /* Drop ViewLock for the (potentially blocking) disk write */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushCacheSegment(current);

        KeReleaseMutex(&current->Mutex, FALSE);
        current->Bcb->Callbacks->ReleaseFromLazyWrite(
            current->Bcb->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosCacheSegmentDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
        {
            DPRINT1("CC: Failed to flush cache segment.\n");
        }
        else
        {
            (*Count) += PagesPerSegment;
            Target -= PagesPerSegment;
        }

        /* The dirty list may have changed while ViewLock was dropped, so
         * restart the walk from the head of the list */
        current_entry = DirtySegmentListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return STATUS_SUCCESS;
}
261
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 *
 * Walks the LRU list, paging out mapped clean segments and collecting
 * segments whose reference count drops to zero onto a private free list.
 * If the target is not met on the first pass, dirty pages are flushed once
 * and the walk is retried. The collected segments are destroyed after all
 * locks are released. Always returns STATUS_SUCCESS.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Scan from the least-recently-used end of the list */
    current_entry = CacheSegmentLRUListHead.Flink;
    while (current_entry != &CacheSegmentLRUListHead)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    CacheSegmentLRUListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);

        /* Reference the cache segment */
        CcRosCacheSegmentIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the segment */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            /* NOTE(review): current_entry was captured before ViewLock was
             * dropped; the LRU list may have changed in the meantime, so the
             * saved iterator could be stale here — confirm against later
             * revisions of this file. */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
        }

        /* Dereference the cache segment */
        CcRosCacheSegmentDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Unlink from all global and per-BCB lists, then park the
             * segment on the local free list for later destruction */
            RemoveEntryList(&current->BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesPerSegment = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            PagesFreed = min(PagesPerSegment, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Destroy the collected segments outside of all locks */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    BcbSegmentListEntry);
        CcRosInternalFreeCacheSegment(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return STATUS_SUCCESS;
}
384
/*
 * Release a cache segment previously obtained via CcRosGetCacheSegment /
 * CcRosRequestCacheSegment, updating its state flags and reference count.
 *
 * Bcb      - owning shared cache map.
 * CacheSeg - segment to release; its Mutex must be held by the caller and
 *            is released on return.
 * Valid    - new Valid (up-to-date) state for the segment.
 * Dirty    - TRUE if the caller modified the segment's data.
 * Mapped   - TRUE if the caller created a new mapping of the segment.
 *
 * Reference-count choreography (all under ViewLock + BcbLock):
 *  - the caller's reference is dropped;
 *  - the first mapping adds one reference that the segment keeps while
 *    MappedCount > 0;
 *  - a clean->dirty transition adds one reference that is held until the
 *    segment is flushed (see CcRosFlushCacheSegment).
 *
 * Always returns STATUS_SUCCESS.
 */
NTSTATUS
NTAPI
CcRosReleaseCacheSegment (
    PBCB Bcb,
    PCACHE_SEGMENT CacheSeg,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
{
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %u)\n",
           Bcb, CacheSeg, Valid);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);

    CacheSeg->Valid = Valid;

    /* Dirty is sticky: once set, only a flush clears it */
    WasDirty = CacheSeg->Dirty;
    CacheSeg->Dirty = CacheSeg->Dirty || Dirty;

    if (!WasDirty && CacheSeg->Dirty)
    {
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    if (Mapped)
    {
        CacheSeg->MappedCount++;
    }
    /* Drop the caller's reference ... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ... but keep one for the first mapping ... */
    if (Mapped && (CacheSeg->MappedCount == 1))
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    /* ... and one for the newly dirty state */
    if (!WasDirty && CacheSeg->Dirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }

    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&CacheSeg->Mutex, FALSE);

    return STATUS_SUCCESS;
}
436
437 /* Returns with Cache Segment Lock Held! */
438 PCACHE_SEGMENT
439 NTAPI
440 CcRosLookupCacheSegment (
441 PBCB Bcb,
442 ULONG FileOffset)
443 {
444 PLIST_ENTRY current_entry;
445 PCACHE_SEGMENT current;
446 KIRQL oldIrql;
447
448 ASSERT(Bcb);
449
450 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %lu)\n", Bcb, FileOffset);
451
452 KeAcquireGuardedMutex(&ViewLock);
453 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
454
455 current_entry = Bcb->BcbSegmentListHead.Flink;
456 while (current_entry != &Bcb->BcbSegmentListHead)
457 {
458 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
459 BcbSegmentListEntry);
460 if (IsPointInSegment(current->FileOffset, VACB_MAPPING_GRANULARITY,
461 FileOffset))
462 {
463 CcRosCacheSegmentIncRefCount(current);
464 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
465 KeReleaseGuardedMutex(&ViewLock);
466 KeWaitForSingleObject(&current->Mutex,
467 Executive,
468 KernelMode,
469 FALSE,
470 NULL);
471 return current;
472 }
473 if (current->FileOffset > FileOffset)
474 break;
475 current_entry = current_entry->Flink;
476 }
477
478 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
479 KeReleaseGuardedMutex(&ViewLock);
480
481 return NULL;
482 }
483
/*
 * Mark the cache segment covering FileOffset as dirty.
 *
 * Bcb        - shared cache map owning the segment.
 * FileOffset - offset whose covering segment is to be dirtied.
 *
 * Bugchecks if no segment covers the offset. The reference added by the
 * internal lookup is either kept as the dirty-state reference (clean ->
 * dirty transition) or dropped again (segment was already dirty). The
 * segment is also moved to the tail of the LRU list. Always returns
 * STATUS_SUCCESS.
 */
NTSTATUS
NTAPI
CcRosMarkDirtyCacheSegment (
    PBCB Bcb,
    ULONG FileOffset)
{
    PCACHE_SEGMENT CacheSeg;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %lu)\n", Bcb, FileOffset);

    /* Lookup returns with a reference added and the segment mutex held */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        /* Marking a non-existent segment dirty is a caller bug */
        KeBugCheck(CACHE_MANAGER);
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);

    if (!CacheSeg->Dirty)
    {
        /* Clean -> dirty: keep the lookup reference for the dirty state */
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }
    else
    {
        /* Already dirty: the dirty reference exists, drop the lookup one */
        CcRosCacheSegmentDecRefCount(CacheSeg);
    }

    /* Move to the tail of the LRU list */
    RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
    InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);

    CacheSeg->Dirty = TRUE;

    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&CacheSeg->Mutex, FALSE);

    return STATUS_SUCCESS;
}
528
/*
 * Undo one mapping of the cache segment covering FileOffset.
 *
 * Bcb        - shared cache map owning the segment.
 * FileOffset - offset whose covering segment is being unmapped.
 * NowDirty   - TRUE if the mapping modified the segment's data.
 *
 * Returns STATUS_UNSUCCESSFUL if no segment covers the offset. Otherwise
 * decrements MappedCount, updates the dirty state/list, and rebalances the
 * reference count: the lookup reference is dropped, a clean->dirty
 * transition keeps one reference for the dirty state, and the last unmap
 * drops the reference held on behalf of the mappings.
 */
NTSTATUS
NTAPI
CcRosUnmapCacheSegment (
    PBCB Bcb,
    ULONG FileOffset,
    BOOLEAN NowDirty)
{
    PCACHE_SEGMENT CacheSeg;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %lu, NowDirty %u)\n",
           Bcb, FileOffset, NowDirty);

    /* Lookup returns with a reference added and the segment mutex held */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        return STATUS_UNSUCCESSFUL;
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);

    /* Dirty is sticky: once set, only a flush clears it */
    WasDirty = CacheSeg->Dirty;
    CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;

    CacheSeg->MappedCount--;

    if (!WasDirty && NowDirty)
    {
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    /* Drop the lookup reference ... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ... but keep one for the newly dirty state ... */
    if (!WasDirty && NowDirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    /* ... and drop the mapping reference on the last unmap */
    if (CacheSeg->MappedCount == 0)
    {
        CcRosCacheSegmentDecRefCount(CacheSeg);
    }

    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&CacheSeg->Mutex, FALSE);

    return STATUS_SUCCESS;
}
581
582 static
583 NTSTATUS
584 CcRosCreateCacheSegment (
585 PBCB Bcb,
586 ULONG FileOffset,
587 PCACHE_SEGMENT* CacheSeg)
588 {
589 PCACHE_SEGMENT current;
590 PCACHE_SEGMENT previous;
591 PLIST_ENTRY current_entry;
592 NTSTATUS Status;
593 KIRQL oldIrql;
594
595 ASSERT(Bcb);
596
597 DPRINT("CcRosCreateCacheSegment()\n");
598
599 if (FileOffset >= Bcb->FileSize.u.LowPart)
600 {
601 CacheSeg = NULL;
602 return STATUS_INVALID_PARAMETER;
603 }
604
605 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
606 current->Valid = FALSE;
607 current->Dirty = FALSE;
608 current->PageOut = FALSE;
609 current->FileOffset = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
610 current->Bcb = Bcb;
611 #if DBG
612 if ( Bcb->Trace )
613 {
614 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
615 }
616 #endif
617 current->MappedCount = 0;
618 current->DirtySegmentListEntry.Flink = NULL;
619 current->DirtySegmentListEntry.Blink = NULL;
620 current->ReferenceCount = 1;
621 KeInitializeMutex(&current->Mutex, 0);
622 KeWaitForSingleObject(&current->Mutex,
623 Executive,
624 KernelMode,
625 FALSE,
626 NULL);
627 KeAcquireGuardedMutex(&ViewLock);
628
629 *CacheSeg = current;
630 /* There is window between the call to CcRosLookupCacheSegment
631 * and CcRosCreateCacheSegment. We must check if a segment on
632 * the fileoffset exist. If there exist a segment, we release
633 * our new created segment and return the existing one.
634 */
635 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
636 current_entry = Bcb->BcbSegmentListHead.Flink;
637 previous = NULL;
638 while (current_entry != &Bcb->BcbSegmentListHead)
639 {
640 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
641 BcbSegmentListEntry);
642 if (IsPointInSegment(current->FileOffset, VACB_MAPPING_GRANULARITY,
643 FileOffset))
644 {
645 CcRosCacheSegmentIncRefCount(current);
646 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
647 #if DBG
648 if ( Bcb->Trace )
649 {
650 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
651 Bcb,
652 (*CacheSeg),
653 current );
654 }
655 #endif
656 KeReleaseMutex(&(*CacheSeg)->Mutex, FALSE);
657 KeReleaseGuardedMutex(&ViewLock);
658 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
659 *CacheSeg = current;
660 KeWaitForSingleObject(&current->Mutex,
661 Executive,
662 KernelMode,
663 FALSE,
664 NULL);
665 return STATUS_SUCCESS;
666 }
667 if (current->FileOffset < FileOffset)
668 {
669 ASSERT(previous == NULL ||
670 previous->FileOffset < current->FileOffset);
671 previous = current;
672 }
673 if (current->FileOffset > FileOffset)
674 break;
675 current_entry = current_entry->Flink;
676 }
677 /* There was no existing segment. */
678 current = *CacheSeg;
679 if (previous)
680 {
681 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
682 }
683 else
684 {
685 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
686 }
687 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
688 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
689 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
690 KeReleaseGuardedMutex(&ViewLock);
691
692 MmLockAddressSpace(MmGetKernelAddressSpace());
693 current->BaseAddress = NULL;
694 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
695 0, // nothing checks for cache_segment mareas, so set to 0
696 &current->BaseAddress,
697 VACB_MAPPING_GRANULARITY,
698 PAGE_READWRITE,
699 (PMEMORY_AREA*)&current->MemoryArea,
700 FALSE,
701 0,
702 PAGE_SIZE);
703 MmUnlockAddressSpace(MmGetKernelAddressSpace());
704 if (!NT_SUCCESS(Status))
705 {
706 KeBugCheck(CACHE_MANAGER);
707 }
708
709 /* Create a virtual mapping for this memory area */
710 MI_SET_USAGE(MI_USAGE_CACHE);
711 #if MI_TRACE_PFNS
712 PWCHAR pos = NULL;
713 ULONG len = 0;
714 if ((Bcb->FileObject) && (Bcb->FileObject->FileName.Buffer))
715 {
716 pos = wcsrchr(Bcb->FileObject->FileName.Buffer, '\\');
717 len = wcslen(pos) * sizeof(WCHAR);
718 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
719 }
720 #endif
721
722 MmMapMemoryArea(current->BaseAddress, VACB_MAPPING_GRANULARITY,
723 MC_CACHE, PAGE_READWRITE);
724
725 return STATUS_SUCCESS;
726 }
727
728 NTSTATUS
729 NTAPI
730 CcRosGetCacheSegmentChain (
731 PBCB Bcb,
732 ULONG FileOffset,
733 ULONG Length,
734 PCACHE_SEGMENT* CacheSeg)
735 {
736 PCACHE_SEGMENT current;
737 ULONG i;
738 PCACHE_SEGMENT* CacheSegList;
739 PCACHE_SEGMENT Previous = NULL;
740
741 ASSERT(Bcb);
742
743 DPRINT("CcRosGetCacheSegmentChain()\n");
744
745 Length = ROUND_UP(Length, VACB_MAPPING_GRANULARITY);
746
747 CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
748 (Length / VACB_MAPPING_GRANULARITY));
749
750 /*
751 * Look for a cache segment already mapping the same data.
752 */
753 for (i = 0; i < (Length / VACB_MAPPING_GRANULARITY); i++)
754 {
755 ULONG CurrentOffset = FileOffset + (i * VACB_MAPPING_GRANULARITY);
756 current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
757 if (current != NULL)
758 {
759 KeAcquireGuardedMutex(&ViewLock);
760
761 /* Move to tail of LRU list */
762 RemoveEntryList(&current->CacheSegmentLRUListEntry);
763 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
764
765 KeReleaseGuardedMutex(&ViewLock);
766
767 CacheSegList[i] = current;
768 }
769 else
770 {
771 CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
772 CacheSegList[i] = current;
773 }
774 }
775
776 for (i = 0; i < Length / VACB_MAPPING_GRANULARITY; i++)
777 {
778 if (i == 0)
779 {
780 *CacheSeg = CacheSegList[i];
781 Previous = CacheSegList[i];
782 }
783 else
784 {
785 Previous->NextInChain = CacheSegList[i];
786 Previous = CacheSegList[i];
787 }
788 }
789 ASSERT(Previous);
790 Previous->NextInChain = NULL;
791
792 return STATUS_SUCCESS;
793 }
794
/*
 * Get the cache segment covering FileOffset, creating it if necessary.
 *
 * Bcb         - shared cache map.
 * FileOffset  - requested offset into the file.
 * BaseOffset  - receives the segment's (segment-aligned) file offset.
 * BaseAddress - receives the segment's kernel virtual base address.
 * UptoDate    - receives the segment's Valid flag.
 * CacheSeg    - receives the segment, referenced and with Mutex held.
 *
 * The segment is moved to the tail of the LRU list. Returns the failure
 * status of segment creation, otherwise STATUS_SUCCESS.
 */
NTSTATUS
NTAPI
CcRosGetCacheSegment (
    PBCB Bcb,
    ULONG FileOffset,
    PULONG BaseOffset,
    PVOID* BaseAddress,
    PBOOLEAN UptoDate,
    PCACHE_SEGMENT* CacheSeg)
{
    PCACHE_SEGMENT current;
    NTSTATUS Status;

    ASSERT(Bcb);

    DPRINT("CcRosGetCacheSegment()\n");

    /*
     * Look for a cache segment already mapping the same data.
     */
    current = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (current == NULL)
    {
        /*
         * Otherwise create a new segment.
         */
        Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
        if (!NT_SUCCESS(Status))
        {
            return Status;
        }
    }

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&current->CacheSegmentLRUListEntry);
    InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /*
     * Return information about the segment to the caller.
     */
    *UptoDate = current->Valid;
    *BaseAddress = current->BaseAddress;
    DPRINT("*BaseAddress %p\n", *BaseAddress);
    *CacheSeg = current;
    *BaseOffset = current->FileOffset;
    return STATUS_SUCCESS;
}
846
847 NTSTATUS
848 NTAPI
849 CcRosRequestCacheSegment (
850 PBCB Bcb,
851 ULONG FileOffset,
852 PVOID* BaseAddress,
853 PBOOLEAN UptoDate,
854 PCACHE_SEGMENT* CacheSeg)
855 /*
856 * FUNCTION: Request a page mapping for a BCB
857 */
858 {
859 ULONG BaseOffset;
860
861 ASSERT(Bcb);
862
863 if (FileOffset % VACB_MAPPING_GRANULARITY != 0)
864 {
865 DPRINT1("Bad fileoffset %x should be multiple of %x",
866 FileOffset, VACB_MAPPING_GRANULARITY);
867 KeBugCheck(CACHE_MANAGER);
868 }
869
870 return CcRosGetCacheSegment(Bcb,
871 FileOffset,
872 &BaseOffset,
873 BaseAddress,
874 UptoDate,
875 CacheSeg);
876 }
877
/*
 * MmFreeMemoryArea callback: release one physical page of a cache
 * segment back to the MC_CACHE memory consumer.
 *
 * Context, MemoryArea, Address, and Dirty are unused here; the parameter
 * list is dictated by the callback signature. Cache pages are never
 * swapped, so SwapEntry must be zero, and a present page must be held by
 * exactly one reference (the cache's own).
 */
static
VOID
CcFreeCachePage (
    PVOID Context,
    MEMORY_AREA* MemoryArea,
    PVOID Address,
    PFN_NUMBER Page,
    SWAPENTRY SwapEntry,
    BOOLEAN Dirty)
{
    ASSERT(SwapEntry == 0);
    if (Page != 0)
    {
        ASSERT(MmGetReferenceCountPage(Page) == 1);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }
}
895
NTSTATUS
CcRosInternalFreeCacheSegment (
    PCACHE_SEGMENT CacheSeg)
/*
 * FUNCTION: Releases a cache segment associated with a BCB
 *
 * Tears down the segment's kernel memory area (returning its pages via
 * CcFreeCachePage) and returns the descriptor to the lookaside list.
 * The caller must already have unlinked the segment from all lists.
 * Always returns STATUS_SUCCESS.
 */
{
    DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
#if DBG
    if ( CacheSeg->Bcb->Trace )
    {
        DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
    }
#endif

    /* Unmap and destroy the segment's kernel-space memory area */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     CacheSeg->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
    return STATUS_SUCCESS;
}
921
922 /*
923 * @implemented
924 */
/*
 * Flush the dirty cached data of a file range back to disk.
 *
 * SectionObjectPointers - identifies the file; must carry a
 *                         SharedCacheMap, else STATUS_INVALID_PARAMETER
 *                         is reported via IoStatus.
 * FileOffset            - start of the range; NULL means flush the
 *                         whole file (length taken from the BCB).
 * Length                - length of the range in bytes.
 * IoStatus              - optional; receives the first flush failure
 *                         status (Information is always set to 0).
 *
 * Walks the range one segment (VACB_MAPPING_GRANULARITY) at a time,
 * flushing segments that exist and are dirty; holes in the cache are
 * simply skipped.
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
{
    PBCB Bcb;
    LARGE_INTEGER Offset;
    PCACHE_SEGMENT current;
    NTSTATUS Status;
    KIRQL oldIrql;

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
        ASSERT(Bcb);
        if (FileOffset)
        {
            Offset = *FileOffset;
        }
        else
        {
            /* No offset given: flush the entire cached file */
            Offset.QuadPart = (LONGLONG)0;
            Length = Bcb->FileSize.u.LowPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        while (Length > 0)
        {
            /* Lookup adds a reference and holds the segment mutex */
            current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushCacheSegment(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }
                KeReleaseMutex(&current->Mutex, FALSE);

                /* Drop the lookup reference under the usual lock pair */
                KeAcquireGuardedMutex(&ViewLock);
                KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
                CcRosCacheSegmentDecRefCount(current);
                KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
                KeReleaseGuardedMutex(&ViewLock);
            }

            /* Advance one segment, clamping the remaining length */
            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            if (Length > VACB_MAPPING_GRANULARITY)
            {
                Length -= VACB_MAPPING_GRANULARITY;
            }
            else
            {
                Length = 0;
            }
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1003
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PBCB Bcb)
/*
 * FUNCTION: Releases the BCB associated with a file object
 *
 * Must be entered with ViewLock held and returns with ViewLock held
 * (the lock is temporarily dropped around the flush and the segment
 * teardown). The BCB is pinned with an extra RefCount across the flush;
 * if the count then drops to zero, every cache segment is unlinked and
 * destroyed, the file object is dereferenced, and the BCB is returned
 * to its lookaside list. Always returns STATUS_SUCCESS.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(Bcb);

    /* Pin the BCB while we flush without holding ViewLock */
    Bcb->RefCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    Bcb->RefCount--;
    if (Bcb->RefCount == 0)
    {
        /* Remove from the deferred-close list, if queued there */
        if (Bcb->BcbRemoveListEntry.Flink != NULL)
        {
            RemoveEntryList(&Bcb->BcbRemoveListEntry);
            Bcb->BcbRemoveListEntry.Flink = NULL;
        }

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all cache segments.
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
        while (!IsListEmpty(&Bcb->BcbSegmentListHead))
        {
            current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            if (current->Dirty)
            {
                /* Should not normally happen after the flush above */
                RemoveEntryList(&current->DirtySegmentListEntry);
                DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
                DPRINT1("Freeing dirty segment\n");
            }
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
        }
#if DBG
        Bcb->Trace = FALSE;
#endif
        KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);

        /* Destroy the collected segments outside of ViewLock */
        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject (Bcb->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            CcRosInternalFreeCacheSegment(current);
        }
        ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
        KeAcquireGuardedMutex(&ViewLock);
    }
    return STATUS_SUCCESS;
}
1075
1076 VOID
1077 NTAPI
1078 CcRosReferenceCache (
1079 PFILE_OBJECT FileObject)
1080 {
1081 PBCB Bcb;
1082 KeAcquireGuardedMutex(&ViewLock);
1083 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1084 ASSERT(Bcb);
1085 if (Bcb->RefCount == 0)
1086 {
1087 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1088 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1089 Bcb->BcbRemoveListEntry.Flink = NULL;
1090
1091 }
1092 else
1093 {
1094 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1095 }
1096 Bcb->RefCount++;
1097 KeReleaseGuardedMutex(&ViewLock);
1098 }
1099
/*
 * Flag a file's cache map for destruction on last close.
 *
 * If the map exists, sets RemoveOnClose; when no references remain, the
 * cache is torn down immediately via CcRosDeleteFileCache (which expects
 * and preserves the ViewLock ownership established here).
 */
VOID
NTAPI
CcRosSetRemoveOnClose (
    PSECTION_OBJECT_POINTERS SectionObjectPointer)
{
    PBCB Bcb;
    DPRINT("CcRosSetRemoveOnClose()\n");
    KeAcquireGuardedMutex(&ViewLock);
    Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
    if (Bcb)
    {
        Bcb->RemoveOnClose = TRUE;
        if (Bcb->RefCount == 0)
        {
            /* No users left: destroy the cache right away */
            CcRosDeleteFileCache(Bcb->FileObject, Bcb);
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
}
1119
1120
/*
 * Drop one reference on the shared cache map of a file object.
 *
 * When the count reaches zero, the file's section segments are freed and
 * the cache map is destroyed (CcRosDeleteFileCache is entered with
 * ViewLock held, as it requires).
 */
VOID
NTAPI
CcRosDereferenceCache (
    PFILE_OBJECT FileObject)
{
    PBCB Bcb;
    KeAcquireGuardedMutex(&ViewLock);
    Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
    ASSERT(Bcb);
    if (Bcb->RefCount > 0)
    {
        Bcb->RefCount--;
        if (Bcb->RefCount == 0)
        {
            /* Last reference: tear the cache down */
            MmFreeSectionSegments(Bcb->FileObject);
            CcRosDeleteFileCache(FileObject, Bcb);
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
}
1141
NTSTATUS
NTAPI
CcRosReleaseFileCache (
    PFILE_OBJECT FileObject)
/*
 * FUNCTION: Called by the file system when a handle to a file object
 * has been closed.
 *
 * If the file object carried a private cache map, that map's reference
 * on the shared cache map (BCB) is dropped; the cache is destroyed when
 * the count reaches zero. Always returns STATUS_SUCCESS.
 */
{
    PBCB Bcb;

    KeAcquireGuardedMutex(&ViewLock);

    if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
    {
        Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
        if (FileObject->PrivateCacheMap != NULL)
        {
            /* This handle held a caching reference; release it */
            FileObject->PrivateCacheMap = NULL;
            if (Bcb->RefCount > 0)
            {
                Bcb->RefCount--;
                if (Bcb->RefCount == 0)
                {
                    /* Last reference: tear the cache down */
                    MmFreeSectionSegments(Bcb->FileObject);
                    CcRosDeleteFileCache(FileObject, Bcb);
                }
            }
        }
    }
    KeReleaseGuardedMutex(&ViewLock);
    return STATUS_SUCCESS;
}
1175
/*
 * Attach the file object to an already-existing shared cache map.
 *
 * Returns STATUS_UNSUCCESSFUL if the file has no shared cache map yet
 * (the caller must then do a full CcRosInitializeFileCache). Otherwise
 * installs the BCB as the file object's private cache map (adding one
 * reference on the first attach) and removes the BCB from the
 * deferred-close list if it was queued there.
 */
NTSTATUS
NTAPI
CcTryToInitializeFileCache (
    PFILE_OBJECT FileObject)
{
    PBCB Bcb;
    NTSTATUS Status;

    KeAcquireGuardedMutex(&ViewLock);

    ASSERT(FileObject->SectionObjectPointer);
    Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
    if (Bcb == NULL)
    {
        Status = STATUS_UNSUCCESSFUL;
    }
    else
    {
        if (FileObject->PrivateCacheMap == NULL)
        {
            /* First attach through this file object */
            FileObject->PrivateCacheMap = Bcb;
            Bcb->RefCount++;
        }
        if (Bcb->BcbRemoveListEntry.Flink != NULL)
        {
            /* The map was pending deferred close; cancel that */
            RemoveEntryList(&Bcb->BcbRemoveListEntry);
            Bcb->BcbRemoveListEntry.Flink = NULL;
        }
        Status = STATUS_SUCCESS;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return Status;
}
1210
1211
1212 NTSTATUS
1213 NTAPI
1214 CcRosInitializeFileCache (
1215 PFILE_OBJECT FileObject,
1216 PCACHE_MANAGER_CALLBACKS CallBacks,
1217 PVOID LazyWriterContext)
1218 /*
1219 * FUNCTION: Initializes a BCB for a file object
1220 */
1221 {
1222 PBCB Bcb;
1223
1224 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1225 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p)\n",
1226 FileObject, Bcb);
1227
1228 KeAcquireGuardedMutex(&ViewLock);
1229 if (Bcb == NULL)
1230 {
1231 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1232 if (Bcb == NULL)
1233 {
1234 KeReleaseGuardedMutex(&ViewLock);
1235 return STATUS_UNSUCCESSFUL;
1236 }
1237 RtlZeroMemory(Bcb, sizeof(*Bcb));
1238 ObReferenceObjectByPointer(FileObject,
1239 FILE_ALL_ACCESS,
1240 NULL,
1241 KernelMode);
1242 Bcb->FileObject = FileObject;
1243 Bcb->Callbacks = CallBacks;
1244 Bcb->LazyWriteContext = LazyWriterContext;
1245 if (FileObject->FsContext)
1246 {
1247 Bcb->AllocationSize =
1248 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1249 Bcb->FileSize =
1250 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1251 }
1252 KeInitializeSpinLock(&Bcb->BcbLock);
1253 InitializeListHead(&Bcb->BcbSegmentListHead);
1254 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1255 }
1256 if (FileObject->PrivateCacheMap == NULL)
1257 {
1258 FileObject->PrivateCacheMap = Bcb;
1259 Bcb->RefCount++;
1260 }
1261 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1262 {
1263 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1264 Bcb->BcbRemoveListEntry.Flink = NULL;
1265 }
1266 KeReleaseGuardedMutex(&ViewLock);
1267
1268 return STATUS_SUCCESS;
1269 }
1270
1271 /*
1272 * @implemented
1273 */
1274 PFILE_OBJECT
1275 NTAPI
1276 CcGetFileObjectFromSectionPtrs (
1277 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1278 {
1279 PBCB Bcb;
1280 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1281 {
1282 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1283 ASSERT(Bcb);
1284 return Bcb->FileObject;
1285 }
1286 return NULL;
1287 }
1288
/*
 * One-time initialization of the cache manager's view machinery.
 *
 * Sets up the global segment/dirty/LRU/closed lists and ViewLock,
 * creates the non-paged lookaside lists for internal BCBs, BCBs, and
 * cache segments (depth 20 each), registers CcRosTrimCache as the
 * MC_CACHE memory consumer's trim routine, and initializes the shared
 * zero page. Runs from the INIT section during kernel startup.
 */
VOID
INIT_FUNCTION
NTAPI
CcInitView (
    VOID)
{
    DPRINT("CcInitView()\n");

    InitializeListHead(&CacheSegmentListHead);
    InitializeListHead(&DirtySegmentListHead);
    InitializeListHead(&CacheSegmentLRUListHead);
    InitializeListHead(&ClosedListHead);
    KeInitializeGuardedMutex(&ViewLock);
    ExInitializeNPagedLookasideList (&iBcbLookasideList,
                                     NULL,
                                     NULL,
                                     0,
                                     sizeof(INTERNAL_BCB),
                                     TAG_IBCB,
                                     20);
    ExInitializeNPagedLookasideList (&BcbLookasideList,
                                     NULL,
                                     NULL,
                                     0,
                                     sizeof(BCB),
                                     TAG_BCB,
                                     20);
    ExInitializeNPagedLookasideList (&CacheSegLookasideList,
                                     NULL,
                                     NULL,
                                     0,
                                     sizeof(CACHE_SEGMENT),
                                     TAG_CSEG,
                                     20);

    /* Let Mm call back into the cache when it needs pages freed */
    MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);

    CcInitCacheZeroPage();

}
1329
1330 /* EOF */