[NTOS:CC]
[reactos.git] / reactos / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
18 * (1) If caching for the FCB hasn't been initiated then so do by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 /*
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocate/deallocate space from this block
47 * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
49 */
50 //#define CACHE_BITMAP
51
52 static LIST_ENTRY DirtySegmentListHead;
53 static LIST_ENTRY CacheSegmentListHead;
54 static LIST_ENTRY CacheSegmentLRUListHead;
55 static LIST_ENTRY ClosedListHead;
56 ULONG DirtyPageCount=0;
57
58 KGUARDED_MUTEX ViewLock;
59
60 #ifdef CACHE_BITMAP
61 #define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)
62
63 static PVOID CiCacheSegMappingRegionBase = NULL;
64 static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
65 static ULONG CiCacheSegMappingRegionHint;
66 static KSPIN_LOCK CiCacheSegMappingRegionLock;
67 #endif
68
69 NPAGED_LOOKASIDE_LIST iBcbLookasideList;
70 static NPAGED_LOOKASIDE_LIST BcbLookasideList;
71 static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
72
#if DBG
/* Debug builds: reference-count mutators that additionally emit a trace
 * line whenever the owning BCB has tracing enabled (see CcRosTraceCacheMap). */
static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    ++cs->ReferenceCount;
    if ( cs->Bcb->Trace )
    {
        DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}
static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    --cs->ReferenceCount;
    if ( cs->Bcb->Trace )
    {
        DbgPrint("(%s:%i) CacheSegment %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}
/* Capture the caller's location so trace output points at the call site. */
#define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
#define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
#else
/* Free builds: plain, untraced increment/decrement. */
#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
#endif
98
99 NTSTATUS
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
101
102
103 /* FUNCTIONS *****************************************************************/
104
VOID
NTAPI
CcRosTraceCacheMap (
    PBCB Bcb,
    BOOLEAN Trace )
/*
 * FUNCTION: Enable or disable reference-count tracing for one cache map.
 * On enable (debug builds), also dumps the current state of every cache
 * segment attached to the BCB. No-op in free builds.
 */
{
#if DBG
    KIRQL oldirql;
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;

    if ( !Bcb )
        return;

    Bcb->Trace = Trace;

    if ( Trace )
    {
        DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );

        /* Lock order: ViewLock first, then the per-BCB spinlock. */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);

        /* Walk all segments of this BCB and report their state. */
        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (current_entry != &Bcb->BcbSegmentListHead)
        {
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            current_entry = current_entry->Flink;

            DPRINT1(" CacheSegment 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
                    current, current->ReferenceCount, current->Dirty, current->PageOut );
        }
        KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
    }

#else
    /* Free build: reference the parameters to silence unused warnings. */
    Bcb = Bcb;
    Trace = Trace;
#endif
}
150
NTSTATUS
NTAPI
CcRosFlushCacheSegment (
    PCACHE_SEGMENT CacheSegment)
/*
 * FUNCTION: Write a dirty cache segment back to its file. On success the
 * segment is marked clean, removed from the global dirty list, the dirty
 * page accounting is reduced, and the reference held by the dirty-list
 * entry is dropped.
 * RETURNS: The status of WriteCacheSegment().
 */
{
    NTSTATUS Status;
    KIRQL oldIrql;

    Status = WriteCacheSegment(CacheSegment);
    if (NT_SUCCESS(Status))
    {
        /* Lock order: ViewLock first, then the owning BCB's spinlock. */
        KeAcquireGuardedMutex(&ViewLock);
        KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);

        CacheSegment->Dirty = FALSE;
        RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
        DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
        /* Drop the reference the dirty-list entry was holding. */
        CcRosCacheSegmentDecRefCount(CacheSegment);

        KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
        KeReleaseGuardedMutex(&ViewLock);
    }

    return(Status);
}
176
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait)
/*
 * FUNCTION: Walk the global dirty-segment list and write segments back
 * until roughly Target pages have been flushed or the list is exhausted.
 * ARGUMENTS:
 *   Target - Number of dirty pages we would like flushed.
 *   Count  - Receives the number of pages actually flushed.
 *   Wait   - TRUE to block on the lazy-write lock and segment mutex;
 *            FALSE to skip segments that cannot be acquired immediately.
 * RETURNS: STATUS_SUCCESS (individual flush failures are only logged).
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;
    ZeroTimeout.QuadPart = 0;

    /* Stay non-alertable while holding cache locks. */
    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtySegmentListHead.Flink;
    if (current_entry == &DirtySegmentListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtySegmentListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    DirtySegmentListEntry);
        current_entry = current_entry->Flink;

        /* Pin the segment while we try to acquire it. */
        CcRosCacheSegmentIncRefCount(current);

        /* Let the FSD veto or delay the lazy write; with Wait == FALSE
         * this must not block. */
        Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
                     current->Bcb->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosCacheSegmentDecRefCount(current);
            continue;
        }

        /* Zero timeout == try-acquire when we may not block. */
        Status = KeWaitForSingleObject(&current->Mutex,
                                       Executive,
                                       KernelMode,
                                       FALSE,
                                       Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);
            CcRosCacheSegmentDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above */
        /* A dirty segment carries the dirty-list reference plus ours
         * (total 2); more than that means another thread is actively
         * using the segment, so leave it alone for now. */
        if (current->ReferenceCount > 2)
        {
            KeReleaseMutex(&current->Mutex, FALSE);
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);
            CcRosCacheSegmentDecRefCount(current);
            continue;
        }

        PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;

        /* Drop ViewLock across the actual I/O. */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushCacheSegment(current);

        KeReleaseMutex(&current->Mutex, FALSE);
        current->Bcb->Callbacks->ReleaseFromLazyWrite(
            current->Bcb->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosCacheSegmentDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
        {
            DPRINT1("CC: Failed to flush cache segment.\n");
        }
        else
        {
            (*Count) += PagesPerSegment;
            Target -= PagesPerSegment;
        }

        /* ViewLock was dropped during the flush, so the saved iterator
         * may be stale: restart from the head of the dirty list. */
        current_entry = DirtySegmentListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return(STATUS_SUCCESS);
}
278
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *       Target - The number of pages to be freed.
 *       Priority - The priority of free (currently unused).
 *       NrFreed - Points to a variable where the number of pages
 *                 actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Scan the LRU list starting at the least recently used end. */
    current_entry = CacheSegmentLRUListHead.Flink;
    while (current_entry != &CacheSegmentLRUListHead)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    CacheSegmentLRUListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);

        /* Reference the cache segment */
        CcRosCacheSegmentIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            /* NOTE(review): once ViewLock is dropped, the already-advanced
             * current_entry could be unlinked or freed by another thread;
             * confirm the scan can safely resume from it. */
            KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the segment */
            for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
        }

        /* Dereference the cache segment */
        CcRosCacheSegmentDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Unlink from every list and park the segment on the local
             * FreeList; actual teardown happens below with no locks held. */
            RemoveEntryList(&current->BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
            PagesFreed = min(PagesPerSegment, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Tear down the segments collected above, outside of any lock. */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    BcbSegmentListEntry);
        CcRosInternalFreeCacheSegment(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return(STATUS_SUCCESS);
}
401
NTSTATUS
NTAPI
CcRosReleaseCacheSegment (
    PBCB Bcb,
    PCACHE_SEGMENT CacheSeg,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
/*
 * FUNCTION: Release a cache segment previously obtained (referenced and
 * with its mutex held) via CcRosGetCacheSegment/CcRosLookupCacheSegment.
 * Updates the segment's valid/dirty/mapped state, adjusts the reference
 * count accordingly, and releases the segment mutex.
 */
{
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %u)\n",
           Bcb, CacheSeg, Valid);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);

    CacheSeg->Valid = Valid;

    /* Dirty is sticky: once set it stays set until flushed. */
    WasDirty = CacheSeg->Dirty;
    CacheSeg->Dirty = CacheSeg->Dirty || Dirty;

    /* First clean->dirty transition: enqueue on the global dirty list
     * and account for its pages. */
    if (!WasDirty && CacheSeg->Dirty)
    {
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
    }

    if (Mapped)
    {
        CacheSeg->MappedCount++;
    }
    /* Drop the caller's reference... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ...but keep one on behalf of the first mapping... */
    if (Mapped && (CacheSeg->MappedCount == 1))
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    /* ...and one on behalf of the new dirty-list entry. */
    if (!WasDirty && CacheSeg->Dirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }

    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&CacheSeg->Mutex, FALSE);

    return(STATUS_SUCCESS);
}
453
454 /* Returns with Cache Segment Lock Held! */
455 PCACHE_SEGMENT
456 NTAPI
457 CcRosLookupCacheSegment (
458 PBCB Bcb,
459 ULONG FileOffset)
460 {
461 PLIST_ENTRY current_entry;
462 PCACHE_SEGMENT current;
463 KIRQL oldIrql;
464
465 ASSERT(Bcb);
466
467 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %lu)\n", Bcb, FileOffset);
468
469 KeAcquireGuardedMutex(&ViewLock);
470 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
471
472 current_entry = Bcb->BcbSegmentListHead.Flink;
473 while (current_entry != &Bcb->BcbSegmentListHead)
474 {
475 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
476 BcbSegmentListEntry);
477 if (IsPointInSegment(current->FileOffset, Bcb->CacheSegmentSize,
478 FileOffset))
479 {
480 CcRosCacheSegmentIncRefCount(current);
481 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
482 KeReleaseGuardedMutex(&ViewLock);
483 KeWaitForSingleObject(&current->Mutex,
484 Executive,
485 KernelMode,
486 FALSE,
487 NULL);
488 return current;
489 }
490 current_entry = current_entry->Flink;
491 }
492
493 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
494 KeReleaseGuardedMutex(&ViewLock);
495
496 return NULL;
497 }
498
NTSTATUS
NTAPI
CcRosMarkDirtyCacheSegment (
    PBCB Bcb,
    ULONG FileOffset)
/*
 * FUNCTION: Mark the cache segment covering FileOffset dirty, inserting it
 * into the global dirty list if it was clean, and refresh its LRU position.
 * Bugchecks if no segment maps the offset.
 */
{
    PCACHE_SEGMENT CacheSeg;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %lu)\n", Bcb, FileOffset);

    /* The lookup returns the segment referenced and with its mutex held. */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);

    if (!CacheSeg->Dirty)
    {
        /* Newly dirty: the lookup's reference is kept and becomes the
         * dirty-list entry's reference. */
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
    }
    else
    {
        /* Already dirty: the dirty list holds a reference, so drop the
         * one the lookup took. */
        CcRosCacheSegmentDecRefCount(CacheSeg);
    }

    /* Move to the tail of the LRU list */
    RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
    InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);

    CacheSeg->Dirty = TRUE;

    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&CacheSeg->Mutex, FALSE);

    return(STATUS_SUCCESS);
}
543
NTSTATUS
NTAPI
CcRosUnmapCacheSegment (
    PBCB Bcb,
    ULONG FileOffset,
    BOOLEAN NowDirty)
/*
 * FUNCTION: Drop one mapping of the cache segment covering FileOffset,
 * optionally marking it dirty. When the last mapping goes away, the
 * reference held on behalf of the mappings is released as well.
 * RETURNS: STATUS_UNSUCCESSFUL if no segment maps the offset.
 */
{
    PCACHE_SEGMENT CacheSeg;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %lu, NowDirty %u)\n",
           Bcb, FileOffset, NowDirty);

    /* The lookup returns the segment referenced and with its mutex held. */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        return(STATUS_UNSUCCESSFUL);
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);

    /* Dirty is sticky: once set it stays set until flushed. */
    WasDirty = CacheSeg->Dirty;
    CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;

    CacheSeg->MappedCount--;

    /* First clean->dirty transition: put it on the global dirty list. */
    if (!WasDirty && NowDirty)
    {
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
    }

    /* Drop the lookup's reference... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ...take one for the new dirty-list entry... */
    if (!WasDirty && NowDirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    /* ...and release the mapping reference when the last mapping is gone. */
    if (CacheSeg->MappedCount == 0)
    {
        CcRosCacheSegmentDecRefCount(CacheSeg);
    }

    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&CacheSeg->Mutex, FALSE);

    return(STATUS_SUCCESS);
}
596
597 static
598 NTSTATUS
599 CcRosCreateCacheSegment (
600 PBCB Bcb,
601 ULONG FileOffset,
602 PCACHE_SEGMENT* CacheSeg)
603 {
604 PCACHE_SEGMENT current;
605 PCACHE_SEGMENT previous;
606 PLIST_ENTRY current_entry;
607 NTSTATUS Status;
608 KIRQL oldIrql;
609 #ifdef CACHE_BITMAP
610 ULONG StartingOffset;
611 #endif
612 PHYSICAL_ADDRESS BoundaryAddressMultiple;
613
614 ASSERT(Bcb);
615
616 DPRINT("CcRosCreateCacheSegment()\n");
617
618 BoundaryAddressMultiple.QuadPart = 0;
619 if (FileOffset >= Bcb->FileSize.u.LowPart)
620 {
621 CacheSeg = NULL;
622 return STATUS_INVALID_PARAMETER;
623 }
624
625 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
626 current->Valid = FALSE;
627 current->Dirty = FALSE;
628 current->PageOut = FALSE;
629 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
630 current->Bcb = Bcb;
631 #if DBG
632 if ( Bcb->Trace )
633 {
634 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
635 }
636 #endif
637 current->MappedCount = 0;
638 current->DirtySegmentListEntry.Flink = NULL;
639 current->DirtySegmentListEntry.Blink = NULL;
640 current->ReferenceCount = 1;
641 KeInitializeMutex(&current->Mutex, 0);
642 KeWaitForSingleObject(&current->Mutex,
643 Executive,
644 KernelMode,
645 FALSE,
646 NULL);
647 KeAcquireGuardedMutex(&ViewLock);
648
649 *CacheSeg = current;
650 /* There is window between the call to CcRosLookupCacheSegment
651 * and CcRosCreateCacheSegment. We must check if a segment on
652 * the fileoffset exist. If there exist a segment, we release
653 * our new created segment and return the existing one.
654 */
655 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
656 current_entry = Bcb->BcbSegmentListHead.Flink;
657 previous = NULL;
658 while (current_entry != &Bcb->BcbSegmentListHead)
659 {
660 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
661 BcbSegmentListEntry);
662 if (IsPointInSegment(current->FileOffset, Bcb->CacheSegmentSize,
663 FileOffset))
664 {
665 CcRosCacheSegmentIncRefCount(current);
666 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
667 #if DBG
668 if ( Bcb->Trace )
669 {
670 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
671 Bcb,
672 (*CacheSeg),
673 current );
674 }
675 #endif
676 KeReleaseMutex(&(*CacheSeg)->Mutex, FALSE);
677 KeReleaseGuardedMutex(&ViewLock);
678 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
679 *CacheSeg = current;
680 KeWaitForSingleObject(&current->Mutex,
681 Executive,
682 KernelMode,
683 FALSE,
684 NULL);
685 return STATUS_SUCCESS;
686 }
687 if (current->FileOffset < FileOffset)
688 {
689 if (previous == NULL)
690 {
691 previous = current;
692 }
693 else
694 {
695 if (previous->FileOffset < current->FileOffset)
696 {
697 previous = current;
698 }
699 }
700 }
701 current_entry = current_entry->Flink;
702 }
703 /* There was no existing segment. */
704 current = *CacheSeg;
705 if (previous)
706 {
707 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
708 }
709 else
710 {
711 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
712 }
713 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
714 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
715 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
716 KeReleaseGuardedMutex(&ViewLock);
717 #ifdef CACHE_BITMAP
718 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
719
720 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
721
722 if (StartingOffset == 0xffffffff)
723 {
724 DPRINT1("Out of CacheSeg mapping space\n");
725 KeBugCheck(CACHE_MANAGER);
726 }
727
728 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
729
730 if (CiCacheSegMappingRegionHint == StartingOffset)
731 {
732 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
733 }
734
735 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
736 #else
737 MmLockAddressSpace(MmGetKernelAddressSpace());
738 current->BaseAddress = NULL;
739 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
740 0, // nothing checks for cache_segment mareas, so set to 0
741 &current->BaseAddress,
742 Bcb->CacheSegmentSize,
743 PAGE_READWRITE,
744 (PMEMORY_AREA*)&current->MemoryArea,
745 FALSE,
746 0,
747 BoundaryAddressMultiple);
748 MmUnlockAddressSpace(MmGetKernelAddressSpace());
749 if (!NT_SUCCESS(Status))
750 {
751 KeBugCheck(CACHE_MANAGER);
752 }
753 #endif
754
755 /* Create a virtual mapping for this memory area */
756 MI_SET_USAGE(MI_USAGE_CACHE);
757 #if MI_TRACE_PFNS
758 PWCHAR pos = NULL;
759 ULONG len = 0;
760 if ((Bcb->FileObject) && (Bcb->FileObject->FileName.Buffer))
761 {
762 pos = wcsrchr(Bcb->FileObject->FileName.Buffer, '\\');
763 len = wcslen(pos) * sizeof(WCHAR);
764 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
765 }
766 #endif
767
768 MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
769 MC_CACHE, PAGE_READWRITE);
770
771 return(STATUS_SUCCESS);
772 }
773
NTSTATUS
NTAPI
CcRosGetCacheSegmentChain (
    PBCB Bcb,
    ULONG FileOffset,
    ULONG Length,
    PCACHE_SEGMENT* CacheSeg)
/*
 * FUNCTION: Return, via *CacheSeg, a NextInChain-linked chain of cache
 * segments covering [FileOffset, FileOffset + Length). Existing segments
 * are reused (and moved to the LRU tail); missing ones are created.
 */
{
    PCACHE_SEGMENT current;
    ULONG i;
    PCACHE_SEGMENT* CacheSegList;
    PCACHE_SEGMENT Previous = NULL;

    ASSERT(Bcb);

    DPRINT("CcRosGetCacheSegmentChain()\n");

    Length = ROUND_UP(Length, Bcb->CacheSegmentSize);

    /* NOTE(review): stack allocation sized by Length / CacheSegmentSize;
     * presumably callers bound Length - confirm, as a large request would
     * overflow the kernel stack. */
    CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
                           (Length / Bcb->CacheSegmentSize));

    /*
     * Look for a cache segment already mapping the same data.
     */
    for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
    {
        ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
        current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
        if (current != NULL)
        {
            KeAcquireGuardedMutex(&ViewLock);

            /* Move to tail of LRU list */
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);

            KeReleaseGuardedMutex(&ViewLock);

            CacheSegList[i] = current;
        }
        else
        {
            /* NOTE(review): the create status is ignored here; on failure
             * the slot would hold a NULL/stale pointer - verify callers
             * never request offsets past the end of the file. */
            CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
            CacheSegList[i] = current;
        }
    }

    /* Thread the collected segments together via NextInChain. */
    for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
    {
        if (i == 0)
        {
            *CacheSeg = CacheSegList[i];
            Previous = CacheSegList[i];
        }
        else
        {
            Previous->NextInChain = CacheSegList[i];
            Previous = CacheSegList[i];
        }
    }
    ASSERT(Previous);
    Previous->NextInChain = NULL;

    return(STATUS_SUCCESS);
}
840
841 NTSTATUS
842 NTAPI
843 CcRosGetCacheSegment (
844 PBCB Bcb,
845 ULONG FileOffset,
846 PULONG BaseOffset,
847 PVOID* BaseAddress,
848 PBOOLEAN UptoDate,
849 PCACHE_SEGMENT* CacheSeg)
850 {
851 PCACHE_SEGMENT current;
852 NTSTATUS Status;
853
854 ASSERT(Bcb);
855
856 DPRINT("CcRosGetCacheSegment()\n");
857
858 /*
859 * Look for a cache segment already mapping the same data.
860 */
861 current = CcRosLookupCacheSegment(Bcb, FileOffset);
862 if (current == NULL)
863 {
864 /*
865 * Otherwise create a new segment.
866 */
867 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
868 if (!NT_SUCCESS(Status))
869 {
870 return Status;
871 }
872 }
873
874 KeAcquireGuardedMutex(&ViewLock);
875
876 /* Move to the tail of the LRU list */
877 RemoveEntryList(&current->CacheSegmentLRUListEntry);
878 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
879
880 KeReleaseGuardedMutex(&ViewLock);
881
882 /*
883 * Return information about the segment to the caller.
884 */
885 *UptoDate = current->Valid;
886 *BaseAddress = current->BaseAddress;
887 DPRINT("*BaseAddress %p\n", *BaseAddress);
888 *CacheSeg = current;
889 *BaseOffset = current->FileOffset;
890 return(STATUS_SUCCESS);
891 }
892
893 NTSTATUS
894 NTAPI
895 CcRosRequestCacheSegment (
896 PBCB Bcb,
897 ULONG FileOffset,
898 PVOID* BaseAddress,
899 PBOOLEAN UptoDate,
900 PCACHE_SEGMENT* CacheSeg)
901 /*
902 * FUNCTION: Request a page mapping for a BCB
903 */
904 {
905 ULONG BaseOffset;
906
907 ASSERT(Bcb);
908
909 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
910 {
911 DPRINT1("Bad fileoffset %x should be multiple of %x",
912 FileOffset, Bcb->CacheSegmentSize);
913 KeBugCheck(CACHE_MANAGER);
914 }
915
916 return(CcRosGetCacheSegment(Bcb,
917 FileOffset,
918 &BaseOffset,
919 BaseAddress,
920 UptoDate,
921 CacheSeg));
922 }
#ifdef CACHE_BITMAP
#else
/*
 * Per-page callback passed to MmFreeMemoryArea by
 * CcRosInternalFreeCacheSegment: releases each physical page of a cache
 * segment back to the MC_CACHE memory consumer. Cache pages are never
 * swap-backed, hence the SwapEntry assertion.
 */
static
VOID
CcFreeCachePage (
    PVOID Context,
    MEMORY_AREA* MemoryArea,
    PVOID Address,
    PFN_NUMBER Page,
    SWAPENTRY SwapEntry,
    BOOLEAN Dirty)
{
    ASSERT(SwapEntry == 0);
    if (Page != 0)
    {
        /* The cache must hold the last reference at teardown time. */
        ASSERT(MmGetReferenceCountPage(Page) == 1);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }
}
#endif
NTSTATUS
CcRosInternalFreeCacheSegment (
    PCACHE_SEGMENT CacheSeg)
/*
 * FUNCTION: Releases a cache segment associated with a BCB
 * The segment must already be unlinked from all lists; this unmaps and
 * frees its pages and returns the descriptor to the lookaside list.
 */
{
#ifdef CACHE_BITMAP
    ULONG i;
    ULONG RegionSize;
    ULONG Base;
    PFN_NUMBER Page;
    KIRQL oldIrql;
#endif
    DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
#if DBG
    if ( CacheSeg->Bcb->Trace )
    {
        DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
    }
#endif
#ifdef CACHE_BITMAP
    RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;

    /* Unmap all the pages. */
    for (i = 0; i < RegionSize; i++)
    {
        MmDeleteVirtualMapping(NULL,
                               CacheSeg->BaseAddress + (i * PAGE_SIZE),
                               FALSE,
                               NULL,
                               &Page);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }

    KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
    /* Deallocate all the pages used. */
    Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;

    RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);

    /* Let future searches start at the lowest freed slot. */
    CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);

    KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
#else
    /* Tear down the memory area; CcFreeCachePage releases each page. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     CacheSeg->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());
#endif
    ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
    return(STATUS_SUCCESS);
}
998
/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
/*
 * FUNCTION: Write the dirty cache segments of a file back to disk.
 * When FileOffset is NULL the whole cached range (from offset 0 for
 * FileSize bytes) is flushed. IoStatus, if supplied, receives
 * STATUS_SUCCESS or the last flush failure, or
 * STATUS_INVALID_PARAMETER when the file has no shared cache map.
 */
{
    PBCB Bcb;
    LARGE_INTEGER Offset;
    PCACHE_SEGMENT current;
    NTSTATUS Status;
    KIRQL oldIrql;

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
        ASSERT(Bcb);
        if (FileOffset)
        {
            Offset = *FileOffset;
        }
        else
        {
            /* No range given: flush the entire cached file. */
            Offset.QuadPart = (LONGLONG)0;
            Length = Bcb->FileSize.u.LowPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one cache segment at a time. */
        while (Length > 0)
        {
            /* Lookup returns the segment referenced with its mutex held. */
            current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushCacheSegment(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }
                KeReleaseMutex(&current->Mutex, FALSE);

                /* Drop the reference taken by the lookup. */
                KeAcquireGuardedMutex(&ViewLock);
                KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
                CcRosCacheSegmentDecRefCount(current);
                KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
                KeReleaseGuardedMutex(&ViewLock);
            }

            /* Advance to the next segment-sized chunk. */
            Offset.QuadPart += Bcb->CacheSegmentSize;
            if (Length > Bcb->CacheSegmentSize)
            {
                Length -= Bcb->CacheSegmentSize;
            }
            else
            {
                Length = 0;
            }
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1080
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PBCB Bcb)
/*
 * FUNCTION: Releases the BCB associated with a file object
 * NOTE: The caller must hold ViewLock on entry; it is temporarily
 * released around the flush and around the final segment teardown,
 * and is held again on return.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(Bcb);

    /* Keep the BCB alive while ViewLock is dropped for the flush. */
    Bcb->RefCount++;
    KeReleaseGuardedMutex(&ViewLock);

    /* Write any remaining dirty data back to disk first. */
    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    Bcb->RefCount--;
    if (Bcb->RefCount == 0)
    {
        /* Take it off the deferred-remove list if it was queued there. */
        if (Bcb->BcbRemoveListEntry.Flink != NULL)
        {
            RemoveEntryList(&Bcb->BcbRemoveListEntry);
            Bcb->BcbRemoveListEntry.Flink = NULL;
        }

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all cache segments.
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
        while (!IsListEmpty(&Bcb->BcbSegmentListHead))
        {
            current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            if (current->Dirty)
            {
                /* Dirty segments should have been flushed above. */
                RemoveEntryList(&current->DirtySegmentListEntry);
                DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
                DPRINT1("Freeing dirty segment\n");
            }
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
        }
#if DBG
        Bcb->Trace = FALSE;
#endif
        KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);

        /* Free the collected segments without holding ViewLock. */
        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject (Bcb->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            CcRosInternalFreeCacheSegment(current);
        }
        ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
        KeAcquireGuardedMutex(&ViewLock);
    }
    return(STATUS_SUCCESS);
}
1152
1153 VOID
1154 NTAPI
1155 CcRosReferenceCache (
1156 PFILE_OBJECT FileObject)
1157 {
1158 PBCB Bcb;
1159 KeAcquireGuardedMutex(&ViewLock);
1160 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1161 ASSERT(Bcb);
1162 if (Bcb->RefCount == 0)
1163 {
1164 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1165 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1166 Bcb->BcbRemoveListEntry.Flink = NULL;
1167
1168 }
1169 else
1170 {
1171 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1172 }
1173 Bcb->RefCount++;
1174 KeReleaseGuardedMutex(&ViewLock);
1175 }
1176
1177 VOID
1178 NTAPI
1179 CcRosSetRemoveOnClose (
1180 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1181 {
1182 PBCB Bcb;
1183 DPRINT("CcRosSetRemoveOnClose()\n");
1184 KeAcquireGuardedMutex(&ViewLock);
1185 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1186 if (Bcb)
1187 {
1188 Bcb->RemoveOnClose = TRUE;
1189 if (Bcb->RefCount == 0)
1190 {
1191 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1192 }
1193 }
1194 KeReleaseGuardedMutex(&ViewLock);
1195 }
1196
1197
1198 VOID
1199 NTAPI
1200 CcRosDereferenceCache (
1201 PFILE_OBJECT FileObject)
1202 {
1203 PBCB Bcb;
1204 KeAcquireGuardedMutex(&ViewLock);
1205 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1206 ASSERT(Bcb);
1207 if (Bcb->RefCount > 0)
1208 {
1209 Bcb->RefCount--;
1210 if (Bcb->RefCount == 0)
1211 {
1212 MmFreeSectionSegments(Bcb->FileObject);
1213 CcRosDeleteFileCache(FileObject, Bcb);
1214 }
1215 }
1216 KeReleaseGuardedMutex(&ViewLock);
1217 }
1218
1219 NTSTATUS
1220 NTAPI
1221 CcRosReleaseFileCache (
1222 PFILE_OBJECT FileObject)
1223 /*
1224 * FUNCTION: Called by the file system when a handle to a file object
1225 * has been closed.
1226 */
1227 {
1228 PBCB Bcb;
1229
1230 KeAcquireGuardedMutex(&ViewLock);
1231
1232 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1233 {
1234 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1235 if (FileObject->PrivateCacheMap != NULL)
1236 {
1237 FileObject->PrivateCacheMap = NULL;
1238 if (Bcb->RefCount > 0)
1239 {
1240 Bcb->RefCount--;
1241 if (Bcb->RefCount == 0)
1242 {
1243 MmFreeSectionSegments(Bcb->FileObject);
1244 CcRosDeleteFileCache(FileObject, Bcb);
1245 }
1246 }
1247 }
1248 }
1249 KeReleaseGuardedMutex(&ViewLock);
1250 return(STATUS_SUCCESS);
1251 }
1252
1253 NTSTATUS
1254 NTAPI
1255 CcTryToInitializeFileCache (
1256 PFILE_OBJECT FileObject)
1257 {
1258 PBCB Bcb;
1259 NTSTATUS Status;
1260
1261 KeAcquireGuardedMutex(&ViewLock);
1262
1263 ASSERT(FileObject->SectionObjectPointer);
1264 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1265 if (Bcb == NULL)
1266 {
1267 Status = STATUS_UNSUCCESSFUL;
1268 }
1269 else
1270 {
1271 if (FileObject->PrivateCacheMap == NULL)
1272 {
1273 FileObject->PrivateCacheMap = Bcb;
1274 Bcb->RefCount++;
1275 }
1276 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1277 {
1278 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1279 Bcb->BcbRemoveListEntry.Flink = NULL;
1280 }
1281 Status = STATUS_SUCCESS;
1282 }
1283 KeReleaseGuardedMutex(&ViewLock);
1284
1285 return Status;
1286 }
1287
1288
NTSTATUS
NTAPI
CcRosInitializeFileCache (
    PFILE_OBJECT FileObject,
    ULONG CacheSegmentSize,
    PCACHE_MANAGER_CALLBACKS CallBacks,
    PVOID LazyWriterContext)
/*
 * FUNCTION: Initializes a BCB for a file object
 * ARGUMENTS:
 *      FileObject        = File to cache; its SectionObjectPointer must be valid.
 *      CacheSegmentSize  = Size in bytes of one cache segment (view) for this file.
 *      CallBacks         = Filesystem-supplied cache manager callbacks.
 *      LazyWriterContext = Opaque context handed back through the callbacks.
 * RETURNS: STATUS_SUCCESS, or STATUS_UNSUCCESSFUL if the BCB allocation failed.
 */
{
    PBCB Bcb;

    Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
    DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %lu)\n",
           FileObject, Bcb, CacheSegmentSize);

    KeAcquireGuardedMutex(&ViewLock);
    if (Bcb == NULL)
    {
        /* First caller for this file: allocate and set up a fresh BCB. */
        Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
        if (Bcb == NULL)
        {
            KeReleaseGuardedMutex(&ViewLock);
            return(STATUS_UNSUCCESSFUL);
        }
        memset(Bcb, 0, sizeof(BCB));
        /* Keep the file object alive as long as the cache exists;
         * the matching dereference is done when the cache is deleted. */
        ObReferenceObjectByPointer(FileObject,
                                   FILE_ALL_ACCESS,
                                   NULL,
                                   KernelMode);
        Bcb->FileObject = FileObject;
        Bcb->CacheSegmentSize = CacheSegmentSize;
        Bcb->Callbacks = CallBacks;
        Bcb->LazyWriteContext = LazyWriterContext;
        if (FileObject->FsContext)
        {
            /* Seed the cached sizes from the filesystem's common FCB header. */
            Bcb->AllocationSize =
                ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
            Bcb->FileSize =
                ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
        }
        KeInitializeSpinLock(&Bcb->BcbLock);
        InitializeListHead(&Bcb->BcbSegmentListHead);
        /* Publish the BCB; done under ViewLock so concurrent initializers
         * see either NULL or a fully initialized BCB. */
        FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
    }
    if (FileObject->PrivateCacheMap == NULL)
    {
        /* Attach this file object to the shared cache map and take a reference. */
        FileObject->PrivateCacheMap = Bcb;
        Bcb->RefCount++;
    }
    if (Bcb->BcbRemoveListEntry.Flink != NULL)
    {
        /* The BCB was queued for reclaim on the closed list; revive it. */
        RemoveEntryList(&Bcb->BcbRemoveListEntry);
        Bcb->BcbRemoveListEntry.Flink = NULL;
    }
    KeReleaseGuardedMutex(&ViewLock);

    return(STATUS_SUCCESS);
}
1349
1350 /*
1351 * @implemented
1352 */
1353 PFILE_OBJECT
1354 NTAPI
1355 CcGetFileObjectFromSectionPtrs (
1356 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1357 {
1358 PBCB Bcb;
1359 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1360 {
1361 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1362 ASSERT(Bcb);
1363 return Bcb->FileObject;
1364 }
1365 return NULL;
1366 }
1367
1368 VOID
1369 INIT_FUNCTION
1370 NTAPI
1371 CcInitView (
1372 VOID)
1373 {
1374 #ifdef CACHE_BITMAP
1375 PMEMORY_AREA marea;
1376 PVOID Buffer;
1377 PHYSICAL_ADDRESS BoundaryAddressMultiple;
1378 #endif
1379
1380 DPRINT("CcInitView()\n");
1381 #ifdef CACHE_BITMAP
1382 BoundaryAddressMultiple.QuadPart = 0;
1383 CiCacheSegMappingRegionHint = 0;
1384 CiCacheSegMappingRegionBase = NULL;
1385
1386 MmLockAddressSpace(MmGetKernelAddressSpace());
1387
1388 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1389 MEMORY_AREA_CACHE_SEGMENT,
1390 &CiCacheSegMappingRegionBase,
1391 CI_CACHESEG_MAPPING_REGION_SIZE,
1392 PAGE_READWRITE,
1393 &marea,
1394 FALSE,
1395 0,
1396 BoundaryAddressMultiple);
1397 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1398 if (!NT_SUCCESS(Status))
1399 {
1400 KeBugCheck(CACHE_MANAGER);
1401 }
1402
1403 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1404 if (!Buffer)
1405 {
1406 KeBugCheck(CACHE_MANAGER);
1407 }
1408
1409 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1410 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1411
1412 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1413 #endif
1414 InitializeListHead(&CacheSegmentListHead);
1415 InitializeListHead(&DirtySegmentListHead);
1416 InitializeListHead(&CacheSegmentLRUListHead);
1417 InitializeListHead(&ClosedListHead);
1418 KeInitializeGuardedMutex(&ViewLock);
1419 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1420 NULL,
1421 NULL,
1422 0,
1423 sizeof(INTERNAL_BCB),
1424 TAG_IBCB,
1425 20);
1426 ExInitializeNPagedLookasideList (&BcbLookasideList,
1427 NULL,
1428 NULL,
1429 0,
1430 sizeof(BCB),
1431 TAG_BCB,
1432 20);
1433 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1434 NULL,
1435 NULL,
1436 0,
1437 sizeof(CACHE_SEGMENT),
1438 TAG_CSEG,
1439 20);
1440
1441 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1442
1443 CcInitCacheZeroPage();
1444
1445 }
1446
1447 /* EOF */
1448
1449
1450
1451
1452
1453
1454
1455