5264d9139175cc8f64ca25a9fb070b6f61272e2d
[reactos.git] / reactos / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
18 * (1) If caching for the FCB hasn't been initiated then so do by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
26 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
/* GLOBALS *******************************************************************/

/*
 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
 * within the kernel address space and allocate/deallocate space from this block
 * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
 */
//#define CACHE_BITMAP

/* All cache segments carrying unwritten modifications, in insertion order. */
static LIST_ENTRY DirtySegmentListHead;
/* Every cache segment in the system. */
static LIST_ENTRY CacheSegmentListHead;
/* Cache segments ordered least-recently-used first; trimmed from the front. */
static LIST_ENTRY CacheSegmentLRUListHead;
static LIST_ENTRY ClosedListHead;
/* Total pages currently dirty in the cache; updated under ViewLock. */
ULONG DirtyPageCount=0;

/* Global lock serializing access to the cache segment lists above. */
KGUARDED_MUTEX ViewLock;

#ifdef CACHE_BITMAP
#define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)

/* Bitmap-managed virtual region used to map cache segments (CACHE_BITMAP only). */
static PVOID CiCacheSegMappingRegionBase = NULL;
static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
static ULONG CiCacheSegMappingRegionHint;
static KSPIN_LOCK CiCacheSegMappingRegionLock;
#endif

/* Lookaside lists for fast allocation of Cc bookkeeping structures. */
NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST BcbLookasideList;
static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
72
#if DBG
/*
 * Debug-build reference count helpers: besides adjusting ReferenceCount they
 * emit a trace line (with the caller's file/line) whenever tracing has been
 * enabled on the owning BCB via CcRosTraceCacheMap.
 */
static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    ++cs->ReferenceCount;
    if ( cs->Bcb->Trace )
    {
        DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}
static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    --cs->ReferenceCount;
    if ( cs->Bcb->Trace )
    {
        DbgPrint("(%s:%i) CacheSegment %p --RefCount=%lu, Dirty %u, PageOut %lu\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}
/* The macros inject the call site so traces point at the real caller. */
#define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
#define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
#else
/* Free builds: plain, untracked increments/decrements. */
#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
#endif
98
99 NTSTATUS
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
101
102
103 /* FUNCTIONS *****************************************************************/
104
105 VOID
106 NTAPI
107 CcRosTraceCacheMap (
108 PBCB Bcb,
109 BOOLEAN Trace )
110 {
111 #if DBG
112 KIRQL oldirql;
113 PLIST_ENTRY current_entry;
114 PCACHE_SEGMENT current;
115
116 if ( !Bcb )
117 return;
118
119 Bcb->Trace = Trace;
120
121 if ( Trace )
122 {
123 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
124
125 KeAcquireGuardedMutex(&ViewLock);
126 KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
127
128 current_entry = Bcb->BcbSegmentListHead.Flink;
129 while (current_entry != &Bcb->BcbSegmentListHead)
130 {
131 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
132 current_entry = current_entry->Flink;
133
134 DPRINT1(" CacheSegment 0x%p enabled, RefCount %lu, Dirty %u, PageOut %lu\n",
135 current, current->ReferenceCount, current->Dirty, current->PageOut );
136 }
137 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
138 KeReleaseGuardedMutex(&ViewLock);
139 }
140 else
141 {
142 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
143 }
144
145 #else
146 Bcb = Bcb;
147 Trace = Trace;
148 #endif
149 }
150
151 NTSTATUS
152 NTAPI
153 CcRosFlushCacheSegment (
154 PCACHE_SEGMENT CacheSegment)
155 {
156 NTSTATUS Status;
157 KIRQL oldIrql;
158
159 Status = WriteCacheSegment(CacheSegment);
160 if (NT_SUCCESS(Status))
161 {
162 KeAcquireGuardedMutex(&ViewLock);
163 KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
164
165 CacheSegment->Dirty = FALSE;
166 RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
167 DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
168 CcRosCacheSegmentDecRefCount(CacheSegment);
169
170 KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
171 KeReleaseGuardedMutex(&ViewLock);
172 }
173
174 return(Status);
175 }
176
NTSTATUS
NTAPI
CcRosFlushDirtyPages (
    ULONG Target,
    PULONG Count,
    BOOLEAN Wait)
/*
 * FUNCTION: Walk the dirty segment list and write segments back to disk
 * until roughly Target pages have been flushed.
 * ARGUMENTS:
 *   Target - Number of dirty pages we would like to flush.
 *   Count  - Receives the number of pages actually flushed.
 *   Wait   - If TRUE, block on the lazy-write lock and the segment mutex;
 *            if FALSE, skip any segment that cannot be acquired immediately.
 * RETURNS: Always STATUS_SUCCESS; per-segment flush failures are only logged.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %lu)\n", Target);

    (*Count) = 0;
    /* Zero timeout turns KeWaitForSingleObject into a try-acquire when
     * Wait is FALSE. */
    ZeroTimeout.QuadPart = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtySegmentListHead.Flink;
    if (current_entry == &DirtySegmentListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while ((current_entry != &DirtySegmentListHead) && (Target > 0))
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    DirtySegmentListEntry);
        current_entry = current_entry->Flink;

        /* Pin the segment so it cannot be freed while we work on it. */
        CcRosCacheSegmentIncRefCount(current);

        /* Ask the file system for permission to lazy-write. */
        Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
                     current->Bcb->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosCacheSegmentDecRefCount(current);
            continue;
        }

        Status = KeWaitForSingleObject(&current->Mutex,
                                       Executive,
                                       KernelMode,
                                       FALSE,
                                       Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            /* Could not get the segment mutex (timeout) - back out. */
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);
            CcRosCacheSegmentDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);

        /* One reference is added above */
        /* Dirty list holds one reference and we hold one; more than two
         * means someone else is actively using the segment - skip it. */
        if (current->ReferenceCount > 2)
        {
            KeReleaseMutex(&current->Mutex, FALSE);
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);
            CcRosCacheSegmentDecRefCount(current);
            continue;
        }

        PagesPerSegment = VACB_MAPPING_GRANULARITY / PAGE_SIZE;

        /* Drop ViewLock around the actual I/O; CcRosFlushCacheSegment
         * reacquires it internally to update the dirty bookkeeping. */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushCacheSegment(current);

        KeReleaseMutex(&current->Mutex, FALSE);
        current->Bcb->Callbacks->ReleaseFromLazyWrite(
            current->Bcb->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosCacheSegmentDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
        {
            DPRINT1("CC: Failed to flush cache segment.\n");
        }
        else
        {
            (*Count) += PagesPerSegment;
            Target -= PagesPerSegment;
        }

        /* The list may have changed while ViewLock was dropped; restart
         * from the head rather than trusting the saved iterator. */
        current_entry = DirtySegmentListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return(STATUS_SUCCESS);
}
278
NTSTATUS
CcRosTrimCache (
    ULONG Target,
    ULONG Priority,
    PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *   Target - The number of pages to be freed.
 *   Priority - The priority of free (currently unused).
 *   NrFreed - Points to a variable where the number of pages
 *             actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %lu)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Scan from the LRU end so the least-recently-used segments go first. */
    current_entry = CacheSegmentLRUListHead.Flink;
    while (current_entry != &CacheSegmentLRUListHead)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    CacheSegmentLRUListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);

        /* Reference the cache segment */
        CcRosCacheSegmentIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            /* NOTE(review): once both locks are dropped, the saved
             * current_entry may become stale if another thread edits the
             * LRU list - confirm whether this loop can tolerate that. */
            KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the segment */
            for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
        }

        /* Dereference the cache segment */
        CcRosCacheSegmentDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Unlink from all bookkeeping lists and queue for teardown;
             * the actual free happens after all locks are released. */
            RemoveEntryList(&current->BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesPerSegment = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
            PagesFreed = min(PagesPerSegment, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if ((Target > 0) && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %lu dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Now that no locks are held, destroy everything we unlinked above. */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    BcbSegmentListEntry);
        CcRosInternalFreeCacheSegment(current);
    }

    DPRINT("Evicted %lu cache pages\n", (*NrFreed));

    return(STATUS_SUCCESS);
}
401
NTSTATUS
NTAPI
CcRosReleaseCacheSegment (
    PBCB Bcb,
    PCACHE_SEGMENT CacheSeg,
    BOOLEAN Valid,
    BOOLEAN Dirty,
    BOOLEAN Mapped)
/*
 * FUNCTION: Release a cache segment previously obtained via
 * CcRosGetCacheSegment/CcRosLookupCacheSegment, updating its valid/dirty/
 * mapped state and dropping the caller's reference and segment mutex.
 */
{
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %u)\n",
           Bcb, CacheSeg, Valid);

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);

    CacheSeg->Valid = Valid;

    /* Dirty is sticky: once set it stays set until the segment is flushed. */
    WasDirty = CacheSeg->Dirty;
    CacheSeg->Dirty = CacheSeg->Dirty || Dirty;

    if (!WasDirty && CacheSeg->Dirty)
    {
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    if (Mapped)
    {
        CacheSeg->MappedCount++;
    }
    /* Drop the caller's reference... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ...but re-add one for the first mapping... */
    if (Mapped && (CacheSeg->MappedCount == 1))
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    /* ...and one held on behalf of the dirty list if it just became dirty. */
    if (!WasDirty && CacheSeg->Dirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }

    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&CacheSeg->Mutex, FALSE);

    return(STATUS_SUCCESS);
}
453
454 /* Returns with Cache Segment Lock Held! */
455 PCACHE_SEGMENT
456 NTAPI
457 CcRosLookupCacheSegment (
458 PBCB Bcb,
459 ULONG FileOffset)
460 {
461 PLIST_ENTRY current_entry;
462 PCACHE_SEGMENT current;
463 KIRQL oldIrql;
464
465 ASSERT(Bcb);
466
467 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %lu)\n", Bcb, FileOffset);
468
469 KeAcquireGuardedMutex(&ViewLock);
470 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
471
472 current_entry = Bcb->BcbSegmentListHead.Flink;
473 while (current_entry != &Bcb->BcbSegmentListHead)
474 {
475 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
476 BcbSegmentListEntry);
477 if (IsPointInSegment(current->FileOffset, VACB_MAPPING_GRANULARITY,
478 FileOffset))
479 {
480 CcRosCacheSegmentIncRefCount(current);
481 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
482 KeReleaseGuardedMutex(&ViewLock);
483 KeWaitForSingleObject(&current->Mutex,
484 Executive,
485 KernelMode,
486 FALSE,
487 NULL);
488 return current;
489 }
490 if (current->FileOffset > FileOffset)
491 break;
492 current_entry = current_entry->Flink;
493 }
494
495 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
496 KeReleaseGuardedMutex(&ViewLock);
497
498 return NULL;
499 }
500
NTSTATUS
NTAPI
CcRosMarkDirtyCacheSegment (
    PBCB Bcb,
    ULONG FileOffset)
/*
 * FUNCTION: Mark the cache segment covering FileOffset as dirty, putting it
 * on the dirty list if it was clean. Bugchecks if no segment maps the offset.
 */
{
    PCACHE_SEGMENT CacheSeg;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %lu)\n", Bcb, FileOffset);

    /* Lookup returns the segment referenced and with its mutex held. */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);

    if (!CacheSeg->Dirty)
    {
        /* Newly dirty: enlist it and keep the lookup reference on behalf
         * of the dirty list (dirty segments always hold one reference). */
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }
    else
    {
        /* Already dirty: the dirty list has its reference, so drop ours. */
        CcRosCacheSegmentDecRefCount(CacheSeg);
    }

    /* Move to the tail of the LRU list */
    RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
    InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);

    CacheSeg->Dirty = TRUE;

    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&CacheSeg->Mutex, FALSE);

    return(STATUS_SUCCESS);
}
545
NTSTATUS
NTAPI
CcRosUnmapCacheSegment (
    PBCB Bcb,
    ULONG FileOffset,
    BOOLEAN NowDirty)
/*
 * FUNCTION: Drop one mapping of the cache segment covering FileOffset,
 * optionally marking it dirty. Returns STATUS_UNSUCCESSFUL if no segment
 * maps the offset.
 */
{
    PCACHE_SEGMENT CacheSeg;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %lu, NowDirty %u)\n",
           Bcb, FileOffset, NowDirty);

    /* Lookup returns the segment referenced and with its mutex held. */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        return(STATUS_UNSUCCESSFUL);
    }

    KeAcquireGuardedMutex(&ViewLock);
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);

    /* Dirty is sticky: once set it stays set until flushed. */
    WasDirty = CacheSeg->Dirty;
    CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;

    CacheSeg->MappedCount--;

    if (!WasDirty && NowDirty)
    {
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
    }

    /* Drop the lookup reference... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ...keep one for the dirty list if the segment just became dirty... */
    if (!WasDirty && NowDirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    /* ...and release the mapping reference once nothing maps it anymore. */
    if (CacheSeg->MappedCount == 0)
    {
        CcRosCacheSegmentDecRefCount(CacheSeg);
    }

    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&CacheSeg->Mutex, FALSE);

    return(STATUS_SUCCESS);
}
598
599 static
600 NTSTATUS
601 CcRosCreateCacheSegment (
602 PBCB Bcb,
603 ULONG FileOffset,
604 PCACHE_SEGMENT* CacheSeg)
605 {
606 PCACHE_SEGMENT current;
607 PCACHE_SEGMENT previous;
608 PLIST_ENTRY current_entry;
609 NTSTATUS Status;
610 KIRQL oldIrql;
611 #ifdef CACHE_BITMAP
612 ULONG StartingOffset;
613 #endif
614 PHYSICAL_ADDRESS BoundaryAddressMultiple;
615
616 ASSERT(Bcb);
617
618 DPRINT("CcRosCreateCacheSegment()\n");
619
620 BoundaryAddressMultiple.QuadPart = 0;
621 if (FileOffset >= Bcb->FileSize.u.LowPart)
622 {
623 CacheSeg = NULL;
624 return STATUS_INVALID_PARAMETER;
625 }
626
627 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
628 current->Valid = FALSE;
629 current->Dirty = FALSE;
630 current->PageOut = FALSE;
631 current->FileOffset = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
632 current->Bcb = Bcb;
633 #if DBG
634 if ( Bcb->Trace )
635 {
636 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
637 }
638 #endif
639 current->MappedCount = 0;
640 current->DirtySegmentListEntry.Flink = NULL;
641 current->DirtySegmentListEntry.Blink = NULL;
642 current->ReferenceCount = 1;
643 KeInitializeMutex(&current->Mutex, 0);
644 KeWaitForSingleObject(&current->Mutex,
645 Executive,
646 KernelMode,
647 FALSE,
648 NULL);
649 KeAcquireGuardedMutex(&ViewLock);
650
651 *CacheSeg = current;
652 /* There is window between the call to CcRosLookupCacheSegment
653 * and CcRosCreateCacheSegment. We must check if a segment on
654 * the fileoffset exist. If there exist a segment, we release
655 * our new created segment and return the existing one.
656 */
657 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
658 current_entry = Bcb->BcbSegmentListHead.Flink;
659 previous = NULL;
660 while (current_entry != &Bcb->BcbSegmentListHead)
661 {
662 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
663 BcbSegmentListEntry);
664 if (IsPointInSegment(current->FileOffset, VACB_MAPPING_GRANULARITY,
665 FileOffset))
666 {
667 CcRosCacheSegmentIncRefCount(current);
668 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
669 #if DBG
670 if ( Bcb->Trace )
671 {
672 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
673 Bcb,
674 (*CacheSeg),
675 current );
676 }
677 #endif
678 KeReleaseMutex(&(*CacheSeg)->Mutex, FALSE);
679 KeReleaseGuardedMutex(&ViewLock);
680 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
681 *CacheSeg = current;
682 KeWaitForSingleObject(&current->Mutex,
683 Executive,
684 KernelMode,
685 FALSE,
686 NULL);
687 return STATUS_SUCCESS;
688 }
689 if (current->FileOffset < FileOffset)
690 {
691 ASSERT(previous == NULL ||
692 previous->FileOffset < current->FileOffset);
693 previous = current;
694 }
695 if (current->FileOffset > FileOffset)
696 break;
697 current_entry = current_entry->Flink;
698 }
699 /* There was no existing segment. */
700 current = *CacheSeg;
701 if (previous)
702 {
703 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
704 }
705 else
706 {
707 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
708 }
709 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
710 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
711 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
712 KeReleaseGuardedMutex(&ViewLock);
713 #ifdef CACHE_BITMAP
714 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
715
716 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, VACB_MAPPING_GRANULARITY / PAGE_SIZE, CiCacheSegMappingRegionHint);
717
718 if (StartingOffset == 0xffffffff)
719 {
720 DPRINT1("Out of CacheSeg mapping space\n");
721 KeBugCheck(CACHE_MANAGER);
722 }
723
724 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
725
726 if (CiCacheSegMappingRegionHint == StartingOffset)
727 {
728 CiCacheSegMappingRegionHint += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
729 }
730
731 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
732 #else
733 MmLockAddressSpace(MmGetKernelAddressSpace());
734 current->BaseAddress = NULL;
735 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
736 0, // nothing checks for cache_segment mareas, so set to 0
737 &current->BaseAddress,
738 VACB_MAPPING_GRANULARITY,
739 PAGE_READWRITE,
740 (PMEMORY_AREA*)&current->MemoryArea,
741 FALSE,
742 0,
743 BoundaryAddressMultiple);
744 MmUnlockAddressSpace(MmGetKernelAddressSpace());
745 if (!NT_SUCCESS(Status))
746 {
747 KeBugCheck(CACHE_MANAGER);
748 }
749 #endif
750
751 /* Create a virtual mapping for this memory area */
752 MI_SET_USAGE(MI_USAGE_CACHE);
753 #if MI_TRACE_PFNS
754 PWCHAR pos = NULL;
755 ULONG len = 0;
756 if ((Bcb->FileObject) && (Bcb->FileObject->FileName.Buffer))
757 {
758 pos = wcsrchr(Bcb->FileObject->FileName.Buffer, '\\');
759 len = wcslen(pos) * sizeof(WCHAR);
760 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
761 }
762 #endif
763
764 MmMapMemoryArea(current->BaseAddress, VACB_MAPPING_GRANULARITY,
765 MC_CACHE, PAGE_READWRITE);
766
767 return(STATUS_SUCCESS);
768 }
769
NTSTATUS
NTAPI
CcRosGetCacheSegmentChain (
    PBCB Bcb,
    ULONG FileOffset,
    ULONG Length,
    PCACHE_SEGMENT* CacheSeg)
/*
 * FUNCTION: Build a NextInChain-linked list of cache segments covering
 * [FileOffset, FileOffset + Length), creating segments where none exist.
 * *CacheSeg receives the head of the chain; each segment is returned
 * referenced and with its mutex held.
 */
{
    PCACHE_SEGMENT current;
    ULONG i;
    PCACHE_SEGMENT* CacheSegList;
    PCACHE_SEGMENT Previous = NULL;

    ASSERT(Bcb);

    DPRINT("CcRosGetCacheSegmentChain()\n");

    /* Work in whole segments. */
    Length = ROUND_UP(Length, VACB_MAPPING_GRANULARITY);

    /* Temporary per-segment pointer table on the kernel stack. */
    CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
                           (Length / VACB_MAPPING_GRANULARITY));

    /*
     * Look for a cache segment already mapping the same data.
     */
    for (i = 0; i < (Length / VACB_MAPPING_GRANULARITY); i++)
    {
        ULONG CurrentOffset = FileOffset + (i * VACB_MAPPING_GRANULARITY);
        current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
        if (current != NULL)
        {
            KeAcquireGuardedMutex(&ViewLock);

            /* Move to tail of LRU list */
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);

            KeReleaseGuardedMutex(&ViewLock);

            CacheSegList[i] = current;
        }
        else
        {
            /* NOTE(review): the create status is ignored here; on failure
             * 'current' is NULL/undefined and ends up in the chain. Needs
             * error propagation plus unwinding of already-held segments. */
            CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
            CacheSegList[i] = current;
        }
    }

    /* Thread the collected segments together via NextInChain. */
    for (i = 0; i < (Length / VACB_MAPPING_GRANULARITY); i++)
    {
        if (i == 0)
        {
            *CacheSeg = CacheSegList[i];
            Previous = CacheSegList[i];
        }
        else
        {
            Previous->NextInChain = CacheSegList[i];
            Previous = CacheSegList[i];
        }
    }
    /* Length was rounded up, so at least one segment must have been chained. */
    ASSERT(Previous);
    Previous->NextInChain = NULL;

    return(STATUS_SUCCESS);
}
836
837 NTSTATUS
838 NTAPI
839 CcRosGetCacheSegment (
840 PBCB Bcb,
841 ULONG FileOffset,
842 PULONG BaseOffset,
843 PVOID* BaseAddress,
844 PBOOLEAN UptoDate,
845 PCACHE_SEGMENT* CacheSeg)
846 {
847 PCACHE_SEGMENT current;
848 NTSTATUS Status;
849
850 ASSERT(Bcb);
851
852 DPRINT("CcRosGetCacheSegment()\n");
853
854 /*
855 * Look for a cache segment already mapping the same data.
856 */
857 current = CcRosLookupCacheSegment(Bcb, FileOffset);
858 if (current == NULL)
859 {
860 /*
861 * Otherwise create a new segment.
862 */
863 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
864 if (!NT_SUCCESS(Status))
865 {
866 return Status;
867 }
868 }
869
870 KeAcquireGuardedMutex(&ViewLock);
871
872 /* Move to the tail of the LRU list */
873 RemoveEntryList(&current->CacheSegmentLRUListEntry);
874 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
875
876 KeReleaseGuardedMutex(&ViewLock);
877
878 /*
879 * Return information about the segment to the caller.
880 */
881 *UptoDate = current->Valid;
882 *BaseAddress = current->BaseAddress;
883 DPRINT("*BaseAddress %p\n", *BaseAddress);
884 *CacheSeg = current;
885 *BaseOffset = current->FileOffset;
886 return(STATUS_SUCCESS);
887 }
888
889 NTSTATUS
890 NTAPI
891 CcRosRequestCacheSegment (
892 PBCB Bcb,
893 ULONG FileOffset,
894 PVOID* BaseAddress,
895 PBOOLEAN UptoDate,
896 PCACHE_SEGMENT* CacheSeg)
897 /*
898 * FUNCTION: Request a page mapping for a BCB
899 */
900 {
901 ULONG BaseOffset;
902
903 ASSERT(Bcb);
904
905 if ((FileOffset % VACB_MAPPING_GRANULARITY) != 0)
906 {
907 DPRINT1("Bad fileoffset %x should be multiple of %x",
908 FileOffset, VACB_MAPPING_GRANULARITY);
909 KeBugCheck(CACHE_MANAGER);
910 }
911
912 return(CcRosGetCacheSegment(Bcb,
913 FileOffset,
914 &BaseOffset,
915 BaseAddress,
916 UptoDate,
917 CacheSeg));
918 }
#ifdef CACHE_BITMAP
#else
static
VOID
CcFreeCachePage (
    PVOID Context,
    MEMORY_AREA* MemoryArea,
    PVOID Address,
    PFN_NUMBER Page,
    SWAPENTRY SwapEntry,
    BOOLEAN Dirty)
/*
 * Per-page callback passed to MmFreeMemoryArea when tearing down a cache
 * segment's memory area: returns each backing physical page to the MC_CACHE
 * memory consumer. Cache pages are never swapped, hence SwapEntry == 0.
 */
{
    ASSERT(SwapEntry == 0);
    if (Page != 0)
    {
        /* The cache must hold the only reference at teardown time. */
        ASSERT(MmGetReferenceCountPage(Page) == 1);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }
}
#endif
NTSTATUS
CcRosInternalFreeCacheSegment (
    PCACHE_SEGMENT CacheSeg)
/*
 * FUNCTION: Releases a cache segment associated with a BCB
 * Unmaps and frees the segment's virtual range and backing pages, then
 * returns the CACHE_SEGMENT structure to its lookaside list. The caller
 * must already have unlinked the segment from all lists.
 */
{
#ifdef CACHE_BITMAP
    ULONG i;
    ULONG RegionSize;
    ULONG Base;
    PFN_NUMBER Page;
    KIRQL oldIrql;
#endif
    DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
#if DBG
    if ( CacheSeg->Bcb->Trace )
    {
        DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
    }
#endif
#ifdef CACHE_BITMAP
    RegionSize = VACB_MAPPING_GRANULARITY / PAGE_SIZE;

    /* Unmap all the pages. */
    for (i = 0; i < RegionSize; i++)
    {
        MmDeleteVirtualMapping(NULL,
                               CacheSeg->BaseAddress + (i * PAGE_SIZE),
                               FALSE,
                               NULL,
                               &Page);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }

    KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
    /* Deallocate all the pages used. */
    Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;

    RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);

    /* Let the next allocation start searching at the lowest free slot. */
    CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);

    KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
#else
    /* Memory-area build: tear down the marea; CcFreeCachePage releases each
     * backing physical page. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     CacheSeg->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());
#endif
    ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
    return(STATUS_SUCCESS);
}
994
/*
 * @implemented
 */
VOID
NTAPI
CcFlushCache (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    OUT PIO_STATUS_BLOCK IoStatus)
/*
 * Write any dirty cache segments in the given range back to disk.
 * If FileOffset is NULL the whole cached file (per Bcb->FileSize) is
 * flushed. IoStatus, when supplied, receives STATUS_SUCCESS or the last
 * per-segment flush failure; STATUS_INVALID_PARAMETER when there is no
 * shared cache map.
 */
{
    PBCB Bcb;
    LARGE_INTEGER Offset;
    PCACHE_SEGMENT current;
    NTSTATUS Status;
    KIRQL oldIrql;

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %lu, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
        ASSERT(Bcb);
        if (FileOffset)
        {
            Offset = *FileOffset;
        }
        else
        {
            /* No range given: flush the whole cached file. */
            Offset.QuadPart = (LONGLONG)0;
            Length = Bcb->FileSize.u.LowPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one cache segment at a time. */
        while (Length > 0)
        {
            current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushCacheSegment(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }
                /* Release the mutex and the reference the lookup took. */
                KeReleaseMutex(&current->Mutex, FALSE);

                KeAcquireGuardedMutex(&ViewLock);
                KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
                CcRosCacheSegmentDecRefCount(current);
                KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
                KeReleaseGuardedMutex(&ViewLock);
            }

            /* Advance a full segment, clamping the remaining length. */
            Offset.QuadPart += VACB_MAPPING_GRANULARITY;
            if (Length > VACB_MAPPING_GRANULARITY)
            {
                Length -= VACB_MAPPING_GRANULARITY;
            }
            else
            {
                Length = 0;
            }
        }
    }
    else
    {
        /* No shared cache map: nothing to flush. */
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1076
NTSTATUS
NTAPI
CcRosDeleteFileCache (
    PFILE_OBJECT FileObject,
    PBCB Bcb)
/*
 * FUNCTION: Releases the BCB associated with a file object
 * NOTE: Must be entered with ViewLock held; the lock is temporarily
 * released around the flush and around freeing the segments, and is held
 * again on return.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(Bcb);

    /* Keep the BCB alive while we drop ViewLock to flush. */
    Bcb->RefCount++;
    KeReleaseGuardedMutex(&ViewLock);

    /* Write all dirty data back before tearing the map down. */
    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    Bcb->RefCount--;
    if (Bcb->RefCount == 0)
    {
        /* Nobody re-referenced it during the flush - really delete it. */
        if (Bcb->BcbRemoveListEntry.Flink != NULL)
        {
            RemoveEntryList(&Bcb->BcbRemoveListEntry);
            Bcb->BcbRemoveListEntry.Flink = NULL;
        }

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all cache segments.
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
        while (!IsListEmpty(&Bcb->BcbSegmentListHead))
        {
            /* Unlink each segment from every list and queue it for
             * teardown once the locks are released. */
            current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            if (current->Dirty)
            {
                RemoveEntryList(&current->DirtySegmentListEntry);
                DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
                DPRINT1("Freeing dirty segment\n");
            }
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
        }
#if DBG
        Bcb->Trace = FALSE;
#endif
        KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject (Bcb->FileObject);

        /* Free the queued segments without any locks held. */
        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            CcRosInternalFreeCacheSegment(current);
        }
        ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
        /* Re-enter with ViewLock held, as the caller expects. */
        KeAcquireGuardedMutex(&ViewLock);
    }
    return(STATUS_SUCCESS);
}
1148
1149 VOID
1150 NTAPI
1151 CcRosReferenceCache (
1152 PFILE_OBJECT FileObject)
1153 {
1154 PBCB Bcb;
1155 KeAcquireGuardedMutex(&ViewLock);
1156 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1157 ASSERT(Bcb);
1158 if (Bcb->RefCount == 0)
1159 {
1160 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1161 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1162 Bcb->BcbRemoveListEntry.Flink = NULL;
1163
1164 }
1165 else
1166 {
1167 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1168 }
1169 Bcb->RefCount++;
1170 KeReleaseGuardedMutex(&ViewLock);
1171 }
1172
1173 VOID
1174 NTAPI
1175 CcRosSetRemoveOnClose (
1176 PSECTION_OBJECT_POINTERS SectionObjectPointer)
1177 {
1178 PBCB Bcb;
1179 DPRINT("CcRosSetRemoveOnClose()\n");
1180 KeAcquireGuardedMutex(&ViewLock);
1181 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1182 if (Bcb)
1183 {
1184 Bcb->RemoveOnClose = TRUE;
1185 if (Bcb->RefCount == 0)
1186 {
1187 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1188 }
1189 }
1190 KeReleaseGuardedMutex(&ViewLock);
1191 }
1192
1193
1194 VOID
1195 NTAPI
1196 CcRosDereferenceCache (
1197 PFILE_OBJECT FileObject)
1198 {
1199 PBCB Bcb;
1200 KeAcquireGuardedMutex(&ViewLock);
1201 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1202 ASSERT(Bcb);
1203 if (Bcb->RefCount > 0)
1204 {
1205 Bcb->RefCount--;
1206 if (Bcb->RefCount == 0)
1207 {
1208 MmFreeSectionSegments(Bcb->FileObject);
1209 CcRosDeleteFileCache(FileObject, Bcb);
1210 }
1211 }
1212 KeReleaseGuardedMutex(&ViewLock);
1213 }
1214
1215 NTSTATUS
1216 NTAPI
1217 CcRosReleaseFileCache (
1218 PFILE_OBJECT FileObject)
1219 /*
1220 * FUNCTION: Called by the file system when a handle to a file object
1221 * has been closed.
1222 */
1223 {
1224 PBCB Bcb;
1225
1226 KeAcquireGuardedMutex(&ViewLock);
1227
1228 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1229 {
1230 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1231 if (FileObject->PrivateCacheMap != NULL)
1232 {
1233 FileObject->PrivateCacheMap = NULL;
1234 if (Bcb->RefCount > 0)
1235 {
1236 Bcb->RefCount--;
1237 if (Bcb->RefCount == 0)
1238 {
1239 MmFreeSectionSegments(Bcb->FileObject);
1240 CcRosDeleteFileCache(FileObject, Bcb);
1241 }
1242 }
1243 }
1244 }
1245 KeReleaseGuardedMutex(&ViewLock);
1246 return(STATUS_SUCCESS);
1247 }
1248
1249 NTSTATUS
1250 NTAPI
1251 CcTryToInitializeFileCache (
1252 PFILE_OBJECT FileObject)
1253 {
1254 PBCB Bcb;
1255 NTSTATUS Status;
1256
1257 KeAcquireGuardedMutex(&ViewLock);
1258
1259 ASSERT(FileObject->SectionObjectPointer);
1260 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1261 if (Bcb == NULL)
1262 {
1263 Status = STATUS_UNSUCCESSFUL;
1264 }
1265 else
1266 {
1267 if (FileObject->PrivateCacheMap == NULL)
1268 {
1269 FileObject->PrivateCacheMap = Bcb;
1270 Bcb->RefCount++;
1271 }
1272 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1273 {
1274 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1275 Bcb->BcbRemoveListEntry.Flink = NULL;
1276 }
1277 Status = STATUS_SUCCESS;
1278 }
1279 KeReleaseGuardedMutex(&ViewLock);
1280
1281 return Status;
1282 }
1283
1284
1285 NTSTATUS
1286 NTAPI
1287 CcRosInitializeFileCache (
1288 PFILE_OBJECT FileObject,
1289 ULONG CacheSegmentSize,
1290 PCACHE_MANAGER_CALLBACKS CallBacks,
1291 PVOID LazyWriterContext)
1292 /*
1293 * FUNCTION: Initializes a BCB for a file object
1294 */
1295 {
1296 PBCB Bcb;
1297
1298 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1299 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %lu)\n",
1300 FileObject, Bcb, CacheSegmentSize);
1301
1302 KeAcquireGuardedMutex(&ViewLock);
1303 if (Bcb == NULL)
1304 {
1305 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1306 if (Bcb == NULL)
1307 {
1308 KeReleaseGuardedMutex(&ViewLock);
1309 return(STATUS_UNSUCCESSFUL);
1310 }
1311 memset(Bcb, 0, sizeof(BCB));
1312 ObReferenceObjectByPointer(FileObject,
1313 FILE_ALL_ACCESS,
1314 NULL,
1315 KernelMode);
1316 Bcb->FileObject = FileObject;
1317 Bcb->CacheSegmentSize = CacheSegmentSize;
1318 Bcb->Callbacks = CallBacks;
1319 Bcb->LazyWriteContext = LazyWriterContext;
1320 if (FileObject->FsContext)
1321 {
1322 Bcb->AllocationSize =
1323 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1324 Bcb->FileSize =
1325 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1326 }
1327 KeInitializeSpinLock(&Bcb->BcbLock);
1328 InitializeListHead(&Bcb->BcbSegmentListHead);
1329 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1330 }
1331 if (FileObject->PrivateCacheMap == NULL)
1332 {
1333 FileObject->PrivateCacheMap = Bcb;
1334 Bcb->RefCount++;
1335 }
1336 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1337 {
1338 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1339 Bcb->BcbRemoveListEntry.Flink = NULL;
1340 }
1341 KeReleaseGuardedMutex(&ViewLock);
1342
1343 return(STATUS_SUCCESS);
1344 }
1345
1346 /*
1347 * @implemented
1348 */
1349 PFILE_OBJECT
1350 NTAPI
1351 CcGetFileObjectFromSectionPtrs (
1352 IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1353 {
1354 PBCB Bcb;
1355 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1356 {
1357 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1358 ASSERT(Bcb);
1359 return Bcb->FileObject;
1360 }
1361 return NULL;
1362 }
1363
1364 VOID
1365 INIT_FUNCTION
1366 NTAPI
1367 CcInitView (
1368 VOID)
1369 {
1370 #ifdef CACHE_BITMAP
1371 PMEMORY_AREA marea;
1372 PVOID Buffer;
1373 PHYSICAL_ADDRESS BoundaryAddressMultiple;
1374 #endif
1375
1376 DPRINT("CcInitView()\n");
1377 #ifdef CACHE_BITMAP
1378 BoundaryAddressMultiple.QuadPart = 0;
1379 CiCacheSegMappingRegionHint = 0;
1380 CiCacheSegMappingRegionBase = NULL;
1381
1382 MmLockAddressSpace(MmGetKernelAddressSpace());
1383
1384 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1385 MEMORY_AREA_CACHE_SEGMENT,
1386 &CiCacheSegMappingRegionBase,
1387 CI_CACHESEG_MAPPING_REGION_SIZE,
1388 PAGE_READWRITE,
1389 &marea,
1390 FALSE,
1391 0,
1392 BoundaryAddressMultiple);
1393 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1394 if (!NT_SUCCESS(Status))
1395 {
1396 KeBugCheck(CACHE_MANAGER);
1397 }
1398
1399 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1400 if (!Buffer)
1401 {
1402 KeBugCheck(CACHE_MANAGER);
1403 }
1404
1405 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1406 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1407
1408 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1409 #endif
1410 InitializeListHead(&CacheSegmentListHead);
1411 InitializeListHead(&DirtySegmentListHead);
1412 InitializeListHead(&CacheSegmentLRUListHead);
1413 InitializeListHead(&ClosedListHead);
1414 KeInitializeGuardedMutex(&ViewLock);
1415 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1416 NULL,
1417 NULL,
1418 0,
1419 sizeof(INTERNAL_BCB),
1420 TAG_IBCB,
1421 20);
1422 ExInitializeNPagedLookasideList (&BcbLookasideList,
1423 NULL,
1424 NULL,
1425 0,
1426 sizeof(BCB),
1427 TAG_BCB,
1428 20);
1429 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1430 NULL,
1431 NULL,
1432 0,
1433 sizeof(CACHE_SEGMENT),
1434 TAG_CSEG,
1435 20);
1436
1437 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1438
1439 CcInitCacheZeroPage();
1440
1441 }
1442
1443 /* EOF */
1444
1445
1446
1447
1448
1449
1450
1451