b6bdab50c8460bae4a59dd8e30a06fa914b1f22c
[reactos.git] / reactos / ntoskrnl / cc / view.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
18 * (1) If caching for the FCB hasn't been initiated then do so by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
26 * fails then call CcReleaseCachePage with VALID as FALSE and return an error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 /*
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocate/deallocate space from this block
47 * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
49 */
50 //#define CACHE_BITMAP
51
/* Segments holding data that has not yet been written back to disk. */
static LIST_ENTRY DirtySegmentListHead;
/* All cache segments in the system. */
static LIST_ENTRY CacheSegmentListHead;
/* LRU ordering of cache segments; the tail is the most recently used. */
static LIST_ENTRY CacheSegmentLRUListHead;
/* presumably BCBs with zero references awaiting deferred close (paired
 * with Bcb->BcbRemoveListEntry) — TODO confirm against the rest of the file */
static LIST_ENTRY ClosedListHead;
/* Number of pages currently on the dirty list (updated under ViewLock). */
ULONG DirtyPageCount=0;

/* Global lock protecting the segment lists and DirtyPageCount above. */
KGUARDED_MUTEX ViewLock;

#ifdef CACHE_BITMAP
#define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)

/* Base of the reserved kernel VA region used for segment mappings. */
static PVOID CiCacheSegMappingRegionBase = NULL;
/* Bitmap of in-use pages within the mapping region. */
static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
/* Search hint: lowest page offset that may still be free. */
static ULONG CiCacheSegMappingRegionHint;
/* Protects the bitmap and the hint. */
static KSPIN_LOCK CiCacheSegMappingRegionLock;
#endif

/* Lookaside lists for fast non-paged allocation of iBCBs, BCBs and
 * cache segments. */
NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST BcbLookasideList;
static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
72
#if DBG
/* Debug build: bump the segment's reference count and, when per-BCB
 * tracing is enabled, log the caller's location and the new state. */
static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    ++cs->ReferenceCount;
    if ( cs->Bcb->Trace )
    {
        DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}
/* Debug build: drop one reference, with the same optional tracing. */
static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    --cs->ReferenceCount;
    if ( cs->Bcb->Trace )
    {
        DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}
/* The macros record the call site so traces point at the real caller. */
#define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
#define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
#else
/* Free build: plain increment/decrement, no tracing overhead. */
#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
#endif
98
99 NTSTATUS
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
101
102
103 /* FUNCTIONS *****************************************************************/
104
105 VOID
106 NTAPI
107 CcRosTraceCacheMap (
108 PBCB Bcb,
109 BOOLEAN Trace )
110 {
111 #if DBG
112 KIRQL oldirql;
113 PLIST_ENTRY current_entry;
114 PCACHE_SEGMENT current;
115
116 if ( !Bcb )
117 return;
118
119 Bcb->Trace = Trace;
120
121 if ( Trace )
122 {
123 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
124
125 KeAcquireGuardedMutex(&ViewLock);
126 KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
127
128 current_entry = Bcb->BcbSegmentListHead.Flink;
129 while (current_entry != &Bcb->BcbSegmentListHead)
130 {
131 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
132 current_entry = current_entry->Flink;
133
134 DPRINT1(" CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
135 current, current->ReferenceCount, current->Dirty, current->PageOut );
136 }
137 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
138 KeReleaseGuardedMutex(&ViewLock);
139 }
140 else
141 {
142 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
143 }
144
145 #else
146 Bcb = Bcb;
147 Trace = Trace;
148 #endif
149 }
150
151 NTSTATUS
152 NTAPI
153 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
154 {
155 NTSTATUS Status;
156 KIRQL oldIrql;
157
158 Status = WriteCacheSegment(CacheSegment);
159 if (NT_SUCCESS(Status))
160 {
161 KeAcquireGuardedMutex(&ViewLock);
162 KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
163
164 CacheSegment->Dirty = FALSE;
165 RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
166 DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
167 CcRosCacheSegmentDecRefCount ( CacheSegment );
168
169 KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
170 KeReleaseGuardedMutex(&ViewLock);
171 }
172
173 return(Status);
174 }
175
NTSTATUS
NTAPI
CcRosFlushDirtyPages(ULONG Target, PULONG Count, BOOLEAN Wait)
/*
 * FUNCTION: Walk the global dirty list and write segments back to disk
 * until roughly Target pages have been flushed.
 * ARGUMENTS:
 *   Target - Number of pages to try to flush.
 *   Count  - Receives the number of pages actually flushed.
 *   Wait   - TRUE: block on the lazy-write lock and the segment mutex;
 *            FALSE: skip segments that are currently busy.
 * RETURNS: Always STATUS_SUCCESS (per-segment failures are logged only).
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    BOOLEAN Locked;
    NTSTATUS Status;
    LARGE_INTEGER ZeroTimeout;

    DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);

    (*Count) = 0;
    /* Zero timeout: used to poll the segment mutex when Wait is FALSE. */
    ZeroTimeout.QuadPart = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    current_entry = DirtySegmentListHead.Flink;
    if (current_entry == &DirtySegmentListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while (current_entry != &DirtySegmentListHead && Target > 0)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    DirtySegmentListEntry);
        current_entry = current_entry->Flink;

        /* Keep the segment alive while we work on it. */
        CcRosCacheSegmentIncRefCount(current);

        /* Let the FSD serialize against its own lazy-write activity. */
        Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
                     current->Bcb->LazyWriteContext, Wait);
        if (!Locked)
        {
            CcRosCacheSegmentDecRefCount(current);
            continue;
        }

        Status = KeWaitForSingleObject(&current->Mutex,
                                       Executive,
                                       KernelMode,
                                       FALSE,
                                       Wait ? NULL : &ZeroTimeout);
        if (Status != STATUS_SUCCESS)
        {
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);
            CcRosCacheSegmentDecRefCount(current);
            continue;
        }

        ASSERT(current->Dirty);
        /* NOTE(review): our reference taken above plus the dirty-list
         * reference would already make this > 1 for an otherwise idle
         * segment — verify the intended reference accounting here. */
        if (current->ReferenceCount > 1)
        {
            KeReleaseMutex(&current->Mutex, 0);
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);
            CcRosCacheSegmentDecRefCount(current);
            continue;
        }

        PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;

        /* Drop the view lock for the duration of the disk write. */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushCacheSegment(current);

        KeReleaseMutex(&current->Mutex, 0);
        current->Bcb->Callbacks->ReleaseFromLazyWrite(
            current->Bcb->LazyWriteContext);

        KeAcquireGuardedMutex(&ViewLock);
        CcRosCacheSegmentDecRefCount(current);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
        {
            DPRINT1("CC: Failed to flush cache segment.\n");
        }
        else
        {
            (*Count) += PagesPerSegment;
            Target -= PagesPerSegment;
        }

        /* The list may have changed while the lock was dropped:
         * restart from the head. */
        current_entry = DirtySegmentListHead.Flink;
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return(STATUS_SUCCESS);
}
272
NTSTATUS
CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 *   Target - The number of pages to be freed.
 *   Priority - The priority of free (currently unused).
 *   NrFreed - Points to a variable where the number of pages
 *             actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;
    PFN_NUMBER Page;
    ULONG i;
    /* Dirty pages are flushed at most once per call (see below). */
    BOOLEAN FlushedPages = FALSE;

    DPRINT("CcRosTrimCache(Target %d)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Scan from the least recently used end of the LRU list. */
    current_entry = CacheSegmentLRUListHead.Flink;
    while (current_entry != &CacheSegmentLRUListHead)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    CacheSegmentLRUListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);

        /* Reference the cache segment */
        CcRosCacheSegmentIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            /* NOTE(review): current_entry was captured before the locks
             * are dropped here; the LRU list can change while they are
             * released, leaving the cursor stale — verify. */
            KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the segment */
            for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
        }

        /* Dereference the cache segment */
        CcRosCacheSegmentDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Detach from all lists; the actual free happens below,
             * after all locks are released. */
            RemoveEntryList(&current->BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
            PagesFreed = min(PagesPerSegment, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if (Target > 0 && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed, FALSE);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %d dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Free the collected segments outside of any lock. */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    BcbSegmentListEntry);
        CcRosInternalFreeCacheSegment(current);
    }

    DPRINT("Evicted %d cache pages\n", (*NrFreed));

    return(STATUS_SUCCESS);
}
392
NTSTATUS
NTAPI
CcRosReleaseCacheSegment(PBCB Bcb,
                         PCACHE_SEGMENT CacheSeg,
                         BOOLEAN Valid,
                         BOOLEAN Dirty,
                         BOOLEAN Mapped)
/*
 * FUNCTION: Release a cache segment previously obtained through
 * CcRosGetCacheSegment/CcRosRequestCacheSegment: record its new
 * valid/dirty/mapped state, then drop the caller's reference and mutex.
 */
{
    BOOLEAN WasDirty = CacheSeg->Dirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
           Bcb, CacheSeg, Valid);

    CacheSeg->Valid = Valid;
    /* Dirty only accumulates; it is cleared by CcRosFlushCacheSegment. */
    CacheSeg->Dirty = CacheSeg->Dirty || Dirty;

    KeAcquireGuardedMutex(&ViewLock);
    if (!WasDirty && CacheSeg->Dirty)
    {
        /* Segment just became dirty: enter the global dirty bookkeeping. */
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
    }

    if (Mapped)
    {
        CacheSeg->MappedCount++;
    }
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
    /* Drop the caller's reference... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ...but keep one on behalf of the first mapping... */
    if (Mapped && CacheSeg->MappedCount == 1)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    /* ...and one on behalf of the dirty list (dropped again in
     * CcRosFlushCacheSegment once the data is written back). */
    if (!WasDirty && CacheSeg->Dirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    KeReleaseMutex(&CacheSeg->Mutex, 0);

    return(STATUS_SUCCESS);
}
439
440 /* Returns with Cache Segment Lock Held! */
441 PCACHE_SEGMENT
442 NTAPI
443 CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
444 {
445 PLIST_ENTRY current_entry;
446 PCACHE_SEGMENT current;
447 KIRQL oldIrql;
448
449 ASSERT(Bcb);
450
451 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);
452
453 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
454 current_entry = Bcb->BcbSegmentListHead.Flink;
455 while (current_entry != &Bcb->BcbSegmentListHead)
456 {
457 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
458 BcbSegmentListEntry);
459 if (current->FileOffset <= FileOffset &&
460 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
461 {
462 CcRosCacheSegmentIncRefCount(current);
463 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
464 KeWaitForSingleObject(&current->Mutex,
465 Executive,
466 KernelMode,
467 FALSE,
468 NULL);
469 return(current);
470 }
471 current_entry = current_entry->Flink;
472 }
473 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
474 return(NULL);
475 }
476
NTSTATUS
NTAPI
CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
/*
 * FUNCTION: Mark the cache segment covering FileOffset as dirty so the
 * lazy writer will eventually flush it. Bugchecks if no segment maps
 * the given offset.
 */
{
    PCACHE_SEGMENT CacheSeg;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);

    /* Lookup returns the segment referenced and with its mutex held. */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        KeBugCheck(CACHE_MANAGER);
    }
    if (!CacheSeg->Dirty)
    {
        /* Newly dirty: enter the dirty list. The lookup reference is
         * deliberately NOT dropped — it becomes the dirty-list
         * reference released later by CcRosFlushCacheSegment. */
        KeAcquireGuardedMutex(&ViewLock);
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        /* Already dirty: just drop the lookup reference. */
        KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
        CcRosCacheSegmentDecRefCount(CacheSeg);
        KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    }

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
    InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    /* NOTE(review): Dirty is set only here, after the segment was
     * already inserted into the dirty list above — verify no flusher
     * can observe the segment on the list with Dirty still FALSE. */
    CacheSeg->Dirty = TRUE;
    KeReleaseMutex(&CacheSeg->Mutex, 0);

    return(STATUS_SUCCESS);
}
520
NTSTATUS
NTAPI
CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
/*
 * FUNCTION: Drop one mapping of the cache segment covering FileOffset,
 * optionally marking the segment dirty at the same time.
 * RETURNS: STATUS_UNSUCCESSFUL if no segment maps the offset.
 */
{
    PCACHE_SEGMENT CacheSeg;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
           Bcb, FileOffset, NowDirty);

    /* Lookup returns the segment referenced and with its mutex held. */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        return(STATUS_UNSUCCESSFUL);
    }

    WasDirty = CacheSeg->Dirty;
    CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;

    CacheSeg->MappedCount--;

    if (!WasDirty && NowDirty)
    {
        /* Newly dirty: enter the global dirty bookkeeping. */
        KeAcquireGuardedMutex(&ViewLock);
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
        KeReleaseGuardedMutex(&ViewLock);
    }

    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
    /* Drop the lookup reference... */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    /* ...keep one for the dirty list if the segment just became dirty... */
    if (!WasDirty && NowDirty)
    {
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    /* ...and drop the mapping reference when the last mapping is gone. */
    if (CacheSeg->MappedCount == 0)
    {
        CcRosCacheSegmentDecRefCount(CacheSeg);
    }
    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);

    KeReleaseMutex(&CacheSeg->Mutex, 0);

    return(STATUS_SUCCESS);
}
569
570 static
571 NTSTATUS
572 CcRosCreateCacheSegment(PBCB Bcb,
573 ULONG FileOffset,
574 PCACHE_SEGMENT* CacheSeg)
575 {
576 PCACHE_SEGMENT current;
577 PCACHE_SEGMENT previous;
578 PLIST_ENTRY current_entry;
579 NTSTATUS Status;
580 KIRQL oldIrql;
581 #ifdef CACHE_BITMAP
582 ULONG StartingOffset;
583 #endif
584 PHYSICAL_ADDRESS BoundaryAddressMultiple;
585
586 ASSERT(Bcb);
587
588 DPRINT("CcRosCreateCacheSegment()\n");
589
590 BoundaryAddressMultiple.QuadPart = 0;
591 if (FileOffset >= Bcb->FileSize.u.LowPart)
592 {
593 CacheSeg = NULL;
594 return STATUS_INVALID_PARAMETER;
595 }
596
597 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
598 current->Valid = FALSE;
599 current->Dirty = FALSE;
600 current->PageOut = FALSE;
601 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
602 current->Bcb = Bcb;
603 #if DBG
604 if ( Bcb->Trace )
605 {
606 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
607 }
608 #endif
609 current->MappedCount = 0;
610 current->DirtySegmentListEntry.Flink = NULL;
611 current->DirtySegmentListEntry.Blink = NULL;
612 current->ReferenceCount = 1;
613 KeInitializeMutex(&current->Mutex, 0);
614 KeWaitForSingleObject(&current->Mutex,
615 Executive,
616 KernelMode,
617 FALSE,
618 NULL);
619 KeAcquireGuardedMutex(&ViewLock);
620
621 *CacheSeg = current;
622 /* There is window between the call to CcRosLookupCacheSegment
623 * and CcRosCreateCacheSegment. We must check if a segment on
624 * the fileoffset exist. If there exist a segment, we release
625 * our new created segment and return the existing one.
626 */
627 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
628 current_entry = Bcb->BcbSegmentListHead.Flink;
629 previous = NULL;
630 while (current_entry != &Bcb->BcbSegmentListHead)
631 {
632 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
633 BcbSegmentListEntry);
634 if (current->FileOffset <= FileOffset &&
635 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
636 {
637 CcRosCacheSegmentIncRefCount(current);
638 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
639 #if DBG
640 if ( Bcb->Trace )
641 {
642 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
643 Bcb,
644 (*CacheSeg),
645 current );
646 }
647 #endif
648 KeReleaseMutex(&(*CacheSeg)->Mutex, 0);
649 KeReleaseGuardedMutex(&ViewLock);
650 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
651 *CacheSeg = current;
652 KeWaitForSingleObject(&current->Mutex,
653 Executive,
654 KernelMode,
655 FALSE,
656 NULL);
657 return STATUS_SUCCESS;
658 }
659 if (current->FileOffset < FileOffset)
660 {
661 if (previous == NULL)
662 {
663 previous = current;
664 }
665 else
666 {
667 if (previous->FileOffset < current->FileOffset)
668 {
669 previous = current;
670 }
671 }
672 }
673 current_entry = current_entry->Flink;
674 }
675 /* There was no existing segment. */
676 current = *CacheSeg;
677 if (previous)
678 {
679 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
680 }
681 else
682 {
683 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
684 }
685 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
686 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
687 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
688 KeReleaseGuardedMutex(&ViewLock);
689 #ifdef CACHE_BITMAP
690 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
691
692 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
693
694 if (StartingOffset == 0xffffffff)
695 {
696 DPRINT1("Out of CacheSeg mapping space\n");
697 KeBugCheck(CACHE_MANAGER);
698 }
699
700 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
701
702 if (CiCacheSegMappingRegionHint == StartingOffset)
703 {
704 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
705 }
706
707 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
708 #else
709 MmLockAddressSpace(MmGetKernelAddressSpace());
710 current->BaseAddress = NULL;
711 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
712 0, // nothing checks for cache_segment mareas, so set to 0
713 &current->BaseAddress,
714 Bcb->CacheSegmentSize,
715 PAGE_READWRITE,
716 (PMEMORY_AREA*)&current->MemoryArea,
717 FALSE,
718 0,
719 BoundaryAddressMultiple);
720 MmUnlockAddressSpace(MmGetKernelAddressSpace());
721 if (!NT_SUCCESS(Status))
722 {
723 KeBugCheck(CACHE_MANAGER);
724 }
725 #endif
726
727 /* Create a virtual mapping for this memory area */
728 MI_SET_USAGE(MI_USAGE_CACHE);
729 #if MI_TRACE_PFNS
730 PWCHAR pos = NULL;
731 ULONG len = 0;
732 if ((Bcb->FileObject) && (Bcb->FileObject->FileName.Buffer))
733 {
734 pos = wcsrchr(Bcb->FileObject->FileName.Buffer, '\\');
735 len = wcslen(pos) * sizeof(WCHAR);
736 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
737 }
738 #endif
739
740 MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
741 MC_CACHE, PAGE_READWRITE);
742
743 return(STATUS_SUCCESS);
744 }
745
746 NTSTATUS
747 NTAPI
748 CcRosGetCacheSegmentChain(PBCB Bcb,
749 ULONG FileOffset,
750 ULONG Length,
751 PCACHE_SEGMENT* CacheSeg)
752 {
753 PCACHE_SEGMENT current;
754 ULONG i;
755 PCACHE_SEGMENT* CacheSegList;
756 PCACHE_SEGMENT Previous = NULL;
757
758 ASSERT(Bcb);
759
760 DPRINT("CcRosGetCacheSegmentChain()\n");
761
762 Length = ROUND_UP(Length, Bcb->CacheSegmentSize);
763
764 CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
765 (Length / Bcb->CacheSegmentSize));
766
767 /*
768 * Look for a cache segment already mapping the same data.
769 */
770 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
771 {
772 ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
773 current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
774 if (current != NULL)
775 {
776 KeAcquireGuardedMutex(&ViewLock);
777
778 /* Move to tail of LRU list */
779 RemoveEntryList(&current->CacheSegmentLRUListEntry);
780 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
781
782 KeReleaseGuardedMutex(&ViewLock);
783
784 CacheSegList[i] = current;
785 }
786 else
787 {
788 CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
789 CacheSegList[i] = current;
790 }
791 }
792
793 for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
794 {
795 if (i == 0)
796 {
797 *CacheSeg = CacheSegList[i];
798 Previous = CacheSegList[i];
799 }
800 else
801 {
802 Previous->NextInChain = CacheSegList[i];
803 Previous = CacheSegList[i];
804 }
805 }
806 ASSERT(Previous);
807 Previous->NextInChain = NULL;
808
809 return(STATUS_SUCCESS);
810 }
811
812 NTSTATUS
813 NTAPI
814 CcRosGetCacheSegment(PBCB Bcb,
815 ULONG FileOffset,
816 PULONG BaseOffset,
817 PVOID* BaseAddress,
818 PBOOLEAN UptoDate,
819 PCACHE_SEGMENT* CacheSeg)
820 {
821 PCACHE_SEGMENT current;
822 NTSTATUS Status;
823
824 ASSERT(Bcb);
825
826 DPRINT("CcRosGetCacheSegment()\n");
827
828 /*
829 * Look for a cache segment already mapping the same data.
830 */
831 current = CcRosLookupCacheSegment(Bcb, FileOffset);
832 if (current == NULL)
833 {
834 /*
835 * Otherwise create a new segment.
836 */
837 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
838 if (!NT_SUCCESS(Status))
839 {
840 return Status;
841 }
842 }
843
844 KeAcquireGuardedMutex(&ViewLock);
845
846 /* Move to the tail of the LRU list */
847 RemoveEntryList(&current->CacheSegmentLRUListEntry);
848 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
849
850 KeReleaseGuardedMutex(&ViewLock);
851
852 /*
853 * Return information about the segment to the caller.
854 */
855 *UptoDate = current->Valid;
856 *BaseAddress = current->BaseAddress;
857 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
858 *CacheSeg = current;
859 *BaseOffset = current->FileOffset;
860 return(STATUS_SUCCESS);
861 }
862
863 NTSTATUS NTAPI
864 CcRosRequestCacheSegment(PBCB Bcb,
865 ULONG FileOffset,
866 PVOID* BaseAddress,
867 PBOOLEAN UptoDate,
868 PCACHE_SEGMENT* CacheSeg)
869 /*
870 * FUNCTION: Request a page mapping for a BCB
871 */
872 {
873 ULONG BaseOffset;
874
875 ASSERT(Bcb);
876
877 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
878 {
879 DPRINT1("Bad fileoffset %x should be multiple of %x",
880 FileOffset, Bcb->CacheSegmentSize);
881 KeBugCheck(CACHE_MANAGER);
882 }
883
884 return(CcRosGetCacheSegment(Bcb,
885 FileOffset,
886 &BaseOffset,
887 BaseAddress,
888 UptoDate,
889 CacheSeg));
890 }
891 #ifdef CACHE_BITMAP
892 #else
893 static VOID
894 CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
895 PFN_NUMBER Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
896 {
897 ASSERT(SwapEntry == 0);
898 if (Page != 0)
899 {
900 ASSERT(MmGetReferenceCountPage(Page) == 1);
901 MmReleasePageMemoryConsumer(MC_CACHE, Page);
902 }
903 }
904 #endif
NTSTATUS
CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
/*
 * FUNCTION: Releases a cache segment associated with a BCB
 * (unmaps/frees its backing pages and returns the segment structure to
 * the lookaside list; the segment must already be off all lists).
 */
{
#ifdef CACHE_BITMAP
    ULONG i;
    ULONG RegionSize;
    ULONG Base;
    PFN_NUMBER Page;
    KIRQL oldIrql;
#endif
    DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
#if DBG
    if ( CacheSeg->Bcb->Trace )
    {
        DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
    }
#endif
#ifdef CACHE_BITMAP
    RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;

    /* Unmap all the pages. */
    for (i = 0; i < RegionSize; i++)
    {
        MmDeleteVirtualMapping(NULL,
                               CacheSeg->BaseAddress + (i * PAGE_SIZE),
                               FALSE,
                               NULL,
                               &Page);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }

    KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
    /* Deallocate all the pages used. */
    /* NOTE(review): pointer arithmetic on PVOID here relies on a GCC
     * extension — confirm the build always uses it. */
    Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;

    RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);

    /* Lower the allocation hint so the freed range can be found again. */
    CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);

    KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
#else
    /* Tear down the memory area; CcFreeCachePage releases each resident
     * page back to the cache memory consumer. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     CacheSeg->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());
#endif
    ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
    return(STATUS_SUCCESS);
}
959
960 NTSTATUS
961 NTAPI
962 CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
963 {
964 NTSTATUS Status;
965 KIRQL oldIrql;
966
967 ASSERT(Bcb);
968
969 DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
970 Bcb, CacheSeg);
971
972 KeAcquireGuardedMutex(&ViewLock);
973 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
974 RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
975 RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
976 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
977 if (CacheSeg->Dirty)
978 {
979 RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
980 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
981
982 }
983 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
984 KeReleaseGuardedMutex(&ViewLock);
985
986 Status = CcRosInternalFreeCacheSegment(CacheSeg);
987 return(Status);
988 }
989
/*
 * @implemented
 */
VOID NTAPI
CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
             IN PLARGE_INTEGER FileOffset OPTIONAL,
             IN ULONG Length,
             OUT PIO_STATUS_BLOCK IoStatus)
/*
 * Flush the dirty cache segments of a file to disk. With no FileOffset
 * the whole cached file size is flushed. IoStatus (optional) receives
 * STATUS_SUCCESS or the last per-segment flush failure.
 */
{
    PBCB Bcb;
    LARGE_INTEGER Offset;
    PCACHE_SEGMENT current;
    NTSTATUS Status;
    KIRQL oldIrql;

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
        ASSERT(Bcb);
        if (FileOffset)
        {
            Offset = *FileOffset;
        }
        else
        {
            /* No explicit range: flush the whole cached file. */
            Offset.QuadPart = (LONGLONG)0;
            Length = Bcb->FileSize.u.LowPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one cache segment at a time. */
        while (Length > 0)
        {
            current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushCacheSegment(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        IoStatus->Status = Status;
                    }
                }
                /* Release the mutex and reference taken by the lookup. */
                KeReleaseMutex(&current->Mutex, 0);
                KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
                CcRosCacheSegmentDecRefCount(current);
                KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
            }

            Offset.QuadPart += Bcb->CacheSegmentSize;
            if (Length > Bcb->CacheSegmentSize)
            {
                Length -= Bcb->CacheSegmentSize;
            }
            else
            {
                Length = 0;
            }
        }
    }
    else
    {
        /* No cache map for this file: nothing to flush. */
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1066
NTSTATUS
NTAPI
CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
/*
 * FUNCTION: Releases the BCB associated with a file object
 * (NOTE: called with ViewLock held; it is temporarily released around
 * the flush and reacquired before returning).
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(Bcb);

    /* Pin the BCB while ViewLock is dropped for the flush. */
    Bcb->RefCount++;
    KeReleaseGuardedMutex(&ViewLock);

    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    Bcb->RefCount--;
    if (Bcb->RefCount == 0)
    {
        /* Take the BCB off the deferred-close list if it is on it. */
        if (Bcb->BcbRemoveListEntry.Flink != NULL)
        {
            RemoveEntryList(&Bcb->BcbRemoveListEntry);
            Bcb->BcbRemoveListEntry.Flink = NULL;
        }

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all cache segments.
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (!IsListEmpty(&Bcb->BcbSegmentListHead))
        {
            current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            if (current->Dirty)
            {
                /* Should not normally happen: everything was flushed above. */
                RemoveEntryList(&current->DirtySegmentListEntry);
                DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
                DPRINT1("Freeing dirty segment\n");
            }
            /* Collect segments for freeing once all locks are dropped. */
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
        }
#if DBG
        Bcb->Trace = FALSE;
#endif
        KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);

        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject (Bcb->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            CcRosInternalFreeCacheSegment(current);
        }
        ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
        /* Reacquire ViewLock so the caller sees it held, as on entry. */
        KeAcquireGuardedMutex(&ViewLock);
    }
    return(STATUS_SUCCESS);
}
1137
1138 VOID
1139 NTAPI
1140 CcRosReferenceCache(PFILE_OBJECT FileObject)
1141 {
1142 PBCB Bcb;
1143 KeAcquireGuardedMutex(&ViewLock);
1144 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1145 ASSERT(Bcb);
1146 if (Bcb->RefCount == 0)
1147 {
1148 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1149 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1150 Bcb->BcbRemoveListEntry.Flink = NULL;
1151
1152 }
1153 else
1154 {
1155 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1156 }
1157 Bcb->RefCount++;
1158 KeReleaseGuardedMutex(&ViewLock);
1159 }
1160
1161 VOID
1162 NTAPI
1163 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
1164 {
1165 PBCB Bcb;
1166 DPRINT("CcRosSetRemoveOnClose()\n");
1167 KeAcquireGuardedMutex(&ViewLock);
1168 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1169 if (Bcb)
1170 {
1171 Bcb->RemoveOnClose = TRUE;
1172 if (Bcb->RefCount == 0)
1173 {
1174 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1175 }
1176 }
1177 KeReleaseGuardedMutex(&ViewLock);
1178 }
1179
1180
1181 VOID
1182 NTAPI
1183 CcRosDereferenceCache(PFILE_OBJECT FileObject)
1184 {
1185 PBCB Bcb;
1186 KeAcquireGuardedMutex(&ViewLock);
1187 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1188 ASSERT(Bcb);
1189 if (Bcb->RefCount > 0)
1190 {
1191 Bcb->RefCount--;
1192 if (Bcb->RefCount == 0)
1193 {
1194 MmFreeSectionSegments(Bcb->FileObject);
1195 CcRosDeleteFileCache(FileObject, Bcb);
1196 }
1197 }
1198 KeReleaseGuardedMutex(&ViewLock);
1199 }
1200
1201 NTSTATUS NTAPI
1202 CcRosReleaseFileCache(PFILE_OBJECT FileObject)
1203 /*
1204 * FUNCTION: Called by the file system when a handle to a file object
1205 * has been closed.
1206 */
1207 {
1208 PBCB Bcb;
1209
1210 KeAcquireGuardedMutex(&ViewLock);
1211
1212 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1213 {
1214 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1215 if (FileObject->PrivateCacheMap != NULL)
1216 {
1217 FileObject->PrivateCacheMap = NULL;
1218 if (Bcb->RefCount > 0)
1219 {
1220 Bcb->RefCount--;
1221 if (Bcb->RefCount == 0)
1222 {
1223 MmFreeSectionSegments(Bcb->FileObject);
1224 CcRosDeleteFileCache(FileObject, Bcb);
1225 }
1226 }
1227 }
1228 }
1229 KeReleaseGuardedMutex(&ViewLock);
1230 return(STATUS_SUCCESS);
1231 }
1232
1233 NTSTATUS
1234 NTAPI
1235 CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
1236 {
1237 PBCB Bcb;
1238 NTSTATUS Status;
1239
1240 KeAcquireGuardedMutex(&ViewLock);
1241
1242 ASSERT(FileObject->SectionObjectPointer);
1243 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1244 if (Bcb == NULL)
1245 {
1246 Status = STATUS_UNSUCCESSFUL;
1247 }
1248 else
1249 {
1250 if (FileObject->PrivateCacheMap == NULL)
1251 {
1252 FileObject->PrivateCacheMap = Bcb;
1253 Bcb->RefCount++;
1254 }
1255 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1256 {
1257 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1258 Bcb->BcbRemoveListEntry.Flink = NULL;
1259 }
1260 Status = STATUS_SUCCESS;
1261 }
1262 KeReleaseGuardedMutex(&ViewLock);
1263
1264 return Status;
1265 }
1266
1267
1268 NTSTATUS NTAPI
1269 CcRosInitializeFileCache(PFILE_OBJECT FileObject,
1270 ULONG CacheSegmentSize,
1271 PCACHE_MANAGER_CALLBACKS CallBacks,
1272 PVOID LazyWriterContext)
1273 /*
1274 * FUNCTION: Initializes a BCB for a file object
1275 */
1276 {
1277 PBCB Bcb;
1278
1279 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1280 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1281 FileObject, Bcb, CacheSegmentSize);
1282
1283 KeAcquireGuardedMutex(&ViewLock);
1284 if (Bcb == NULL)
1285 {
1286 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1287 if (Bcb == NULL)
1288 {
1289 KeReleaseGuardedMutex(&ViewLock);
1290 return(STATUS_UNSUCCESSFUL);
1291 }
1292 memset(Bcb, 0, sizeof(BCB));
1293 ObReferenceObjectByPointer(FileObject,
1294 FILE_ALL_ACCESS,
1295 NULL,
1296 KernelMode);
1297 Bcb->FileObject = FileObject;
1298 Bcb->CacheSegmentSize = CacheSegmentSize;
1299 Bcb->Callbacks = CallBacks;
1300 Bcb->LazyWriteContext = LazyWriterContext;
1301 if (FileObject->FsContext)
1302 {
1303 Bcb->AllocationSize =
1304 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1305 Bcb->FileSize =
1306 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1307 }
1308 KeInitializeSpinLock(&Bcb->BcbLock);
1309 InitializeListHead(&Bcb->BcbSegmentListHead);
1310 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1311 }
1312 if (FileObject->PrivateCacheMap == NULL)
1313 {
1314 FileObject->PrivateCacheMap = Bcb;
1315 Bcb->RefCount++;
1316 }
1317 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1318 {
1319 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1320 Bcb->BcbRemoveListEntry.Flink = NULL;
1321 }
1322 KeReleaseGuardedMutex(&ViewLock);
1323
1324 return(STATUS_SUCCESS);
1325 }
1326
1327 /*
1328 * @implemented
1329 */
1330 PFILE_OBJECT NTAPI
1331 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1332 {
1333 PBCB Bcb;
1334 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1335 {
1336 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1337 ASSERT(Bcb);
1338 return Bcb->FileObject;
1339 }
1340 return NULL;
1341 }
1342
1343 VOID
1344 INIT_FUNCTION
1345 NTAPI
1346 CcInitView(VOID)
1347 {
1348 #ifdef CACHE_BITMAP
1349 PMEMORY_AREA marea;
1350 PVOID Buffer;
1351 PHYSICAL_ADDRESS BoundaryAddressMultiple;
1352 #endif
1353
1354 DPRINT("CcInitView()\n");
1355 #ifdef CACHE_BITMAP
1356 BoundaryAddressMultiple.QuadPart = 0;
1357 CiCacheSegMappingRegionHint = 0;
1358 CiCacheSegMappingRegionBase = NULL;
1359
1360 MmLockAddressSpace(MmGetKernelAddressSpace());
1361
1362 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1363 MEMORY_AREA_CACHE_SEGMENT,
1364 &CiCacheSegMappingRegionBase,
1365 CI_CACHESEG_MAPPING_REGION_SIZE,
1366 PAGE_READWRITE,
1367 &marea,
1368 FALSE,
1369 0,
1370 BoundaryAddressMultiple);
1371 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1372 if (!NT_SUCCESS(Status))
1373 {
1374 KeBugCheck(CACHE_MANAGER);
1375 }
1376
1377 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1378 if (!Buffer)
1379 {
1380 KeBugCheck(CACHE_MANAGER);
1381 }
1382
1383 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1384 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1385
1386 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1387 #endif
1388 InitializeListHead(&CacheSegmentListHead);
1389 InitializeListHead(&DirtySegmentListHead);
1390 InitializeListHead(&CacheSegmentLRUListHead);
1391 InitializeListHead(&ClosedListHead);
1392 KeInitializeGuardedMutex(&ViewLock);
1393 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1394 NULL,
1395 NULL,
1396 0,
1397 sizeof(INTERNAL_BCB),
1398 TAG_IBCB,
1399 20);
1400 ExInitializeNPagedLookasideList (&BcbLookasideList,
1401 NULL,
1402 NULL,
1403 0,
1404 sizeof(BCB),
1405 TAG_BCB,
1406 20);
1407 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1408 NULL,
1409 NULL,
1410 0,
1411 sizeof(CACHE_SEGMENT),
1412 TAG_CSEG,
1413 20);
1414
1415 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1416
1417 CcInitCacheZeroPage();
1418
1419 }
1420
1421 /* EOF */
1422
1423
1424
1425
1426
1427
1428
1429