/* [NTOSKRNL] — [reactos.git] / reactos / ntoskrnl / cc / view.c (web-listing breadcrumb) */
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/cc/view.c
5 * PURPOSE: Cache manager
6 *
7 * PROGRAMMERS: David Welch (welch@mcmail.com)
8 */
9
10 /* NOTES **********************************************************************
11 *
12 * This is not the NT implementation of a file cache nor anything much like
13 * it.
14 *
15 * The general procedure for a filesystem to implement a read or write
16 * dispatch routine is as follows
17 *
18 * (1) If caching for the FCB hasn't been initiated then so do by calling
19 * CcInitializeFileCache.
20 *
21 * (2) For each 4k region which is being read or written obtain a cache page
22 * by calling CcRequestCachePage.
23 *
24 * (3) If either the page is being read or not completely written, and it is
25 * not up to date then read its data from the underlying medium. If the read
26 * fails then call CcReleaseCachePage with VALID as FALSE and return a error.
27 *
28 * (4) Copy the data into or out of the page as necessary.
29 *
30 * (5) Release the cache page
31 */
32 /* INCLUDES ******************************************************************/
33
34 #include <ntoskrnl.h>
35 #define NDEBUG
36 #include <debug.h>
37
38 #if defined (ALLOC_PRAGMA)
39 #pragma alloc_text(INIT, CcInitView)
40 #endif
41
42 /* GLOBALS *******************************************************************/
43
44 /*
45 * If CACHE_BITMAP is defined, the cache manager uses one large memory region
46 * within the kernel address space and allocate/deallocate space from this block
47 * over a bitmap. If CACHE_BITMAP is used, the size of the mdl mapping region
48 * must be reduced (ntoskrnl\mm\mdl.c, MI_MDLMAPPING_REGION_SIZE).
49 */
50 //#define CACHE_BITMAP
51
/* Global list of cache segments that currently hold unwritten (dirty) data. */
static LIST_ENTRY DirtySegmentListHead;
/* Global list of every live cache segment. */
static LIST_ENTRY CacheSegmentListHead;
/* LRU ordering of cache segments; tail is most recently used. */
static LIST_ENTRY CacheSegmentLRUListHead;
/* BCBs with zero references, kept around for lazy close/reuse. */
static LIST_ENTRY ClosedListHead;
/* Number of pages currently dirty across all segments (ViewLock-protected). */
ULONG DirtyPageCount=0;

/* Master lock for the cache manager's global lists and counters. */
KGUARDED_MUTEX ViewLock;

#ifdef CACHE_BITMAP
/* Size of the single reserved kernel VA region used for segment mappings. */
#define CI_CACHESEG_MAPPING_REGION_SIZE (128*1024*1024)

static PVOID CiCacheSegMappingRegionBase = NULL;
/* Bitmap of allocated pages inside the mapping region. */
static RTL_BITMAP CiCacheSegMappingRegionAllocMap;
/* Search hint: lowest offset that may still be free. */
static ULONG CiCacheSegMappingRegionHint;
static KSPIN_LOCK CiCacheSegMappingRegionLock;
#endif

/* Lookaside lists for fast allocation of the cache manager's structures. */
NPAGED_LOOKASIDE_LIST iBcbLookasideList;
static NPAGED_LOOKASIDE_LIST BcbLookasideList;
static NPAGED_LOOKASIDE_LIST CacheSegLookasideList;
72
#if DBG
/* Debug build: increment a segment's reference count, logging the transition
 * (with caller file/line) when tracing is enabled on the owning BCB. */
static void CcRosCacheSegmentIncRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    ++cs->ReferenceCount;
    if ( cs->Bcb->Trace )
    {
        DbgPrint("(%s:%i) CacheSegment %p ++RefCount=%d, Dirty %d, PageOut %d\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}
/* Debug build: decrement a segment's reference count with the same tracing. */
static void CcRosCacheSegmentDecRefCount_ ( PCACHE_SEGMENT cs, const char* file, int line )
{
    --cs->ReferenceCount;
    if ( cs->Bcb->Trace )
    {
        DbgPrint("(%s:%i) CacheSegment %p --RefCount=%d, Dirty %d, PageOut %d\n",
                 file, line, cs, cs->ReferenceCount, cs->Dirty, cs->PageOut );
    }
}
/* Route all ref-count changes through the traced helpers on debug builds. */
#define CcRosCacheSegmentIncRefCount(cs) CcRosCacheSegmentIncRefCount_(cs,__FILE__,__LINE__)
#define CcRosCacheSegmentDecRefCount(cs) CcRosCacheSegmentDecRefCount_(cs,__FILE__,__LINE__)
#else
/* Release build: plain (non-atomic) increment/decrement — callers must hold
 * the owning BCB's spinlock. */
#define CcRosCacheSegmentIncRefCount(cs) (++((cs)->ReferenceCount))
#define CcRosCacheSegmentDecRefCount(cs) (--((cs)->ReferenceCount))
#endif
98
99 NTSTATUS
100 CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg);
101
102
103 /* FUNCTIONS *****************************************************************/
104
105 VOID
106 NTAPI
107 CcRosTraceCacheMap (
108 PBCB Bcb,
109 BOOLEAN Trace )
110 {
111 #if DBG
112 KIRQL oldirql;
113 PLIST_ENTRY current_entry;
114 PCACHE_SEGMENT current;
115
116 if ( !Bcb )
117 return;
118
119 Bcb->Trace = Trace;
120
121 if ( Trace )
122 {
123 DPRINT1("Enabling Tracing for CacheMap 0x%p:\n", Bcb );
124
125 KeAcquireGuardedMutex(&ViewLock);
126 KeAcquireSpinLock(&Bcb->BcbLock, &oldirql);
127
128 current_entry = Bcb->BcbSegmentListHead.Flink;
129 while (current_entry != &Bcb->BcbSegmentListHead)
130 {
131 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
132 current_entry = current_entry->Flink;
133
134 DPRINT1(" CacheSegment 0x%p enabled, RefCount %d, Dirty %d, PageOut %d\n",
135 current, current->ReferenceCount, current->Dirty, current->PageOut );
136 }
137 KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
138 KeReleaseGuardedMutex(&ViewLock);
139 }
140 else
141 {
142 DPRINT1("Disabling Tracing for CacheMap 0x%p:\n", Bcb );
143 }
144
145 #else
146 Bcb = Bcb;
147 Trace = Trace;
148 #endif
149 }
150
151 NTSTATUS
152 NTAPI
153 CcRosFlushCacheSegment(PCACHE_SEGMENT CacheSegment)
154 {
155 NTSTATUS Status;
156 KIRQL oldIrql;
157
158 Status = WriteCacheSegment(CacheSegment);
159 if (NT_SUCCESS(Status))
160 {
161 KeAcquireGuardedMutex(&ViewLock);
162 KeAcquireSpinLock(&CacheSegment->Bcb->BcbLock, &oldIrql);
163
164 CacheSegment->Dirty = FALSE;
165 RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
166 DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
167 CcRosCacheSegmentDecRefCount ( CacheSegment );
168
169 KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
170 KeReleaseGuardedMutex(&ViewLock);
171 }
172
173 return(Status);
174 }
175
/*
 * Flush up to Target dirty pages to disk, returning the number of pages
 * actually written via *Count.
 *
 * A 4-slot rolling window (WriteCount) spreads the backlog of dirty pages
 * over roughly four invocations, so each call writes about a quarter of the
 * outstanding dirty data in addition to whatever Target demands.
 *
 * Locking: holds ViewLock while scanning the dirty list, drops it around the
 * actual write, and re-acquires it afterwards — the scan therefore restarts
 * from the list head after every flush.
 */
NTSTATUS
NTAPI
CcRosFlushDirtyPages(ULONG Target, PULONG Count)
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    BOOLEAN Locked;
    NTSTATUS Status;
    /* Rolling write-quota window, persists across calls (ViewLock-protected). */
    static ULONG WriteCount[4] = {0, 0, 0, 0};
    ULONG NewTarget;

    DPRINT("CcRosFlushDirtyPages(Target %d)\n", Target);

    (*Count) = 0;

    KeEnterCriticalRegion();
    KeAcquireGuardedMutex(&ViewLock);

    /* Age the quota window by one slot. */
    WriteCount[0] = WriteCount[1];
    WriteCount[1] = WriteCount[2];
    WriteCount[2] = WriteCount[3];
    WriteCount[3] = 0;

    NewTarget = WriteCount[0] + WriteCount[1] + WriteCount[2];

    if (NewTarget < DirtyPageCount)
    {
        /* Distribute the uncovered dirty backlog evenly over all four slots
         * (rounded up), so it drains over the next few calls. */
        NewTarget = (DirtyPageCount - NewTarget + 3) / 4;
        WriteCount[0] += NewTarget;
        WriteCount[1] += NewTarget;
        WriteCount[2] += NewTarget;
        WriteCount[3] += NewTarget;
    }

    /* This call's quota is the oldest slot. */
    NewTarget = WriteCount[0];

    Target = max(NewTarget, Target);

    current_entry = DirtySegmentListHead.Flink;
    if (current_entry == &DirtySegmentListHead)
    {
        DPRINT("No Dirty pages\n");
    }

    while (current_entry != &DirtySegmentListHead && Target > 0)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    DirtySegmentListEntry);
        current_entry = current_entry->Flink;

        /* Let the FS veto the lazy write (e.g. it holds resources); skip
         * this segment if it refuses. */
        Locked = current->Bcb->Callbacks->AcquireForLazyWrite(
                     current->Bcb->LazyWriteContext, TRUE);
        if (!Locked)
        {
            continue;
        }

        /* Take the per-segment mutex before touching its data. */
        KeWaitForSingleObject(&current->Mutex,
                              Executive,
                              KernelMode,
                              FALSE,
                              NULL);

        ASSERT(current->Dirty);
        /* RefCount > 1 means someone besides the dirty reference is using
         * the segment right now — don't flush under them. */
        if (current->ReferenceCount > 1)
        {
            KeReleaseMutex(&current->Mutex, 0);
            current->Bcb->Callbacks->ReleaseFromLazyWrite(
                current->Bcb->LazyWriteContext);
            continue;
        }

        PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;

        /* Drop ViewLock for the (potentially blocking) disk write. */
        KeReleaseGuardedMutex(&ViewLock);

        Status = CcRosFlushCacheSegment(current);

        KeReleaseMutex(&current->Mutex, 0);
        current->Bcb->Callbacks->ReleaseFromLazyWrite(
            current->Bcb->LazyWriteContext);

        if (!NT_SUCCESS(Status) && (Status != STATUS_END_OF_FILE))
        {
            DPRINT1("CC: Failed to flush cache segment.\n");
        }
        else
        {
            (*Count) += PagesPerSegment;
            Target -= PagesPerSegment;
        }

        /* The list may have changed while unlocked: restart from the head. */
        KeAcquireGuardedMutex(&ViewLock);
        current_entry = DirtySegmentListHead.Flink;
    }

    if (*Count < NewTarget)
    {
        /* Quota not met — roll the remainder forward into the next slot. */
        WriteCount[1] += (NewTarget - *Count);
    }

    KeReleaseGuardedMutex(&ViewLock);
    KeLeaveCriticalRegion();

    DPRINT("CcRosFlushDirtyPages() finished\n");
    return(STATUS_SUCCESS);
}
284
NTSTATUS
CcRosTrimCache(ULONG Target, ULONG Priority, PULONG NrFreed)
/*
 * FUNCTION: Try to free some memory from the file cache.
 * ARGUMENTS:
 * Target - The number of pages to be freed.
 * Priority - The priority of free (currently unused).
 * NrFreed - Points to a variable where the number of pages
 * actually freed is returned.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    ULONG PagesPerSegment;
    ULONG PagesFreed;
    KIRQL oldIrql;
    LIST_ENTRY FreeList;       /* unreferenced segments collected for freeing */
    PFN_NUMBER Page;
    ULONG i;
    BOOLEAN FlushedPages = FALSE;   /* flush dirty pages at most once */

    DPRINT("CcRosTrimCache(Target %d)\n", Target);

    InitializeListHead(&FreeList);

    *NrFreed = 0;

retry:
    KeAcquireGuardedMutex(&ViewLock);

    /* Walk the LRU list front-to-back (least recently used first). */
    current_entry = CacheSegmentLRUListHead.Flink;
    while (current_entry != &CacheSegmentLRUListHead)
    {
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    CacheSegmentLRUListEntry);
        current_entry = current_entry->Flink;

        KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);

        /* Reference the cache segment */
        CcRosCacheSegmentIncRefCount(current);

        /* Check if it's mapped and not dirty */
        if (current->MappedCount > 0 && !current->Dirty)
        {
            /* We have to break these locks because Cc sucks */
            KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
            KeReleaseGuardedMutex(&ViewLock);

            /* Page out the segment */
            for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
            {
                Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);

                MmPageOutPhysicalAddress(Page);
            }

            /* Reacquire the locks */
            /* NOTE(review): current_entry was captured before the locks were
             * dropped; the LRU list may have changed meanwhile, so the saved
             * Flink could be stale — confirm against later revisions. */
            KeAcquireGuardedMutex(&ViewLock);
            KeAcquireSpinLock(&current->Bcb->BcbLock, &oldIrql);
        }

        /* Dereference the cache segment */
        CcRosCacheSegmentDecRefCount(current);

        /* Check if we can free this entry now */
        if (current->ReferenceCount == 0)
        {
            ASSERT(!current->Dirty);
            ASSERT(!current->MappedCount);

            /* Unhook from every list; reuse the BcbSegmentListEntry link to
             * queue the segment on our private free list. */
            RemoveEntryList(&current->BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);

            /* Calculate how many pages we freed for Mm */
            PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
            PagesFreed = min(PagesPerSegment, Target);
            Target -= PagesFreed;
            (*NrFreed) += PagesFreed;
        }

        KeReleaseSpinLock(&current->Bcb->BcbLock, oldIrql);
    }

    KeReleaseGuardedMutex(&ViewLock);

    /* Try flushing pages if we haven't met our target */
    if (Target > 0 && !FlushedPages)
    {
        /* Flush dirty pages to disk */
        CcRosFlushDirtyPages(Target, &PagesFreed);
        FlushedPages = TRUE;

        /* We can only swap as many pages as we flushed */
        if (PagesFreed < Target) Target = PagesFreed;

        /* Check if we flushed anything */
        if (PagesFreed != 0)
        {
            /* Try again after flushing dirty pages */
            DPRINT("Flushed %d dirty cache pages to disk\n", PagesFreed);
            goto retry;
        }
    }

    /* Free the collected segments outside of any lock. */
    while (!IsListEmpty(&FreeList))
    {
        current_entry = RemoveHeadList(&FreeList);
        current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
                                    BcbSegmentListEntry);
        CcRosInternalFreeCacheSegment(current);
    }

    DPRINT("Evicted %d cache pages\n", (*NrFreed));

    return(STATUS_SUCCESS);
}
404
/*
 * Release a cache segment previously obtained (and mutex-locked) by the
 * caller. Updates validity/dirty state, maintains the dirty list and the
 * reference-count invariants, and finally drops the segment mutex.
 *
 * Reference accounting: the caller's lookup reference is dropped here, but a
 * new reference is retained when the segment becomes mapped for the first
 * time, and another when it transitions to dirty — those references are owned
 * by the mapped/dirty states and released elsewhere.
 */
NTSTATUS
NTAPI
CcRosReleaseCacheSegment(PBCB Bcb,
                         PCACHE_SEGMENT CacheSeg,
                         BOOLEAN Valid,
                         BOOLEAN Dirty,
                         BOOLEAN Mapped)
{
    BOOLEAN WasDirty = CacheSeg->Dirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcReleaseCacheSegment(Bcb 0x%p, CacheSeg 0x%p, Valid %d)\n",
           Bcb, CacheSeg, Valid);

    CacheSeg->Valid = Valid;
    /* Dirty is sticky: once set it stays set until flushed. */
    CacheSeg->Dirty = CacheSeg->Dirty || Dirty;

    KeAcquireGuardedMutex(&ViewLock);
    if (!WasDirty && CacheSeg->Dirty)
    {
        /* First transition to dirty: enter the dirty bookkeeping. */
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
    }

    if (Mapped)
    {
        CacheSeg->MappedCount++;
    }
    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
    /* Drop the reference the caller acquired at lookup/creation time. */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    if (Mapped && CacheSeg->MappedCount == 1)
    {
        /* First mapping holds its own reference. */
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    if (!WasDirty && CacheSeg->Dirty)
    {
        /* The dirty state holds its own reference too. */
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    KeReleaseGuardedMutex(&ViewLock);
    /* Finally release the per-segment mutex held since lookup. */
    KeReleaseMutex(&CacheSeg->Mutex, 0);

    return(STATUS_SUCCESS);
}
451
452 /* Returns with Cache Segment Lock Held! */
453 PCACHE_SEGMENT
454 NTAPI
455 CcRosLookupCacheSegment(PBCB Bcb, ULONG FileOffset)
456 {
457 PLIST_ENTRY current_entry;
458 PCACHE_SEGMENT current;
459 KIRQL oldIrql;
460
461 ASSERT(Bcb);
462
463 DPRINT("CcRosLookupCacheSegment(Bcb -x%p, FileOffset %d)\n", Bcb, FileOffset);
464
465 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
466 current_entry = Bcb->BcbSegmentListHead.Flink;
467 while (current_entry != &Bcb->BcbSegmentListHead)
468 {
469 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
470 BcbSegmentListEntry);
471 if (current->FileOffset <= FileOffset &&
472 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
473 {
474 CcRosCacheSegmentIncRefCount(current);
475 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
476 KeWaitForSingleObject(&current->Mutex,
477 Executive,
478 KernelMode,
479 FALSE,
480 NULL);
481 return(current);
482 }
483 current_entry = current_entry->Flink;
484 }
485 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
486 return(NULL);
487 }
488
/*
 * Mark the cache segment covering FileOffset as dirty and move it to the
 * most-recently-used end of the LRU list.
 *
 * Reference note: CcRosLookupCacheSegment returns the segment referenced and
 * mutex-locked. If the segment was clean, that lookup reference is kept and
 * becomes the dirty-state reference; if it was already dirty, the lookup
 * reference is dropped again. Bugchecks if no segment covers the offset.
 */
NTSTATUS
NTAPI
CcRosMarkDirtyCacheSegment(PBCB Bcb, ULONG FileOffset)
{
    PCACHE_SEGMENT CacheSeg;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosMarkDirtyCacheSegment(Bcb 0x%p, FileOffset %d)\n", Bcb, FileOffset);

    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        /* Callers must only mark offsets that are actually cached. */
        KeBugCheck(CACHE_MANAGER);
    }
    if (!CacheSeg->Dirty)
    {
        /* First transition to dirty: enter the dirty bookkeeping. The lookup
         * reference is deliberately NOT dropped — it now belongs to the
         * dirty state and is released by CcRosFlushCacheSegment. */
        KeAcquireGuardedMutex(&ViewLock);
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
        KeReleaseGuardedMutex(&ViewLock);
    }
    else
    {
        /* Already dirty: release the reference taken by the lookup. */
        KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
        CcRosCacheSegmentDecRefCount(CacheSeg);
        KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
    }

    KeAcquireGuardedMutex(&ViewLock);

    /* Move to the tail of the LRU list */
    RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
    InsertTailList(&CacheSegmentLRUListHead, &CacheSeg->CacheSegmentLRUListEntry);

    KeReleaseGuardedMutex(&ViewLock);

    CacheSeg->Dirty = TRUE;
    KeReleaseMutex(&CacheSeg->Mutex, 0);

    return(STATUS_SUCCESS);
}
532
/*
 * Drop one mapping of the cache segment covering FileOffset, optionally
 * marking it dirty at the same time.
 *
 * Reference accounting mirrors CcRosReleaseCacheSegment: the lookup reference
 * is dropped; a clean→dirty transition retains one reference for the dirty
 * state; the last unmapping releases the reference held by the mapped state.
 * Returns STATUS_UNSUCCESSFUL if no segment covers the offset.
 */
NTSTATUS
NTAPI
CcRosUnmapCacheSegment(PBCB Bcb, ULONG FileOffset, BOOLEAN NowDirty)
{
    PCACHE_SEGMENT CacheSeg;
    BOOLEAN WasDirty;
    KIRQL oldIrql;

    ASSERT(Bcb);

    DPRINT("CcRosUnmapCacheSegment(Bcb 0x%p, FileOffset %d, NowDirty %d)\n",
           Bcb, FileOffset, NowDirty);

    /* Returns the segment referenced and mutex-locked, or NULL. */
    CacheSeg = CcRosLookupCacheSegment(Bcb, FileOffset);
    if (CacheSeg == NULL)
    {
        return(STATUS_UNSUCCESSFUL);
    }

    WasDirty = CacheSeg->Dirty;
    /* Dirty is sticky. */
    CacheSeg->Dirty = CacheSeg->Dirty || NowDirty;

    CacheSeg->MappedCount--;

    if (!WasDirty && NowDirty)
    {
        /* First transition to dirty: enter the dirty bookkeeping. */
        KeAcquireGuardedMutex(&ViewLock);
        InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
        DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
        KeReleaseGuardedMutex(&ViewLock);
    }

    KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
    /* Drop the lookup reference. */
    CcRosCacheSegmentDecRefCount(CacheSeg);
    if (!WasDirty && NowDirty)
    {
        /* The dirty state holds its own reference. */
        CcRosCacheSegmentIncRefCount(CacheSeg);
    }
    if (CacheSeg->MappedCount == 0)
    {
        /* Last mapping gone: release the mapped-state reference. */
        CcRosCacheSegmentDecRefCount(CacheSeg);
    }
    KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);

    KeReleaseMutex(&CacheSeg->Mutex, 0);

    return(STATUS_SUCCESS);
}
581
582 static
583 NTSTATUS
584 CcRosCreateCacheSegment(PBCB Bcb,
585 ULONG FileOffset,
586 PCACHE_SEGMENT* CacheSeg)
587 {
588 PCACHE_SEGMENT current;
589 PCACHE_SEGMENT previous;
590 PLIST_ENTRY current_entry;
591 NTSTATUS Status;
592 KIRQL oldIrql;
593 #ifdef CACHE_BITMAP
594 ULONG StartingOffset;
595 #endif
596 PHYSICAL_ADDRESS BoundaryAddressMultiple;
597
598 ASSERT(Bcb);
599
600 DPRINT("CcRosCreateCacheSegment()\n");
601
602 BoundaryAddressMultiple.QuadPart = 0;
603 if (FileOffset >= Bcb->FileSize.u.LowPart)
604 {
605 CacheSeg = NULL;
606 return STATUS_INVALID_PARAMETER;
607 }
608
609 current = ExAllocateFromNPagedLookasideList(&CacheSegLookasideList);
610 current->Valid = FALSE;
611 current->Dirty = FALSE;
612 current->PageOut = FALSE;
613 current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
614 current->Bcb = Bcb;
615 #if DBG
616 if ( Bcb->Trace )
617 {
618 DPRINT1("CacheMap 0x%p: new Cache Segment: 0x%p\n", Bcb, current );
619 }
620 #endif
621 current->MappedCount = 0;
622 current->DirtySegmentListEntry.Flink = NULL;
623 current->DirtySegmentListEntry.Blink = NULL;
624 current->ReferenceCount = 1;
625 KeInitializeMutex(&current->Mutex, 0);
626 KeWaitForSingleObject(&current->Mutex,
627 Executive,
628 KernelMode,
629 FALSE,
630 NULL);
631 KeAcquireGuardedMutex(&ViewLock);
632
633 *CacheSeg = current;
634 /* There is window between the call to CcRosLookupCacheSegment
635 * and CcRosCreateCacheSegment. We must check if a segment on
636 * the fileoffset exist. If there exist a segment, we release
637 * our new created segment and return the existing one.
638 */
639 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
640 current_entry = Bcb->BcbSegmentListHead.Flink;
641 previous = NULL;
642 while (current_entry != &Bcb->BcbSegmentListHead)
643 {
644 current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
645 BcbSegmentListEntry);
646 if (current->FileOffset <= FileOffset &&
647 (current->FileOffset + Bcb->CacheSegmentSize) > FileOffset)
648 {
649 CcRosCacheSegmentIncRefCount(current);
650 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
651 #if DBG
652 if ( Bcb->Trace )
653 {
654 DPRINT1("CacheMap 0x%p: deleting newly created Cache Segment 0x%p ( found existing one 0x%p )\n",
655 Bcb,
656 (*CacheSeg),
657 current );
658 }
659 #endif
660 KeReleaseMutex(&(*CacheSeg)->Mutex, 0);
661 KeReleaseGuardedMutex(&ViewLock);
662 ExFreeToNPagedLookasideList(&CacheSegLookasideList, *CacheSeg);
663 *CacheSeg = current;
664 KeWaitForSingleObject(&current->Mutex,
665 Executive,
666 KernelMode,
667 FALSE,
668 NULL);
669 return STATUS_SUCCESS;
670 }
671 if (current->FileOffset < FileOffset)
672 {
673 if (previous == NULL)
674 {
675 previous = current;
676 }
677 else
678 {
679 if (previous->FileOffset < current->FileOffset)
680 {
681 previous = current;
682 }
683 }
684 }
685 current_entry = current_entry->Flink;
686 }
687 /* There was no existing segment. */
688 current = *CacheSeg;
689 if (previous)
690 {
691 InsertHeadList(&previous->BcbSegmentListEntry, &current->BcbSegmentListEntry);
692 }
693 else
694 {
695 InsertHeadList(&Bcb->BcbSegmentListHead, &current->BcbSegmentListEntry);
696 }
697 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
698 InsertTailList(&CacheSegmentListHead, &current->CacheSegmentListEntry);
699 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
700 KeReleaseGuardedMutex(&ViewLock);
701 #ifdef CACHE_BITMAP
702 KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
703
704 StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
705
706 if (StartingOffset == 0xffffffff)
707 {
708 DPRINT1("Out of CacheSeg mapping space\n");
709 KeBugCheck(CACHE_MANAGER);
710 }
711
712 current->BaseAddress = CiCacheSegMappingRegionBase + StartingOffset * PAGE_SIZE;
713
714 if (CiCacheSegMappingRegionHint == StartingOffset)
715 {
716 CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
717 }
718
719 KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
720 #else
721 MmLockAddressSpace(MmGetKernelAddressSpace());
722 current->BaseAddress = NULL;
723 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
724 0, // nothing checks for cache_segment mareas, so set to 0
725 &current->BaseAddress,
726 Bcb->CacheSegmentSize,
727 PAGE_READWRITE,
728 (PMEMORY_AREA*)&current->MemoryArea,
729 FALSE,
730 0,
731 BoundaryAddressMultiple);
732 MmUnlockAddressSpace(MmGetKernelAddressSpace());
733 if (!NT_SUCCESS(Status))
734 {
735 KeBugCheck(CACHE_MANAGER);
736 }
737 #endif
738
739 /* Create a virtual mapping for this memory area */
740 MI_SET_USAGE(MI_USAGE_CACHE);
741 #if MI_TRACE_PFNS
742 PWCHAR pos = NULL;
743 ULONG len = 0;
744 if ((Bcb->FileObject) && (Bcb->FileObject->FileName.Buffer))
745 {
746 pos = wcsrchr(Bcb->FileObject->FileName.Buffer, '\\');
747 len = wcslen(pos) * sizeof(WCHAR);
748 if (pos) snprintf(MI_PFN_CURRENT_PROCESS_NAME, min(16, len), "%S", pos);
749 }
750 #endif
751
752 MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
753 MC_CACHE, PAGE_READWRITE);
754
755 return(STATUS_SUCCESS);
756 }
757
/*
 * Build a singly linked chain (via NextInChain) of cache segments covering
 * [FileOffset, FileOffset + Length), creating segments where none exist.
 * Length is rounded up to a whole number of segments; the head of the chain
 * is returned via *CacheSeg. Every segment in the chain comes back referenced
 * and mutex-locked (by lookup or creation).
 *
 * NOTE(review): the temporary pointer array lives on the kernel stack via
 * _alloca, so a large Length risks stack exhaustion; and the status of
 * CcRosCreateCacheSegment is ignored, so a failed create puts a stale/NULL
 * pointer in the chain — confirm against callers' length limits.
 */
NTSTATUS
NTAPI
CcRosGetCacheSegmentChain(PBCB Bcb,
                          ULONG FileOffset,
                          ULONG Length,
                          PCACHE_SEGMENT* CacheSeg)
{
    PCACHE_SEGMENT current;
    ULONG i;
    PCACHE_SEGMENT* CacheSegList;
    PCACHE_SEGMENT Previous = NULL;

    ASSERT(Bcb);

    DPRINT("CcRosGetCacheSegmentChain()\n");

    Length = ROUND_UP(Length, Bcb->CacheSegmentSize);

    /* Stack-allocated scratch array, one slot per segment in the range. */
    CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
                           (Length / Bcb->CacheSegmentSize));

    /*
     * Look for a cache segment already mapping the same data.
     */
    for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
    {
        ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
        current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
        if (current != NULL)
        {
            KeAcquireGuardedMutex(&ViewLock);

            /* Move to tail of LRU list */
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);

            KeReleaseGuardedMutex(&ViewLock);

            CacheSegList[i] = current;
        }
        else
        {
            /* No segment yet for this offset: create one (status unchecked,
             * see NOTE above). */
            CcRosCreateCacheSegment(Bcb, CurrentOffset, &current);
            CacheSegList[i] = current;
        }
    }

    /* Thread the collected segments into a NULL-terminated chain. */
    for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
    {
        if (i == 0)
        {
            *CacheSeg = CacheSegList[i];
            Previous = CacheSegList[i];
        }
        else
        {
            Previous->NextInChain = CacheSegList[i];
            Previous = CacheSegList[i];
        }
    }
    ASSERT(Previous);
    Previous->NextInChain = NULL;

    return(STATUS_SUCCESS);
}
823
824 NTSTATUS
825 NTAPI
826 CcRosGetCacheSegment(PBCB Bcb,
827 ULONG FileOffset,
828 PULONG BaseOffset,
829 PVOID* BaseAddress,
830 PBOOLEAN UptoDate,
831 PCACHE_SEGMENT* CacheSeg)
832 {
833 PCACHE_SEGMENT current;
834 NTSTATUS Status;
835
836 ASSERT(Bcb);
837
838 DPRINT("CcRosGetCacheSegment()\n");
839
840 /*
841 * Look for a cache segment already mapping the same data.
842 */
843 current = CcRosLookupCacheSegment(Bcb, FileOffset);
844 if (current == NULL)
845 {
846 /*
847 * Otherwise create a new segment.
848 */
849 Status = CcRosCreateCacheSegment(Bcb, FileOffset, &current);
850 if (!NT_SUCCESS(Status))
851 {
852 return Status;
853 }
854 }
855
856 KeAcquireGuardedMutex(&ViewLock);
857
858 /* Move to the tail of the LRU list */
859 RemoveEntryList(&current->CacheSegmentLRUListEntry);
860 InsertTailList(&CacheSegmentLRUListHead, &current->CacheSegmentLRUListEntry);
861
862 KeReleaseGuardedMutex(&ViewLock);
863
864 /*
865 * Return information about the segment to the caller.
866 */
867 *UptoDate = current->Valid;
868 *BaseAddress = current->BaseAddress;
869 DPRINT("*BaseAddress 0x%.8X\n", *BaseAddress);
870 *CacheSeg = current;
871 *BaseOffset = current->FileOffset;
872 return(STATUS_SUCCESS);
873 }
874
875 NTSTATUS NTAPI
876 CcRosRequestCacheSegment(PBCB Bcb,
877 ULONG FileOffset,
878 PVOID* BaseAddress,
879 PBOOLEAN UptoDate,
880 PCACHE_SEGMENT* CacheSeg)
881 /*
882 * FUNCTION: Request a page mapping for a BCB
883 */
884 {
885 ULONG BaseOffset;
886
887 ASSERT(Bcb);
888
889 if ((FileOffset % Bcb->CacheSegmentSize) != 0)
890 {
891 DPRINT1("Bad fileoffset %x should be multiple of %x",
892 FileOffset, Bcb->CacheSegmentSize);
893 KeBugCheck(CACHE_MANAGER);
894 }
895
896 return(CcRosGetCacheSegment(Bcb,
897 FileOffset,
898 &BaseOffset,
899 BaseAddress,
900 UptoDate,
901 CacheSeg));
902 }
#ifdef CACHE_BITMAP
#else
/* Callback handed to MmFreeMemoryArea: releases each physical page that was
 * backing a cache segment's memory area. Cache pages never have swap entries,
 * and must hold exactly one reference at teardown time. */
static VOID
CcFreeCachePage(PVOID Context, MEMORY_AREA* MemoryArea, PVOID Address,
                PFN_NUMBER Page, SWAPENTRY SwapEntry, BOOLEAN Dirty)
{
    ASSERT(SwapEntry == 0);
    if (Page != 0)
    {
        ASSERT(MmGetReferenceCountPage(Page) == 1);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }
}
#endif
NTSTATUS
CcRosInternalFreeCacheSegment(PCACHE_SEGMENT CacheSeg)
/*
 * FUNCTION: Releases a cache segment associated with a BCB
 *
 * Tears down the segment's virtual mapping and backing pages, then returns
 * the CACHE_SEGMENT structure to its lookaside list. The caller must already
 * have unlinked the segment from all lists.
 */
{
#ifdef CACHE_BITMAP
    ULONG i;
    ULONG RegionSize;
    ULONG Base;
    PFN_NUMBER Page;
    KIRQL oldIrql;
#endif
    DPRINT("Freeing cache segment 0x%p\n", CacheSeg);
#if DBG
    if ( CacheSeg->Bcb->Trace )
    {
        DPRINT1("CacheMap 0x%p: deleting Cache Segment: 0x%p\n", CacheSeg->Bcb, CacheSeg );
    }
#endif
#ifdef CACHE_BITMAP
    RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;

    /* Unmap all the pages. */
    for (i = 0; i < RegionSize; i++)
    {
        MmDeleteVirtualMapping(NULL,
                               CacheSeg->BaseAddress + (i * PAGE_SIZE),
                               FALSE,
                               NULL,
                               &Page);
        MmReleasePageMemoryConsumer(MC_CACHE, Page);
    }

    KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
    /* Deallocate all the pages used. */
    Base = (ULONG)(CacheSeg->BaseAddress - CiCacheSegMappingRegionBase) / PAGE_SIZE;

    RtlClearBits(&CiCacheSegMappingRegionAllocMap, Base, RegionSize);

    /* Allow the freed range to be found again by the allocator. */
    CiCacheSegMappingRegionHint = min (CiCacheSegMappingRegionHint, Base);

    KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
#else
    /* Destroy the segment's memory area; CcFreeCachePage releases each
     * backing page as the area is torn down. */
    MmLockAddressSpace(MmGetKernelAddressSpace());
    MmFreeMemoryArea(MmGetKernelAddressSpace(),
                     CacheSeg->MemoryArea,
                     CcFreeCachePage,
                     NULL);
    MmUnlockAddressSpace(MmGetKernelAddressSpace());
#endif
    ExFreeToNPagedLookasideList(&CacheSegLookasideList, CacheSeg);
    return(STATUS_SUCCESS);
}
971
972 NTSTATUS
973 NTAPI
974 CcRosFreeCacheSegment(PBCB Bcb, PCACHE_SEGMENT CacheSeg)
975 {
976 NTSTATUS Status;
977 KIRQL oldIrql;
978
979 ASSERT(Bcb);
980
981 DPRINT("CcRosFreeCacheSegment(Bcb 0x%p, CacheSeg 0x%p)\n",
982 Bcb, CacheSeg);
983
984 KeAcquireGuardedMutex(&ViewLock);
985 KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
986 RemoveEntryList(&CacheSeg->BcbSegmentListEntry);
987 RemoveEntryList(&CacheSeg->CacheSegmentListEntry);
988 RemoveEntryList(&CacheSeg->CacheSegmentLRUListEntry);
989 if (CacheSeg->Dirty)
990 {
991 RemoveEntryList(&CacheSeg->DirtySegmentListEntry);
992 DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
993
994 }
995 KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
996 KeReleaseGuardedMutex(&ViewLock);
997
998 Status = CcRosInternalFreeCacheSegment(CacheSeg);
999 return(Status);
1000 }
1001
1002 /*
1003 * @implemented
1004 */
VOID NTAPI
CcFlushCache(IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
             IN PLARGE_INTEGER FileOffset OPTIONAL,
             IN ULONG Length,
             OUT PIO_STATUS_BLOCK IoStatus)
/*
 * Flush the cached data of a file back to disk. With no FileOffset, the
 * whole file (up to FileSize.LowPart) is flushed; otherwise only Length
 * bytes starting at *FileOffset. IoStatus (optional) receives the last
 * failing flush status, or STATUS_INVALID_PARAMETER when the file has no
 * shared cache map.
 */
{
    PBCB Bcb;
    LARGE_INTEGER Offset;
    PCACHE_SEGMENT current;
    NTSTATUS Status;
    KIRQL oldIrql;

    DPRINT("CcFlushCache(SectionObjectPointers 0x%p, FileOffset 0x%p, Length %d, IoStatus 0x%p)\n",
           SectionObjectPointers, FileOffset, Length, IoStatus);

    if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
    {
        Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
        ASSERT(Bcb);
        if (FileOffset)
        {
            Offset = *FileOffset;
        }
        else
        {
            /* No offset given: flush the entire file. */
            Offset.QuadPart = (LONGLONG)0;
            Length = Bcb->FileSize.u.LowPart;
        }

        if (IoStatus)
        {
            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = 0;
        }

        /* Walk the range one cache segment at a time. */
        while (Length > 0)
        {
            /* Lookup returns the segment referenced and mutex-locked. */
            current = CcRosLookupCacheSegment (Bcb, Offset.u.LowPart);
            if (current != NULL)
            {
                if (current->Dirty)
                {
                    Status = CcRosFlushCacheSegment(current);
                    if (!NT_SUCCESS(Status) && IoStatus != NULL)
                    {
                        /* Record the failure but keep flushing the rest. */
                        IoStatus->Status = Status;
                    }
                }
                /* Drop the mutex and the lookup reference. */
                KeReleaseMutex(&current->Mutex, 0);
                KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
                CcRosCacheSegmentDecRefCount(current);
                KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);
            }

            /* Advance by one segment, clamping Length at zero. */
            Offset.QuadPart += Bcb->CacheSegmentSize;
            if (Length > Bcb->CacheSegmentSize)
            {
                Length -= Bcb->CacheSegmentSize;
            }
            else
            {
                Length = 0;
            }
        }
    }
    else
    {
        if (IoStatus)
        {
            IoStatus->Status = STATUS_INVALID_PARAMETER;
        }
    }
}
1078
NTSTATUS
NTAPI
CcRosDeleteFileCache(PFILE_OBJECT FileObject, PBCB Bcb)
/*
 * FUNCTION: Releases the BCB associated with a file object
 *
 * NOTE: entered and exited with ViewLock held — the RefCount++ before the
 * first KeReleaseGuardedMutex shows the caller owns the lock on entry; the
 * lock is dropped around the flush and re-acquired before returning.
 * If the reference count is still zero after the flush, all cache segments
 * are released and the BCB itself is freed.
 */
{
    PLIST_ENTRY current_entry;
    PCACHE_SEGMENT current;
    LIST_ENTRY FreeList;
    KIRQL oldIrql;

    ASSERT(Bcb);

    /* Pin the BCB across the unlocked flush below. */
    Bcb->RefCount++;
    KeReleaseGuardedMutex(&ViewLock);

    /* Write out any dirty data before tearing the map down. */
    CcFlushCache(FileObject->SectionObjectPointer, NULL, 0, NULL);

    KeAcquireGuardedMutex(&ViewLock);
    Bcb->RefCount--;
    if (Bcb->RefCount == 0)
    {
        /* Pull the BCB off the lazy-close list if it is queued there. */
        if (Bcb->BcbRemoveListEntry.Flink != NULL)
        {
            RemoveEntryList(&Bcb->BcbRemoveListEntry);
            Bcb->BcbRemoveListEntry.Flink = NULL;
        }

        FileObject->SectionObjectPointer->SharedCacheMap = NULL;

        /*
         * Release all cache segments.
         */
        InitializeListHead(&FreeList);
        KeAcquireSpinLock(&Bcb->BcbLock, &oldIrql);
        current_entry = Bcb->BcbSegmentListHead.Flink;
        while (!IsListEmpty(&Bcb->BcbSegmentListHead))
        {
            current_entry = RemoveTailList(&Bcb->BcbSegmentListHead);
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentListEntry);
            RemoveEntryList(&current->CacheSegmentLRUListEntry);
            if (current->Dirty)
            {
                /* Should not normally happen after the flush above. */
                RemoveEntryList(&current->DirtySegmentListEntry);
                DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
                DPRINT1("Freeing dirty segment\n");
            }
            /* Reuse the BcbSegmentListEntry link to queue for freeing. */
            InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
        }
#if DBG
        Bcb->Trace = FALSE;
#endif
        KeReleaseSpinLock(&Bcb->BcbLock, oldIrql);

        /* Free the collected segments outside of ViewLock. */
        KeReleaseGuardedMutex(&ViewLock);
        ObDereferenceObject (Bcb->FileObject);

        while (!IsListEmpty(&FreeList))
        {
            current_entry = RemoveTailList(&FreeList);
            current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT, BcbSegmentListEntry);
            CcRosInternalFreeCacheSegment(current);
        }
        ExFreeToNPagedLookasideList(&BcbLookasideList, Bcb);
        /* Re-acquire ViewLock to honor the caller's locking expectations. */
        KeAcquireGuardedMutex(&ViewLock);
    }
    return(STATUS_SUCCESS);
}
1149
1150 VOID
1151 NTAPI
1152 CcRosReferenceCache(PFILE_OBJECT FileObject)
1153 {
1154 PBCB Bcb;
1155 KeAcquireGuardedMutex(&ViewLock);
1156 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1157 ASSERT(Bcb);
1158 if (Bcb->RefCount == 0)
1159 {
1160 ASSERT(Bcb->BcbRemoveListEntry.Flink != NULL);
1161 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1162 Bcb->BcbRemoveListEntry.Flink = NULL;
1163
1164 }
1165 else
1166 {
1167 ASSERT(Bcb->BcbRemoveListEntry.Flink == NULL);
1168 }
1169 Bcb->RefCount++;
1170 KeReleaseGuardedMutex(&ViewLock);
1171 }
1172
1173 VOID
1174 NTAPI
1175 CcRosSetRemoveOnClose(PSECTION_OBJECT_POINTERS SectionObjectPointer)
1176 {
1177 PBCB Bcb;
1178 DPRINT("CcRosSetRemoveOnClose()\n");
1179 KeAcquireGuardedMutex(&ViewLock);
1180 Bcb = (PBCB)SectionObjectPointer->SharedCacheMap;
1181 if (Bcb)
1182 {
1183 Bcb->RemoveOnClose = TRUE;
1184 if (Bcb->RefCount == 0)
1185 {
1186 CcRosDeleteFileCache(Bcb->FileObject, Bcb);
1187 }
1188 }
1189 KeReleaseGuardedMutex(&ViewLock);
1190 }
1191
1192
1193 VOID
1194 NTAPI
1195 CcRosDereferenceCache(PFILE_OBJECT FileObject)
1196 {
1197 PBCB Bcb;
1198 KeAcquireGuardedMutex(&ViewLock);
1199 Bcb = (PBCB)FileObject->SectionObjectPointer->SharedCacheMap;
1200 ASSERT(Bcb);
1201 if (Bcb->RefCount > 0)
1202 {
1203 Bcb->RefCount--;
1204 if (Bcb->RefCount == 0)
1205 {
1206 MmFreeSectionSegments(Bcb->FileObject);
1207 CcRosDeleteFileCache(FileObject, Bcb);
1208 }
1209 }
1210 KeReleaseGuardedMutex(&ViewLock);
1211 }
1212
1213 NTSTATUS NTAPI
1214 CcRosReleaseFileCache(PFILE_OBJECT FileObject)
1215 /*
1216 * FUNCTION: Called by the file system when a handle to a file object
1217 * has been closed.
1218 */
1219 {
1220 PBCB Bcb;
1221
1222 KeAcquireGuardedMutex(&ViewLock);
1223
1224 if (FileObject->SectionObjectPointer->SharedCacheMap != NULL)
1225 {
1226 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1227 if (FileObject->PrivateCacheMap != NULL)
1228 {
1229 FileObject->PrivateCacheMap = NULL;
1230 if (Bcb->RefCount > 0)
1231 {
1232 Bcb->RefCount--;
1233 if (Bcb->RefCount == 0)
1234 {
1235 MmFreeSectionSegments(Bcb->FileObject);
1236 CcRosDeleteFileCache(FileObject, Bcb);
1237 }
1238 }
1239 }
1240 }
1241 KeReleaseGuardedMutex(&ViewLock);
1242 return(STATUS_SUCCESS);
1243 }
1244
1245 NTSTATUS
1246 NTAPI
1247 CcTryToInitializeFileCache(PFILE_OBJECT FileObject)
1248 {
1249 PBCB Bcb;
1250 NTSTATUS Status;
1251
1252 KeAcquireGuardedMutex(&ViewLock);
1253
1254 ASSERT(FileObject->SectionObjectPointer);
1255 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1256 if (Bcb == NULL)
1257 {
1258 Status = STATUS_UNSUCCESSFUL;
1259 }
1260 else
1261 {
1262 if (FileObject->PrivateCacheMap == NULL)
1263 {
1264 FileObject->PrivateCacheMap = Bcb;
1265 Bcb->RefCount++;
1266 }
1267 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1268 {
1269 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1270 Bcb->BcbRemoveListEntry.Flink = NULL;
1271 }
1272 Status = STATUS_SUCCESS;
1273 }
1274 KeReleaseGuardedMutex(&ViewLock);
1275
1276 return Status;
1277 }
1278
1279
1280 NTSTATUS NTAPI
1281 CcRosInitializeFileCache(PFILE_OBJECT FileObject,
1282 ULONG CacheSegmentSize,
1283 PCACHE_MANAGER_CALLBACKS CallBacks,
1284 PVOID LazyWriterContext)
1285 /*
1286 * FUNCTION: Initializes a BCB for a file object
1287 */
1288 {
1289 PBCB Bcb;
1290
1291 Bcb = FileObject->SectionObjectPointer->SharedCacheMap;
1292 DPRINT("CcRosInitializeFileCache(FileObject 0x%p, Bcb 0x%p, CacheSegmentSize %d)\n",
1293 FileObject, Bcb, CacheSegmentSize);
1294
1295 KeAcquireGuardedMutex(&ViewLock);
1296 if (Bcb == NULL)
1297 {
1298 Bcb = ExAllocateFromNPagedLookasideList(&BcbLookasideList);
1299 if (Bcb == NULL)
1300 {
1301 KeReleaseGuardedMutex(&ViewLock);
1302 return(STATUS_UNSUCCESSFUL);
1303 }
1304 memset(Bcb, 0, sizeof(BCB));
1305 ObReferenceObjectByPointer(FileObject,
1306 FILE_ALL_ACCESS,
1307 NULL,
1308 KernelMode);
1309 Bcb->FileObject = FileObject;
1310 Bcb->CacheSegmentSize = CacheSegmentSize;
1311 Bcb->Callbacks = CallBacks;
1312 Bcb->LazyWriteContext = LazyWriterContext;
1313 if (FileObject->FsContext)
1314 {
1315 Bcb->AllocationSize =
1316 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->AllocationSize;
1317 Bcb->FileSize =
1318 ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->FileSize;
1319 }
1320 KeInitializeSpinLock(&Bcb->BcbLock);
1321 InitializeListHead(&Bcb->BcbSegmentListHead);
1322 FileObject->SectionObjectPointer->SharedCacheMap = Bcb;
1323 }
1324 if (FileObject->PrivateCacheMap == NULL)
1325 {
1326 FileObject->PrivateCacheMap = Bcb;
1327 Bcb->RefCount++;
1328 }
1329 if (Bcb->BcbRemoveListEntry.Flink != NULL)
1330 {
1331 RemoveEntryList(&Bcb->BcbRemoveListEntry);
1332 Bcb->BcbRemoveListEntry.Flink = NULL;
1333 }
1334 KeReleaseGuardedMutex(&ViewLock);
1335
1336 return(STATUS_SUCCESS);
1337 }
1338
1339 /*
1340 * @implemented
1341 */
1342 PFILE_OBJECT NTAPI
1343 CcGetFileObjectFromSectionPtrs(IN PSECTION_OBJECT_POINTERS SectionObjectPointers)
1344 {
1345 PBCB Bcb;
1346 if (SectionObjectPointers && SectionObjectPointers->SharedCacheMap)
1347 {
1348 Bcb = (PBCB)SectionObjectPointers->SharedCacheMap;
1349 ASSERT(Bcb);
1350 return Bcb->FileObject;
1351 }
1352 return NULL;
1353 }
1354
1355 VOID
1356 INIT_FUNCTION
1357 NTAPI
1358 CcInitView(VOID)
1359 {
1360 #ifdef CACHE_BITMAP
1361 PMEMORY_AREA marea;
1362 PVOID Buffer;
1363 PHYSICAL_ADDRESS BoundaryAddressMultiple;
1364 #endif
1365
1366 DPRINT("CcInitView()\n");
1367 #ifdef CACHE_BITMAP
1368 BoundaryAddressMultiple.QuadPart = 0;
1369 CiCacheSegMappingRegionHint = 0;
1370 CiCacheSegMappingRegionBase = NULL;
1371
1372 MmLockAddressSpace(MmGetKernelAddressSpace());
1373
1374 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
1375 MEMORY_AREA_CACHE_SEGMENT,
1376 &CiCacheSegMappingRegionBase,
1377 CI_CACHESEG_MAPPING_REGION_SIZE,
1378 PAGE_READWRITE,
1379 &marea,
1380 FALSE,
1381 0,
1382 BoundaryAddressMultiple);
1383 MmUnlockAddressSpace(MmGetKernelAddressSpace());
1384 if (!NT_SUCCESS(Status))
1385 {
1386 KeBugCheck(CACHE_MANAGER);
1387 }
1388
1389 Buffer = ExAllocatePool(NonPagedPool, CI_CACHESEG_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
1390 if (!Buffer)
1391 {
1392 KeBugCheck(CACHE_MANAGER);
1393 }
1394
1395 RtlInitializeBitMap(&CiCacheSegMappingRegionAllocMap, Buffer, CI_CACHESEG_MAPPING_REGION_SIZE / PAGE_SIZE);
1396 RtlClearAllBits(&CiCacheSegMappingRegionAllocMap);
1397
1398 KeInitializeSpinLock(&CiCacheSegMappingRegionLock);
1399 #endif
1400 InitializeListHead(&CacheSegmentListHead);
1401 InitializeListHead(&DirtySegmentListHead);
1402 InitializeListHead(&CacheSegmentLRUListHead);
1403 InitializeListHead(&ClosedListHead);
1404 KeInitializeGuardedMutex(&ViewLock);
1405 ExInitializeNPagedLookasideList (&iBcbLookasideList,
1406 NULL,
1407 NULL,
1408 0,
1409 sizeof(INTERNAL_BCB),
1410 TAG_IBCB,
1411 20);
1412 ExInitializeNPagedLookasideList (&BcbLookasideList,
1413 NULL,
1414 NULL,
1415 0,
1416 sizeof(BCB),
1417 TAG_BCB,
1418 20);
1419 ExInitializeNPagedLookasideList (&CacheSegLookasideList,
1420 NULL,
1421 NULL,
1422 0,
1423 sizeof(CACHE_SEGMENT),
1424 TAG_CSEG,
1425 20);
1426
1427 MmInitializeMemoryConsumer(MC_CACHE, CcRosTrimCache);
1428
1429 CcInitCacheZeroPage();
1430
1431 }
1432
1433 /* EOF */
1434
1435
1436
1437
1438
1439
1440
1441